From 41d0adfbdd1599e40a862aa2e48b0bb42b9617c1 Mon Sep 17 00:00:00 2001 From: Zbigniew Kostrzewa Date: Wed, 28 Jun 2017 12:33:53 +0200 Subject: [PATCH 001/138] Add playbook_files to execute multiple ansible playbooks. --- provisioner/ansible-local/provisioner.go | 120 ++++++++++++++++-- provisioner/ansible-local/provisioner_test.go | 44 +++++++ 2 files changed, 150 insertions(+), 14 deletions(-) diff --git a/provisioner/ansible-local/provisioner.go b/provisioner/ansible-local/provisioner.go index 2f6a873f9..a25aca36c 100644 --- a/provisioner/ansible-local/provisioner.go +++ b/provisioner/ansible-local/provisioner.go @@ -38,6 +38,9 @@ type Config struct { // The main playbook file to execute. PlaybookFile string `mapstructure:"playbook_file"` + // The playbook files to execute. + PlaybookFiles []string `mapstructure:"playbook_files"` + // An array of local paths of playbook files to upload. PlaybookPaths []string `mapstructure:"playbook_paths"` @@ -63,6 +66,8 @@ type Config struct { type Provisioner struct { config Config + + playbookFiles []string } func (p *Provisioner) Prepare(raws ...interface{}) error { @@ -77,6 +82,9 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { return err } + // Reset the state. + p.playbookFiles = make([]string, 0, len(p.config.PlaybookFiles)) + // Defaults if p.config.Command == "" { p.config.Command = "ANSIBLE_FORCE_COLOR=1 PYTHONUNBUFFERED=1 ansible-playbook" @@ -91,9 +99,32 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { // Validation var errs *packer.MultiError - err = validateFileConfig(p.config.PlaybookFile, "playbook_file", true) - if err != nil { - errs = packer.MultiErrorAppend(errs, err) + + // Check that either playbook_file or playbook_files is specified + if len(p.config.PlaybookFiles) != 0 && p.config.PlaybookFile != "" { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("Either playbook_file or playbook_files can be specified, not both")) + } + if len(p.config.PlaybookFiles) == 0 && p.config.PlaybookFile == "" { + errs = packer.MultiErrorAppend(errs, fmt.Errorf("Either playbook_file or playbook_files must be specified")) + } + if p.config.PlaybookFile != "" { + err = validateFileConfig(p.config.PlaybookFile, "playbook_file", true) + if err != nil { + errs = packer.MultiErrorAppend(errs, err) + } + } + + for _, playbookFile := range p.config.PlaybookFiles { + if err := validateFileConfig(playbookFile, "playbook_files", true); err != nil { + errs = packer.MultiErrorAppend(errs, err) + } else { + playbookFile, err := filepath.Abs(playbookFile) + if err != nil { + errs = packer.MultiErrorAppend(errs, err) + } else { + p.playbookFiles = append(p.playbookFiles, playbookFile) + } + } } // Check that the inventory file exists, if configured @@ -166,11 +197,15 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } } - ui.Message("Uploading main Playbook file...") - src := p.config.PlaybookFile - dst := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(src))) - if err := p.uploadFile(ui, comm, dst, src); err != nil { - return fmt.Errorf("Error uploading main playbook: %s", err) + if p.config.PlaybookFile != "" { + ui.Message("Uploading main Playbook file...") + src := p.config.PlaybookFile + dst := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(src))) + if err := p.uploadFile(ui, comm, dst, src); err != nil { + return fmt.Errorf("Error uploading main playbook: %s", err) + } + } else if err := p.provisionPlaybookFiles(ui, comm); err != nil { + return err } if 
len(p.config.InventoryFile) == 0 { @@ -201,16 +236,16 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { if len(p.config.GalaxyFile) > 0 { ui.Message("Uploading galaxy file...") - src = p.config.GalaxyFile - dst = filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(src))) + src := p.config.GalaxyFile + dst := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(src))) if err := p.uploadFile(ui, comm, dst, src); err != nil { return fmt.Errorf("Error uploading galaxy file: %s", err) } } ui.Message("Uploading inventory file...") - src = p.config.InventoryFile - dst = filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(src))) + src := p.config.InventoryFile + dst := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(src))) if err := p.uploadFile(ui, comm, dst, src); err != nil { return fmt.Errorf("Error uploading inventory file: %s", err) } @@ -269,6 +304,44 @@ func (p *Provisioner) Cancel() { os.Exit(0) } +func (p *Provisioner) provisionPlaybookFiles(ui packer.Ui, comm packer.Communicator) error { + var playbookDir string + if p.config.PlaybookDir != "" { + var err error + playbookDir, err = filepath.Abs(p.config.PlaybookDir) + if err != nil { + return err + } + } + for index, playbookFile := range p.playbookFiles { + if playbookDir != "" && strings.HasPrefix(playbookFile, playbookDir) { + p.playbookFiles[index] = strings.TrimPrefix(playbookFile, playbookDir) + continue + } + if err := p.provisionPlaybookFile(ui, comm, playbookFile); err != nil { + return err + } + } + return nil +} + +func (p *Provisioner) provisionPlaybookFile(ui packer.Ui, comm packer.Communicator, playbookFile string) error { + ui.Message(fmt.Sprintf("Uploading playbook file: %s", playbookFile)) + + remoteDir := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Dir(playbookFile))) + remotePlaybookFile := filepath.ToSlash(filepath.Join(p.config.StagingDir, playbookFile)) + + if err := p.createDir(ui, comm, remoteDir); err != nil { + return fmt.Errorf("Error uploading playbook file: %s [%s]", playbookFile, err) + } + + if err := p.uploadFile(ui, comm, remotePlaybookFile, playbookFile); err != nil { + return fmt.Errorf("Error uploading playbook: %s [%s]", playbookFile, err) + } + + return nil +} + func (p *Provisioner) executeGalaxy(ui packer.Ui, comm packer.Communicator) error { rolesDir := filepath.ToSlash(filepath.Join(p.config.StagingDir, "roles")) galaxyFile := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.GalaxyFile))) @@ -291,7 +364,6 @@ func (p *Provisioner) executeGalaxy(ui packer.Ui, comm packer.Communicator) erro } func (p *Provisioner) executeAnsible(ui packer.Ui, comm packer.Communicator) error { - playbook := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.PlaybookFile))) inventory := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.InventoryFile))) extraArgs := fmt.Sprintf(" --extra-vars \"packer_build_name=%s packer_builder_type=%s packer_http_addr=%s\" ", @@ -307,8 +379,28 @@ func (p *Provisioner) executeAnsible(ui packer.Ui, comm packer.Communicator) err } } + if p.config.PlaybookFile != "" { + playbookFile := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.PlaybookFile))) + if err := p.executeAnsiblePlaybook(ui, comm, playbookFile, extraArgs, inventory); err != nil { + return err + } + } + + for _, playbookFile := range p.playbookFiles { + playbookFile = 
filepath.ToSlash(filepath.Join(p.config.StagingDir, playbookFile)) + if err := p.executeAnsiblePlaybook(ui, comm, playbookFile, extraArgs, inventory); err != nil { + return err + } + } + return nil +} + +func (p *Provisioner) executeAnsiblePlaybook( + ui packer.Ui, comm packer.Communicator, playbookFile, extraArgs, inventory string, +) error { command := fmt.Sprintf("cd %s && %s %s%s -c local -i %s", - p.config.StagingDir, p.config.Command, playbook, extraArgs, inventory) + p.config.StagingDir, p.config.Command, playbookFile, extraArgs, inventory, + ) ui.Message(fmt.Sprintf("Executing Ansible: %s", command)) cmd := &packer.RemoteCmd{ Command: command, diff --git a/provisioner/ansible-local/provisioner_test.go b/provisioner/ansible-local/provisioner_test.go index 2195b7107..a074e1331 100644 --- a/provisioner/ansible-local/provisioner_test.go +++ b/provisioner/ansible-local/provisioner_test.go @@ -73,6 +73,50 @@ func TestProvisionerPrepare_PlaybookFile(t *testing.T) { } } +func TestProvisionerPrepare_PlaybookFiles(t *testing.T) { + var p Provisioner + config := testConfig() + + err := p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + config["playbook_file"] = "" + config["playbook_files"] = []string{} + err = p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + playbook_file, err := ioutil.TempFile("", "playbook") + if err != nil { + t.Fatalf("err: %s", err) + } + defer os.Remove(playbook_file.Name()) + + config["playbook_file"] = playbook_file.Name() + config["playbook_files"] = []string{"some_other_file"} + err = p.Prepare(config) + if err == nil { + t.Fatal("should have error") + } + + config["playbook_file"] = playbook_file.Name() + config["playbook_files"] = []string{} + err = p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + config["playbook_file"] = "" + config["playbook_files"] = []string{playbook_file.Name()} + err = p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } +} + func TestProvisionerPrepare_InventoryFile(t *testing.T) { var p Provisioner config := testConfig() From f68204534507fa7846b6b6bf477f91def3297c18 Mon Sep 17 00:00:00 2001 From: localghost Date: Mon, 3 Jul 2017 23:44:10 +0200 Subject: [PATCH 002/138] Test for ansible-local playbook_files with mocked Communicator. 
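For context, a minimal sketch of the pattern this test follows: communicatorMock records every upload destination and remote command in memory, so Provision can be exercised without a real machine. createTempFile, communicatorMock, and uiStub are the helpers added in this commit; the test name and structure here are illustrative only:

func TestProvision_sketch(t *testing.T) {
	playbookPath := createTempFile() // must exist on disk: Prepare validates each entry
	defer os.Remove(playbookPath)

	var p Provisioner
	config := testConfig()
	config["playbook_files"] = []string{playbookPath}
	if err := p.Prepare(config); err != nil {
		t.Fatalf("err: %s", err)
	}

	comm := &communicatorMock{}
	if err := p.Provision(&uiStub{}, comm); err != nil {
		t.Fatalf("err: %s", err)
	}
	// comm.uploadDestination and comm.startCommand now hold every upload
	// and every remote command, so the test can assert that each playbook
	// was uploaded and handed to ansible-playbook.
}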
--- .../ansible-local/communicator_mock.go | 38 +++++++++ provisioner/ansible-local/provisioner_test.go | 84 +++++++++++++++++++ provisioner/ansible-local/ui_stub.go | 15 ++++ 3 files changed, 137 insertions(+) create mode 100644 provisioner/ansible-local/communicator_mock.go create mode 100644 provisioner/ansible-local/ui_stub.go diff --git a/provisioner/ansible-local/communicator_mock.go b/provisioner/ansible-local/communicator_mock.go new file mode 100644 index 000000000..8ed59e9bb --- /dev/null +++ b/provisioner/ansible-local/communicator_mock.go @@ -0,0 +1,38 @@ +package ansiblelocal + +import ( + "github.com/hashicorp/packer/packer" + "io" + "os" +) + +type communicatorMock struct { + startCommand []string + uploadDestination []string +} + +func (c *communicatorMock) Start(cmd *packer.RemoteCmd) error { + c.startCommand = append(c.startCommand, cmd.Command) + cmd.SetExited(0) + return nil +} + +func (c *communicatorMock) Upload(dst string, _ io.Reader, _ *os.FileInfo) error { + c.uploadDestination = append(c.uploadDestination, dst) + return nil +} + +func (c *communicatorMock) UploadDir(dst, src string, exclude []string) error { + return nil +} + +func (c *communicatorMock) Download(src string, dst io.Writer) error { + return nil +} + +func (c *communicatorMock) DownloadDir(src, dst string, exclude []string) error { + return nil +} + +func (c *communicatorMock) verify() { +} diff --git a/provisioner/ansible-local/provisioner_test.go b/provisioner/ansible-local/provisioner_test.go index a074e1331..1e81b1bd6 100644 --- a/provisioner/ansible-local/provisioner_test.go +++ b/provisioner/ansible-local/provisioner_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "fmt" "github.com/hashicorp/packer/packer" ) @@ -15,6 +16,37 @@ func testConfig() map[string]interface{} { return m } +func createTempFile() string { + file, err := ioutil.TempFile("", "") + if err != nil { + panic(fmt.Sprintf("err: %s", err)) + } + return file.Name() +} + +func createTempFiles(numFiles int) []string { + files := make([]string, 0, numFiles) + defer func() { + // Cleanup the files if not all were created. 
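+ // (createTempFile panics on failure, so this deferred check removes
+ // any files created before the panic; once all numFiles exist,
+ // cleanup becomes the caller's responsibility via removeFiles.)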
+ if len(files) < numFiles { + for _, file := range files { + os.Remove(file) + } + } + }() + + for i := 0; i < numFiles; i++ { + files = append(files, createTempFile()) + } + return files +} + +func removeFiles(files ...string) { + for _, file := range files { + os.Remove(file) + } +} + func TestProvisioner_Impl(t *testing.T) { var raw interface{} raw = &Provisioner{} @@ -117,6 +149,58 @@ func TestProvisionerPrepare_PlaybookFiles(t *testing.T) { } } +func assertPlaybooksExecuted(comm *communicatorMock, playbooks []string) { + cmdIndex := 0 + for _, playbook := range playbooks { + for ; cmdIndex < len(comm.startCommand); cmdIndex++ { + cmd := comm.startCommand[cmdIndex] + if strings.Contains(cmd, "ansible-playbook") && strings.Contains(cmd, playbook) { + break + } + } + if cmdIndex == len(comm.startCommand) { + panic(fmt.Sprintf("Playbook %s was not executed", playbook)) + } + } +} + +func assertPlaybooksUploaded(comm *communicatorMock, playbooks []string) { + uploadIndex := 0 + for _, playbook := range playbooks { + for ; uploadIndex < len(comm.uploadDestination); uploadIndex++ { + dest := comm.uploadDestination[uploadIndex] + if strings.HasSuffix(dest, playbook) { + break + } + } + if uploadIndex == len(comm.uploadDestination) { + panic(fmt.Sprintf("Playbook %s was not uploaded", playbook)) + } + } +} + +func TestProvisionerProvision_PlaybookFiles(t *testing.T) { + var p Provisioner + config := testConfig() + + playbooks := createTempFiles(3) + defer removeFiles(playbooks...) + + config["playbook_files"] = playbooks + err := p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + comm := &communicatorMock{} + if err := p.Provision(&uiStub{}, comm); err != nil { + t.Fatalf("err: %s", err) + } + + assertPlaybooksUploaded(comm, playbooks) + assertPlaybooksExecuted(comm, playbooks) +} + func TestProvisionerPrepare_InventoryFile(t *testing.T) { var p Provisioner config := testConfig() diff --git a/provisioner/ansible-local/ui_stub.go b/provisioner/ansible-local/ui_stub.go new file mode 100644 index 000000000..4faa2a215 --- /dev/null +++ b/provisioner/ansible-local/ui_stub.go @@ -0,0 +1,15 @@ +package ansiblelocal + +type uiStub struct{} + +func (su *uiStub) Ask(string) (string, error) { + return "", nil +} + +func (su *uiStub) Error(string) {} + +func (su *uiStub) Machine(string, ...string) {} + +func (su *uiStub) Message(string) {} + +func (su *uiStub) Say(msg string) {} From 9ea6313b68593b0c15e351f80fdb6f5ef9e1a0ea Mon Sep 17 00:00:00 2001 From: localghost Date: Thu, 6 Jul 2017 21:02:46 +0200 Subject: [PATCH 003/138] Fix playbook_files test on Windows. 
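The mismatch, in short: on Windows the expected playbook paths built by the test contain backslashes, while the provisioner normalizes every staging path with filepath.ToSlash before uploading and executing, so the substring assertions could never match there. The fix below normalizes the expected side the same way (the path shown is illustrative):

	playbook = filepath.ToSlash(playbook)
	// e.g. C:\Users\x\Temp\playbook123 becomes C:/Users/x/Temp/playbook123,
	// matching the destinations recorded by the mocked communicator.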
--- provisioner/ansible-local/provisioner_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/provisioner/ansible-local/provisioner_test.go b/provisioner/ansible-local/provisioner_test.go index 1e81b1bd6..5fdcdfe8e 100644 --- a/provisioner/ansible-local/provisioner_test.go +++ b/provisioner/ansible-local/provisioner_test.go @@ -152,6 +152,7 @@ func TestProvisionerPrepare_PlaybookFiles(t *testing.T) { func assertPlaybooksExecuted(comm *communicatorMock, playbooks []string) { cmdIndex := 0 for _, playbook := range playbooks { + playbook = filepath.ToSlash(playbook) for ; cmdIndex < len(comm.startCommand); cmdIndex++ { cmd := comm.startCommand[cmdIndex] if strings.Contains(cmd, "ansible-playbook") && strings.Contains(cmd, playbook) { @@ -167,6 +168,7 @@ func assertPlaybooksExecuted(comm *communicatorMock, playbooks []string) { func assertPlaybooksUploaded(comm *communicatorMock, playbooks []string) { uploadIndex := 0 for _, playbook := range playbooks { + playbook = filepath.ToSlash(playbook) for ; uploadIndex < len(comm.uploadDestination); uploadIndex++ { dest := comm.uploadDestination[uploadIndex] if strings.HasSuffix(dest, playbook) { From 1bd32d3876df0e0a10f6d57f363e0d124d509bdc Mon Sep 17 00:00:00 2001 From: localghost Date: Sun, 9 Jul 2017 20:04:01 +0200 Subject: [PATCH 004/138] Add documentation about playbook_files option of ansible-local provisioner. --- website/source/docs/provisioners/ansible-local.html.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/website/source/docs/provisioners/ansible-local.html.md b/website/source/docs/provisioners/ansible-local.html.md index 3ca775cf4..3f9500534 100644 --- a/website/source/docs/provisioners/ansible-local.html.md +++ b/website/source/docs/provisioners/ansible-local.html.md @@ -43,7 +43,12 @@ Required: - `playbook_file` (string) - The playbook file to be executed by ansible. This file must exist on your local system and will be uploaded to the - remote machine. + remote machine. This option is exclusive with `playbook_files`. + +- `playbook_files` (array of strings) - The playbook files to be executed by ansible. + These files must exist on your local system. If the files don't exist in the `playbook_dir` + or you don't set `playbook_dir` they will be uploaded to the remote machine. This option + is exclusive with `playbook_file`. Optional: From 33ae9cb2bb9134942609d178fe5421507d817c23 Mon Sep 17 00:00:00 2001 From: Zbigniew Kostrzewa Date: Mon, 10 Jul 2017 08:19:29 +0200 Subject: [PATCH 005/138] Add test for playbook_files using docker builder. 
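The heart of the wiring, condensed from the test below: the prepared ansible-local and file provisioners are attached to a real docker build through a DispatchHook at the provision stage, the same way a normal build runs them (ansible, download, ui, and cache are set up earlier in the test):

	hooks := map[string][]packer.Hook{}
	hooks[packer.HookProvision] = []packer.Hook{
		&packer.ProvisionHook{
			Provisioners:     []packer.Provisioner{ansible, download},
			ProvisionerTypes: []string{"ansible-local", "file"},
		},
	}
	artifact, err := builder.Run(ui, &packer.DispatchHook{Mapping: hooks}, cache)

The two fixture playbooks each append to /tmp/hello_world inside the container, and the file provisioner downloads that file so the host-side test can check its content is exactly "Hello world!".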
--- provisioner/ansible-local/provisioner_test.go | 175 ++++++++++++++---- .../ansible-local/test-fixtures/hello.yml | 5 + .../ansible-local/test-fixtures/world.yml | 5 + 3 files changed, 149 insertions(+), 36 deletions(-) create mode 100644 provisioner/ansible-local/test-fixtures/hello.yml create mode 100644 provisioner/ansible-local/test-fixtures/world.yml diff --git a/provisioner/ansible-local/provisioner_test.go b/provisioner/ansible-local/provisioner_test.go index 5fdcdfe8e..bd6568afc 100644 --- a/provisioner/ansible-local/provisioner_test.go +++ b/provisioner/ansible-local/provisioner_test.go @@ -8,45 +8,13 @@ import ( "testing" "fmt" + "github.com/hashicorp/packer/builder/docker" "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/provisioner/file" + "github.com/hashicorp/packer/template" + "os/exec" ) -func testConfig() map[string]interface{} { - m := make(map[string]interface{}) - return m -} - -func createTempFile() string { - file, err := ioutil.TempFile("", "") - if err != nil { - panic(fmt.Sprintf("err: %s", err)) - } - return file.Name() -} - -func createTempFiles(numFiles int) []string { - files := make([]string, 0, numFiles) - defer func() { - // Cleanup the files if not all were created. - if len(files) < numFiles { - for _, file := range files { - os.Remove(file) - } - } - }() - - for i := 0; i < numFiles; i++ { - files = append(files, createTempFile()) - } - return files -} - -func removeFiles(files ...string) { - for _, file := range files { - os.Remove(file) - } -} - func TestProvisioner_Impl(t *testing.T) { var raw interface{} raw = &Provisioner{} @@ -318,3 +286,138 @@ func TestProvisionerPrepare_Dirs(t *testing.T) { t.Fatalf("err: %s", err) } } + +func TestProvisionerProvisionDocker_PlaybookFiles(t *testing.T) { + if os.Getenv("PACKER_ACC") == "" { + t.Skip("This test is only run with PACKER_ACC=1") + } + + ui := packer.TestUi(t) + cache := &packer.FileCache{CacheDir: os.TempDir()} + + tpl, err := template.Parse(strings.NewReader(playbookFilesDockerConfig)) + if err != nil { + t.Fatalf("Unable to parse config: %s", err) + } + + // Check if docker executable can be found. 
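+ // (exec.LookPath searches PATH for the binary; this reports via
+ // t.Error rather than t.Fatalf, so the failure is recorded but the
+ // test still proceeds to the builder setup below.)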
+ _, err = exec.LookPath("docker") + if err != nil { + t.Error("docker command not found; please make sure docker is installed") + } + + // Setup the builder + builder := &docker.Builder{} + warnings, err := builder.Prepare(tpl.Builders["docker"].Config) + if err != nil { + t.Fatalf("Error preparing configuration %s", err) + } + if len(warnings) > 0 { + t.Fatal("Encountered configuration warnings; aborting") + } + + ansible := &Provisioner{} + err = ansible.Prepare(tpl.Provisioners[0].Config) + if err != nil { + t.Fatalf("Error preparing ansible-local provisioner: %s", err) + } + + download := &file.Provisioner{} + err = download.Prepare(tpl.Provisioners[1].Config) + if err != nil { + t.Fatalf("Error preparing download: %s", err) + } + defer os.Remove("hello_world") + + // Add hooks so the provisioners run during the build + hooks := map[string][]packer.Hook{} + hooks[packer.HookProvision] = []packer.Hook{ + &packer.ProvisionHook{ + Provisioners: []packer.Provisioner{ + ansible, + download, + }, + ProvisionerTypes: []string{tpl.Provisioners[0].Type, tpl.Provisioners[1].Type}, + }, + } + hook := &packer.DispatchHook{Mapping: hooks} + + artifact, err := builder.Run(ui, hook, cache) + if err != nil { + t.Fatalf("Error running build %s", err) + } + defer artifact.Destroy() + + actualContent, err := ioutil.ReadFile("hello_world") + if err != nil { + t.Fatalf("Expected file not found: %s", err) + } + + expectedContent := "Hello world!" + if string(actualContent) != expectedContent { + t.Fatalf(`Unexpected file content: expected="%s", actual="%s"`, expectedContent, actualContent) + } +} + +func testConfig() map[string]interface{} { + m := make(map[string]interface{}) + return m +} + +func createTempFile() string { + file, err := ioutil.TempFile("", "") + if err != nil { + panic(fmt.Sprintf("err: %s", err)) + } + return file.Name() +} + +func createTempFiles(numFiles int) []string { + files := make([]string, 0, numFiles) + defer func() { + // Cleanup the files if not all were created. + if len(files) < numFiles { + for _, file := range files { + os.Remove(file) + } + } + }() + + for i := 0; i < numFiles; i++ { + files = append(files, createTempFile()) + } + return files +} + +func removeFiles(files ...string) { + for _, file := range files { + os.Remove(file) + } +} + +const playbookFilesDockerConfig = ` +{ + "builders": [ + { + "type": "docker", + "image": "williamyeh/ansible:centos7", + "discard": true + } + ], + "provisioners": [ + { + "type": "ansible-local", + "playbook_files": [ + "test-fixtures/hello.yml", + "test-fixtures/world.yml" + ] + }, + { + "type": "file", + "source": "/tmp/hello_world", + "destination": "hello_world", + "direction": "download" + } + ] +} +` diff --git a/provisioner/ansible-local/test-fixtures/hello.yml b/provisioner/ansible-local/test-fixtures/hello.yml new file mode 100644 index 000000000..6bb8797d8 --- /dev/null +++ b/provisioner/ansible-local/test-fixtures/hello.yml @@ -0,0 +1,5 @@ +--- +- hosts: all + tasks: + - name: write Hello + shell: echo -n "Hello" >> /tmp/hello_world \ No newline at end of file diff --git a/provisioner/ansible-local/test-fixtures/world.yml b/provisioner/ansible-local/test-fixtures/world.yml new file mode 100644 index 000000000..98a205c7b --- /dev/null +++ b/provisioner/ansible-local/test-fixtures/world.yml @@ -0,0 +1,5 @@ +--- +- hosts: all + tasks: + - name: write world! + shell: echo -n " world!" 
>> /tmp/hello_world \ No newline at end of file From 079cbc263fa631ada838d9fab11f7771e51e1b76 Mon Sep 17 00:00:00 2001 From: localghost Date: Mon, 10 Jul 2017 21:58:46 +0200 Subject: [PATCH 006/138] Add tests for playbook_files with playbook_dir. --- provisioner/ansible-local/provisioner_test.go | 161 +++++++++++++----- 1 file changed, 122 insertions(+), 39 deletions(-) diff --git a/provisioner/ansible-local/provisioner_test.go b/provisioner/ansible-local/provisioner_test.go index bd6568afc..461e53539 100644 --- a/provisioner/ansible-local/provisioner_test.go +++ b/provisioner/ansible-local/provisioner_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/provisioner/file" "github.com/hashicorp/packer/template" + "github.com/moby/moby/pkg/ioutils" "os/exec" ) @@ -117,43 +118,11 @@ func TestProvisionerPrepare_PlaybookFiles(t *testing.T) { } } -func assertPlaybooksExecuted(comm *communicatorMock, playbooks []string) { - cmdIndex := 0 - for _, playbook := range playbooks { - playbook = filepath.ToSlash(playbook) - for ; cmdIndex < len(comm.startCommand); cmdIndex++ { - cmd := comm.startCommand[cmdIndex] - if strings.Contains(cmd, "ansible-playbook") && strings.Contains(cmd, playbook) { - break - } - } - if cmdIndex == len(comm.startCommand) { - panic(fmt.Sprintf("Playbook %s was not executed", playbook)) - } - } -} - -func assertPlaybooksUploaded(comm *communicatorMock, playbooks []string) { - uploadIndex := 0 - for _, playbook := range playbooks { - playbook = filepath.ToSlash(playbook) - for ; uploadIndex < len(comm.uploadDestination); uploadIndex++ { - dest := comm.uploadDestination[uploadIndex] - if strings.HasSuffix(dest, playbook) { - break - } - } - if uploadIndex == len(comm.uploadDestination) { - panic(fmt.Sprintf("Playbook %s was not uploaded", playbook)) - } - } -} - func TestProvisionerProvision_PlaybookFiles(t *testing.T) { var p Provisioner config := testConfig() - playbooks := createTempFiles(3) + playbooks := createTempFiles("", 3) defer removeFiles(playbooks...) 
config["playbook_files"] = playbooks @@ -171,6 +140,40 @@ func TestProvisionerProvision_PlaybookFiles(t *testing.T) { assertPlaybooksExecuted(comm, playbooks) } +func TestProvisionerProvision_PlaybookFilesWithPlaybookDir(t *testing.T) { + var p Provisioner + config := testConfig() + + playbook_dir, err := ioutils.TempDir("", "") + if err != nil { + t.Fatalf("Failed to create playbook_dir: %s", err) + } + defer os.RemoveAll(playbook_dir) + playbooks := createTempFiles(playbook_dir, 3) + + playbookNames := make([]string, 0, len(playbooks)) + playbooksInPlaybookDir := make([]string, 0, len(playbooks)) + for _, playbook := range playbooks { + playbooksInPlaybookDir = append(playbooksInPlaybookDir, strings.TrimPrefix(playbook, playbook_dir)) + playbookNames = append(playbookNames, filepath.Base(playbook)) + } + + config["playbook_files"] = playbooks + config["playbook_dir"] = playbook_dir + err = p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + comm := &communicatorMock{} + if err := p.Provision(&uiStub{}, comm); err != nil { + t.Fatalf("err: %s", err) + } + + assertPlaybooksNotUploaded(comm, playbookNames) + assertPlaybooksExecuted(comm, playbooksInPlaybookDir) +} + func TestProvisionerPrepare_InventoryFile(t *testing.T) { var p Provisioner config := testConfig() @@ -288,6 +291,14 @@ func TestProvisionerPrepare_Dirs(t *testing.T) { } func TestProvisionerProvisionDocker_PlaybookFiles(t *testing.T) { + testProvisionerProvisionDockerWithPlaybookFiles(t, playbookFilesDockerTemplate) +} + +func TestProvisionerProvisionDocker_PlaybookFilesWithPlaybookDir(t *testing.T) { + testProvisionerProvisionDockerWithPlaybookFiles(t, playbookFilesWithPlaybookDirDockerTemplate) +} + +func testProvisionerProvisionDockerWithPlaybookFiles(t *testing.T, templateString string) { if os.Getenv("PACKER_ACC") == "" { t.Skip("This test is only run with PACKER_ACC=1") } @@ -295,7 +306,7 @@ func TestProvisionerProvisionDocker_PlaybookFiles(t *testing.T) { ui := packer.TestUi(t) cache := &packer.FileCache{CacheDir: os.TempDir()} - tpl, err := template.Parse(strings.NewReader(playbookFilesDockerConfig)) + tpl, err := template.Parse(strings.NewReader(templateString)) if err != nil { t.Fatalf("Unable to parse config: %s", err) } @@ -359,20 +370,64 @@ func TestProvisionerProvisionDocker_PlaybookFiles(t *testing.T) { } } +func assertPlaybooksExecuted(comm *communicatorMock, playbooks []string) { + cmdIndex := 0 + for _, playbook := range playbooks { + playbook = filepath.ToSlash(playbook) + for ; cmdIndex < len(comm.startCommand); cmdIndex++ { + cmd := comm.startCommand[cmdIndex] + if strings.Contains(cmd, "ansible-playbook") && strings.Contains(cmd, playbook) { + break + } + } + if cmdIndex == len(comm.startCommand) { + panic(fmt.Sprintf("Playbook %s was not executed", playbook)) + } + } +} + +func assertPlaybooksUploaded(comm *communicatorMock, playbooks []string) { + fmt.Println(comm.uploadDestination) + uploadIndex := 0 + for _, playbook := range playbooks { + playbook = filepath.ToSlash(playbook) + for ; uploadIndex < len(comm.uploadDestination); uploadIndex++ { + dest := comm.uploadDestination[uploadIndex] + if strings.HasSuffix(dest, playbook) { + break + } + } + if uploadIndex == len(comm.uploadDestination) { + panic(fmt.Sprintf("Playbook %s was not uploaded", playbook)) + } + } +} + +func assertPlaybooksNotUploaded(comm *communicatorMock, playbooks []string) { + for _, playbook := range playbooks { + playbook = filepath.ToSlash(playbook) + for _, destination := range comm.uploadDestination { + if 
strings.HasSuffix(destination, playbook) { + panic(fmt.Sprintf("Playbook %s was uploaded", playbook)) + } + } + } +} + func testConfig() map[string]interface{} { m := make(map[string]interface{}) return m } -func createTempFile() string { - file, err := ioutil.TempFile("", "") +func createTempFile(dir string) string { + file, err := ioutil.TempFile(dir, "") if err != nil { panic(fmt.Sprintf("err: %s", err)) } return file.Name() } -func createTempFiles(numFiles int) []string { +func createTempFiles(dir string, numFiles int) []string { files := make([]string, 0, numFiles) defer func() { // Cleanup the files if not all were created. @@ -384,7 +439,7 @@ func createTempFiles(numFiles int) []string { }() for i := 0; i < numFiles; i++ { - files = append(files, createTempFile()) + files = append(files, createTempFile(dir)) } return files } @@ -395,7 +450,7 @@ func removeFiles(files ...string) { } } -const playbookFilesDockerConfig = ` +const playbookFilesDockerTemplate = ` { "builders": [ { @@ -421,3 +476,31 @@ const playbookFilesDockerConfig = ` ] } ` + +const playbookFilesWithPlaybookDirDockerTemplate = ` +{ + "builders": [ + { + "type": "docker", + "image": "williamyeh/ansible:centos7", + "discard": true + } + ], + "provisioners": [ + { + "type": "ansible-local", + "playbook_files": [ + "test-fixtures/hello.yml", + "test-fixtures/world.yml" + ], + "playbook_dir": "test-fixtures" + }, + { + "type": "file", + "source": "/tmp/hello_world", + "destination": "hello_world", + "direction": "download" + } + ] +} +` From daca0c2efe8e77986eb06e5ed2ba734bc28ddec5 Mon Sep 17 00:00:00 2001 From: localghost Date: Mon, 10 Jul 2017 22:05:42 +0200 Subject: [PATCH 007/138] Remove accidental dependency to moby's ioutils package. --- provisioner/ansible-local/provisioner_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/provisioner/ansible-local/provisioner_test.go b/provisioner/ansible-local/provisioner_test.go index 461e53539..1b4bbe5e7 100644 --- a/provisioner/ansible-local/provisioner_test.go +++ b/provisioner/ansible-local/provisioner_test.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/provisioner/file" "github.com/hashicorp/packer/template" - "github.com/moby/moby/pkg/ioutils" "os/exec" ) @@ -144,7 +143,7 @@ func TestProvisionerProvision_PlaybookFilesWithPlaybookDir(t *testing.T) { var p Provisioner config := testConfig() - playbook_dir, err := ioutils.TempDir("", "") + playbook_dir, err := ioutil.TempDir("", "") if err != nil { t.Fatalf("Failed to create playbook_dir: %s", err) } From fdeadfe3f698e8bf6d5733b878409db3cbd4bf3f Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 10 Feb 2017 00:49:47 -0800 Subject: [PATCH 008/138] builder/vmware: correctly default export format to ovf --- builder/vmware/iso/builder.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/builder/vmware/iso/builder.go b/builder/vmware/iso/builder.go index 60c0fc462..39f221b42 100644 --- a/builder/vmware/iso/builder.go +++ b/builder/vmware/iso/builder.go @@ -212,11 +212,13 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } } - if b.config.Format != "" { - if !(b.config.Format == "ova" || b.config.Format == "ovf" || b.config.Format == "vmx") { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("format must be one of ova, ovf, or vmx")) - } + if b.config.Format == "" { + b.config.Format = "ovf" + } + + if !(b.config.Format == "ova" || b.config.Format == "ovf" || b.config.Format == "vmx") { + errs = 
packer.MultiErrorAppend(errs, + fmt.Errorf("format must be one of ova, ovf, or vmx")) } // Warnings @@ -256,7 +258,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe exportOutputPath := b.config.OutputDir - if b.config.RemoteType != "" && b.config.Format != "" { + if b.config.RemoteType != "" { b.config.OutputDir = b.config.VMName } dir.SetOutputDir(b.config.OutputDir) From 6bbfe7e0bdc693cdd2e024eab3fbf502bbdf8728 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Mon, 23 Apr 2018 10:24:49 -0700 Subject: [PATCH 009/138] remove useless check against format --- builder/vmware/iso/step_export.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/vmware/iso/step_export.go b/builder/vmware/iso/step_export.go index 4f6df9ed4..50b1efd9f 100644 --- a/builder/vmware/iso/step_export.go +++ b/builder/vmware/iso/step_export.go @@ -45,8 +45,8 @@ func (s *StepExport) Run(_ context.Context, state multistep.StateBag) multistep. return multistep.ActionContinue } - if c.RemoteType != "esx5" || s.Format == "" { - ui.Say("Skipping export of virtual machine (export is allowed only for ESXi and the format needs to be specified)...") + if c.RemoteType != "esx5" { + ui.Say("Skipping export of virtual machine (export is allowed only for ESXi)...") return multistep.ActionContinue } From c163fbed35a4abfecf18551fa2dd11bd59a61fd2 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 1 May 2018 20:39:48 -0700 Subject: [PATCH 010/138] use xargs to check for formatting --- Makefile | 11 ++++++++++- scripts/gofmtcheck.sh | 14 -------------- 2 files changed, 10 insertions(+), 15 deletions(-) delete mode 100755 scripts/gofmtcheck.sh diff --git a/Makefile b/Makefile index 0ccd547b4..8510eee09 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,7 @@ GITSHA:=$(shell git rev-parse HEAD) # Get the current local branch name from git (if we can, this may be blank) GITBRANCH:=$(shell git symbolic-ref --short HEAD 2>/dev/null) GOFMT_FILES?=$$(find . -not -path "./vendor/*" -name "*.go") +BAD_FILES=$(shell echo $(GOFMT_FILES) | xargs gofmt -s -l) GOOS=$(shell go env GOOS) GOARCH=$(shell go env GOARCH) GOPATH=$(shell go env GOPATH) @@ -61,7 +62,15 @@ fmt: ## Format Go code @gofmt -w -s $(GOFMT_FILES) fmt-check: ## Check go code formatting - $(CURDIR)/scripts/gofmtcheck.sh $(GOFMT_FILES) + @echo "==> Checking that code complies with gofmt requirements..." + @if [ ! -z "$(BAD_FILES)" ]; then \ + echo "gofmt needs to be run on the following files:"; \ + echo "$(BAD_FILES)" | xargs -n1; \ + echo "You can use the command: \`make fmt\` to reformat code."; \ + exit 1; \ + else \ + echo "Check passed."; \ + fi fmt-docs: @find ./website/source/docs -name "*.md" -exec pandoc --wrap auto --columns 79 --atx-headers -s -f "markdown_github+yaml_metadata_block" -t "markdown_github+yaml_metadata_block" {} -o {} \; diff --git a/scripts/gofmtcheck.sh b/scripts/gofmtcheck.sh deleted file mode 100755 index 5b99bcdc4..000000000 --- a/scripts/gofmtcheck.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -# Check gofmt -echo "==> Checking that code complies with gofmt requirements..." -gofmt_files=$(gofmt -s -l ${@}) -if [[ -n ${gofmt_files} ]]; then - echo 'gofmt needs running on the following files:' - echo "${gofmt_files}" - echo "You can use the command: \`make fmt\` to reformat code." - exit 1 -fi -echo "Check passed." 
- -exit 0 From fac6b2e71ba3bd2ca9e62c746d1b51f331027c80 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 1 May 2018 22:55:10 -0700 Subject: [PATCH 011/138] don't send every source file through the shell --- Makefile | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 8510eee09..b22d61e93 100644 --- a/Makefile +++ b/Makefile @@ -4,12 +4,13 @@ VET?=$(shell ls -d */ | grep -v vendor | grep -v website) GITSHA:=$(shell git rev-parse HEAD) # Get the current local branch name from git (if we can, this may be blank) GITBRANCH:=$(shell git symbolic-ref --short HEAD 2>/dev/null) -GOFMT_FILES?=$$(find . -not -path "./vendor/*" -name "*.go") -BAD_FILES=$(shell echo $(GOFMT_FILES) | xargs gofmt -s -l) GOOS=$(shell go env GOOS) GOARCH=$(shell go env GOARCH) GOPATH=$(shell go env GOPATH) +# gofmt +UNFORMATTED_FILES=$(shell find . -not -path "./vendor/*" -name "*.go" | xargs gofmt -s -l) + # Get the git commit GIT_DIRTY=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true) GIT_COMMIT=$(shell git rev-parse --short HEAD) @@ -59,13 +60,13 @@ dev: deps ## Build and install a development build @cp $(GOPATH)/bin/packer pkg/$(GOOS)_$(GOARCH) fmt: ## Format Go code - @gofmt -w -s $(GOFMT_FILES) + @gofmt -w -s $(UNFORMATTED_FILES) fmt-check: ## Check go code formatting @echo "==> Checking that code complies with gofmt requirements..." - @if [ ! -z "$(BAD_FILES)" ]; then \ + @if [ ! -z "$(UNFORMATTED_FILES)" ]; then \ echo "gofmt needs to be run on the following files:"; \ - echo "$(BAD_FILES)" | xargs -n1; \ + echo "$(UNFORMATTED_FILES)" | xargs -n1; \ echo "You can use the command: \`make fmt\` to reformat code."; \ exit 1; \ else \ From 616b41e58f1acfd445851543634ac61bb7a6b6ae Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 23 Feb 2018 13:26:31 -0800 Subject: [PATCH 012/138] deduplicate the nearly identical communicators for the shell-local provisioner and post-processor, moving single communicator into a new common/shell-local module --- builder/amazon/chroot/run_local_commands.go | 7 ++- .../shell-local/communicator.go | 38 ++++++++--- .../shell-local/communicator_test.go | 2 +- post-processor/shell-local/communicator.go | 63 ------------------- .../shell-local/communicator_test.go | 43 ------------- post-processor/shell-local/post-processor.go | 6 +- provisioner/shell-local/provisioner.go | 25 +------- 7 files changed, 41 insertions(+), 143 deletions(-) rename {provisioner => common}/shell-local/communicator.go (71%) rename {provisioner => common}/shell-local/communicator_test.go (97%) delete mode 100644 post-processor/shell-local/communicator.go delete mode 100644 post-processor/shell-local/communicator_test.go diff --git a/builder/amazon/chroot/run_local_commands.go b/builder/amazon/chroot/run_local_commands.go index 024a208f8..154d37a4f 100644 --- a/builder/amazon/chroot/run_local_commands.go +++ b/builder/amazon/chroot/run_local_commands.go @@ -3,8 +3,8 @@ package chroot import ( "fmt" + sl "github.com/hashicorp/packer/common/shell-local" "github.com/hashicorp/packer/packer" - "github.com/hashicorp/packer/post-processor/shell-local" "github.com/hashicorp/packer/template/interpolate" ) @@ -21,7 +21,10 @@ func RunLocalCommands(commands []string, wrappedCommand CommandWrapper, ctx inte } ui.Say(fmt.Sprintf("Executing command: %s", command)) - comm := &shell_local.Communicator{} + comm := &sl.Communicator{ + Ctx: ctx, + ExecuteCommand: []string{""}, + } cmd := &packer.RemoteCmd{Command: command} if err := cmd.StartWithUi(comm, 
ui); err != nil { return fmt.Errorf("Error executing command: %s", err) diff --git a/provisioner/shell-local/communicator.go b/common/shell-local/communicator.go similarity index 71% rename from provisioner/shell-local/communicator.go rename to common/shell-local/communicator.go index 2afbe1028..dc84b575a 100644 --- a/provisioner/shell-local/communicator.go +++ b/common/shell-local/communicator.go @@ -1,10 +1,11 @@ -package shell +package shell_local import ( "fmt" "io" "os" "os/exec" + "runtime" "syscall" "github.com/hashicorp/packer/packer" @@ -17,17 +18,34 @@ type Communicator struct { } func (c *Communicator) Start(cmd *packer.RemoteCmd) error { - // Render the template so that we know how to execute the command - c.Ctx.Data = &ExecuteCommandTemplate{ - Command: cmd.Command, - } - for i, field := range c.ExecuteCommand { - command, err := interpolate.Render(field, &c.Ctx) - if err != nil { - return fmt.Errorf("Error processing command: %s", err) + if len(c.ExecuteCommand) == 0 { + // Get default Execute Command + if runtime.GOOS == "windows" { + c.ExecuteCommand = []string{ + "cmd", + "/C", + "{{.Command}}", + } + } else { + c.ExecuteCommand = []string{ + "/bin/sh", + "-c", + "{{.Command}}", + } } + } else { + // Render the template so that we know how to execute the command + c.Ctx.Data = &ExecuteCommandTemplate{ + Command: cmd.Command, + } + for i, field := range c.ExecuteCommand { + command, err := interpolate.Render(field, &c.Ctx) + if err != nil { + return fmt.Errorf("Error processing command: %s", err) + } - c.ExecuteCommand[i] = command + c.ExecuteCommand[i] = command + } } // Build the local command to execute diff --git a/provisioner/shell-local/communicator_test.go b/common/shell-local/communicator_test.go similarity index 97% rename from provisioner/shell-local/communicator_test.go rename to common/shell-local/communicator_test.go index 8ebd4fa60..903ab154d 100644 --- a/provisioner/shell-local/communicator_test.go +++ b/common/shell-local/communicator_test.go @@ -1,4 +1,4 @@ -package shell +package shell_local import ( "bytes" diff --git a/post-processor/shell-local/communicator.go b/post-processor/shell-local/communicator.go deleted file mode 100644 index b0bfb008f..000000000 --- a/post-processor/shell-local/communicator.go +++ /dev/null @@ -1,63 +0,0 @@ -package shell_local - -import ( - "fmt" - "io" - "os" - "os/exec" - "syscall" - - "github.com/hashicorp/packer/packer" -) - -type Communicator struct{} - -func (c *Communicator) Start(cmd *packer.RemoteCmd) error { - localCmd := exec.Command("sh", "-c", cmd.Command) - localCmd.Stdin = cmd.Stdin - localCmd.Stdout = cmd.Stdout - localCmd.Stderr = cmd.Stderr - - // Start it. If it doesn't work, then error right away. - if err := localCmd.Start(); err != nil { - return err - } - - // We've started successfully. Start a goroutine to wait for - // it to complete and track exit status. - go func() { - var exitStatus int - err := localCmd.Wait() - if err != nil { - if exitErr, ok := err.(*exec.ExitError); ok { - exitStatus = 1 - - // There is no process-independent way to get the REAL - // exit status so we just try to go deeper. 
- if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { - exitStatus = status.ExitStatus() - } - } - } - - cmd.SetExited(exitStatus) - }() - - return nil -} - -func (c *Communicator) Upload(string, io.Reader, *os.FileInfo) error { - return fmt.Errorf("upload not supported") -} - -func (c *Communicator) UploadDir(string, string, []string) error { - return fmt.Errorf("uploadDir not supported") -} - -func (c *Communicator) Download(string, io.Writer) error { - return fmt.Errorf("download not supported") -} - -func (c *Communicator) DownloadDir(src string, dst string, exclude []string) error { - return fmt.Errorf("downloadDir not supported") -} diff --git a/post-processor/shell-local/communicator_test.go b/post-processor/shell-local/communicator_test.go deleted file mode 100644 index 025deec54..000000000 --- a/post-processor/shell-local/communicator_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package shell_local - -import ( - "bytes" - "runtime" - "strings" - "testing" - - "github.com/hashicorp/packer/packer" -) - -func TestCommunicator_impl(t *testing.T) { - var _ packer.Communicator = new(Communicator) -} - -func TestCommunicator(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("windows not supported for this test") - return - } - - c := &Communicator{} - - var buf bytes.Buffer - cmd := &packer.RemoteCmd{ - Command: "/bin/echo foo", - Stdout: &buf, - } - - if err := c.Start(cmd); err != nil { - t.Fatalf("err: %s", err) - } - - cmd.Wait() - - if cmd.ExitStatus != 0 { - t.Fatalf("err bad exit status: %d", cmd.ExitStatus) - } - - if strings.TrimSpace(buf.String()) != "foo" { - t.Fatalf("bad: %s", buf.String()) - } -} diff --git a/post-processor/shell-local/post-processor.go b/post-processor/shell-local/post-processor.go index c2bd2d5c0..d77086177 100644 --- a/post-processor/shell-local/post-processor.go +++ b/post-processor/shell-local/post-processor.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/hashicorp/packer/common" + sl "github.com/hashicorp/packer/common/shell-local" "github.com/hashicorp/packer/helper/config" "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/template/interpolate" @@ -178,7 +179,10 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Say(fmt.Sprintf("Post processing with local shell script: %s", script)) - comm := &Communicator{} + comm := &sl.Communicator{ + Ctx: p.config.ctx, + ExecuteCommand: []string{p.config.ExecuteCommand}, + } cmd := &packer.RemoteCmd{Command: command} diff --git a/provisioner/shell-local/provisioner.go b/provisioner/shell-local/provisioner.go index 3f8222c19..ecd59fa98 100644 --- a/provisioner/shell-local/provisioner.go +++ b/provisioner/shell-local/provisioner.go @@ -3,9 +3,9 @@ package shell import ( "errors" "fmt" - "runtime" "github.com/hashicorp/packer/common" + sl "github.com/hashicorp/packer/common/shell-local" "github.com/hashicorp/packer/helper/config" "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/template/interpolate" @@ -41,33 +41,12 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { return err } - if len(p.config.ExecuteCommand) == 0 { - if runtime.GOOS == "windows" { - p.config.ExecuteCommand = []string{ - "cmd", - "/C", - "{{.Command}}", - } - } else { - p.config.ExecuteCommand = []string{ - "/bin/sh", - "-c", - "{{.Command}}", - } - } - } - var errs *packer.MultiError if p.config.Command == "" { errs = packer.MultiErrorAppend(errs, errors.New("command must be specified")) } - if len(p.config.ExecuteCommand) == 0 { - errs = 
packer.MultiErrorAppend(errs, - errors.New("execute_command must not be empty")) - } - if errs != nil && len(errs.Errors) > 0 { return errs } @@ -77,7 +56,7 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { func (p *Provisioner) Provision(ui packer.Ui, _ packer.Communicator) error { // Make another communicator for local - comm := &Communicator{ + comm := &sl.Communicator{ Ctx: p.config.ctx, ExecuteCommand: p.config.ExecuteCommand, } From 926327bebadf68119089074ccd3d0d72033de70b Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 27 Feb 2018 12:50:42 -0800 Subject: [PATCH 013/138] deduplicate all validation and interpolation of the shell-local config, sharing options between shell-local provisioner and post-processor. Maintain backwards compatibility with shell-local provisioner. --- common/shell-local/config.go | 154 +++++++++++++++++++ post-processor/shell-local/post-processor.go | 112 +------------- provisioner/shell-local/provisioner.go | 42 +---- 3 files changed, 166 insertions(+), 142 deletions(-) create mode 100644 common/shell-local/config.go diff --git a/common/shell-local/config.go b/common/shell-local/config.go new file mode 100644 index 000000000..b54f8d713 --- /dev/null +++ b/common/shell-local/config.go @@ -0,0 +1,154 @@ +package shell_local + +import ( + "errors" + "fmt" + "os" + "runtime" + "strings" + + "github.com/hashicorp/packer/common" + configHelper "github.com/hashicorp/packer/helper/config" + "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/template/interpolate" +) + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + + // ** DEPRECATED: USE INLINE INSTEAD ** + // ** Only Present for backwards compatibiltiy ** + // Command is the command to execute + Command string + + // An inline script to execute. Multiple strings are all executed + // in the context of a single shell. + Inline []string + + // The shebang value used when running inline scripts. + InlineShebang string `mapstructure:"inline_shebang"` + + // The local path of the shell script to upload and execute. + Script string + + // An array of multiple scripts to run. + Scripts []string + + // An array of environment variables that will be injected before + // your command(s) are executed. + Vars []string `mapstructure:"environment_vars"` + // End dedupe with postprocessor + + // The command used to execute the script. The '{{ .Path }}' variable + // should be used to specify where the script goes, {{ .Vars }} + // can be used to inject the environment_vars into the environment. + ExecuteCommand []string `mapstructure:"execute_command"` + + Ctx interpolate.Context +} + +func Decode(config *Config, raws ...interface{}) error { + err := configHelper.Decode(&config, &configHelper.DecodeOpts{ + Interpolate: true, + InterpolateContext: &config.Ctx, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "execute_command", + }, + }, + }, raws...) 
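+ // (execute_command is deliberately excluded from interpolation above:
+ // its {{.Vars}} and {{.Script}} placeholders can only be rendered
+ // later, once the script path and environment variables are known.)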
+ if err != nil { + return err + } + + return Validate(config) +} + +func Validate(config *Config) error { + var errs *packer.MultiError + + if runtime.GOOS == "windows" { + if config.InlineShebang == "" { + config.InlineShebang = "" + } + if len(config.ExecuteCommand) == 0 { + config.ExecuteCommand = []string{`{{.Vars}} "{{.Script}}"`} + } + } else { + if config.InlineShebang == "" { + // TODO: verify that provisioner defaulted to this as well + config.InlineShebang = "/bin/sh -e" + } + if len(config.ExecuteCommand) == 0 { + config.ExecuteCommand = []string{`chmod +x "{{.Script}}"; {{.Vars}} "{{.Script}}"`} + } + } + + // Clean up input + if config.Inline != nil && len(config.Inline) == 0 { + config.Inline = nil + } + + if config.Scripts == nil { + config.Scripts = make([]string, 0) + } + + if config.Vars == nil { + config.Vars = make([]string, 0) + } + + // Verify that the user has given us a command to run + if config.Command != "" && len(config.Inline) == 0 && + len(config.Scripts) == 0 && config.Script == "" { + errs = packer.MultiErrorAppend(errs, + errors.New("Command, Inline, Script and Scripts options cannot all be empty.")) + } + + if config.Command != "" { + // Backwards Compatibility: Before v1.2.2, the shell-local + // provisioner only allowed a single Command, and to run + // multiple commands you needed to run several provisioners in a + // row, one for each command. In deduplicating the post-processor and + // provisioner code, we've changed this to allow an array of scripts or + // inline commands just like in the post-processor. This conditional + // grandfathers in the "Command" option, allowing the original usage to + // continue to work. + config.Inline = append(config.Inline, config.Command) + } + + if config.Script != "" && len(config.Scripts) > 0 { + errs = packer.MultiErrorAppend(errs, + errors.New("Only one of script or scripts can be specified.")) + } + + if config.Script != "" { + config.Scripts = []string{config.Script} + } + + if len(config.Scripts) > 0 && config.Inline != nil { + errs = packer.MultiErrorAppend(errs, + errors.New("You may specify either a script file(s) or an inline script(s), but not both.")) + } + + for _, path := range config.Scripts { + if _, err := os.Stat(path); err != nil { + errs = packer.MultiErrorAppend(errs, + fmt.Errorf("Bad script '%s': %s", path, err)) + } + } + + // Do a check for bad environment variables, such as '=foo', 'foobar' + for _, kv := range config.Vars { + vs := strings.SplitN(kv, "=", 2) + if len(vs) != 2 || vs[0] == "" { + errs = packer.MultiErrorAppend(errs, + fmt.Errorf("Environment variable not in format 'key=value': %s", kv)) + } + } + + if errs != nil && len(errs.Errors) > 0 { + return errs + } + + return nil +} diff --git a/post-processor/shell-local/post-processor.go b/post-processor/shell-local/post-processor.go index d77086177..818a8b44e 100644 --- a/post-processor/shell-local/post-processor.go +++ b/post-processor/shell-local/post-processor.go @@ -2,7 +2,6 @@ package shell_local import ( "bufio" - "errors" "fmt" "io/ioutil" "log" @@ -10,43 +9,13 @@ import ( "sort" "strings" - "github.com/hashicorp/packer/common" sl "github.com/hashicorp/packer/common/shell-local" - "github.com/hashicorp/packer/helper/config" "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/template/interpolate" ) -type Config struct { - common.PackerConfig `mapstructure:",squash"` - - // An inline script to execute. Multiple strings are all executed - // in the context of a single shell. 
- Inline []string - - // The shebang value used when running inline scripts. - InlineShebang string `mapstructure:"inline_shebang"` - - // The local path of the shell script to upload and execute. - Script string - - // An array of multiple scripts to run. - Scripts []string - - // An array of environment variables that will be injected before - // your command(s) are executed. - Vars []string `mapstructure:"environment_vars"` - - // The command used to execute the script. The '{{ .Path }}' variable - // should be used to specify where the script goes, {{ .Vars }} - // can be used to inject the environment_vars into the environment. - ExecuteCommand string `mapstructure:"execute_command"` - - ctx interpolate.Context -} - type PostProcessor struct { - config Config + config sl.Config } type ExecuteCommandTemplate struct { @@ -55,78 +24,12 @@ type ExecuteCommandTemplate struct { } func (p *PostProcessor) Configure(raws ...interface{}) error { - err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, - InterpolateContext: &p.config.ctx, - InterpolateFilter: &interpolate.RenderFilter{ - Exclude: []string{ - "execute_command", - }, - }, - }, raws...) + err := sl.Decode(&p.config, raws) if err != nil { return err } - if p.config.ExecuteCommand == "" { - p.config.ExecuteCommand = `chmod +x "{{.Script}}"; {{.Vars}} "{{.Script}}"` - } - - if p.config.Inline != nil && len(p.config.Inline) == 0 { - p.config.Inline = nil - } - - if p.config.InlineShebang == "" { - p.config.InlineShebang = "/bin/sh -e" - } - - if p.config.Scripts == nil { - p.config.Scripts = make([]string, 0) - } - - if p.config.Vars == nil { - p.config.Vars = make([]string, 0) - } - - var errs *packer.MultiError - if p.config.Script != "" && len(p.config.Scripts) > 0 { - errs = packer.MultiErrorAppend(errs, - errors.New("Only one of script or scripts can be specified.")) - } - - if p.config.Script != "" { - p.config.Scripts = []string{p.config.Script} - } - - if len(p.config.Scripts) == 0 && p.config.Inline == nil { - errs = packer.MultiErrorAppend(errs, - errors.New("Either a script file or inline script must be specified.")) - } else if len(p.config.Scripts) > 0 && p.config.Inline != nil { - errs = packer.MultiErrorAppend(errs, - errors.New("Only a script file or an inline script can be specified, not both.")) - } - - for _, path := range p.config.Scripts { - if _, err := os.Stat(path); err != nil { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Bad script '%s': %s", path, err)) - } - } - - // Do a check for bad environment variables, such as '=foo', 'foobar' - for _, kv := range p.config.Vars { - vs := strings.SplitN(kv, "=", 2) - if len(vs) != 2 || vs[0] == "" { - errs = packer.MultiErrorAppend(errs, - fmt.Errorf("Environment variable not in format 'key=value': %s", kv)) - } - } - - if errs != nil && len(errs.Errors) > 0 { - return errs - } - - return nil + return sl.Validate(&p.config) } func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { @@ -167,12 +70,13 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac for _, script := range scripts { - p.config.ctx.Data = &ExecuteCommandTemplate{ + p.config.Ctx.Data = &ExecuteCommandTemplate{ Vars: flattenedEnvVars, Script: script, } - command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx) + flattenedCmd := strings.Join(p.config.ExecuteCommand, " ") + command, err := interpolate.Render(flattenedCmd, &p.config.Ctx) if err != nil { return nil, false, 
fmt.Errorf("Error processing command: %s", err) } @@ -180,8 +84,8 @@ func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (pac ui.Say(fmt.Sprintf("Post processing with local shell script: %s", script)) comm := &sl.Communicator{ - Ctx: p.config.ctx, - ExecuteCommand: []string{p.config.ExecuteCommand}, + Ctx: p.config.Ctx, + ExecuteCommand: []string{flattenedCmd}, } cmd := &packer.RemoteCmd{Command: command} diff --git a/provisioner/shell-local/provisioner.go b/provisioner/shell-local/provisioner.go index ecd59fa98..615a7eb24 100644 --- a/provisioner/shell-local/provisioner.go +++ b/provisioner/shell-local/provisioner.go @@ -1,63 +1,29 @@ package shell import ( - "errors" "fmt" - "github.com/hashicorp/packer/common" sl "github.com/hashicorp/packer/common/shell-local" - "github.com/hashicorp/packer/helper/config" "github.com/hashicorp/packer/packer" - "github.com/hashicorp/packer/template/interpolate" ) -type Config struct { - common.PackerConfig `mapstructure:",squash"` - - // Command is the command to execute - Command string - - // ExecuteCommand is the command used to execute the command. - ExecuteCommand []string `mapstructure:"execute_command"` - - ctx interpolate.Context -} - type Provisioner struct { - config Config + config sl.Config } func (p *Provisioner) Prepare(raws ...interface{}) error { - err := config.Decode(&p.config, &config.DecodeOpts{ - Interpolate: true, - InterpolateContext: &p.config.ctx, - InterpolateFilter: &interpolate.RenderFilter{ - Exclude: []string{ - "execute_command", - }, - }, - }, raws...) + err := sl.Decode(&p.config, raws) if err != nil { return err } - var errs *packer.MultiError - if p.config.Command == "" { - errs = packer.MultiErrorAppend(errs, - errors.New("command must be specified")) - } - - if errs != nil && len(errs.Errors) > 0 { - return errs - } - - return nil + return sl.Validate(&p.config) } func (p *Provisioner) Provision(ui packer.Ui, _ packer.Communicator) error { // Make another communicator for local comm := &sl.Communicator{ - Ctx: p.config.ctx, + Ctx: p.config.Ctx, ExecuteCommand: p.config.ExecuteCommand, } From c7c66bedcba477d145dae01a4e07e0aa9d6cf806 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 28 Feb 2018 09:45:29 -0800 Subject: [PATCH 014/138] set inline to an empty array, rather than nil --- common/shell-local/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/shell-local/config.go b/common/shell-local/config.go index b54f8d713..a6a0a279d 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -86,7 +86,7 @@ func Validate(config *Config) error { // Clean up input if config.Inline != nil && len(config.Inline) == 0 { - config.Inline = nil + config.Inline = make([]string, 0) } if config.Scripts == nil { From 6dc4b1cbdc1be33ff953d11a1b4643f366f05ae0 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 28 Feb 2018 11:53:53 -0800 Subject: [PATCH 015/138] move all of the run commands for shell-local provisioner and postprocessor into common library too --- builder/amazon/chroot/run_local_commands.go | 3 +- common/shell-local/communicator.go | 21 +-- common/shell-local/run.go | 160 +++++++++++++++++++ post-processor/shell-local/post-processor.go | 116 +------------- provisioner/shell-local/provisioner.go | 29 +--- 5 files changed, 172 insertions(+), 157 deletions(-) create mode 100644 common/shell-local/run.go diff --git a/builder/amazon/chroot/run_local_commands.go b/builder/amazon/chroot/run_local_commands.go index 154d37a4f..4d5b0f75c 100644 --- 
a/builder/amazon/chroot/run_local_commands.go +++ b/builder/amazon/chroot/run_local_commands.go @@ -22,8 +22,7 @@ func RunLocalCommands(commands []string, wrappedCommand CommandWrapper, ctx inte ui.Say(fmt.Sprintf("Executing command: %s", command)) comm := &sl.Communicator{ - Ctx: ctx, - ExecuteCommand: []string{""}, + ExecuteCommand: []string{command}, } cmd := &packer.RemoteCmd{Command: command} if err := cmd.StartWithUi(comm, ui); err != nil { diff --git a/common/shell-local/communicator.go b/common/shell-local/communicator.go index dc84b575a..5532143c9 100644 --- a/common/shell-local/communicator.go +++ b/common/shell-local/communicator.go @@ -9,12 +9,10 @@ import ( "syscall" "github.com/hashicorp/packer/packer" - "github.com/hashicorp/packer/template/interpolate" ) type Communicator struct { ExecuteCommand []string - Ctx interpolate.Context } func (c *Communicator) Start(cmd *packer.RemoteCmd) error { @@ -24,28 +22,17 @@ func (c *Communicator) Start(cmd *packer.RemoteCmd) error { c.ExecuteCommand = []string{ "cmd", "/C", + "{{.Vars}}", "{{.Command}}", } } else { c.ExecuteCommand = []string{ "/bin/sh", "-c", + "{{.Vars}}", "{{.Command}}", } } - } else { - // Render the template so that we know how to execute the command - c.Ctx.Data = &ExecuteCommandTemplate{ - Command: cmd.Command, - } - for i, field := range c.ExecuteCommand { - command, err := interpolate.Render(field, &c.Ctx) - if err != nil { - return fmt.Errorf("Error processing command: %s", err) - } - - c.ExecuteCommand[i] = command - } } // Build the local command to execute @@ -97,7 +84,3 @@ func (c *Communicator) Download(string, io.Writer) error { func (c *Communicator) DownloadDir(string, string, []string) error { return fmt.Errorf("downloadDir not supported") } - -type ExecuteCommandTemplate struct { - Command string -} diff --git a/common/shell-local/run.go b/common/shell-local/run.go new file mode 100644 index 000000000..a42cb3216 --- /dev/null +++ b/common/shell-local/run.go @@ -0,0 +1,160 @@ +package shell_local + +import ( + "bufio" + "fmt" + "io/ioutil" + "log" + "os" + "runtime" + "sort" + "strings" + + "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/template/interpolate" +) + +type ExecuteCommandTemplate struct { + Vars string + Script string +} + +func Run(ui packer.Ui, config *Config) (bool, error) { + scripts := make([]string, len(config.Scripts)) + copy(scripts, config.Scripts) + + // If we have an inline script, then turn that into a temporary + // shell script and use that. 
+ if config.Inline != nil { + tf, err := ioutil.TempFile("", "packer-shell") + if err != nil { + return false, fmt.Errorf("Error preparing shell script: %s", err) + } + defer os.Remove(tf.Name()) + + // Set the path to the temporary file + scripts = append(scripts, tf.Name()) + + // Write our contents to it + writer := bufio.NewWriter(tf) + writer.WriteString(fmt.Sprintf("#!%s\n", config.InlineShebang)) + for _, command := range config.Inline { + if _, err := writer.WriteString(command + "\n"); err != nil { + return false, fmt.Errorf("Error preparing shell script: %s", err) + } + } + + if err := writer.Flush(); err != nil { + return false, fmt.Errorf("Error preparing shell script: %s", err) + } + + tf.Close() + } + + // Create environment variables to set before executing the command + flattenedEnvVars := createFlattenedEnvVars(config) + + for _, script := range scripts { + interpolatedCmds, err := createInterpolatedCommands(config, script, flattenedEnvVars) + if err != nil { + return false, err + } + ui.Say(fmt.Sprintf("Post processing with local shell script: %s", script)) + + comm := &Communicator{ + ExecuteCommand: interpolatedCmds, + } + + // The remoteCmd generated here isn't actually run, but it allows us to + // use the same interface for the shell-local communicator as we use for + // the other communicators; ultimately, this command is just used for + // buffers and for reading the final exit status. + flattenedCmd := strings.Join(interpolatedCmds, " ") + cmd := &packer.RemoteCmd{Command: flattenedCmd} + log.Printf("starting local command: %s", flattenedCmd) + + if err := cmd.StartWithUi(comm, ui); err != nil { + return false, fmt.Errorf( + "Error executing script: %s\n\n"+ + "Please see output above for more information.", + script) + } + if cmd.ExitStatus != 0 { + return false, fmt.Errorf( + "Erroneous exit code %d while executing script: %s\n\n"+ + "Please see output above for more information.", + cmd.ExitStatus, + script) + } + } + + return true, nil +} + +// Generates the final command to send to the communicator, using either the +// user-provided ExecuteCommand or defaulting to something that makes sense for +// the host OS +func createInterpolatedCommands(config *Config, script string, flattenedEnvVars string) ([]string, error) { + config.Ctx.Data = &ExecuteCommandTemplate{ + Vars: flattenedEnvVars, + Script: script, + } + + if len(config.ExecuteCommand) == 0 { + // Get default Execute Command + if runtime.GOOS == "windows" { + config.ExecuteCommand = []string{ + "cmd", + "/C", + "{{.Vars}}", + "{{.Script}}", + } + } else { + config.ExecuteCommand = []string{ + "/bin/sh", + "-c", + "{{.Vars}}", + "{{.Script}}", + } + } + } + interpolatedCmds := make([]string, len(config.ExecuteCommand)) + for i, cmd := range config.ExecuteCommand { + interpolatedCmd, err := interpolate.Render(cmd, &config.Ctx) + if err != nil { + return nil, fmt.Errorf("Error processing command: %s", err) + } + interpolatedCmds[i] = interpolatedCmd + } + return interpolatedCmds, nil +}
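As a concrete illustration, here is a minimal, self-contained sketch of what createInterpolatedCommands produces: each element of an execute_command slice is rendered against the {{.Vars}}/{{.Script}} data. It uses the standard library's text/template in place of Packer's interpolate package (which wraps it), and the command and sample values are illustrative only:

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// Mirrors the ExecuteCommandTemplate above: {{.Vars}} and {{.Script}}
// are the only fields an execute_command template can reference.
type ExecuteCommandTemplate struct {
	Vars   string
	Script string
}

func main() {
	executeCommand := []string{"/bin/sh", "-c", "{{.Vars}}", "{{.Script}}"}
	data := &ExecuteCommandTemplate{
		Vars:   "FOO='bar' PACKER_BUILD_NAME='vmware'", // illustrative values
		Script: "/tmp/packer-shell123456",
	}

	// Render element by element, the way createInterpolatedCommands does.
	rendered := make([]string, len(executeCommand))
	for i, field := range executeCommand {
		tmpl, err := template.New("field").Parse(field)
		if err != nil {
			panic(err)
		}
		var buf bytes.Buffer
		if err := tmpl.Execute(&buf, data); err != nil {
			panic(err)
		}
		rendered[i] = buf.String()
	}
	fmt.Printf("%q\n", rendered)
	// ["/bin/sh" "-c" "FOO='bar' PACKER_BUILD_NAME='vmware'" "/tmp/packer-shell123456"]
}
```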
+ +func createFlattenedEnvVars(config *Config) (flattened string) { + flattened = "" + envVars := make(map[string]string) + + // Always available Packer provided env vars + envVars["PACKER_BUILD_NAME"] = fmt.Sprintf("%s", config.PackerBuildName) + envVars["PACKER_BUILDER_TYPE"] = fmt.Sprintf("%s", config.PackerBuilderType) + + // Split vars into key/value components + for _, envVar := range config.Vars { + keyValue := strings.SplitN(envVar, "=", 2) + // Store pair, replacing any single quotes in value so they parse + // correctly with required environment variable format + envVars[keyValue[0]] = strings.Replace(keyValue[1], "'", `'"'"'`, -1) + } + + // Create a list of env var keys in sorted order + var keys []string + for k := range envVars { + keys = append(keys, k) + } + sort.Strings(keys) + + // Re-assemble vars surrounding value with single quotes and flatten + for _, key := range keys { + flattened += fmt.Sprintf("%s='%s' ", key, envVars[key]) + } + return +} diff --git a/post-processor/shell-local/post-processor.go b/post-processor/shell-local/post-processor.go index 818a8b44e..b1585a228 100644 --- a/post-processor/shell-local/post-processor.go +++ b/post-processor/shell-local/post-processor.go @@ -1,17 +1,8 @@ package shell_local import ( - "bufio" - "fmt" - "io/ioutil" - "log" - "os" - "sort" - "strings" - sl "github.com/hashicorp/packer/common/shell-local" "github.com/hashicorp/packer/packer" - "github.com/hashicorp/packer/template/interpolate" ) type PostProcessor struct { @@ -33,108 +24,13 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { + // this particular post-processor doesn't do anything with the artifact + // except to return it. - scripts := make([]string, len(p.config.Scripts)) - copy(scripts, p.config.Scripts) - - // If we have an inline script, then turn that into a temporary - // shell script and use that. - if p.config.Inline != nil { - tf, err := ioutil.TempFile("", "packer-shell") - if err != nil { - return nil, false, fmt.Errorf("Error preparing shell script: %s", err) - } - defer os.Remove(tf.Name()) - - // Set the path to the temporary file - scripts = append(scripts, tf.Name()) - - // Write our contents to it - writer := bufio.NewWriter(tf) - writer.WriteString(fmt.Sprintf("#!%s\n", p.config.InlineShebang)) - for _, command := range p.config.Inline { - if _, err := writer.WriteString(command + "\n"); err != nil { - return nil, false, fmt.Errorf("Error preparing shell script: %s", err) - } - } - - if err := writer.Flush(); err != nil { - return nil, false, fmt.Errorf("Error preparing shell script: %s", err) - } - - tf.Close() + retBool, retErr := sl.Run(ui, &p.config) + if !retBool { + return nil, retBool, retErr } - // Create environment variables to set before executing the command - flattenedEnvVars := p.createFlattenedEnvVars() - - for _, script := range scripts { - - p.config.Ctx.Data = &ExecuteCommandTemplate{ - Vars: flattenedEnvVars, - Script: script, - } - - flattenedCmd := strings.Join(p.config.ExecuteCommand, " ") - command, err := interpolate.Render(flattenedCmd, &p.config.Ctx) - if err != nil { - return nil, false, fmt.Errorf("Error processing command: %s", err) - } - - ui.Say(fmt.Sprintf("Post processing with local shell script: %s", script)) - - comm := &sl.Communicator{ - Ctx: p.config.Ctx, - ExecuteCommand: []string{flattenedCmd}, - } - - cmd := &packer.RemoteCmd{Command: command} - - log.Printf("starting local command: %s", command) - if err := cmd.StartWithUi(comm, ui); err != nil { - return nil, false, fmt.Errorf( - "Error executing script: %s\n\n"+ - "Please see output above for more information.", - script) - } - if cmd.ExitStatus != 0 { - return nil, false, fmt.Errorf( - "Erroneous exit code %d while executing script: %s\n\n"+ - "Please see output above for more information.", - cmd.ExitStatus, - script) - } - } - - return artifact, true, nil -} - -func (p *PostProcessor) createFlattenedEnvVars() (flattened string) { - flattened 
= "" - envVars := make(map[string]string) - - // Always available Packer provided env vars - envVars["PACKER_BUILD_NAME"] = fmt.Sprintf("%s", p.config.PackerBuildName) - envVars["PACKER_BUILDER_TYPE"] = fmt.Sprintf("%s", p.config.PackerBuilderType) - - // Split vars into key/value components - for _, envVar := range p.config.Vars { - keyValue := strings.SplitN(envVar, "=", 2) - // Store pair, replacing any single quotes in value so they parse - // correctly with required environment variable format - envVars[keyValue[0]] = strings.Replace(keyValue[1], "'", `'"'"'`, -1) - } - - // Create a list of env var keys in sorted order - var keys []string - for k := range envVars { - keys = append(keys, k) - } - sort.Strings(keys) - - // Re-assemble vars surrounding value with single quotes and flatten - for _, key := range keys { - flattened += fmt.Sprintf("%s='%s' ", key, envVars[key]) - } - return + return artifact, retBool, retErr } diff --git a/provisioner/shell-local/provisioner.go b/provisioner/shell-local/provisioner.go index 615a7eb24..a56553245 100644 --- a/provisioner/shell-local/provisioner.go +++ b/provisioner/shell-local/provisioner.go @@ -1,8 +1,6 @@ package shell import ( - "fmt" - sl "github.com/hashicorp/packer/common/shell-local" "github.com/hashicorp/packer/packer" ) @@ -21,30 +19,9 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { } func (p *Provisioner) Provision(ui packer.Ui, _ packer.Communicator) error { - // Make another communicator for local - comm := &sl.Communicator{ - Ctx: p.config.Ctx, - ExecuteCommand: p.config.ExecuteCommand, - } - - // Build the remote command - cmd := &packer.RemoteCmd{Command: p.config.Command} - - ui.Say(fmt.Sprintf( - "Executing local command: %s", - p.config.Command)) - if err := cmd.StartWithUi(comm, ui); err != nil { - return fmt.Errorf( - "Error executing command: %s\n\n"+ - "Please see output above for more information.", - p.config.Command) - } - if cmd.ExitStatus != 0 { - return fmt.Errorf( - "Erroneous exit code %d while executing command: %s\n\n"+ - "Please see output above for more information.", - cmd.ExitStatus, - p.config.Command) + _, retErr := sl.Run(ui, &p.config) + if retErr != nil { + return retErr } return nil From 67739270bb32cb850caee1e086948d80fa71448d Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 28 Feb 2018 12:17:40 -0800 Subject: [PATCH 016/138] pull temp file writing into its own function for easier testing --- common/shell-local/run.go | 48 ++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/common/shell-local/run.go b/common/shell-local/run.go index a42cb3216..22366c27f 100644 --- a/common/shell-local/run.go +++ b/common/shell-local/run.go @@ -26,29 +26,12 @@ func Run(ui packer.Ui, config *Config) (bool, error) { // If we have an inline script, then turn that into a temporary // shell script and use that. 
if config.Inline != nil { - tf, err := ioutil.TempFile("", "packer-shell") + tempScriptFileName, err := createInlineScriptFile(config) if err != nil { - return false, fmt.Errorf("Error preparing shell script: %s", err) + return false, err } - defer os.Remove(tf.Name()) - - // Set the path to the temporary file - scripts = append(scripts, tf.Name()) - - // Write our contents to it - writer := bufio.NewWriter(tf) - writer.WriteString(fmt.Sprintf("#!%s\n", config.InlineShebang)) - for _, command := range config.Inline { - if _, err := writer.WriteString(command + "\n"); err != nil { - return false, fmt.Errorf("Error preparing shell script: %s", err) - } - } - - if err := writer.Flush(); err != nil { - return false, fmt.Errorf("Error preparing shell script: %s", err) - } - - tf.Close() + defer os.Remove(tempScriptFileName) + scripts = append(scripts, tempScriptFileName) } // Create environment variables to set before executing the command @@ -91,6 +74,29 @@ func Run(ui packer.Ui, config *Config) (bool, error) { return true, nil } +func createInlineScriptFile(config *Config) (string, error) { + tf, err := ioutil.TempFile("", "packer-shell") + if err != nil { + return "", fmt.Errorf("Error preparing shell script: %s", err) + } + + // Write our contents to it + writer := bufio.NewWriter(tf) + writer.WriteString(fmt.Sprintf("#!%s\n", config.InlineShebang)) + for _, command := range config.Inline { + if _, err := writer.WriteString(command + "\n"); err != nil { + return "", fmt.Errorf("Error preparing shell script: %s", err) + } + } + + if err := writer.Flush(); err != nil { + return "", fmt.Errorf("Error preparing shell script: %s", err) + } + + tf.Close() + return tf.Name(), nil +} + // Generates the final command to send to the communicator, using either the // user-provided ExecuteCommand or defaulting to something that makes sense for // the host OS From d30423472543a2a81f3b518963c37b495fcf719b Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 28 Feb 2018 14:35:42 -0800 Subject: [PATCH 017/138] fix tests --- common/shell-local/communicator.go | 18 +-- common/shell-local/communicator_test.go | 5 +- common/shell-local/config.go | 6 +- post-processor/shell-local/post-processor.go | 2 +- .../shell-local/post-processor_test.go | 130 +++++++----------- 5 files changed, 55 insertions(+), 106 deletions(-) diff --git a/common/shell-local/communicator.go b/common/shell-local/communicator.go index 5532143c9..7664bc896 100644 --- a/common/shell-local/communicator.go +++ b/common/shell-local/communicator.go @@ -5,7 +5,6 @@ import ( "io" "os" "os/exec" - "runtime" "syscall" "github.com/hashicorp/packer/packer" @@ -17,22 +16,7 @@ type Communicator struct { func (c *Communicator) Start(cmd *packer.RemoteCmd) error { if len(c.ExecuteCommand) == 0 { - // Get default Execute Command - if runtime.GOOS == "windows" { - c.ExecuteCommand = []string{ - "cmd", - "/C", - "{{.Vars}}", - "{{.Command}}", - } - } else { - c.ExecuteCommand = []string{ - "/bin/sh", - "-c", - "{{.Vars}}", - "{{.Command}}", - } - } + return fmt.Errorf("Error launching command via shell-local communicator: No ExecuteCommand provided") } // Build the local command to execute diff --git a/common/shell-local/communicator_test.go b/common/shell-local/communicator_test.go index 903ab154d..9a8cb9057 100644 --- a/common/shell-local/communicator_test.go +++ b/common/shell-local/communicator_test.go @@ -20,13 +20,12 @@ func TestCommunicator(t *testing.T) { } c := &Communicator{ - ExecuteCommand: []string{"/bin/sh", "-c", "{{.Command}}"}, + 
ExecuteCommand: []string{"/bin/sh", "-c", "echo foo"}, } var buf bytes.Buffer cmd := &packer.RemoteCmd{ - Command: "echo foo", - Stdout: &buf, + Stdout: &buf, } if err := c.Start(cmd); err != nil { diff --git a/common/shell-local/config.go b/common/shell-local/config.go index a6a0a279d..dfd3623b9 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -58,10 +58,10 @@ func Decode(config *Config, raws ...interface{}) error { }, }, raws...) if err != nil { - return err + return fmt.Errorf("Error decoding config: %s, config is %#v, and raws is %#v", err, config, raws) } - return Validate(config) + return nil } func Validate(config *Config) error { @@ -98,7 +98,7 @@ func Validate(config *Config) error { } // Verify that the user has given us a command to run - if config.Command != "" && len(config.Inline) == 0 && + if config.Command == "" && len(config.Inline) == 0 && len(config.Scripts) == 0 && config.Script == "" { errs = packer.MultiErrorAppend(errs, errors.New("Command, Inline, Script and Scripts options cannot all be empty.")) diff --git a/post-processor/shell-local/post-processor.go b/post-processor/shell-local/post-processor.go index b1585a228..91bc5acc9 100644 --- a/post-processor/shell-local/post-processor.go +++ b/post-processor/shell-local/post-processor.go @@ -15,7 +15,7 @@ type ExecuteCommandTemplate struct { } func (p *PostProcessor) Configure(raws ...interface{}) error { - err := sl.Decode(&p.config, raws) + err := sl.Decode(&p.config, raws...) if err != nil { return err } diff --git a/post-processor/shell-local/post-processor_test.go b/post-processor/shell-local/post-processor_test.go index 7bdef1c32..caf4f5a42 100644 --- a/post-processor/shell-local/post-processor_test.go +++ b/post-processor/shell-local/post-processor_test.go @@ -28,20 +28,20 @@ func TestPostProcessor_Impl(t *testing.T) { func TestPostProcessorPrepare_Defaults(t *testing.T) { var p PostProcessor - config := testConfig() + raws := testConfig() - err := p.Configure(config) + err := p.Configure(raws) if err != nil { t.Fatalf("err: %s", err) } } func TestPostProcessorPrepare_InlineShebang(t *testing.T) { - config := testConfig() + raws := testConfig() - delete(config, "inline_shebang") + delete(raws, "inline_shebang") p := new(PostProcessor) - err := p.Configure(config) + err := p.Configure(raws) if err != nil { t.Fatalf("should not have error: %s", err) } @@ -51,9 +51,9 @@ func TestPostProcessorPrepare_InlineShebang(t *testing.T) { } // Test with a good one - config["inline_shebang"] = "foo" + raws["inline_shebang"] = "foo" p = new(PostProcessor) - err = p.Configure(config) + err = p.Configure(raws) if err != nil { t.Fatalf("should not have error: %s", err) } @@ -65,23 +65,23 @@ func TestPostProcessorPrepare_InlineShebang(t *testing.T) { func TestPostProcessorPrepare_InvalidKey(t *testing.T) { var p PostProcessor - config := testConfig() + raws := testConfig() // Add a random key - config["i_should_not_be_valid"] = true - err := p.Configure(config) + raws["i_should_not_be_valid"] = true + err := p.Configure(raws) if err == nil { t.Fatal("should have error") } } func TestPostProcessorPrepare_Script(t *testing.T) { - config := testConfig() - delete(config, "inline") + raws := testConfig() + delete(raws, "inline") - config["script"] = "/this/should/not/exist" + raws["script"] = "/this/should/not/exist" p := new(PostProcessor) - err := p.Configure(config) + err := p.Configure(raws) if err == nil { t.Fatal("should have error") } @@ -93,9 +93,9 @@ func TestPostProcessorPrepare_Script(t 
*testing.T) { } defer os.Remove(tf.Name()) - config["script"] = tf.Name() + raws["script"] = tf.Name() p = new(PostProcessor) - err = p.Configure(config) + err = p.Configure(raws) if err != nil { t.Fatalf("should not have error: %s", err) } @@ -103,13 +103,16 @@ func TestPostProcessorPrepare_Script(t *testing.T) { func TestPostProcessorPrepare_ScriptAndInline(t *testing.T) { var p PostProcessor - config := testConfig() + raws := testConfig() - delete(config, "inline") - delete(config, "script") - err := p.Configure(config) + // Error if no scripts/inline commands provided + delete(raws, "inline") + delete(raws, "script") + delete(raws, "command") + delete(raws, "scripts") + err := p.Configure(raws) if err == nil { - t.Fatal("should have error") + t.Fatalf("should error when no scripts/inline commands are provided: %#v", raws) } // Test with both @@ -119,9 +122,9 @@ func TestPostProcessorPrepare_ScriptAndInline(t *testing.T) { } defer os.Remove(tf.Name()) - config["inline"] = []interface{}{"foo"} - config["script"] = tf.Name() - err = p.Configure(config) + raws["inline"] = []interface{}{"foo"} + raws["script"] = tf.Name() + err = p.Configure(raws) if err == nil { t.Fatal("should have error") } @@ -129,7 +132,7 @@ func TestPostProcessorPrepare_ScriptAndInline(t *testing.T) { func TestPostProcessorPrepare_ScriptAndScripts(t *testing.T) { var p PostProcessor - config := testConfig() + raws := testConfig() // Test with both tf, err := ioutil.TempFile("", "packer") @@ -138,21 +141,21 @@ func TestPostProcessorPrepare_ScriptAndScripts(t *testing.T) { } defer os.Remove(tf.Name()) - config["inline"] = []interface{}{"foo"} - config["scripts"] = []string{tf.Name()} - err = p.Configure(config) + raws["inline"] = []interface{}{"foo"} + raws["scripts"] = []string{tf.Name()} + err = p.Configure(raws) if err == nil { t.Fatal("should have error") } } func TestPostProcessorPrepare_Scripts(t *testing.T) { - config := testConfig() - delete(config, "inline") + raws := testConfig() + delete(raws, "inline") - config["scripts"] = []string{} + raws["scripts"] = []string{} p := new(PostProcessor) - err := p.Configure(config) + err := p.Configure(raws) if err == nil { t.Fatal("should have error") } @@ -164,92 +167,55 @@ func TestPostProcessorPrepare_Scripts(t *testing.T) { } defer os.Remove(tf.Name()) - config["scripts"] = []string{tf.Name()} + raws["scripts"] = []string{tf.Name()} p = new(PostProcessor) - err = p.Configure(config) + err = p.Configure(raws) if err != nil { t.Fatalf("should not have error: %s", err) } } func TestPostProcessorPrepare_EnvironmentVars(t *testing.T) { - config := testConfig() + raws := testConfig() // Test with a bad case - config["environment_vars"] = []string{"badvar", "good=var"} + raws["environment_vars"] = []string{"badvar", "good=var"} p := new(PostProcessor) - err := p.Configure(config) + err := p.Configure(raws) if err == nil { t.Fatal("should have error") } // Test with a trickier case - config["environment_vars"] = []string{"=bad"} + raws["environment_vars"] = []string{"=bad"} p = new(PostProcessor) - err = p.Configure(config) + err = p.Configure(raws) if err == nil { t.Fatal("should have error") } // Test with a good case // Note: baz= is a real env variable, just empty - config["environment_vars"] = []string{"FOO=bar", "baz="} + raws["environment_vars"] = []string{"FOO=bar", "baz="} p = new(PostProcessor) - err = p.Configure(config) + err = p.Configure(raws) if err != nil { t.Fatalf("should not have error: %s", err) } // Test when the env variable value contains an equals sign 
- config["environment_vars"] = []string{"good=withequals=true"} + raws["environment_vars"] = []string{"good=withequals=true"} p = new(PostProcessor) - err = p.Configure(config) + err = p.Configure(raws) if err != nil { t.Fatalf("should not have error: %s", err) } // Test when the env variable value starts with an equals sign - config["environment_vars"] = []string{"good==true"} + raws["environment_vars"] = []string{"good==true"} p = new(PostProcessor) - err = p.Configure(config) + err = p.Configure(raws) if err != nil { t.Fatalf("should not have error: %s", err) } } - -func TestPostProcessor_createFlattenedEnvVars(t *testing.T) { - var flattenedEnvVars string - config := testConfig() - - userEnvVarTests := [][]string{ - {}, // No user env var - {"FOO=bar"}, // Single user env var - {"FOO=bar's"}, // User env var with single quote in value - {"FOO=bar", "BAZ=qux"}, // Multiple user env vars - {"FOO=bar=baz"}, // User env var with value containing equals - {"FOO==bar"}, // User env var with value starting with equals - } - expected := []string{ - `PACKER_BUILDER_TYPE='iso' PACKER_BUILD_NAME='vmware' `, - `FOO='bar' PACKER_BUILDER_TYPE='iso' PACKER_BUILD_NAME='vmware' `, - `FOO='bar'"'"'s' PACKER_BUILDER_TYPE='iso' PACKER_BUILD_NAME='vmware' `, - `BAZ='qux' FOO='bar' PACKER_BUILDER_TYPE='iso' PACKER_BUILD_NAME='vmware' `, - `FOO='bar=baz' PACKER_BUILDER_TYPE='iso' PACKER_BUILD_NAME='vmware' `, - `FOO='=bar' PACKER_BUILDER_TYPE='iso' PACKER_BUILD_NAME='vmware' `, - } - - p := new(PostProcessor) - p.Configure(config) - - // Defaults provided by Packer - p.config.PackerBuildName = "vmware" - p.config.PackerBuilderType = "iso" - - for i, expectedValue := range expected { - p.config.Vars = userEnvVarTests[i] - flattenedEnvVars = p.createFlattenedEnvVars() - if flattenedEnvVars != expectedValue { - t.Fatalf("expected flattened env vars to be: %s, got %s.", expectedValue, flattenedEnvVars) - } - } -} From 479d36734ded8c59742a5885b11c15a97b030de8 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 28 Feb 2018 14:43:58 -0800 Subject: [PATCH 018/138] consolidate shell-local defaulting of InlineShebang and ExecuteCommand to the config validation --- common/shell-local/config.go | 18 ++++++++++++------ common/shell-local/run.go | 19 ------------------- 2 files changed, 12 insertions(+), 25 deletions(-) diff --git a/common/shell-local/config.go b/common/shell-local/config.go index dfd3623b9..5b086a794 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -68,18 +68,24 @@ func Validate(config *Config) error { var errs *packer.MultiError if runtime.GOOS == "windows" { - if config.InlineShebang == "" { - config.InlineShebang = "" - } if len(config.ExecuteCommand) == 0 { - config.ExecuteCommand = []string{`{{.Vars}} "{{.Script}}"`} - } + config.ExecuteCommand = []string{ + "cmd", + "/C", + "{{.Vars}}", + "{{.Script}}", + } } else { if config.InlineShebang == "" { - // TODO: verify that provisioner defaulted to this as well config.InlineShebang = "/bin/sh -e" } if len(config.ExecuteCommand) == 0 { + config.ExecuteCommand = []string{ + "/bin/sh", + "-c", + "{{.Vars}}", + "{{.Script}}", + } config.ExecuteCommand = []string{`chmod +x "{{.Script}}"; {{.Vars}} "{{.Script}}"`} } } diff --git a/common/shell-local/run.go b/common/shell-local/run.go index 22366c27f..04d653389 100644 --- a/common/shell-local/run.go +++ b/common/shell-local/run.go @@ -6,7 +6,6 @@ import ( "io/ioutil" "log" "os" - "runtime" "sort" "strings" @@ -106,24 +105,6 @@ func createInterpolatedCommands(config *Config, 
From 479d36734ded8c59742a5885b11c15a97b030de8 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 28 Feb 2018 14:43:58 -0800 Subject: [PATCH 018/138] consolidate shell-local defaulting of InlineShebang and ExecuteCommand to the config validation --- common/shell-local/config.go | 18 ++++++++++++------ common/shell-local/run.go | 19 ------------------- 2 files changed, 12 insertions(+), 25 deletions(-) diff --git a/common/shell-local/config.go b/common/shell-local/config.go index dfd3623b9..5b086a794 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -68,18 +68,24 @@ func Validate(config *Config) error { var errs *packer.MultiError if runtime.GOOS == "windows" { - if config.InlineShebang == "" { - config.InlineShebang = "" - } if len(config.ExecuteCommand) == 0 { - config.ExecuteCommand = []string{`{{.Vars}} "{{.Script}}"`} - } + config.ExecuteCommand = []string{ + "cmd", + "/C", + "{{.Vars}}", + "{{.Script}}", + } } else { if config.InlineShebang == "" { - // TODO: verify that provisioner defaulted to this as well config.InlineShebang = "/bin/sh -e" } if len(config.ExecuteCommand) == 0 { + config.ExecuteCommand = []string{ + "/bin/sh", + "-c", + "{{.Vars}}", + "{{.Script}}", + } config.ExecuteCommand = []string{`chmod +x "{{.Script}}"; {{.Vars}} "{{.Script}}"`} } } diff --git a/common/shell-local/run.go b/common/shell-local/run.go index 22366c27f..04d653389 100644 --- a/common/shell-local/run.go +++ b/common/shell-local/run.go @@ -6,7 +6,6 @@ import ( "io/ioutil" "log" "os" - "runtime" "sort" "strings" @@ -106,24 +105,6 @@ func createInterpolatedCommands(config *Config, script string, flattenedEnvVars Script: script, } - if len(config.ExecuteCommand) == 0 { - // Get default Execute Command - if runtime.GOOS == "windows" { - config.ExecuteCommand = []string{ - "cmd", - "/C", - "{{.Vars}}", - "{{.Script}}", - } - } else { - config.ExecuteCommand = []string{ - "/bin/sh", - "-c", - "{{.Vars}}", - "{{.Script}}", - } - } - } interpolatedCmds := make([]string, len(config.ExecuteCommand)) for i, cmd := range config.ExecuteCommand { interpolatedCmd, err := interpolate.Render(cmd, &config.Ctx)
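The commit that follows replaces the grandfathering logic with a strict mutual-exclusion rule over the four ways of supplying commands. Condensed into a standalone sketch (the name and signature are ours; the real Validate also folds command into inline and script into scripts):

```go
package main

import (
	"errors"
	"fmt"
)

// checkSources enforces the rule the next commit adds to Validate:
// exactly one of command, inline, script, or scripts may be set.
func checkSources(command, script string, inline, scripts []string) error {
	count := 0
	if command != "" {
		count++
	}
	if len(inline) > 0 {
		count++
	}
	if script != "" {
		count++
	}
	if len(scripts) > 0 {
		count++
	}
	switch {
	case count == 0:
		return errors.New("Command, Inline, Script and Scripts options cannot all be empty.")
	case count > 1:
		return errors.New("You may only specify one of the following options: " +
			"Command, Inline, Script or Scripts. Please consolidate these " +
			"options in your config.")
	}
	return nil
}

func main() {
	fmt.Println(checkSources("echo hi", "", nil, nil))       // <nil>
	fmt.Println(checkSources("echo hi", "foo.sh", nil, nil)) // too many options
	fmt.Println(checkSources("", "", nil, nil))              // all empty
}
```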
Please" + + " consolidate these options in your config.") - if config.Script != "" && len(config.Scripts) > 0 { - errs = packer.MultiErrorAppend(errs, - errors.New("Only one of script or scripts can be specified.")) + if config.Command != "" { + if len(config.Inline) != 0 || len(config.Scripts) != 0 || config.Script != "" { + errs = packer.MultiErrorAppend(errs, tooManyOptionsErr) + } else { + config.Inline = []string{config.Command} + } } if config.Script != "" { - config.Scripts = []string{config.Script} + if len(config.Scripts) > 0 || len(config.Inline) > 0 { + errs = packer.MultiErrorAppend(errs, tooManyOptionsErr) + } else { + config.Scripts = []string{config.Script} + } } if len(config.Scripts) > 0 && config.Inline != nil { - errs = packer.MultiErrorAppend(errs, - errors.New("You may specify either a script file(s) or an inline script(s), but not both.")) + errs = packer.MultiErrorAppend(errs, tooManyOptionsErr) } + // Check that all scripts we need to run exist locally for _, path := range config.Scripts { if _, err := os.Stat(path); err != nil { errs = packer.MultiErrorAppend(errs, From 854d6fb141ae89ccf88c8f5c75ecf0c4de588454 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 1 Mar 2018 08:48:21 -0800 Subject: [PATCH 020/138] add tests making sure post-processor has backwards compatability --- common/shell-local/config.go | 1 - post-processor/shell-local/post-processor.go | 14 +++++ .../shell-local/post-processor_test.go | 53 +++++++++++++++++-- 3 files changed, 64 insertions(+), 4 deletions(-) diff --git a/common/shell-local/config.go b/common/shell-local/config.go index 76c82c793..2d31b7f01 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -87,7 +87,6 @@ func Validate(config *Config) error { "{{.Vars}}", "{{.Script}}", } - config.ExecuteCommand = []string{`chmod +x "{{.Script}}"; {{.Vars}} "{{.Script}}"`} } } diff --git a/post-processor/shell-local/post-processor.go b/post-processor/shell-local/post-processor.go index 91bc5acc9..557fe7eba 100644 --- a/post-processor/shell-local/post-processor.go +++ b/post-processor/shell-local/post-processor.go @@ -1,6 +1,8 @@ package shell_local import ( + "runtime" + sl "github.com/hashicorp/packer/common/shell-local" "github.com/hashicorp/packer/packer" ) @@ -19,6 +21,18 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { if err != nil { return err } + if len(p.config.ExecuteCommand) == 0 && runtime.GOOS != "windows" { + // Backwards compatibility from before post-processor merge with + // provisioner. Don't need to default separately for windows becuase the + // post-processor never worked for windows before the merge with the + // provisioner code, so the provisioner defaults are fine. + p.config.ExecuteCommand = []string{"sh", "-c", `chmod +x "{{.Script}}"; {{.Vars}} "{{.Script}}"`} + } else if len(p.config.ExecuteCommand) == 1 { + // Backwards compatibility -- before merge, post-processor didn't have + // configurable call to shell program, meaning users may not have + // defined this in their call + p.config.ExecuteCommand = append([]string{"sh", "-c"}, p.config.ExecuteCommand...) 
+ } return sl.Validate(&p.config) } diff --git a/post-processor/shell-local/post-processor_test.go b/post-processor/shell-local/post-processor_test.go index caf4f5a42..afec79f81 100644 --- a/post-processor/shell-local/post-processor_test.go +++ b/post-processor/shell-local/post-processor_test.go @@ -3,6 +3,8 @@ package shell_local import ( "io/ioutil" "os" + "runtime" + "strings" "testing" "github.com/hashicorp/packer/packer" @@ -45,8 +47,11 @@ func TestPostProcessorPrepare_InlineShebang(t *testing.T) { if err != nil { t.Fatalf("should not have error: %s", err) } - - if p.config.InlineShebang != "/bin/sh -e" { + expected := "" + if runtime.GOOS != "windows" { + expected = "/bin/sh -e" + } + if p.config.InlineShebang != expected { t.Fatalf("bad value: %s", p.config.InlineShebang) } @@ -101,6 +106,48 @@ func TestPostProcessorPrepare_Script(t *testing.T) { } } +func TestPostProcessorPrepare_ExecuteCommand(t *testing.T) { + // Check that passing a string will work (Backwards Compatibility) + p := new(PostProcessor) + raws := testConfig() + raws["execute_command"] = "foo bar" + err := p.Configure(raws) + expected := []string{"sh", "-c", "foo bar"} + if err != nil { + t.Fatalf("should handle backwards compatibility: %s", err) + } + if strings.Compare(strings.Join(p.config.ExecuteCommand, " "), strings.Join(expected, " ")) != 0 { + t.Fatalf("Did not get expected execute_command: expected: %#v; received %#v", expected, p.config.ExecuteCommand) + } + + // Check that passing a list will work + p = new(PostProcessor) + raws = testConfig() + raws["execute_command"] = []string{"foo", "bar"} + err = p.Configure(raws) + if err != nil { + t.Fatalf("should handle backwards compatibility: %s", err) + } + expected = []string{"foo", "bar"} + if strings.Compare(strings.Join(p.config.ExecuteCommand, " "), strings.Join(expected, " ")) != 0 { + t.Fatalf("Did not get expected execute_command: expected: %#v; received %#v", expected, p.config.ExecuteCommand) + } + + // Check that default is as expected + raws = testConfig() + delete(raws, "execute_command") + p = new(PostProcessor) + p.Configure(raws) + if runtime.GOOS != "windows" { + expected = []string{"sh", "-c", `chmod +x "{{.Script}}"; {{.Vars}} "{{.Script}}"`} + } else { + expected = []string{"cmd", "/C", "{{.Vars}}", "{{.Script}}"} + } + if strings.Compare(strings.Join(p.config.ExecuteCommand, " "), strings.Join(expected, " ")) != 0 { + t.Fatalf("Did not get expected default: expected: %#v; received %#v", expected, p.config.ExecuteCommand) + } +} + func TestPostProcessorPrepare_ScriptAndInline(t *testing.T) { var p PostProcessor raws := testConfig() @@ -112,7 +159,7 @@ func TestPostProcessorPrepare_ScriptAndInline(t *testing.T) { delete(raws, "scripts") err := p.Configure(raws) if err == nil { - t.Fatalf("should error when no scripts/inline commands are provided: %#v", raws) + t.Fatalf("should error when no scripts/inline commands are provided") } // Test with both From 5da4377f210d92e922210275aec809a7e2ebb2f9 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 1 Mar 2018 10:56:30 -0800 Subject: [PATCH 021/138] first pass at docs update --- common/shell-local/communicator.go | 2 + post-processor/shell-local/post-processor.go | 6 ++- .../docs/post-processors/shell-local.html.md | 32 ++++++++--- .../docs/provisioners/shell-local.html.md | 53 +++++++++++++++++-- 4 files changed, 83 insertions(+), 10 deletions(-) diff --git a/common/shell-local/communicator.go b/common/shell-local/communicator.go index 7664bc896..b51d309d9 100644 --- 
a/common/shell-local/communicator.go +++ b/common/shell-local/communicator.go @@ -3,6 +3,7 @@ package shell_local import ( "fmt" "io" + "log" "os" "os/exec" "syscall" @@ -20,6 +21,7 @@ func (c *Communicator) Start(cmd *packer.RemoteCmd) error { } // Build the local command to execute + log.Printf("Executing local shell command %s", c.ExecuteCommand) localCmd := exec.Command(c.ExecuteCommand[0], c.ExecuteCommand[1:]...) localCmd.Stdin = cmd.Stdin localCmd.Stdout = cmd.Stdout diff --git a/post-processor/shell-local/post-processor.go b/post-processor/shell-local/post-processor.go index 557fe7eba..cc1f2845e 100644 --- a/post-processor/shell-local/post-processor.go +++ b/post-processor/shell-local/post-processor.go @@ -30,7 +30,11 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { } else if len(p.config.ExecuteCommand) == 1 { // Backwards compatibility -- before merge, post-processor didn't have // configurable call to shell program, meaning users may not have - // defined this in their call + // defined this in their call. If users are still using the old way of + // defining ExecuteCommand (e.g. just supplying a single string that is + // now being interpolated as a slice with one item), then assume we need + // to prepend this call still, and use the one that the post-processor + // defaulted to before. p.config.ExecuteCommand = append([]string{"sh", "-c"}, p.config.ExecuteCommand...) } diff --git a/website/source/docs/post-processors/shell-local.html.md b/website/source/docs/post-processors/shell-local.html.md index d23780731..26ef7871f 100644 --- a/website/source/docs/post-processors/shell-local.html.md +++ b/website/source/docs/post-processors/shell-local.html.md @@ -13,7 +13,7 @@ Type: `shell-local` The local shell post processor executes scripts locally during the post processing stage. Shell local provides a convenient way to automate executing -some task with the packer outputs. +some task with packer outputs and variables. ## Basic example @@ -33,6 +33,9 @@ required element is either "inline" or "script". Every other option is optional. Exactly *one* of the following is required: +- `command` (string) - This is a single command to execute. It will be written + to a temporary file and run using the `execute_command` call below. + - `inline` (array of strings) - This is an array of commands to execute. The commands are concatenated by newlines and turned into a single file, so they are all executed within the same context. This allows you to change @@ -52,15 +55,32 @@ Exactly *one* of the following is required: Optional parameters: - `environment_vars` (array of strings) - An array of key/value pairs to - inject prior to the execute\_command. The format should be `key=value`. + inject prior to the `execute_command`. The format should be `key=value`. Packer injects some environmental variables by default into the environment, as well, which are covered in the section below. -- `execute_command` (string) - The command to use to execute the script. By - default this is `chmod +x "{{.Script}}"; {{.Vars}} "{{.Script}}"`. - The value of this is treated as [template engine](/docs/templates/engine.html). +- `execute_command` (array of strings) - The command used to execute the script. By + default this is `["sh", "-c", "chmod +x \"{{.Script}}\"; {{.Vars}} \"{{.Script}}\""]` + on unix and `["cmd", "/c", "{{.Vars}}", "{{.Script}}"]` on windows. + This is treated as a [template engine](/docs/templates/engine.html). 
There are two available variables: `Script`, which is the path to the script - to run, `Vars`, which is the list of `environment_vars`, if configured. + to run, and `Vars`, which is the list of `environment_vars`, if configured. + If you choose to set this option, make sure that the first element in the + array is the shell program you want to use (for example, "sh" or + "/usr/local/bin/zsh" or even "powershell.exe" although anything other than + a flavor of the shell command language is not explicitly supported and may + be broken by assumptions made within Packer). + + For backwards compatibility, `execute_command` will accept a string instead + of an array of strings. If a single string or an array of strings with only + one element is provided, Packer will replicate past behavior by appending + your `execute_command` to the array of strings `["sh", "-c"]`. For example, + if you set `"execute_command": "foo bar"`, the final `execute_command` that + Packer runs will be ["sh", "-c", "foo bar"]. If you set `"execute_command": ["foo", "bar"]`, + the final execute_command will remain `["foo", "bar"]`. + + Again, the above is only provided as a backwards compatibility fix; we + strongly recommend that you set execute_command as an array of strings. - `inline_shebang` (string) - The [shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when diff --git a/website/source/docs/provisioners/shell-local.html.md b/website/source/docs/provisioners/shell-local.html.md index c52bcd893..bd66e74c7 100644 --- a/website/source/docs/provisioners/shell-local.html.md +++ b/website/source/docs/provisioners/shell-local.html.md @@ -37,10 +37,26 @@ The example below is fully functional. The reference of available configuration options is listed below. The only required element is "command". -Required: +Exactly *one* of the following is required: -- `command` (string) - The command to execute. This will be executed within - the context of a shell as specified by `execute_command`. +- `command` (string) - This is a single command to execute. It will be written + to a temporary file and run using the `execute_command` call below. + +- `inline` (array of strings) - This is an array of commands to execute. The + commands are concatenated by newlines and turned into a single file, so they + are all executed within the same context. This allows you to change + directories in one command and use something in the directory in the next + and so on. Inline scripts are the easiest way to pull off simple tasks + within the machine. + +- `script` (string) - The path to a script to execute. This path can be + absolute or relative. If it is relative, it is relative to the working + directory when Packer is executed. + +- `scripts` (array of strings) - An array of scripts to execute. The scripts + will be executed in the order specified. Each script is executed in + isolation, so state such as variables from one script won't carry on to the + next. Optional parameters: @@ -50,3 +66,34 @@ Optional parameters: treated as [configuration template](/docs/templates/engine.html). The only available variable is `Command` which is the command to execute. + +- `environment_vars` (array of strings) - An array of key/value pairs to + inject prior to the `execute_command`. The format should be `key=value`. + Packer injects some environmental variables by default into the environment, + as well, which are covered in the section below. + +- `execute_command` (array of strings) - The command used to execute the script. 
+ By default this is `["/bin/sh", "-c", "{{.Vars}}", "{{.Script}}"]` + on unix and `["cmd", "/c", "{{.Vars}}", "{{.Script}}"]` on windows. + This is treated as a [template engine](/docs/templates/engine.html). + There are two available variables: `Script`, which is the path to the script + to run, and `Vars`, which is the list of `environment_vars`, if configured. + If you choose to set this option, make sure that the first element in the + array is the shell program you want to use (for example, "sh" or + "/usr/local/bin/zsh" or even "powershell.exe" although anything other than + a flavor of the shell command language is not explicitly supported and may + be broken by assumptions made within Packer), and a later element in the + array must be `{{.Script}}`. + + For backwards compatibility, {{.Command}} is also available to use in + `execute_command` but it is decoded the same way as {{.Script}}. We + recommend using {{.Script}} for the sake of clarity, as even when you set + only a single `command` to run, Packer writes it to a temporary file and + then runs it as a script. + +- `inline_shebang` (string) - The + [shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when + running commands specified by `inline`. By default, this is `/bin/sh -e`. If + you're not using `inline`, then this configuration has no effect. + **Important:** If you customize this, be sure to include something like the + `-e` flag, otherwise individual steps failing won't fail the provisioner.
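The string-to-slice compatibility behavior described in these docs, and exercised by the tests added in the previous commit, reduces to a small normalization step. A sketch under that reading, ignoring the empty-command and windows defaulting branches:

```go
package main

import "fmt"

// normalize mimics the post-processor's compatibility shim: a legacy
// execute_command given as one string is wrapped in "sh -c", while a
// full slice is passed through untouched.
func normalize(executeCommand []string) []string {
	if len(executeCommand) == 1 {
		return append([]string{"sh", "-c"}, executeCommand...)
	}
	return executeCommand
}

func main() {
	fmt.Println(normalize([]string{"foo bar"}))    // [sh -c foo bar]
	fmt.Println(normalize([]string{"foo", "bar"})) // [foo bar]
}
```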
if runtime.GOOS == "windows" { if len(config.ExecuteCommand) == 0 { config.ExecuteCommand = []string{ @@ -84,8 +89,7 @@ func Validate(config *Config) error { config.ExecuteCommand = []string{ "/bin/sh", "-c", - "{{.Vars}}", - "{{.Script}}", + "{{.Vars}} {{.Script}}", } } } diff --git a/common/shell-local/run.go b/common/shell-local/run.go index 04d653389..5545d56d7 100644 --- a/common/shell-local/run.go +++ b/common/shell-local/run.go @@ -41,7 +41,7 @@ func Run(ui packer.Ui, config *Config) (bool, error) { if err != nil { return false, err } - ui.Say(fmt.Sprintf("Post processing with local shell script: %s", script)) + ui.Say(fmt.Sprintf("Running local shell script: %s", script)) comm := &Communicator{ ExecuteCommand: interpolatedCmds, @@ -93,6 +93,10 @@ func createInlineScriptFile(config *Config) (string, error) { } tf.Close() + err = os.Chmod(tf.Name(), 0555) + if err != nil { + log.Printf("error modifying permissions of temp script file: %s", err.Error()) + } return tf.Name(), nil } diff --git a/post-processor/shell-local/post-processor.go b/post-processor/shell-local/post-processor.go index cc1f2845e..c761a19f4 100644 --- a/post-processor/shell-local/post-processor.go +++ b/post-processor/shell-local/post-processor.go @@ -1,8 +1,6 @@ package shell_local import ( - "runtime" - sl "github.com/hashicorp/packer/common/shell-local" "github.com/hashicorp/packer/packer" ) @@ -21,20 +19,16 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { if err != nil { return err } - if len(p.config.ExecuteCommand) == 0 && runtime.GOOS != "windows" { - // Backwards compatibility from before post-processor merge with - // provisioner. Don't need to default separately for windows becuase the - // post-processor never worked for windows before the merge with the - // provisioner code, so the provisioner defaults are fine. - p.config.ExecuteCommand = []string{"sh", "-c", `chmod +x "{{.Script}}"; {{.Vars}} "{{.Script}}"`} - } else if len(p.config.ExecuteCommand) == 1 { - // Backwards compatibility -- before merge, post-processor didn't have - // configurable call to shell program, meaning users may not have - // defined this in their call. If users are still using the old way of - // defining ExecuteCommand (e.g. just supplying a single string that is - // now being interpolated as a slice with one item), then assume we need - // to prepend this call still, and use the one that the post-processor - // defaulted to before. + if len(p.config.ExecuteCommand) == 1 { + // Backwards compatibility -- before we merged the shell-local + // post-processor and provisioners, the post-processor accepted + // execute_command as a string rather than a slice of strings. It didn't + // have a configurable call to shell program, automatically prepending + // the user-supplied execute_command string with "sh -c". If users are + // still using the old way of defining ExecuteCommand (by supplying a + // single string rather than a slice of strings) then we need to + // prepend this command with the call that the post-processor defaulted + // to before. p.config.ExecuteCommand = append([]string{"sh", "-c"}, p.config.ExecuteCommand...) 
} diff --git a/provisioner/shell-local/provisioner.go b/provisioner/shell-local/provisioner.go index a56553245..a58f0c859 100644 --- a/provisioner/shell-local/provisioner.go +++ b/provisioner/shell-local/provisioner.go @@ -1,6 +1,11 @@ package shell import ( + "fmt" + "path/filepath" + "runtime" + "strings" + sl "github.com/hashicorp/packer/common/shell-local" "github.com/hashicorp/packer/packer" ) @@ -10,12 +15,46 @@ type Provisioner struct { } func (p *Provisioner) Prepare(raws ...interface{}) error { - err := sl.Decode(&p.config, raws) + err := sl.Decode(&p.config, raws...) + if err != nil { + return err + } + convertPath := false + if len(p.config.ExecuteCommand) == 0 && runtime.GOOS == "windows" { + convertPath = true + p.config.ExecuteCommand = []string{ + "bash", + "-c", + "{{.Vars}} {{.Script}}", + } + } + + err = sl.Validate(&p.config) if err != nil { return err } - return sl.Validate(&p.config) + if convertPath { + for index, script := range p.config.Scripts { + p.config.Scripts[index], err = convertToWindowsBashPath(script) + if err != nil { + return err + } + } + } + + return nil +} + +func convertToWindowsBashPath(winPath string) (string, error) { + // get absolute path of script, and morph it into the bash path + winAbsPath, err := filepath.Abs(winPath) + if err != nil { + return "", fmt.Errorf("Error converting %s to absolute path: %s", winPath, err.Error()) + } + winAbsPath = strings.Replace(winAbsPath, "\\", "/", -1) + winBashPath := strings.Replace(winAbsPath, "C:/", "/mnt/c/", 1) + return winBashPath, nil } func (p *Provisioner) Provision(ui packer.Ui, _ packer.Communicator) error { From 51bcc7aa136ccb7331435333cf162d0c5d007feb Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 8 Mar 2018 16:42:17 -0800 Subject: [PATCH 023/138] add new feature for telling shell-local whether to use linux pathing on windows; update docs with some examples. --- common/shell-local/config.go | 33 ++++++++++++--- common/shell-local/run.go | 10 +++++ provisioner/shell-local/provisioner.go | 34 --------------- .../docs/post-processors/shell-local.html.md | 41 +++++++++++++++++-- 4 files changed, 75 insertions(+), 43 deletions(-) diff --git a/common/shell-local/config.go b/common/shell-local/config.go index 8c73d0f57..80751aee7 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "os" + "path/filepath" "runtime" "strings" @@ -44,6 +45,8 @@ type Config struct { // can be used to inject the environment_vars into the environment. ExecuteCommand []string `mapstructure:"execute_command"` + UseLinuxPathing bool `mapstructure:"use_linux_pathing"` + Ctx interpolate.Context } @@ -67,11 +70,6 @@ func Decode(config *Config, raws ...interface{}) error { func Validate(config *Config) error { var errs *packer.MultiError - // Do not treat these defaults as a source of truth; the shell-local - // provisioner sets these defaults before Validate is called. Eventually - // we will have to bring the provisioner and post-processor defaults in - // line with one another, but for now the following may or may not be - // applied depending on where Validate is being called from. 
if runtime.GOOS == "windows" { if len(config.ExecuteCommand) == 0 { config.ExecuteCommand = []string{ @@ -89,7 +87,8 @@ func Validate(config *Config) error { config.ExecuteCommand = []string{ "/bin/sh", "-c", - "{{.Vars}} {{.Script}}", + "{{.Vars}}", + "{{.Script}}", } } } @@ -146,6 +145,15 @@ func Validate(config *Config) error { fmt.Errorf("Bad script '%s': %s", path, err)) } } + if config.UseLinuxPathing { + for index, script := range config.Scripts { + converted, err := convertToLinuxPath(script) + if err != nil { + return err + } + config.Scripts[index] = converted + } + } // Do a check for bad environment variables, such as '=foo', 'foobar' for _, kv := range config.Vars { @@ -162,3 +170,16 @@ func Validate(config *Config) error { return nil } + +// C:/path/to/your/file becomes /mnt/c/path/to/your/file +func convertToLinuxPath(winPath string) (string, error) { + // get absolute path of script, and morph it into the bash path + winAbsPath, err := filepath.Abs(winPath) + if err != nil { + return "", fmt.Errorf("Error converting %s to absolute path: %s", winPath, err.Error()) + } + winAbsPath = strings.Replace(winAbsPath, "\\", "/", -1) + splitPath := strings.SplitN(winAbsPath, ":/", 2) + winBashPath := fmt.Sprintf("/mnt/%s/%s", strings.ToLower(splitPath[0]), splitPath[1]) + return winBashPath, nil +} diff --git a/common/shell-local/run.go b/common/shell-local/run.go index 5545d56d7..ea8043737 100644 --- a/common/shell-local/run.go +++ b/common/shell-local/run.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "log" "os" + "runtime" "sort" "strings" @@ -144,8 +145,17 @@ func createFlattenedEnvVars(config *Config) (flattened string) { sort.Strings(keys) // Re-assemble vars surrounding value with single quotes and flatten + if runtime.GOOS == "windows" { + log.Printf("MEGAN NEED TO IMPLEMENT") + // createEnvVarsSourceFileWindows() + } for _, key := range keys { flattened += fmt.Sprintf("%s='%s' ", key, envVars[key]) } return } + +// func createFlattenedEnvVarsWindows( +// // The default shell, cmd, can set vars via dot sourcing +// // set TESTXYZ=XYZ +// ) diff --git a/provisioner/shell-local/provisioner.go b/provisioner/shell-local/provisioner.go index a58f0c859..16c3806e4 100644 --- a/provisioner/shell-local/provisioner.go +++ b/provisioner/shell-local/provisioner.go @@ -1,11 +1,6 @@ package shell import ( - "fmt" - "path/filepath" - "runtime" - "strings" - sl "github.com/hashicorp/packer/common/shell-local" "github.com/hashicorp/packer/packer" ) @@ -19,44 +14,15 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { if err != nil { return err } - convertPath := false - if len(p.config.ExecuteCommand) == 0 && runtime.GOOS == "windows" { - convertPath = true - p.config.ExecuteCommand = []string{ - "bash", - "-c", - "{{.Vars}} {{.Script}}", - } - } err = sl.Validate(&p.config) if err != nil { return err } - if convertPath { - for index, script := range p.config.Scripts { - p.config.Scripts[index], err = convertToWindowsBashPath(script) - if err != nil { - return err - } - } - } - return nil } -func convertToWindowsBashPath(winPath string) (string, error) { - // get absolute path of script, and morph it into the bash path - winAbsPath, err := filepath.Abs(winPath) - if err != nil { - return "", fmt.Errorf("Error converting %s to absolute path: %s", winPath, err.Error()) - } - winAbsPath = strings.Replace(winAbsPath, "\\", "/", -1) - winBashPath := strings.Replace(winAbsPath, "C:/", "/mnt/c/", 1) - return winBashPath, nil -} - func (p *Provisioner) Provision(ui packer.Ui, _ packer.Communicator) 
error { _, retErr := sl.Run(ui, &p.config) if retErr != nil { diff --git a/website/source/docs/post-processors/shell-local.html.md b/website/source/docs/post-processors/shell-local.html.md index 26ef7871f..e2bc324da 100644 --- a/website/source/docs/post-processors/shell-local.html.md +++ b/website/source/docs/post-processors/shell-local.html.md @@ -60,7 +60,7 @@ Optional parameters: as well, which are covered in the section below. - `execute_command` (array of strings) - The command used to execute the script. By - default this is `["sh", "-c", "chmod +x \"{{.Script}}\"; {{.Vars}} \"{{.Script}}\""]` + default this is `["/bin/sh", "-c", "{{.Vars}}", "{{.Script}}"]` on unix and `["cmd", "/c", "{{.Vars}}", "{{.Script}}"]` on windows. This is treated as a [template engine](/docs/templates/engine.html). There are two available variables: `Script`, which is the path to the script @@ -69,7 +69,9 @@ Optional parameters: array is the shell program you want to use (for example, "sh" or "/usr/local/bin/zsh" or even "powershell.exe" although anything other than a flavor of the shell command language is not explicitly supported and may - be broken by assumptions made within Packer). + be broken by assumptions made within Packer). It's worth noting that if you + choose to try to use shell-local for Powershell or other Windows commands, + the environment variables will not be set properly for your environment. For backwards compatibility, `execute_command` will accept a string instead of an array of strings. If a single string or an array of strings with only one element is provided, Packer will replicate past behavior by appending @@ -89,13 +91,46 @@ Optional parameters: **Important:** If you customize this, be sure to include something like the `-e` flag, otherwise individual steps failing won't fail the provisioner. -## Execute Command Example +- `use_linux_pathing` (bool) - This is only relevant to windows hosts. If you + are running Packer in a Windows environment with the Windows Subsystem for + Linux feature enabled, and would like to invoke a bash script rather than + invoking a Cmd script, you'll need to set this flag to true; it tells Packer + to use the linux subsystem path for your script rather than the Windows path. + (e.g. /mnt/c/path/to/your/file instead of C:/path/to/your/file). + +## Execute Command To many new users, the `execute_command` is puzzling. However, it provides an important function: customization of how the command is executed. The most common use case for this is dealing with **sudo password prompts**. You may also need to customize this if you use a non-POSIX shell, such as `tcsh` on FreeBSD. +### The Windows Linux Subsystem +If you have a bash script that you'd like to run on your Windows Linux +Subsystem as part of the shell-local post-processor, you must set +`execute_command` and `use_linux_pathing`. + +The example below is a fully functional test config. 
+ +``` +{ + "builders": [ + { + "type": "null", + "communicator": "none" + } + ], + "provisioners": [ + { + "type": "shell-local", + "environment_vars": ["PROVISIONERTEST=ProvisionerTest1"], + "execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"] + "use_linux_pathing": true + "scripts": ["./scripts/.sh"] + }, +``` + ## Default Environmental Variables In addition to being able to specify custom environmental variables using the From dd183f22d9c78524884de923eaf791e5ca3c7eed Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 9 Mar 2018 15:14:52 -0800 Subject: [PATCH 024/138] update docs and add warnings around WSL limitations --- common/shell-local/config.go | 12 ++- .../docs/post-processors/shell-local.html.md | 36 +++++-- .../docs/provisioners/shell-local.html.md | 97 +++++++++++++++++++ 3 files changed, 136 insertions(+), 9 deletions(-) diff --git a/common/shell-local/config.go b/common/shell-local/config.go index 80751aee7..64dfe25c1 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -147,12 +147,20 @@ func Validate(config *Config) error { } if config.UseLinuxPathing { for index, script := range config.Scripts { - converted, err := convertToLinuxPath(script) + converted, err := ConvertToLinuxPath(script) if err != nil { return err } config.Scripts[index] = converted } + // Interoperability issues with WSL makes creating and running tempfiles + // via golang's os package basically impossible. + if len(config.Inline) > 0 { + errs = packer.MultiErrorAppend(errs, + fmt.Errorf("Packer is unable to use the Command and Inline "+ + "features with the Windows Linux Subsystem. Please use "+ + "the Script or Scripts options instead")) + } } // Do a check for bad environment variables, such as '=foo', 'foobar' @@ -172,7 +180,7 @@ func Validate(config *Config) error { } // C:/path/to/your/file becomes /mnt/c/path/to/your/file -func convertToLinuxPath(winPath string) (string, error) { +func ConvertToLinuxPath(winPath string) (string, error) { // get absolute path of script, and morph it into the bash path winAbsPath, err := filepath.Abs(winPath) if err != nil { diff --git a/website/source/docs/post-processors/shell-local.html.md b/website/source/docs/post-processors/shell-local.html.md index e2bc324da..ab2f4bba3 100644 --- a/website/source/docs/post-processors/shell-local.html.md +++ b/website/source/docs/post-processors/shell-local.html.md @@ -96,7 +96,10 @@ Optional parameters: Linux feature enabled, and would like to invoke a bash script rather than invoking a Cmd script, you'll need to set this flag to true; it tells Packer to use the linux subsystem path for your script rather than the Windows path. - (e.g. /mnt/c/path/to/your/file instead of C:/path/to/your/file). + (e.g. /mnt/c/path/to/your/file instead of C:/path/to/your/file). Please see + the example below for more guidance on how to use this feature. If you are + not on a Windows host, or you do not intend to use the shell-local + post-processor to run a bash script, please ignore this option. ## Execute Command @@ -107,12 +110,22 @@ need to customize this if you use a non-POSIX shell, such as `tcsh` on FreeBSD. ### The Windows Linux Subsystem -If you have a bash script that you'd like to run on your Windows Linux -Subsystem as part of the shell-local post-processor, you must set -`execute_command` and `use_linux_pathing`. +The shell-local post-processor was designed with the idea of allowing you to run +commands in your local operating system's native shell. 
For Windows, we've +assumed in our defaults that this is Cmd. However, it is possible to run a +bash script as part of the Windows Linux Subsystem from the shell-local +post-processor, by modifying the `execute_command` and the `use_linux_pathing` +options in the post-processor config. The example below is a fully functional test config. +One limitation of this offering is that "inline" and "command" options are not +available to you; please limit yourself to using the "script" or "scripts" +options instead. + +Please note that the WSL is a beta feature, and this tool is not guaranteed to +work as you expect it to. + ``` { "builders": [ @@ -125,10 +138,19 @@ The example below is a fully functional test config. { "type": "shell-local", "environment_vars": ["PROVISIONERTEST=ProvisionerTest1"], - "execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"] - "use_linux_pathing": true - "scripts": ["./scripts/.sh"] + "execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"], + "use_linux_pathing": true, + "scripts": ["C:/Users/me/scripts/example_bash.sh"] }, + { + "type": "shell-local", + "environment_vars": ["PROVISIONERTEST=ProvisionerTest2"], + "execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"], + "use_linux_pathing": true, + "script": "C:/Users/me/scripts/example_bash.sh" + } + ] +} ``` ## Default Environmental Variables diff --git a/website/source/docs/provisioners/shell-local.html.md b/website/source/docs/provisioners/shell-local.html.md index bd66e74c7..f40c4e9f0 100644 --- a/website/source/docs/provisioners/shell-local.html.md +++ b/website/source/docs/provisioners/shell-local.html.md @@ -97,3 +97,100 @@ Optional parameters: you're not using `inline`, then this configuration has no effect. **Important:** If you customize this, be sure to include something like the `-e` flag, otherwise individual steps failing won't fail the provisioner. + +- `use_linux_pathing` (bool) - This is only relevant to windows hosts. If you + are running Packer in a Windows environment with the Windows Subsystem for + Linux feature enabled, and would like to invoke a bash script rather than + invoking a Cmd script, you'll need to set this flag to true; it tells Packer + to use the linux subsystem path for your script rather than the Windows path. + (e.g. /mnt/c/path/to/your/file instead of C:/path/to/your/file). Please see + the example below for more guidance on how to use this feature. If you are + not on a Windows host, or you do not intend to use the shell-local + provisioner to run a bash script, please ignore this option. + +## Execute Command + +To many new users, the `execute_command` is puzzling. However, it provides an +important function: customization of how the command is executed. The most +common use case for this is dealing with **sudo password prompts**. You may also +need to customize this if you use a non-POSIX shell, such as `tcsh` on FreeBSD. + +### The Windows Linux Subsystem + +The shell-local provisioner was designed with the idea of allowing you to run +commands in your local operating system's native shell. For Windows, we've +assumed in our defaults that this is Cmd. However, it is possible to run a +bash script as part of the Windows Linux Subsystem from the shell-local +provisioner, by modifying the `execute_command` and the `use_linux_pathing` +options in the provisioner config. + +The example below is a fully functional test config. 
+ +One limitation of this offering is that "inline" and "command" options are not +available to you; please limit yourself to using the "script" or "scripts" +options instead. + +Please note that the WSL is a beta feature, and this tool is not guaranteed to +work as you expect it to. + +``` +{ + "builders": [ + { + "type": "null", + "communicator": "none" + } + ], + "provisioners": [ + { + "type": "shell-local", + "environment_vars": ["PROVISIONERTEST=ProvisionerTest1"], + "execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"], + "use_linux_pathing": true, + "scripts": ["C:/Users/me/scripts/example_bash.sh"] + }, + { + "type": "shell-local", + "environment_vars": ["PROVISIONERTEST=ProvisionerTest2"], + "execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"], + "use_linux_pathing": true, + "script": "C:/Users/me/scripts/example_bash.sh" + } + ] +} +``` + +## Default Environmental Variables + +In addition to being able to specify custom environmental variables using the +`environment_vars` configuration, the provisioner automatically defines certain +commonly useful environmental variables: + +- `PACKER_BUILD_NAME` is set to the name of the build that Packer is running. + This is most useful when Packer is making multiple builds and you want to + distinguish them slightly from a common provisioning script. + +- `PACKER_BUILDER_TYPE` is the type of the builder that was used to create the + machine that the script is running on. This is useful if you want to run + only certain parts of the script on systems built with certain builders. + +## Safely Writing A Script + +Whether you use the `inline` option, or pass it a direct `script` or `scripts`, +it is important to understand a few things about how the shell-local +provisioner works to run it safely and easily. This understanding will save +you much time in the process. + +### Once Per Builder + +The `shell-local` script(s) you pass are run once per builder. That means that +if you have an `amazon-ebs` builder and a `docker` builder, your script will be +run twice. If you have 3 builders, it will run 3 times, once for each builder. + +### Always Exit Intentionally + +If any provisioner fails, the `packer build` stops and all interim artifacts +are cleaned up. + +For a shell script, that means the script **must** exit with a zero code. You +*must* be extra careful to `exit 0` when necessary. 
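
For quick reference between patches, the path rule the WSL documentation above relies on (C:/path/to/your/file becoming /mnt/c/path/to/your/file) is small enough to exercise on its own. The sketch below is illustrative rather than the in-tree helper: the function name `toLinuxPath` and the explicit drive-letter guard are additions for this snippet, and the `filepath.Abs` step that the real `convertToLinuxPath` in common/shell-local/config.go performs first is deliberately skipped so the snippet behaves the same on any host OS.

```
package main

import (
	"fmt"
	"strings"
)

// toLinuxPath applies the conversion rule from the patches above to an
// already-absolute Windows path: flip backslashes to forward slashes,
// lower-case the drive letter, and mount it under /mnt. Unlike the in-tree
// helper, it returns an error instead of panicking when no drive letter is
// present.
func toLinuxPath(winAbsPath string) (string, error) {
	winAbsPath = strings.Replace(winAbsPath, "\\", "/", -1)
	parts := strings.SplitN(winAbsPath, ":/", 2)
	if len(parts) != 2 {
		return "", fmt.Errorf("no drive letter found in %s", winAbsPath)
	}
	return fmt.Sprintf("/mnt/%s/%s", strings.ToLower(parts[0]), parts[1]), nil
}

func main() {
	p, err := toLinuxPath(`C:\Users\me\scripts\example_bash.sh`)
	fmt.Println(p, err) // /mnt/c/Users/me/scripts/example_bash.sh <nil>
}
```
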
From 9651432378952cecf47e9b24647021809cb161bc Mon Sep 17 00:00:00 2001
From: Megan Marsh
Date: Fri, 9 Mar 2018 15:36:11 -0800
Subject: [PATCH 025/138] preserve BC for people using 'command' option

---
 common/shell-local/run.go                     | 10 +++++---
 .../docs/provisioners/shell-local.html.md     | 25 +++++++++++--------
 2 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/common/shell-local/run.go b/common/shell-local/run.go
index ea8043737..a16573e7a 100644
--- a/common/shell-local/run.go
+++ b/common/shell-local/run.go
@@ -15,8 +15,9 @@ import (
 )

 type ExecuteCommandTemplate struct {
-	Vars   string
-	Script string
+	Vars    string
+	Script  string
+	Command string
 }

 func Run(ui packer.Ui, config *Config) (bool, error) {
@@ -106,8 +107,9 @@ func createInlineScriptFile(config *Config) (string, error) {
 // the host OS
 func createInterpolatedCommands(config *Config, script string, flattenedEnvVars string) ([]string, error) {
 	config.Ctx.Data = &ExecuteCommandTemplate{
-		Vars:   flattenedEnvVars,
-		Script: script,
+		Vars:    flattenedEnvVars,
+		Script:  script,
+		Command: script,
 	}

 	interpolatedCmds := make([]string, len(config.ExecuteCommand))
diff --git a/website/source/docs/provisioners/shell-local.html.md b/website/source/docs/provisioners/shell-local.html.md
index f40c4e9f0..eae9cf2ff 100644
--- a/website/source/docs/provisioners/shell-local.html.md
+++ b/website/source/docs/provisioners/shell-local.html.md
@@ -78,18 +78,21 @@ Optional parameters:
    This is treated as a [template engine](/docs/templates/engine.html). There
    are two available variables: `Script`, which is the path to the script
    to run, and `Vars`, which is the list of `environment_vars`, if configured
-    If you choose to set this option, make sure that the first element in the
-    array is the shell program you want to use (for example, "sh" or
-    "/usr/local/bin/zsh" or even "powershell.exe" although anything other than
-    a flavor of the shell command language is not explicitly supported and may
-    be broken by assumptions made within Packer), and a later element in the
-    array must be `{{.Script}}`.
-    For backwards compatability, {{.Command}} is also available to use in
-    `execute_command` but it is decoded the same way as {{.Script}}. We
-    recommend using {{.Script}} for the sake of clarity, as even when you set
-    only a single `command` to run, Packer writes it to a temporary file and
-    then runs it as a script.
+    If you choose to set this option, make sure that the first element in the
+    array is the shell program you want to use (for example, "sh"), and a later
+    element in the array must be `{{.Script}}`.
+
+    This option provides you a great deal of flexibility. You may choose to
+    provide your own shell program, for example "/usr/local/bin/zsh" or even
+    "powershell.exe". However, with great power comes great responsibility -
+    these commands are not officially supported and things like environment
+    variables may not work if you use a different shell than the default.
+
+    For backwards compatibility, you may also use {{.Command}}, but it is
+    decoded the same way as {{.Script}}. We recommend using {{.Script}} for the
+    sake of clarity, as even when you set only a single `command` to run,
+    Packer writes it to a temporary file and then runs it as a script. 
- `inline_shebang` (string) - The [shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when From fabd1a651771c099aec387f0f8b1fa9c5aec0ba1 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Mon, 12 Mar 2018 11:25:39 -0700 Subject: [PATCH 026/138] windows cmd env vars --- common/shell-local/config.go | 11 +++++++++ common/shell-local/run.go | 24 +++++++------------ .../shell-local/post-processor_test.go | 2 +- 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/common/shell-local/config.go b/common/shell-local/config.go index 64dfe25c1..f4514a014 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -38,6 +38,8 @@ type Config struct { // An array of environment variables that will be injected before // your command(s) are executed. Vars []string `mapstructure:"environment_vars"` + + EnvVarFormat string // End dedupe with postprocessor // The command used to execute the script. The '{{ .Path }}' variable @@ -162,6 +164,15 @@ func Validate(config *Config) error { "the Script or Scripts options instead")) } } + // This is currently undocumented and not a feature users are expected to + // interact with. + if config.EnvVarFormat == "" { + if (runtime.GOOS == "windows") && !config.UseLinuxPathing { + config.EnvVarFormat = `set "%s=%s" && ` + } else { + config.EnvVarFormat = "%s='%s' " + } + } // Do a check for bad environment variables, such as '=foo', 'foobar' for _, kv := range config.Vars { diff --git a/common/shell-local/run.go b/common/shell-local/run.go index a16573e7a..9e17d2d87 100644 --- a/common/shell-local/run.go +++ b/common/shell-local/run.go @@ -6,7 +6,6 @@ import ( "io/ioutil" "log" "os" - "runtime" "sort" "strings" @@ -36,7 +35,10 @@ func Run(ui packer.Ui, config *Config) (bool, error) { } // Create environment variables to set before executing the command - flattenedEnvVars := createFlattenedEnvVars(config) + flattenedEnvVars, err := createFlattenedEnvVars(config) + if err != nil { + return false, err + } for _, script := range scripts { interpolatedCmds, err := createInterpolatedCommands(config, script, flattenedEnvVars) @@ -123,8 +125,8 @@ func createInterpolatedCommands(config *Config, script string, flattenedEnvVars return interpolatedCmds, nil } -func createFlattenedEnvVars(config *Config) (flattened string) { - flattened = "" +func createFlattenedEnvVars(config *Config) (string, error) { + flattened := "" envVars := make(map[string]string) // Always available Packer provided env vars @@ -146,18 +148,8 @@ func createFlattenedEnvVars(config *Config) (flattened string) { } sort.Strings(keys) - // Re-assemble vars surrounding value with single quotes and flatten - if runtime.GOOS == "windows" { - log.Printf("MEGAN NEED TO IMPLEMENT") - // createEnvVarsSourceFileWindows() - } for _, key := range keys { - flattened += fmt.Sprintf("%s='%s' ", key, envVars[key]) + flattened += fmt.Sprintf(config.EnvVarFormat, key, envVars[key]) } - return + return flattened, nil } - -// func createFlattenedEnvVarsWindows( -// // The default shell, cmd, can set vars via dot sourcing -// // set TESTXYZ=XYZ -// ) diff --git a/post-processor/shell-local/post-processor_test.go b/post-processor/shell-local/post-processor_test.go index afec79f81..5fabac124 100644 --- a/post-processor/shell-local/post-processor_test.go +++ b/post-processor/shell-local/post-processor_test.go @@ -139,7 +139,7 @@ func TestPostProcessorPrepare_ExecuteCommand(t *testing.T) { p = new(PostProcessor) p.Configure(raws) if runtime.GOOS != "windows" { - expected = []string{"sh", 
"-c", `chmod +x "{{.Script}}"; {{.Vars}} "{{.Script}}"`} + expected = []string{"/bin/sh", "-c", "{{.Vars}}", "{{.Script}}"} } else { expected = []string{"cmd", "/C", "{{.Vars}}", "{{.Script}}"} } From 1bea658e16cb5a45ea043b78276e81f2c4ec62db Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 4 Apr 2018 11:07:10 -0700 Subject: [PATCH 027/138] fix command and inline calls on windows --- common/shell-local/config.go | 19 +++- common/shell-local/run.go | 17 ++- .../docs/post-processors/shell-local.html.md | 102 ++++++++++++++++++ .../docs/provisioners/shell-local.html.md | 101 +++++++++++++++++ 4 files changed, 231 insertions(+), 8 deletions(-) diff --git a/common/shell-local/config.go b/common/shell-local/config.go index f4514a014..846e4b4a4 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -29,6 +29,9 @@ type Config struct { // The shebang value used when running inline scripts. InlineShebang string `mapstructure:"inline_shebang"` + // The file extension to use for the file generated from the inline commands + TempfileExtension string `mapstructure:"tempfile_extension"` + // The local path of the shell script to upload and execute. Script string @@ -39,7 +42,7 @@ type Config struct { // your command(s) are executed. Vars []string `mapstructure:"environment_vars"` - EnvVarFormat string + EnvVarFormat string `mapstructure:"env_var_format"` // End dedupe with postprocessor // The command used to execute the script. The '{{ .Path }}' variable @@ -76,8 +79,10 @@ func Validate(config *Config) error { if len(config.ExecuteCommand) == 0 { config.ExecuteCommand = []string{ "cmd", + "/V", "/C", "{{.Vars}}", + "call", "{{.Script}}", } } @@ -89,8 +94,7 @@ func Validate(config *Config) error { config.ExecuteCommand = []string{ "/bin/sh", "-c", - "{{.Vars}}", - "{{.Script}}", + "{{.Vars}} {{.Script}}", } } } @@ -168,12 +172,19 @@ func Validate(config *Config) error { // interact with. if config.EnvVarFormat == "" { if (runtime.GOOS == "windows") && !config.UseLinuxPathing { - config.EnvVarFormat = `set "%s=%s" && ` + config.EnvVarFormat = "set %s=%s && " } else { config.EnvVarFormat = "%s='%s' " } } + // drop unnecessary "." in extension; we add this later. + if config.TempfileExtension != "" { + if strings.HasPrefix(config.TempfileExtension, ".") { + config.TempfileExtension = config.TempfileExtension[1:] + } + } + // Do a check for bad environment variables, such as '=foo', 'foobar' for _, kv := range config.Vars { vs := strings.SplitN(kv, "=", 2) diff --git a/common/shell-local/run.go b/common/shell-local/run.go index 9e17d2d87..6af406522 100644 --- a/common/shell-local/run.go +++ b/common/shell-local/run.go @@ -30,8 +30,13 @@ func Run(ui packer.Ui, config *Config) (bool, error) { if err != nil { return false, err } - defer os.Remove(tempScriptFileName) scripts = append(scripts, tempScriptFileName) + + defer os.Remove(tempScriptFileName) + // figure out what extension the file should have, and rename it. 
+ if config.TempfileExtension != "" { + os.Rename(tempScriptFileName, fmt.Sprintf("%s.%s", tempScriptFileName, config.TempfileExtension)) + } } // Create environment variables to set before executing the command @@ -78,14 +83,18 @@ func Run(ui packer.Ui, config *Config) (bool, error) { } func createInlineScriptFile(config *Config) (string, error) { - tf, err := ioutil.TempFile("", "packer-shell") + tf, err := ioutil.TempFile(os.TempDir(), "packer-shell") if err != nil { return "", fmt.Errorf("Error preparing shell script: %s", err) } - + defer tf.Close() // Write our contents to it writer := bufio.NewWriter(tf) - writer.WriteString(fmt.Sprintf("#!%s\n", config.InlineShebang)) + if config.InlineShebang != "" { + shebang := fmt.Sprintf("#!%s\n", config.InlineShebang) + log.Printf("Prepending inline script with %s", shebang) + writer.WriteString(shebang) + } for _, command := range config.Inline { if _, err := writer.WriteString(command + "\n"); err != nil { return "", fmt.Errorf("Error preparing shell script: %s", err) diff --git a/website/source/docs/post-processors/shell-local.html.md b/website/source/docs/post-processors/shell-local.html.md index ab2f4bba3..3ace72792 100644 --- a/website/source/docs/post-processors/shell-local.html.md +++ b/website/source/docs/post-processors/shell-local.html.md @@ -227,3 +227,105 @@ are cleaned up. For a shell script, that means the script **must** exit with a zero code. You *must* be extra careful to `exit 0` when necessary. + + +## Usage Examples: + +Example of running a .cmd file on windows: + +``` + { + "type": "shell-local", + "environment_vars": ["SHELLLOCALTEST=ShellTest1"], + "scripts": ["./scripts/test_cmd.cmd"] + }, +``` + +Contents of "test_cmd.cmd": + +``` +echo %SHELLLOCALTEST% +``` + +Example of running an inline command on windows: +Required customization: tempfile_extension + +``` + { + "type": "shell-local", + "environment_vars": ["SHELLLOCALTEST=ShellTest2"], + "tempfile_extension": ".cmd", + "inline": ["echo %SHELLLOCALTEST%"] + }, +``` + +Example of running a bash command on windows using WSL: +Required customizations: use_linux_pathing and execute_command + +``` + { + "type": "shell-local", + "environment_vars": ["SHELLLOCALTEST=ShellTest3"], + "execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"], + "use_linux_pathing": true, + "script": "./scripts/example_bash.sh" + } +``` + +Contents of "example_bash.sh": + +``` +#!/bin/bash +echo $SHELLLOCALTEST +``` + +Example of running a powershell script on windows: +Required customizations: env_var_format and execute_command + +``` + + { + "type": "shell-local", + "environment_vars": ["SHELLLOCALTEST=ShellTest4"], + "execute_command": ["powershell.exe", "{{.Vars}} {{.Script}}"], + "env_var_format": "$env:%s=\"%s\"; ", + } +``` + +Example of running a powershell script on windows as "inline": +Required customizations: env_var_format, tempfile_extension, and execute_command + +``` + { + "type": "shell-local", + "tempfile_extension": ".ps1", + "environment_vars": ["SHELLLOCALTEST=ShellTest5"], + "execute_command": ["powershell.exe", "{{.Vars}} {{.Script}}"], + "env_var_format": "$env:%s=\"%s\"; ", + "inline": ["write-output $env:SHELLLOCALTEST"] + } +``` + + +Example of running a bash script on linux: + +``` + { + "type": "shell-local", + "environment_vars": ["PROVISIONERTEST=ProvisionerTest1"], + "scripts": ["./scripts/dummy_bash.sh"] + } +``` + +Example of running a bash "inline" on linux: + +``` + { + "type": "shell-local", + "environment_vars": ["PROVISIONERTEST=ProvisionerTest2"], + 
"inline": ["echo hello", + "echo $PROVISIONERTEST"] + } +``` + + diff --git a/website/source/docs/provisioners/shell-local.html.md b/website/source/docs/provisioners/shell-local.html.md index eae9cf2ff..cadb1d6a1 100644 --- a/website/source/docs/provisioners/shell-local.html.md +++ b/website/source/docs/provisioners/shell-local.html.md @@ -197,3 +197,104 @@ are cleaned up. For a shell script, that means the script **must** exit with a zero code. You *must* be extra careful to `exit 0` when necessary. + + +## Usage Examples: + +Example of running a .cmd file on windows: + +``` + { + "type": "shell-local", + "environment_vars": ["SHELLLOCALTEST=ShellTest1"], + "scripts": ["./scripts/test_cmd.cmd"] + }, +``` + +Contents of "test_cmd.cmd": + +``` +echo %SHELLLOCALTEST% +``` + +Example of running an inline command on windows: +Required customization: tempfile_extension + +``` + { + "type": "shell-local", + "environment_vars": ["SHELLLOCALTEST=ShellTest2"], + "tempfile_extension": ".cmd", + "inline": ["echo %SHELLLOCALTEST%"] + }, +``` + +Example of running a bash command on windows using WSL: +Required customizations: use_linux_pathing and execute_command + +``` + { + "type": "shell-local", + "environment_vars": ["SHELLLOCALTEST=ShellTest3"], + "execute_command": ["bash", "-c", "{{.Vars}} {{.Script}}"], + "use_linux_pathing": true, + "script": "./scripts/example_bash.sh" + } +``` + +Contents of "example_bash.sh": + +``` +#!/bin/bash +echo $SHELLLOCALTEST +``` + +Example of running a powershell script on windows: +Required customizations: env_var_format and execute_command + +``` + + { + "type": "shell-local", + "environment_vars": ["SHELLLOCALTEST=ShellTest4"], + "execute_command": ["powershell.exe", "{{.Vars}} {{.Script}}"], + "env_var_format": "$env:%s=\"%s\"; ", + } +``` + +Example of running a powershell script on windows as "inline": +Required customizations: env_var_format, tempfile_extension, and execute_command + +``` + { + "type": "shell-local", + "tempfile_extension": ".ps1", + "environment_vars": ["SHELLLOCALTEST=ShellTest5"], + "execute_command": ["powershell.exe", "{{.Vars}} {{.Script}}"], + "env_var_format": "$env:%s=\"%s\"; ", + "inline": ["write-output $env:SHELLLOCALTEST"] + } +``` + + +Example of running a bash script on linux: + +``` + { + "type": "shell-local", + "environment_vars": ["PROVISIONERTEST=ProvisionerTest1"], + "scripts": ["./scripts/dummy_bash.sh"] + } +``` + +Example of running a bash "inline" on linux: + +``` + { + "type": "shell-local", + "environment_vars": ["PROVISIONERTEST=ProvisionerTest2"], + "inline": ["echo hello", + "echo $PROVISIONERTEST"] + } +``` + From 2b2bd5715c96814caa88b631290945140bfe87e0 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 4 Apr 2018 15:29:37 -0700 Subject: [PATCH 028/138] fix docs --- .../source/docs/post-processors/shell-local.html.md | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/website/source/docs/post-processors/shell-local.html.md b/website/source/docs/post-processors/shell-local.html.md index 3ace72792..6812fac2b 100644 --- a/website/source/docs/post-processors/shell-local.html.md +++ b/website/source/docs/post-processors/shell-local.html.md @@ -100,6 +100,8 @@ Optional parameters: the example below for more guidance on how to use this feature. If you are not on a Windows host, or you do not intend to use the shell-local post-processor to run a bash script, please ignore this option. 
+    If you set this flag to true, you still need to provide the standard windows
+    path to the script when providing a `script`. This is a beta feature.

 ## Execute Command

@@ -123,8 +125,10 @@ One limitation of this offering is that "inline" and "command" options are not
 available to you; please limit yourself to using the "script" or "scripts"
 options instead.

-Please note that the WSL is a beta feature, and this tool is not guaranteed to
-work as you expect it to.
+Please note that this feature is still in beta, as the underlying WSL is also
+still in beta. There will be some limitations as a result. For example, it will
+likely not work unless both Packer and the scripts you want to run are on
+the C drive.

 ```
 {
   "builders": [
@@ -289,6 +293,7 @@ Required customizations: env_var_format and execute_command
     "environment_vars": ["SHELLLOCALTEST=ShellTest4"],
     "execute_command": ["powershell.exe", "{{.Vars}} {{.Script}}"],
     "env_var_format": "$env:%s=\"%s\"; ",
+    "script": "./scripts/example_ps.ps1"
 }
 ```

@@ -313,7 +318,7 @@ Example of running a bash script on linux:
 {
     "type": "shell-local",
     "environment_vars": ["PROVISIONERTEST=ProvisionerTest1"],
-    "scripts": ["./scripts/dummy_bash.sh"]
+    "scripts": ["./scripts/example_bash.sh"]
 }
 ```

From 58acb7f436cee34c350ff36e3c4084ee24267221 Mon Sep 17 00:00:00 2001
From: Megan Marsh
Date: Wed, 4 Apr 2018 15:52:39 -0700
Subject: [PATCH 029/138] fix windows test

---
 post-processor/shell-local/post-processor_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/post-processor/shell-local/post-processor_test.go b/post-processor/shell-local/post-processor_test.go
index 5fabac124..ee7e27d70 100644
--- a/post-processor/shell-local/post-processor_test.go
+++ b/post-processor/shell-local/post-processor_test.go
@@ -141,7 +141,7 @@ func TestPostProcessorPrepare_ExecuteCommand(t *testing.T) {
 	if runtime.GOOS != "windows" {
 		expected = []string{"/bin/sh", "-c", "{{.Vars}}", "{{.Script}}"}
 	} else {
-		expected = []string{"cmd", "/C", "{{.Vars}}", "{{.Script}}"}
+		expected = []string{"cmd", "/V", "/C", "{{.Vars}}", "call", "{{.Script}}"}
 	}

From aeadd039b77c6a175982884839327ff3d6818273 Mon Sep 17 00:00:00 2001
From: DanHam
Date: Thu, 10 May 2018 13:33:56 +0100
Subject: [PATCH 030/138] Fix #6240 by way of an update to github.com/masterzen/winrm (& winrm/soap)

$ govendor fetch -v github.com/masterzen/winrm
$ govendor fetch -v github.com/masterzen/winrm/soap

* In #6240 users reported problems that could be traced to the use of
RunWithString in communicator/winrm/communicator.go.
* https://github.com/masterzen/winrm/pull/78 apparently fixed a race
condition in RunWithString that only materialises with Go <= 1.10; this is
possibly why we are only seeing this with recent releases. Additionally,
the intermittent nature of the errors and error messages seen are
indicative of this type of problem... so here's hoping this fixes things... 
--- vendor/github.com/masterzen/winrm/client.go | 33 +++++++++++++++++---- vendor/vendor.json | 10 +++---- 2 files changed, 33 insertions(+), 10 deletions(-) diff --git a/vendor/github.com/masterzen/winrm/client.go b/vendor/github.com/masterzen/winrm/client.go index 732dd61cb..c19515194 100644 --- a/vendor/github.com/masterzen/winrm/client.go +++ b/vendor/github.com/masterzen/winrm/client.go @@ -152,10 +152,20 @@ func (c *Client) RunWithString(command string, stdin string) (string, string, in } var outWriter, errWriter bytes.Buffer - go io.Copy(&outWriter, cmd.Stdout) - go io.Copy(&errWriter, cmd.Stderr) + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + io.Copy(&outWriter, cmd.Stdout) + }() + + go func() { + defer wg.Done() + io.Copy(&errWriter, cmd.Stderr) + }() cmd.Wait() + wg.Wait() return outWriter.String(), errWriter.String(), cmd.ExitCode(), cmd.err } @@ -176,11 +186,24 @@ func (c Client) RunWithInput(command string, stdout, stderr io.Writer, stdin io. return 1, err } - go io.Copy(cmd.Stdin, stdin) - go io.Copy(stdout, cmd.Stdout) - go io.Copy(stderr, cmd.Stderr) + var wg sync.WaitGroup + wg.Add(3) + + go func() { + defer wg.Done() + io.Copy(cmd.Stdin, stdin) + }() + go func() { + defer wg.Done() + io.Copy(stdout, cmd.Stdout) + }() + go func() { + defer wg.Done() + io.Copy(stderr, cmd.Stderr) + }() cmd.Wait() + wg.Wait() return cmd.ExitCode(), cmd.err diff --git a/vendor/vendor.json b/vendor/vendor.json index 1f08d096b..f3e751ef9 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -988,16 +988,16 @@ "revision": "95ba30457eb1121fa27753627c774c7cd4e90083" }, { - "checksumSHA1": "8z5kCCFRsBkhXic9jxxeIV3bBn8=", + "checksumSHA1": "dVQEUn5TxdIAXczK7rh6qUrq44Q=", "path": "github.com/masterzen/winrm", - "revision": "a2df6b1315e6fd5885eb15c67ed259e85854125f", - "revisionTime": "2017-08-14T13:39:27Z" + "revision": "7e40f93ae939004a1ef3bd5ff5c88c756ee762bb", + "revisionTime": "2018-02-24T16:03:50Z" }, { "checksumSHA1": "XFSXma+KmkhkIPsh4dTd/eyja5s=", "path": "github.com/masterzen/winrm/soap", - "revision": "a2df6b1315e6fd5885eb15c67ed259e85854125f", - "revisionTime": "2017-08-14T13:39:27Z" + "revision": "7e40f93ae939004a1ef3bd5ff5c88c756ee762bb", + "revisionTime": "2018-02-24T16:03:50Z" }, { "checksumSHA1": "NkbetqlpWBi3gP08JDneC+axTKw=", From 2c339b99d297bac61831a39afaee50e37b84a018 Mon Sep 17 00:00:00 2001 From: DanHam Date: Sat, 12 May 2018 16:14:48 +0100 Subject: [PATCH 031/138] Sort run config options alphabetically --- builder/amazon/common/run_config.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index f647182aa..a75c1df9e 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -30,25 +30,25 @@ func (d *AmiFilterOptions) Empty() bool { type RunConfig struct { AssociatePublicIpAddress bool `mapstructure:"associate_public_ip_address"` AvailabilityZone string `mapstructure:"availability_zone"` + DisableStopInstance bool `mapstructure:"disable_stop_instance"` EbsOptimized bool `mapstructure:"ebs_optimized"` IamInstanceProfile string `mapstructure:"iam_instance_profile"` + InstanceInitiatedShutdownBehavior string `mapstructure:"shutdown_behavior"` InstanceType string `mapstructure:"instance_type"` RunTags map[string]string `mapstructure:"run_tags"` + SecurityGroupId string `mapstructure:"security_group_id"` + SecurityGroupIds []string `mapstructure:"security_group_ids"` SourceAmi string `mapstructure:"source_ami"` 
SourceAmiFilter                   AmiFilterOptions `mapstructure:"source_ami_filter"`
 	SpotPrice                         string           `mapstructure:"spot_price"`
 	SpotPriceAutoProduct              string           `mapstructure:"spot_price_auto_product"`
-	DisableStopInstance               bool             `mapstructure:"disable_stop_instance"`
-	SecurityGroupId                   string           `mapstructure:"security_group_id"`
-	SecurityGroupIds                  []string         `mapstructure:"security_group_ids"`
-	TemporarySGSourceCidr             string           `mapstructure:"temporary_security_group_source_cidr"`
 	SubnetId                          string           `mapstructure:"subnet_id"`
 	TemporaryKeyPairName              string           `mapstructure:"temporary_key_pair_name"`
+	TemporarySGSourceCidr             string           `mapstructure:"temporary_security_group_source_cidr"`
 	UserData                          string           `mapstructure:"user_data"`
 	UserDataFile                      string           `mapstructure:"user_data_file"`
-	WindowsPasswordTimeout            time.Duration    `mapstructure:"windows_password_timeout"`
 	VpcId                             string           `mapstructure:"vpc_id"`
-	InstanceInitiatedShutdownBehavior string           `mapstructure:"shutdown_behavior"`
+	WindowsPasswordTimeout            time.Duration    `mapstructure:"windows_password_timeout"`

 	// Communicator settings
 	Comm communicator.Config `mapstructure:",squash"`

From 482629ae9025ba7bb9c4f3e151682b4df399dbc3 Mon Sep 17 00:00:00 2001
From: DanHam
Date: Sun, 13 May 2018 15:21:30 +0100
Subject: [PATCH 032/138] Add config option to enable/disable T2 Unlimited for the launched instance

---
 builder/amazon/common/run_config.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go
index a75c1df9e..5922060c2 100644
--- a/builder/amazon/common/run_config.go
+++ b/builder/amazon/common/run_config.go
@@ -32,6 +32,7 @@ type RunConfig struct {
 	AvailabilityZone                  string `mapstructure:"availability_zone"`
 	DisableStopInstance               bool   `mapstructure:"disable_stop_instance"`
 	EbsOptimized                      bool   `mapstructure:"ebs_optimized"`
+	EnableT2Unlimited                 bool   `mapstructure:"enable_t2_unlimited"`
 	IamInstanceProfile                string `mapstructure:"iam_instance_profile"`
 	InstanceInitiatedShutdownBehavior string `mapstructure:"shutdown_behavior"`
 	InstanceType                      string `mapstructure:"instance_type"`

From be02b3f61387bb49ce8954ed2cd333f2ffaa97a5 Mon Sep 17 00:00:00 2001
From: DanHam
Date: Sat, 12 May 2018 16:20:01 +0100
Subject: [PATCH 033/138] Validate template settings when T2 Unlimited has been enabled

* T2 Unlimited cannot be used with anything other than T2 instance types
* T2 Unlimited cannot be used with Spot Instances

---
 builder/amazon/common/run_config.go | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go
index 5922060c2..cd40c9dc4 100644
--- a/builder/amazon/common/run_config.go
+++ b/builder/amazon/common/run_config.go
@@ -6,6 +6,7 @@ import (
 	"net"
 	"os"
 	"regexp"
+	"strings"
 	"time"

 	"github.com/hashicorp/packer/common/uuid"
@@ -142,6 +143,18 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error {
 		errs = append(errs, fmt.Errorf("shutdown_behavior only accepts 'stop' or 'terminate' values."))
 	}

+	if c.EnableT2Unlimited {
+		if c.SpotPrice != "" {
+			errs = append(errs, fmt.Errorf("Error: T2 Unlimited cannot be used in conjunction with Spot Instances"))
+		}
+		firstDotIndex := strings.Index(c.InstanceType, ".")
+		if firstDotIndex == -1 {
+			errs = append(errs, fmt.Errorf("Error determining main Instance Type from: %s", c.InstanceType))
+		} else if c.InstanceType[0:firstDotIndex] != "t2" {
+			errs = append(errs, fmt.Errorf("Error: T2 Unlimited enabled with a non-T2 Instance Type: %s", c.InstanceType))
+		}
+	}
+
 	return errs
 }

From df7fb869840e1aec1083ecef91a8ad12b374e1f1 Mon Sep 17 00:00:00 2001
From: DanHam
Date: Sun, 13 May 2018 16:13:20 +0100
Subject: [PATCH 034/138] Add tests for T2 Unlimited configuration

---
 builder/amazon/common/run_config_test.go | 35 ++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/builder/amazon/common/run_config_test.go b/builder/amazon/common/run_config_test.go
index a88730e82..212f70c02 100644
--- a/builder/amazon/common/run_config_test.go
+++ b/builder/amazon/common/run_config_test.go
@@ -79,6 +79,41 @@ func TestRunConfigPrepare_SourceAmiFilterGood(t *testing.T) {
 	}
 }

+func TestRunConfigPrepare_EnableT2UnlimitedGood(t *testing.T) {
+	c := testConfig()
+	// Must have a T2 instance type if T2 Unlimited is enabled
+	c.InstanceType = "t2.micro"
+	c.EnableT2Unlimited = true
+	err := c.Prepare(nil)
+	if len(err) > 0 {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestRunConfigPrepare_EnableT2UnlimitedBadInstanceType(t *testing.T) {
+	c := testConfig()
+	// T2 Unlimited cannot be used with instance types other than T2
+	c.InstanceType = "m5.large"
+	c.EnableT2Unlimited = true
+	err := c.Prepare(nil)
+	if len(err) != 1 {
+		t.Fatalf("T2 Unlimited should not work with non-T2 instance types")
+	}
+}
+
+func TestRunConfigPrepare_EnableT2UnlimitedBadWithSpotInstanceRequest(t *testing.T) {
+	c := testConfig()
+	// T2 Unlimited cannot be used with Spot Instances
+	c.InstanceType = "t2.micro"
+	c.EnableT2Unlimited = true
+	c.SpotPrice = "auto"
+	c.SpotPriceAutoProduct = "Linux/UNIX"
+	err := c.Prepare(nil)
+	if len(err) != 1 {
+		t.Fatalf("T2 Unlimited cannot be used in conjunction with Spot Price requests")
+	}
+}
+
 func TestRunConfigPrepare_SpotAuto(t *testing.T) {
 	c := testConfig()
 	c.SpotPrice = "auto"

From 6fc68754d779e4d5fc2c54baf081e7a898497990 Mon Sep 17 00:00:00 2001
From: DanHam
Date: Sun, 13 May 2018 16:32:27 +0100
Subject: [PATCH 035/138] Allow use of T2 unlimited by adding appropriate request for the instance

---
 builder/amazon/common/step_run_source_instance.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/builder/amazon/common/step_run_source_instance.go b/builder/amazon/common/step_run_source_instance.go
index 114da38e1..4dd8fbe74 100644
--- a/builder/amazon/common/step_run_source_instance.go
+++ b/builder/amazon/common/step_run_source_instance.go
@@ -24,6 +24,7 @@ type StepRunSourceInstance struct {
 	Ctx                               interpolate.Context
 	Debug                             bool
 	EbsOptimized                      bool
+	EnableT2Unlimited                 bool
 	ExpectedRootDevice                string
 	IamInstanceProfile                string
 	InstanceInitiatedShutdownBehavior string
@@ -116,6 +117,11 @@ func (s *StepRunSourceInstance) Run(ctx context.Context, state multistep.StateBa
 		EbsOptimized: &s.EbsOptimized,
 	}

+	if s.EnableT2Unlimited {
+		creditOption := "unlimited"
+		runOpts.CreditSpecification = &ec2.CreditSpecificationRequest{CpuCredits: &creditOption}
+	}
+
 	// Collect tags for tagging on resource creation
 	var tagSpecs []*ec2.TagSpecification

From d5304a25e928dcf43373ba09734fc5a76b796d29 Mon Sep 17 00:00:00 2001
From: DanHam
Date: Sun, 13 May 2018 17:16:10 +0100
Subject: [PATCH 036/138] Pass T2 Unlimited settings to run instance step for appropriate EC2 builders

---
 builder/amazon/ebs/builder.go          | 1 +
 builder/amazon/ebssurrogate/builder.go | 1 +
 builder/amazon/ebsvolume/builder.go    | 1 +
 builder/amazon/instance/builder.go     | 1 +
 4 files changed, 4 insertions(+)

diff --git a/builder/amazon/ebs/builder.go b/builder/amazon/ebs/builder.go
index 5e63b05c8..665bf8098 100644
--- a/builder/amazon/ebs/builder.go
+++ b/builder/amazon/ebs/builder.go
@@ -148,6 +148,7 @@ func (b 
*Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		Ctx:                               b.config.ctx,
 		Debug:                             b.config.PackerDebug,
 		EbsOptimized:                      b.config.EbsOptimized,
+		EnableT2Unlimited:                 b.config.EnableT2Unlimited,
 		ExpectedRootDevice:                "ebs",
 		IamInstanceProfile:                b.config.IamInstanceProfile,
 		InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
diff --git a/builder/amazon/ebssurrogate/builder.go b/builder/amazon/ebssurrogate/builder.go
index 31f47164f..52a151b22 100644
--- a/builder/amazon/ebssurrogate/builder.go
+++ b/builder/amazon/ebssurrogate/builder.go
@@ -162,6 +162,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		Ctx:                               b.config.ctx,
 		Debug:                             b.config.PackerDebug,
 		EbsOptimized:                      b.config.EbsOptimized,
+		EnableT2Unlimited:                 b.config.EnableT2Unlimited,
 		ExpectedRootDevice:                "ebs",
 		IamInstanceProfile:                b.config.IamInstanceProfile,
 		InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
diff --git a/builder/amazon/ebsvolume/builder.go b/builder/amazon/ebsvolume/builder.go
index a1cc1fd2a..1a79b964e 100644
--- a/builder/amazon/ebsvolume/builder.go
+++ b/builder/amazon/ebsvolume/builder.go
@@ -145,6 +145,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		Ctx:                               b.config.ctx,
 		Debug:                             b.config.PackerDebug,
 		EbsOptimized:                      b.config.EbsOptimized,
+		EnableT2Unlimited:                 b.config.EnableT2Unlimited,
 		ExpectedRootDevice:                "ebs",
 		IamInstanceProfile:                b.config.IamInstanceProfile,
 		InstanceInitiatedShutdownBehavior: b.config.InstanceInitiatedShutdownBehavior,
diff --git a/builder/amazon/instance/builder.go b/builder/amazon/instance/builder.go
index eb3ecdbda..c197cadeb 100644
--- a/builder/amazon/instance/builder.go
+++ b/builder/amazon/instance/builder.go
@@ -230,6 +230,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe
 		Ctx:                 b.config.ctx,
 		Debug:               b.config.PackerDebug,
 		EbsOptimized:        b.config.EbsOptimized,
+		EnableT2Unlimited:   b.config.EnableT2Unlimited,
 		IamInstanceProfile:  b.config.IamInstanceProfile,
 		InstanceType:        b.config.InstanceType,
 		IsRestricted:        b.config.IsChinaCloud() || b.config.IsGovCloud(),

From a9aa9908cd41196552603d1894968417e571d131 Mon Sep 17 00:00:00 2001
From: DanHam
Date: Sun, 13 May 2018 18:55:21 +0100
Subject: [PATCH 037/138] Document use of T2 Unlimited for enabled Amazon builders

---
 .../source/docs/builders/amazon-ebs.html.md        | 24 +++++++++++++++
 .../docs/builders/amazon-ebssurrogate.html.md      | 24 +++++++++++++++
 .../docs/builders/amazon-ebsvolume.html.md         | 24 +++++++++++++++
 .../docs/builders/amazon-instance.html.md          | 24 +++++++++++++++
 4 files changed, 96 insertions(+)

diff --git a/website/source/docs/builders/amazon-ebs.html.md b/website/source/docs/builders/amazon-ebs.html.md
index d5ed5b497..2901bd249 100644
--- a/website/source/docs/builders/amazon-ebs.html.md
+++ b/website/source/docs/builders/amazon-ebs.html.md
@@ -169,6 +169,30 @@ builder.
    Note: you must make sure enhanced networking is enabled on your instance. See [Amazon's documentation on enabling enhanced networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking). Default `false`.

+- `enable_t2_unlimited` (boolean) - Enabling T2 Unlimited allows the source
+  instance to burst additional CPU beyond its available
+  [CPU Credits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
+  for as long as the demand exists.
+  This is in contrast to the standard configuration that only allows an
+  instance to consume up to its available CPU Credits.
+  See the AWS documentation for
+  [T2 Unlimited](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
+  and the 'T2 Unlimited Pricing' section of the [Amazon EC2 On-Demand
+  Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for more
+  information.
+  By default this option is disabled and Packer will set up a [T2
+  Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
+  instance instead.
+
+  To use T2 Unlimited you must use a T2 instance type e.g. t2.micro.
+  Additionally, T2 Unlimited cannot be used in conjunction with Spot
+  Instances e.g. when the `spot_price` option has been configured.
+  Attempting to do so will cause an error.
+
+  !> **Warning!** Additional costs may be incurred by enabling T2
+  Unlimited - even for instances that would usually qualify for the
+  [AWS Free Tier](https://aws.amazon.com/free/).
+
- `force_deregister` (boolean) - Force Packer to first deregister an existing
   AMI if one with the same name already exists. Default `false`.

diff --git a/website/source/docs/builders/amazon-ebssurrogate.html.md b/website/source/docs/builders/amazon-ebssurrogate.html.md
index 8cf9508b4..720521536 100644
--- a/website/source/docs/builders/amazon-ebssurrogate.html.md
+++ b/website/source/docs/builders/amazon-ebssurrogate.html.md
@@ -162,6 +162,30 @@ builder.
    Note: you must make sure enhanced networking is enabled on your instance. See [Amazon's documentation on enabling enhanced networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking). Default `false`.

+- `enable_t2_unlimited` (boolean) - Enabling T2 Unlimited allows the source
+  instance to burst additional CPU beyond its available
+  [CPU Credits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
+  for as long as the demand exists.
+  This is in contrast to the standard configuration that only allows an
+  instance to consume up to its available CPU Credits.
+  See the AWS documentation for
+  [T2 Unlimited](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
+  and the 'T2 Unlimited Pricing' section of the [Amazon EC2 On-Demand
+  Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for more
+  information.
+  By default this option is disabled and Packer will set up a [T2
+  Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
+  instance instead.
+
+  To use T2 Unlimited you must use a T2 instance type e.g. t2.micro.
+  Additionally, T2 Unlimited cannot be used in conjunction with Spot
+  Instances e.g. when the `spot_price` option has been configured.
+  Attempting to do so will cause an error.
+
+  !> **Warning!** Additional costs may be incurred by enabling T2
+  Unlimited - even for instances that would usually qualify for the
+  [AWS Free Tier](https://aws.amazon.com/free/).
+
- `force_deregister` (boolean) - Force Packer to first deregister an existing
   AMI if one with the same name already exists. Default `false`.

diff --git a/website/source/docs/builders/amazon-ebsvolume.html.md b/website/source/docs/builders/amazon-ebsvolume.html.md
index a39a31fcd..1bab5eb62 100644
--- a/website/source/docs/builders/amazon-ebsvolume.html.md
+++ b/website/source/docs/builders/amazon-ebsvolume.html.md
@@ -120,6 +120,30 @@ builder.
    Note: you must make sure enhanced networking is enabled on your instance. See [Amazon's documentation on enabling enhanced networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking). Default `false`.

+- `enable_t2_unlimited` (boolean) - Enabling T2 Unlimited allows the source
+  instance to burst additional CPU beyond its available
+  [CPU Credits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
+  for as long as the demand exists.
+  This is in contrast to the standard configuration that only allows an
+  instance to consume up to its available CPU Credits.
+  See the AWS documentation for
+  [T2 Unlimited](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
+  and the 'T2 Unlimited Pricing' section of the [Amazon EC2 On-Demand
+  Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for more
+  information.
+  By default this option is disabled and Packer will set up a [T2
+  Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
+  instance instead.
+
+  To use T2 Unlimited you must use a T2 instance type e.g. t2.micro.
+  Additionally, T2 Unlimited cannot be used in conjunction with Spot
+  Instances e.g. when the `spot_price` option has been configured.
+  Attempting to do so will cause an error.
+
+  !> **Warning!** Additional costs may be incurred by enabling T2
+  Unlimited - even for instances that would usually qualify for the
+  [AWS Free Tier](https://aws.amazon.com/free/).
+
- `iam_instance_profile` (string) - The name of an
   [IAM instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
   to launch the EC2 instance with.

diff --git a/website/source/docs/builders/amazon-instance.html.md b/website/source/docs/builders/amazon-instance.html.md
index 77d7c2f5b..52686aeec 100644
--- a/website/source/docs/builders/amazon-instance.html.md
+++ b/website/source/docs/builders/amazon-instance.html.md
@@ -193,6 +193,30 @@ builder.
    Note: you must make sure enhanced networking is enabled on your instance. See [Amazon's documentation on enabling enhanced networking](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking.html#enabling_enhanced_networking). Default `false`.

+- `enable_t2_unlimited` (boolean) - Enabling T2 Unlimited allows the source
+  instance to burst additional CPU beyond its available
+  [CPU Credits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-credits-baseline-concepts.html)
+  for as long as the demand exists.
+  This is in contrast to the standard configuration that only allows an
+  instance to consume up to its available CPU Credits.
+  See the AWS documentation for
+  [T2 Unlimited](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-unlimited.html)
+  and the 'T2 Unlimited Pricing' section of the [Amazon EC2 On-Demand
+  Pricing](https://aws.amazon.com/ec2/pricing/on-demand/) document for more
+  information.
+  By default this option is disabled and Packer will set up a [T2
+  Standard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/t2-std.html)
+  instance instead.
+
+  To use T2 Unlimited you must use a T2 instance type e.g. t2.micro.
+  Additionally, T2 Unlimited cannot be used in conjunction with Spot
+  Instances e.g. when the `spot_price` option has been configured.
+  Attempting to do so will cause an error.
+
+  !> **Warning!** Additional costs may be incurred by enabling T2
+  Unlimited - even for instances that would usually qualify for the
+  [AWS Free Tier](https://aws.amazon.com/free/). 
+ - `force_deregister` (boolean) - Force Packer to first deregister an existing AMI if one with the same name already exists. Defaults to `false`. From 99e3487795278bec044684cec5ae94ff21dd34ae Mon Sep 17 00:00:00 2001 From: DanHam Date: Mon, 14 May 2018 00:54:51 +0100 Subject: [PATCH 038/138] Add missing validation and tests for Spot Instance requests --- builder/amazon/common/run_config.go | 7 +++++++ builder/amazon/common/run_config_test.go | 8 +++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index cd40c9dc4..99cdc86ae 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -112,6 +112,13 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { } } + if c.SpotPriceAutoProduct != "" { + if c.SpotPrice != "auto" { + errs = append(errs, errors.New( + "spot_price should be set to auto when spot_price_auto_product is specified")) + } + } + if c.UserData != "" && c.UserDataFile != "" { errs = append(errs, fmt.Errorf("Only one of user_data or user_data_file can be specified.")) } else if c.UserDataFile != "" { diff --git a/builder/amazon/common/run_config_test.go b/builder/amazon/common/run_config_test.go index 212f70c02..ae9a547c0 100644 --- a/builder/amazon/common/run_config_test.go +++ b/builder/amazon/common/run_config_test.go @@ -118,13 +118,19 @@ func TestRunConfigPrepare_SpotAuto(t *testing.T) { c := testConfig() c.SpotPrice = "auto" if err := c.Prepare(nil); len(err) != 1 { - t.Fatalf("err: %s", err) + t.Fatalf("spot_price_auto_product should be set when spot_price is set to auto") } + // Good - SpotPrice and SpotPriceAutoProduct are correctly set c.SpotPriceAutoProduct = "foo" if err := c.Prepare(nil); len(err) != 0 { t.Fatalf("err: %s", err) } + + c.SpotPrice = "" + if err := c.Prepare(nil); len(err) != 1 { + t.Fatalf("spot_price should be set to auto when spot_price_auto_product is set") + } } func TestRunConfigPrepare_SSHPort(t *testing.T) { From 82c8710af5f03237d8eed6f99ff7d580b68f4983 Mon Sep 17 00:00:00 2001 From: DanHam Date: Tue, 15 May 2018 10:07:09 +0100 Subject: [PATCH 039/138] Use fmt.Errorf over errors.New as we only require basic error reporting --- builder/amazon/common/run_config.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index 99cdc86ae..bc596e580 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -1,7 +1,6 @@ package common import ( - "errors" "fmt" "net" "os" @@ -86,35 +85,35 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { c.SSHInterface != "public_dns" && c.SSHInterface != "private_dns" && c.SSHInterface != "" { - errs = append(errs, errors.New(fmt.Sprintf("Unknown interface type: %s", c.SSHInterface))) + errs = append(errs, fmt.Errorf(fmt.Sprintf("Unknown interface type: %s", c.SSHInterface))) } if c.SSHKeyPairName != "" { if c.Comm.Type == "winrm" && c.Comm.WinRMPassword == "" && c.Comm.SSHPrivateKey == "" { - errs = append(errs, errors.New("ssh_private_key_file must be provided to retrieve the winrm password when using ssh_keypair_name.")) + errs = append(errs, fmt.Errorf("ssh_private_key_file must be provided to retrieve the winrm password when using ssh_keypair_name.")) } else if c.Comm.SSHPrivateKey == "" && !c.Comm.SSHAgentAuth { - errs = append(errs, errors.New("ssh_private_key_file must be provided or ssh_agent_auth enabled when 
ssh_keypair_name is specified."))
 		}
 	}

 	if c.SourceAmi == "" && c.SourceAmiFilter.Empty() {
-		errs = append(errs, errors.New("A source_ami or source_ami_filter must be specified"))
+		errs = append(errs, fmt.Errorf("A source_ami or source_ami_filter must be specified"))
 	}

 	if c.InstanceType == "" {
-		errs = append(errs, errors.New("An instance_type must be specified"))
+		errs = append(errs, fmt.Errorf("An instance_type must be specified"))
 	}

 	if c.SpotPrice == "auto" {
 		if c.SpotPriceAutoProduct == "" {
-			errs = append(errs, errors.New(
+			errs = append(errs, fmt.Errorf(
 				"spot_price_auto_product must be specified when spot_price is auto"))
 		}
 	}

 	if c.SpotPriceAutoProduct != "" {
 		if c.SpotPrice != "auto" {
-			errs = append(errs, errors.New(
+			errs = append(errs, fmt.Errorf(
 				"spot_price should be set to auto when spot_price_auto_product is specified"))
 		}
 	}

From ec8b70721cf216216128e930de1595b0fbb87eae Mon Sep 17 00:00:00 2001
From: DanHam
Date: Tue, 15 May 2018 11:44:58 +0100
Subject: [PATCH 040/138] Use an explicit error message when an error is expected and we don't get one

Previously, if the validation check generating the error in the main code
were removed, the 'should error' tests would just return an empty message

---
 builder/amazon/common/run_config_test.go | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/builder/amazon/common/run_config_test.go b/builder/amazon/common/run_config_test.go
index ae9a547c0..fde9ef760 100644
--- a/builder/amazon/common/run_config_test.go
+++ b/builder/amazon/common/run_config_test.go
@@ -48,7 +48,7 @@ func TestRunConfigPrepare_InstanceType(t *testing.T) {
 	c := testConfig()
 	c.InstanceType = ""
 	if err := c.Prepare(nil); len(err) != 1 {
-		t.Fatalf("err: %s", err)
+		t.Fatalf("Should error if an instance_type is not specified")
 	}
 }

@@ -56,14 +56,14 @@ func TestRunConfigPrepare_SourceAmi(t *testing.T) {
 	c := testConfig()
 	c.SourceAmi = ""
 	if err := c.Prepare(nil); len(err) != 1 {
-		t.Fatalf("err: %s", err)
+		t.Fatalf("Should error if a source_ami (or source_ami_filter) is not specified")
 	}
 }

 func TestRunConfigPrepare_SourceAmiFilterBlank(t *testing.T) {
 	c := testConfigFilter()
 	if err := c.Prepare(nil); len(err) != 1 {
-		t.Fatalf("err: %s", err)
+		t.Fatalf("Should error if source_ami_filter is empty or not specified (and source_ami is not specified)")
 	}
 }

@@ -97,7 +97,7 @@ func TestRunConfigPrepare_EnableT2UnlimitedBadInstanceType(t *testing.T) {
 	c.EnableT2Unlimited = true
 	err := c.Prepare(nil)
 	if len(err) != 1 {
-		t.Fatalf("T2 Unlimited should not work with non-T2 instance types")
+		t.Fatalf("Should error if T2 Unlimited is enabled with non-T2 instance_type")
 	}
 }

@@ -110,7 +110,7 @@ func TestRunConfigPrepare_EnableT2UnlimitedBadWithSpotInstanceRequest(t *testing
 	c.SpotPriceAutoProduct = "Linux/UNIX"
 	err := c.Prepare(nil)
 	if len(err) != 1 {
-		t.Fatalf("T2 Unlimited cannot be used in conjunction with Spot Price requests")
+		t.Fatalf("Should error if T2 Unlimited has been used in conjunction with a Spot Price request")
 	}
 }

@@ -118,7 +118,7 @@ func TestRunConfigPrepare_SpotAuto(t *testing.T) {
 	c := testConfig()
 	c.SpotPrice = "auto"
 	if err := c.Prepare(nil); len(err) != 1 {
-		t.Fatalf("spot_price_auto_product should be set when spot_price is set to auto")
+		t.Fatalf("Should error if spot_price_auto_product is not set and spot_price is set to auto")
 	}

 	// Good - SpotPrice and SpotPriceAutoProduct are correctly 
set @@ -129,7 +129,7 @@ func TestRunConfigPrepare_SpotAuto(t *testing.T) { c.SpotPrice = "" if err := c.Prepare(nil); len(err) != 1 { - t.Fatalf("spot_price should be set to auto when spot_price_auto_product is set") + t.Fatalf("Should error if spot_price is not set to auto and spot_price_auto_product is set") } } @@ -166,7 +166,7 @@ func TestRunConfigPrepare_UserData(t *testing.T) { c.UserData = "foo" c.UserDataFile = tf.Name() if err := c.Prepare(nil); len(err) != 1 { - t.Fatalf("err: %s", err) + t.Fatalf("Should error if user_data string and user_data_file have both been specified") } } @@ -178,7 +178,7 @@ func TestRunConfigPrepare_UserDataFile(t *testing.T) { c.UserDataFile = "idontexistidontthink" if err := c.Prepare(nil); len(err) != 1 { - t.Fatalf("err: %s", err) + t.Fatalf("Should error if the file specified by user_data_file does not exist") } tf, err := ioutil.TempFile("", "packer") From 81db142c8a29a45479324e2830cbdb08a5508574 Mon Sep 17 00:00:00 2001 From: localghost Date: Thu, 10 May 2018 22:34:15 +0200 Subject: [PATCH 041/138] Unify handling PlaybookFile and PlaybookFiles. --- provisioner/ansible-local/provisioner.go | 18 +----- provisioner/ansible-local/provisioner_test.go | 55 ++++++++++++++++++- 2 files changed, 56 insertions(+), 17 deletions(-) diff --git a/provisioner/ansible-local/provisioner.go b/provisioner/ansible-local/provisioner.go index a25aca36c..9a5c95daa 100644 --- a/provisioner/ansible-local/provisioner.go +++ b/provisioner/ansible-local/provisioner.go @@ -111,6 +111,8 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { err = validateFileConfig(p.config.PlaybookFile, "playbook_file", true) if err != nil { errs = packer.MultiErrorAppend(errs, err) + } else { + p.playbookFiles = append(p.playbookFiles, p.config.PlaybookFile) } } @@ -197,14 +199,7 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } } - if p.config.PlaybookFile != "" { - ui.Message("Uploading main Playbook file...") - src := p.config.PlaybookFile - dst := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(src))) - if err := p.uploadFile(ui, comm, dst, src); err != nil { - return fmt.Errorf("Error uploading main playbook: %s", err) - } - } else if err := p.provisionPlaybookFiles(ui, comm); err != nil { + if err := p.provisionPlaybookFiles(ui, comm); err != nil { return err } @@ -379,13 +374,6 @@ func (p *Provisioner) executeAnsible(ui packer.Ui, comm packer.Communicator) err } } - if p.config.PlaybookFile != "" { - playbookFile := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.PlaybookFile))) - if err := p.executeAnsiblePlaybook(ui, comm, playbookFile, extraArgs, inventory); err != nil { - return err - } - } - for _, playbookFile := range p.playbookFiles { playbookFile = filepath.ToSlash(filepath.Join(p.config.StagingDir, playbookFile)) if err := p.executeAnsiblePlaybook(ui, comm, playbookFile, extraArgs, inventory); err != nil { diff --git a/provisioner/ansible-local/provisioner_test.go b/provisioner/ansible-local/provisioner_test.go index 1b4bbe5e7..57abc0e87 100644 --- a/provisioner/ansible-local/provisioner_test.go +++ b/provisioner/ansible-local/provisioner_test.go @@ -117,6 +117,58 @@ func TestProvisionerPrepare_PlaybookFiles(t *testing.T) { } } +func TestProvisionerProvision_PlaybookFile(t *testing.T) { + var p Provisioner + config := testConfig() + + playbook := createTempFile("") + defer os.Remove(playbook) + + config["playbook_file"] = playbook + err := p.Prepare(config) + if err != nil { + 
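Taken together, the provisioner hunks above collapse the two options into a single code path; a condensed sketch of the resulting flow, reassembled from this patch (validation details and surrounding code elided):

	// Prepare: a lone playbook_file now simply joins the unified list...
	if p.config.PlaybookFile != "" {
		if err := validateFileConfig(p.config.PlaybookFile, "playbook_file", true); err != nil {
			errs = packer.MultiErrorAppend(errs, err)
		} else {
			p.playbookFiles = append(p.playbookFiles, p.config.PlaybookFile)
		}
	}

	// ...so Provision and executeAnsible only ever iterate p.playbookFiles.
	for _, playbookFile := range p.playbookFiles {
		playbookFile = filepath.ToSlash(filepath.Join(p.config.StagingDir, playbookFile))
		if err := p.executeAnsiblePlaybook(ui, comm, playbookFile, extraArgs, inventory); err != nil {
			return err
		}
	}
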
t.Fatalf("err: %s", err) + } + + comm := &communicatorMock{} + if err := p.Provision(&uiStub{}, comm); err != nil { + t.Fatalf("err: %s", err) + } + + assertPlaybooksUploaded(comm, []string{playbook}) + assertPlaybooksExecuted(comm, []string{playbook}) +} + +func TestProvisionerProvision_PlaybookFileWithPlaybookDir(t *testing.T) { + var p Provisioner + config := testConfig() + + playbook_dir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Failed to create playbook_dir: %s", err) + } + defer os.RemoveAll(playbook_dir) + playbook := createTempFile(playbook_dir) + + playbookName := filepath.Base(playbook) + playbookInPlaybookDir := strings.TrimPrefix(playbook, playbook_dir) + + config["playbook_file"] = playbook + config["playbook_dir"] = playbook_dir + err = p.Prepare(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + comm := &communicatorMock{} + if err := p.Provision(&uiStub{}, comm); err != nil { + t.Fatalf("err: %s", err) + } + + assertPlaybooksNotUploaded(comm, []string{playbookName}) + assertPlaybooksExecuted(comm, []string{playbookInPlaybookDir}) +} + func TestProvisionerProvision_PlaybookFiles(t *testing.T) { var p Provisioner config := testConfig() @@ -337,7 +389,6 @@ func testProvisionerProvisionDockerWithPlaybookFiles(t *testing.T, templateStrin if err != nil { t.Fatalf("Error preparing download: %s", err) } - defer os.Remove("hello_world") // Add hooks so the provisioners run during the build hooks := map[string][]packer.Hook{} @@ -357,6 +408,7 @@ func testProvisionerProvisionDockerWithPlaybookFiles(t *testing.T, templateStrin t.Fatalf("Error running build %s", err) } defer artifact.Destroy() + defer os.Remove("hello_world") actualContent, err := ioutil.ReadFile("hello_world") if err != nil { @@ -386,7 +438,6 @@ func assertPlaybooksExecuted(comm *communicatorMock, playbooks []string) { } func assertPlaybooksUploaded(comm *communicatorMock, playbooks []string) { - fmt.Println(comm.uploadDestination) uploadIndex := 0 for _, playbook := range playbooks { playbook = filepath.ToSlash(playbook) From 6c7aa724eb1da5c740957a5bea02541a0f298022 Mon Sep 17 00:00:00 2001 From: localghost Date: Wed, 16 May 2018 22:43:30 +0200 Subject: [PATCH 042/138] Fix tests after merging with master. 
--- provisioner/ansible-local/provisioner_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/provisioner/ansible-local/provisioner_test.go b/provisioner/ansible-local/provisioner_test.go index 6eddd2736..4370b060c 100644 --- a/provisioner/ansible-local/provisioner_test.go +++ b/provisioner/ansible-local/provisioner_test.go @@ -102,6 +102,7 @@ func TestProvisionerPrepare_PlaybookFiles(t *testing.T) { t.Fatal("should have error") } + p = Provisioner{} config["playbook_file"] = playbook_file.Name() config["playbook_files"] = []string{} err = p.Prepare(config) @@ -417,11 +418,10 @@ func testProvisionerProvisionDockerWithPlaybookFiles(t *testing.T, templateStrin hooks := map[string][]packer.Hook{} hooks[packer.HookProvision] = []packer.Hook{ &packer.ProvisionHook{ - Provisioners: []packer.Provisioner{ - ansible, - download, + Provisioners: []*packer.HookedProvisioner{ + {ansible, nil, ""}, + {download, nil, ""}, }, - ProvisionerTypes: []string{tpl.Provisioners[0].Type, tpl.Provisioners[1].Type}, }, } hook := &packer.DispatchHook{Mapping: hooks} From 1f46271a6b2e2eadb4036bdcc49b2b943e761464 Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Fri, 18 May 2018 00:32:01 -0700 Subject: [PATCH 043/138] Ensuring device login works for Windows build --- builder/azure/arm/azure_client.go | 2 +- builder/azure/arm/builder.go | 33 ++++- builder/azure/arm/builder_acc_test.go | 51 ++++++- builder/azure/arm/config.go | 3 - builder/azure/arm/config_test.go | 35 ----- builder/azure/common/devicelogin.go | 29 ++-- .../autorest/azure/environments.go | 2 +- .../dgrijalva/jwt-go/MIGRATION_GUIDE.md | 5 +- vendor/github.com/dgrijalva/jwt-go/README.md | 35 +++-- .../dgrijalva/jwt-go/VERSION_HISTORY.md | 13 ++ vendor/github.com/dgrijalva/jwt-go/ecdsa.go | 1 + vendor/github.com/dgrijalva/jwt-go/errors.go | 6 +- vendor/github.com/dgrijalva/jwt-go/hmac.go | 3 +- vendor/github.com/dgrijalva/jwt-go/parser.go | 134 ++++++++++-------- vendor/github.com/dgrijalva/jwt-go/rsa.go | 5 +- .../github.com/dgrijalva/jwt-go/rsa_utils.go | 32 +++++ vendor/vendor.json | 7 +- .../source/docs/builders/azure-setup.html.md | 5 +- website/source/docs/builders/azure.html.md | 7 - 19 files changed, 258 insertions(+), 150 deletions(-) diff --git a/builder/azure/arm/azure_client.go b/builder/azure/arm/azure_client.go index f8477d235..7e4f5324e 100644 --- a/builder/azure/arm/azure_client.go +++ b/builder/azure/arm/azure_client.go @@ -122,7 +122,7 @@ func byConcatDecorators(decorators ...autorest.RespondDecorator) autorest.Respon } func NewAzureClient(subscriptionID, resourceGroupName, storageAccountName string, - cloud *azure.Environment, + cloud *azure.Environment, tenantID string, isDeviceLogin bool, servicePrincipalToken, servicePrincipalTokenVault *adal.ServicePrincipalToken) (*AzureClient, error) { var azureClient = &AzureClient{} diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index f71d920cf..8f4b66aaa 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -4,17 +4,17 @@ import ( "context" "errors" "fmt" + packerAzureCommon "github.com/hashicorp/packer/builder/azure/common" "log" "os" "runtime" "strings" "time" - packerAzureCommon "github.com/hashicorp/packer/builder/azure/common" - armstorage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" "github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/go-autorest/autorest/adal" + "github.com/dgrijalva/jwt-go" "github.com/hashicorp/packer/builder/azure/common/constants" 
"github.com/hashicorp/packer/builder/azure/common/lin" packerCommon "github.com/hashicorp/packer/common" @@ -52,6 +52,10 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { } func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { + + claims := jwt.MapClaims{} + var p jwt.Parser + ui.Say("Running builder ...") ctx, cancel := context.WithCancel(context.Background()) @@ -79,9 +83,10 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe b.config.ResourceGroupName, b.config.StorageAccount, b.config.cloudEnvironment, + b.config.TenantID, + b.config.useDeviceLogin, spnCloud, spnKeyVault) - if err != nil { return nil, err } @@ -91,6 +96,18 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, err } + _, _, err = p.ParseUnverified(spnCloud.OAuthToken(), claims) + + if err != nil { + return nil, err + } + b.config.ObjectID = claims["oid"].(string) + + if b.config.ObjectID == "" && b.config.OSType != constants.Target_Linux { + ui.Error("\n Got empty Object ID in the OAuth token , we need this for Key vault Access, bailing") + return nil, err + } + if b.config.isManagedImage() { group, err := azureClient.GroupsClient.Get(ctx, b.config.ManagedImageResourceGroupName) if err != nil { @@ -371,10 +388,15 @@ func (b *Builder) getServicePrincipalTokens(say func(string)) (*adal.ServicePrin var err error if b.config.useDeviceLogin { - servicePrincipalToken, err = packerAzureCommon.Authenticate(*b.config.cloudEnvironment, b.config.TenantID, say) + servicePrincipalToken, err = packerAzureCommon.Authenticate(*b.config.cloudEnvironment, b.config.TenantID, say, b.config.cloudEnvironment.ServiceManagementEndpoint) if err != nil { return nil, nil, err } + servicePrincipalTokenVault, err = packerAzureCommon.Authenticate(*b.config.cloudEnvironment, b.config.TenantID, say, b.config.cloudEnvironment.KeyVaultEndpoint) + if err != nil { + return nil, nil, err + } + } else { auth := NewAuthenticate(*b.config.cloudEnvironment, b.config.ClientID, b.config.ClientSecret, b.config.TenantID) @@ -382,6 +404,7 @@ func (b *Builder) getServicePrincipalTokens(say func(string)) (*adal.ServicePrin if err != nil { return nil, nil, err } + servicePrincipalToken.EnsureFresh() servicePrincipalTokenVault, err = auth.getServicePrincipalTokenWithResource( strings.TrimRight(b.config.cloudEnvironment.KeyVaultEndpoint, "/")) @@ -389,6 +412,8 @@ func (b *Builder) getServicePrincipalTokens(say func(string)) (*adal.ServicePrin if err != nil { return nil, nil, err } + servicePrincipalTokenVault.EnsureFresh() + } return servicePrincipalToken, servicePrincipalTokenVault, nil diff --git a/builder/azure/arm/builder_acc_test.go b/builder/azure/arm/builder_acc_test.go index 4c579de2f..4f16ea229 100644 --- a/builder/azure/arm/builder_acc_test.go +++ b/builder/azure/arm/builder_acc_test.go @@ -34,6 +34,14 @@ func TestBuilderAcc_ManagedDisk_Windows(t *testing.T) { }) } +func TestBuilderAcc_ManagedDisk_Windows_DeviceLogin(t *testing.T) { + builderT.Test(t, builderT.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Builder: &Builder{}, + Template: testBuilderAccManagedDiskWindowsDeviceLogin, + }) +} + func TestBuilderAcc_ManagedDisk_Linux(t *testing.T) { builderT.Test(t, builderT.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -65,8 +73,7 @@ const testBuilderAccManagedDiskWindows = ` "variables": { "client_id": "{{env ` + "`ARM_CLIENT_ID`" + `}}", "client_secret": "{{env ` + "`ARM_CLIENT_SECRET`" + `}}", - 
"subscription_id": "{{env ` + "`ARM_SUBSCRIPTION_ID`" + `}}", - "object_id": "{{env ` + "`ARM_OBJECT_ID`" + `}}" + "subscription_id": "{{env ` + "`ARM_SUBSCRIPTION_ID`" + `}}" }, "builders": [{ "type": "test", @@ -74,7 +81,6 @@ const testBuilderAccManagedDiskWindows = ` "client_id": "{{user ` + "`client_id`" + `}}", "client_secret": "{{user ` + "`client_secret`" + `}}", "subscription_id": "{{user ` + "`subscription_id`" + `}}", - "object_id": "{{user ` + "`object_id`" + `}}", "managed_image_resource_group_name": "packer-acceptance-test", "managed_image_name": "testBuilderAccManagedDiskWindows-{{timestamp}}", @@ -89,8 +95,39 @@ const testBuilderAccManagedDiskWindows = ` "winrm_insecure": "true", "winrm_timeout": "3m", "winrm_username": "packer", + "async_resourcegroup_delete": "true", - "location": "West US", + "location": "South Central US", + "vm_size": "Standard_DS2_v2" + }] +} +` + +const testBuilderAccManagedDiskWindowsDeviceLogin = ` +{ + "variables": { + "subscription_id": "{{env ` + "`ARM_SUBSCRIPTION_ID`" + `}}" + }, + "builders": [{ + "type": "test", + + "subscription_id": "{{user ` + "`subscription_id`" + `}}", + + "managed_image_resource_group_name": "packer-acceptance-test", + "managed_image_name": "testBuilderAccManagedDiskWindowsDeviceLogin-{{timestamp}}", + + "os_type": "Windows", + "image_publisher": "MicrosoftWindowsServer", + "image_offer": "WindowsServer", + "image_sku": "2012-R2-Datacenter", + + "communicator": "winrm", + "winrm_use_ssl": "true", + "winrm_insecure": "true", + "winrm_timeout": "3m", + "winrm_username": "packer", + + "location": "South Central US", "vm_size": "Standard_DS2_v2" }] } @@ -118,7 +155,7 @@ const testBuilderAccManagedDiskLinux = ` "image_offer": "UbuntuServer", "image_sku": "16.04-LTS", - "location": "West US", + "location": "South Central US", "vm_size": "Standard_DS2_v2" }] } @@ -157,7 +194,7 @@ const testBuilderAccBlobWindows = ` "winrm_timeout": "3m", "winrm_username": "packer", - "location": "West US", + "location": "South Central US", "vm_size": "Standard_DS2_v2" }] } @@ -188,7 +225,7 @@ const testBuilderAccBlobLinux = ` "image_offer": "UbuntuServer", "image_sku": "16.04-LTS", - "location": "West US", + "location": "South Central US", "vm_size": "Standard_DS2_v2" }] } diff --git a/builder/azure/arm/config.go b/builder/azure/arm/config.go index 2dfb69809..7fef5f0ae 100644 --- a/builder/azure/arm/config.go +++ b/builder/azure/arm/config.go @@ -493,9 +493,6 @@ func assertRequiredParametersSet(c *Config, errs *packer.MultiError) { // readable by the ObjectID of the App. There may be another way to handle // this case, but I am not currently aware of it - send feedback. isUseDeviceLogin := func(c *Config) bool { - if c.OSType == constants.Target_Windows { - return false - } return c.SubscriptionID != "" && c.ClientID == "" && diff --git a/builder/azure/arm/config_test.go b/builder/azure/arm/config_test.go index 8e8bd3d68..a52956917 100644 --- a/builder/azure/arm/config_test.go +++ b/builder/azure/arm/config_test.go @@ -2,13 +2,11 @@ package arm import ( "fmt" - "strings" "testing" "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" "github.com/hashicorp/packer/builder/azure/common/constants" - "github.com/hashicorp/packer/packer" ) // List of configuration parameters that are required by the ARM builder. 
@@ -448,39 +446,6 @@ func TestUserDeviceLoginIsEnabledForLinux(t *testing.T) { } } -func TestUseDeviceLoginIsDisabledForWindows(t *testing.T) { - config := map[string]string{ - "capture_name_prefix": "ignore", - "capture_container_name": "ignore", - "image_offer": "ignore", - "image_publisher": "ignore", - "image_sku": "ignore", - "location": "ignore", - "storage_account": "ignore", - "resource_group_name": "ignore", - "subscription_id": "ignore", - "os_type": constants.Target_Windows, - "communicator": "none", - } - - _, _, err := newConfig(config, getPackerConfiguration()) - if err == nil { - t.Fatal("Expected test to fail, but it succeeded") - } - - multiError, _ := err.(*packer.MultiError) - if len(multiError.Errors) != 2 { - t.Errorf("Expected to find 2 errors, but found %d errors", len(multiError.Errors)) - } - - if !strings.Contains(err.Error(), "client_id must be specified") { - t.Error("Expected to find error for 'client_id must be specified") - } - if !strings.Contains(err.Error(), "client_secret must be specified") { - t.Error("Expected to find error for 'client_secret must be specified") - } -} - func TestConfigShouldRejectMalformedCaptureNamePrefix(t *testing.T) { config := map[string]string{ "capture_container_name": "ignore", diff --git a/builder/azure/common/devicelogin.go b/builder/azure/common/devicelogin.go index a63f34cc1..ea87767ca 100644 --- a/builder/azure/common/devicelogin.go +++ b/builder/azure/common/devicelogin.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" "regexp" + "strings" "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-06-01/subscriptions" "github.com/Azure/go-autorest/autorest" @@ -40,8 +41,11 @@ var ( // Authenticate fetches a token from the local file cache or initiates a consent // flow and waits for token to be obtained. -func Authenticate(env azure.Environment, tenantID string, say func(string)) (*adal.ServicePrincipalToken, error) { +func Authenticate(env azure.Environment, tenantID string, say func(string), apiScope string) (*adal.ServicePrincipalToken, error) { clientID, ok := clientIDs[env.Name] + var resourceid string + var endpoint string + if !ok { return nil, fmt.Errorf("packer-azure application not set up for Azure environment %q", env.Name) } @@ -53,9 +57,17 @@ func Authenticate(env azure.Environment, tenantID string, say func(string)) (*ad // for AzurePublicCloud (https://management.core.windows.net/), this old // Service Management scope covers both ASM and ARM. - apiScope := env.ServiceManagementEndpoint + //apiScope := env.ServiceManagementEndpoint - tokenPath := tokenCachePath(tenantID) + if strings.Contains(apiScope, "vault") { + resourceid = "vault" + endpoint = env.KeyVaultEndpoint + } else { + resourceid = "mgmt" + endpoint = env.ResourceManagerEndpoint + } + + tokenPath := tokenCachePath(tenantID + resourceid) saveToken := mkTokenCallback(tokenPath) saveTokenCallback := func(t adal.Token) error { say("Azure token expired. Saving the refreshed token...") @@ -82,7 +94,7 @@ func Authenticate(env azure.Environment, tenantID string, say func(string)) (*ad // will go stale every 14 days and we will delete the token file, // re-initiate the device flow. say("Validating the token.") - if err = validateToken(env, spt); err != nil { + if err = validateToken(endpoint, spt); err != nil { say(fmt.Sprintf("Error: %v", err)) say("Stored Azure credentials expired. 
Please reauthenticate.") say(fmt.Sprintf("Deleting %s", tokenPath)) @@ -187,12 +199,11 @@ func mkTokenCallback(path string) adal.TokenRefreshCallback { // sure if the access_token valid, if not it uses SDK’s functionality to // automatically refresh the token using refresh_token (which might have // expired). This check is essentially to make sure refresh_token is good. -func validateToken(env azure.Environment, token *adal.ServicePrincipalToken) error { - c := subscriptions.NewClientWithBaseURI(env.ResourceManagerEndpoint) - c.Authorizer = autorest.NewBearerAuthorizer(token) - _, err := c.List(context.TODO()) +func validateToken(env string, token *adal.ServicePrincipalToken) error { + err := token.EnsureFresh() + if err != nil { - return fmt.Errorf("Token validity check failed: %v", err) + return fmt.Errorf("%s token validity check failed: %v", env,err) } return nil } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 7e41f7fd9..b6b4010b1 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -67,7 +67,7 @@ var ( ResourceManagerEndpoint: "https://management.azure.com/", ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", GalleryEndpoint: "https://gallery.azure.com/", - KeyVaultEndpoint: "https://vault.azure.net/", + KeyVaultEndpoint: "https://vault.azure.net", GraphEndpoint: "https://graph.windows.net/", ServiceBusEndpoint: "https://servicebus.windows.net/", BatchManagementEndpoint: "https://batch.core.windows.net/", diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md index fd62e9490..7fc1f793c 100644 --- a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md +++ b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md @@ -56,8 +56,9 @@ This simple parsing example: is directly mapped to: ```go - if token, err := request.ParseFromRequest(tokenString, request.OAuth2Extractor, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) } ``` diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md index 00f613672..d358d881b 100644 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ b/vendor/github.com/dgrijalva/jwt-go/README.md @@ -1,11 +1,15 @@ -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) +# jwt-go [![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) +[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) -**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. 
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) -**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect. +**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3. +**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. + +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. ## What the heck is a JWT? @@ -25,8 +29,8 @@ This library supports the parsing and verification as well as the generation and See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: -* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example_Parse_hmac) -* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example_New_hmac) +* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) +* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) * [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) ## Extensions @@ -37,7 +41,7 @@ Here's an example of an extension that integrates with the Google App Engine sig ## Compliance -This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: +This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: * In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. @@ -47,7 +51,10 @@ This library is considered production ready. Feedback and feature requests are This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. 
You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning. +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. + +**BREAKING CHANGES:*** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. ## Usage Tips @@ -68,18 +75,26 @@ Symmetric signing methods, such as HSA, use only a single secret. This is probab Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + ### JWT and OAuth It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. Without going too far down the rabbit hole, here's a description of the interaction of these technologies: -* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. * OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. * Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. 
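
In practice the SECURITY NOTICE above comes down to pinning the expected signing method inside the key lookup; a minimal sketch against this package's API (`hmacSecret` is a stand-in for your own `[]byte` key):

```go
keyFunc := func(token *jwt.Token) (interface{}, error) {
	// Reject any token whose alg header is not the HMAC family we issued.
	if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
		return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
	}
	return hmacSecret, nil
}
token, err := jwt.Parse(tokenString, keyFunc)
```
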
- + ## More Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in to documentation. +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md index b605b4509..637029831 100644 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -1,5 +1,18 @@ ## `jwt-go` Version History +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + #### 3.0.0 * **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go index 2f59a2223..f97738124 100644 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go @@ -14,6 +14,7 @@ var ( ) // Implements the ECDSA family of signing methods signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification type SigningMethodECDSA struct { Name string Hash crypto.Hash diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go index 662df19d4..1c93024aa 100644 --- a/vendor/github.com/dgrijalva/jwt-go/errors.go +++ b/vendor/github.com/dgrijalva/jwt-go/errors.go @@ -51,13 +51,9 @@ func (e ValidationError) Error() string { } else { return "token is invalid" } - return e.Inner.Error() } // No errors func (e *ValidationError) valid() bool { - if e.Errors > 0 { - return false - } - return true + return e.Errors == 0 } diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go index c22991925..addbe5d40 100644 --- a/vendor/github.com/dgrijalva/jwt-go/hmac.go +++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go @@ -7,6 +7,7 @@ import ( ) // Implements the HMAC-SHA family of signing methods signing methods +// Expects key type of []byte for both signing and validation type SigningMethodHMAC struct { Name string Hash crypto.Hash @@ -90,5 +91,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, return EncodeSegment(hasher.Sum(nil)), nil } - return "", ErrInvalidKey + return "", ErrInvalidKeyType } diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go index 7020c52a1..d6901d9ad 100644 --- a/vendor/github.com/dgrijalva/jwt-go/parser.go +++ 
b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -8,8 +8,9 @@ import ( ) type Parser struct { - ValidMethods []string // If populated, only these methods will be considered valid - UseJSONNumber bool // Use JSON Number format in JSON decoder + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing } // Parse, validate, and return a token. @@ -20,55 +21,9 @@ func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { } func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - parts := strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) - } - - var err error - token := &Token{Raw: tokenString} - - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) - } - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - token.Claims = claims - - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - // JSON Decode. 
Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) - } else { - err = dec.Decode(&claims) - } - // Handle decode error + token, parts, err := p.ParseUnverified(tokenString, claims) if err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) - } - } else { - return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + return token, err } // Verify signing method is in the required set @@ -95,20 +50,25 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf } if key, err = keyFunc(token); err != nil { // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} } vErr := &ValidationError{} // Validate Claims - if err := token.Claims.Valid(); err != nil { + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { - // If the Claims Valid returned an error, check if it is a validation error, - // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set - if e, ok := err.(*ValidationError); !ok { - vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} - } else { - vErr = e + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } } } @@ -126,3 +86,63 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf return token, vErr } + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. 
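
A short caller-side sketch of the split this enables (it mirrors how the Azure builder above extracts the oid claim; the comma-ok assertion is an extra guard, not part of either diff, and fmt plus this package are assumed imported):

	parser := new(jwt.Parser)
	claims := jwt.MapClaims{}
	if _, _, err := parser.ParseUnverified(tokenString, claims); err != nil {
		return err
	}
	oid, ok := claims["oid"].(string) // comma-ok guards against a missing or non-string claim
	if !ok {
		return fmt.Errorf("token carries no oid claim")
	}
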
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go index 0ae0b1984..e4caf1ca4 100644 --- a/vendor/github.com/dgrijalva/jwt-go/rsa.go +++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -7,6 +7,7 @@ import ( ) // Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation type SigningMethodRSA struct { Name string Hash crypto.Hash @@ -44,7 +45,7 @@ func (m *SigningMethodRSA) Alg() string { } // Implements the Verify method from SigningMethod -// For this signing method, must be an rsa.PublicKey structure. +// For this signing method, must be an *rsa.PublicKey structure. func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { var err error @@ -73,7 +74,7 @@ func (m *SigningMethodRSA) Verify(signingString, signature string, key interface } // Implements the Sign method from SigningMethod -// For this signing method, must be an rsa.PrivateKey structure. +// For this signing method, must be an *rsa.PrivateKey structure. 
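
A round-trip sketch of the key-type contract the corrected comments describe (illustrative only; assumes crypto/rand and crypto/rsa are imported and the package is imported as jwt):

	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"sub": "packer"})
	signed, err := token.SignedString(key) // Sign expects *rsa.PrivateKey
	if err != nil {
		panic(err)
	}
	_, err = jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return &key.PublicKey, nil // Verify expects *rsa.PublicKey
	})
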
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { var rsaKey *rsa.PrivateKey var ok bool diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go index 213a90dbb..a5ababf95 100644 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -39,6 +39,38 @@ func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { return pkey, nil } +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + // Parse PEM encoded PKCS1 or PKCS8 public key func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { var err error diff --git a/vendor/vendor.json b/vendor/vendor.json index 1f08d096b..0680363b1 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -629,10 +629,11 @@ "revisionTime": "2017-11-27T16:20:29Z" }, { - "checksumSHA1": "D37uI+U+FYvTJIdG2TTozXe7i7U=", - "comment": "v3.0.0", + "checksumSHA1": "4772zXrOaPVeDeSgdiV7Vp4KEjk=", + "comment": "v3.2.0", "path": "github.com/dgrijalva/jwt-go", - "revision": "d2709f9f1f31ebcda9651b03077758c1f3a0018c" + "revision": "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e", + "revisionTime": "2018-03-08T23:13:08Z" }, { "checksumSHA1": "W1LGm0UNirwMDVCMFv5vZrOpUJI=", diff --git a/website/source/docs/builders/azure-setup.html.md b/website/source/docs/builders/azure-setup.html.md index 92ae04325..22df6be63 100644 --- a/website/source/docs/builders/azure-setup.html.md +++ b/website/source/docs/builders/azure-setup.html.md @@ -31,8 +31,7 @@ In order to get all of the items above, you will need a username and password fo Device login is an alternative way to authorize in Azure Packer. Device login only requires you to know your Subscription ID. (Device login is only supported for Linux based VMs.) Device login is intended for those who are first -time users, and just want to ''kick the tires.'' We recommend the SPN approach if you intend to automate Packer, or for -deploying Windows VMs. +time users, and just want to ''kick the tires.'' We recommend the SPN approach if you intend to automate Packer. > Device login is for **interactive** builds, and SPN is **automated** builds. @@ -44,7 +43,7 @@ There are three pieces of information you must provide to enable device login mo > Device login mode is enabled by not setting client\_id and client\_secret. -> Device login mode is for the Public and US Gov clouds only, and Linux VMs only. +> Device login mode is for the Public and US Gov clouds only. The device login flow asks that you open a web browser, navigate to , and input the supplied code. This authorizes the Packer for Azure application to act on your behalf. 
An OAuth token will be created, and stored diff --git a/website/source/docs/builders/azure.html.md b/website/source/docs/builders/azure.html.md index 4b62c6764..3946f00d3 100644 --- a/website/source/docs/builders/azure.html.md +++ b/website/source/docs/builders/azure.html.md @@ -140,11 +140,6 @@ Providing `temp_resource_group_name` or `location` in combination with `build_re account type for a managed image. Valid values are Standard_LRS and Premium\_LRS. The default is Standard\_LRS. -- `object_id` (string) Specify an OAuth Object ID to protect WinRM certificates - created at runtime. This variable is required when creating images based on - Windows; this variable is not used by non-Windows builds. See `Windows` - behavior for `os_type`, below. - - `os_disk_size_gb` (number) Specify the size of the OS disk in GB (gigabytes). Values of zero or less than zero are ignored. @@ -412,8 +407,6 @@ A Windows build requires two templates and two deployments. Unfortunately, the K the same time hence the need for two templates and deployments. The time required to deploy a KeyVault template is minimal, so overall impact is small. -> The KeyVault certificate is protected using the object\_id of the SPN. This is why Windows builds require object\_id, -> and an SPN. The KeyVault is deleted when the resource group is deleted. See the [examples/azure](https://github.com/hashicorp/packer/tree/master/examples/azure) folder in the packer project for more examples. From df5cc234fc02f3e996ca53ce9a1eb1e15a01af70 Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Fri, 18 May 2018 00:39:57 -0700 Subject: [PATCH 044/138] updates --- builder/azure/arm/azure_client.go | 2 +- builder/azure/arm/builder.go | 2 -- builder/azure/common/devicelogin.go | 2 +- .../github.com/Azure/go-autorest/autorest/azure/environments.go | 2 +- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/builder/azure/arm/azure_client.go b/builder/azure/arm/azure_client.go index 7e4f5324e..f8477d235 100644 --- a/builder/azure/arm/azure_client.go +++ b/builder/azure/arm/azure_client.go @@ -122,7 +122,7 @@ func byConcatDecorators(decorators ...autorest.RespondDecorator) autorest.Respon } func NewAzureClient(subscriptionID, resourceGroupName, storageAccountName string, - cloud *azure.Environment, tenantID string, isDeviceLogin bool, + cloud *azure.Environment, servicePrincipalToken, servicePrincipalTokenVault *adal.ServicePrincipalToken) (*AzureClient, error) { var azureClient = &AzureClient{} diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index 8f4b66aaa..4575ede15 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -83,8 +83,6 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe b.config.ResourceGroupName, b.config.StorageAccount, b.config.cloudEnvironment, - b.config.TenantID, - b.config.useDeviceLogin, spnCloud, spnKeyVault) if err != nil { diff --git a/builder/azure/common/devicelogin.go b/builder/azure/common/devicelogin.go index ea87767ca..ad4a2ce36 100644 --- a/builder/azure/common/devicelogin.go +++ b/builder/azure/common/devicelogin.go @@ -203,7 +203,7 @@ func validateToken(env string, token *adal.ServicePrincipalToken) error { err := token.EnsureFresh() if err != nil { - return fmt.Errorf("%s token validity check failed: %v", env,err) + return fmt.Errorf("%s token validity check failed: %v", env, err) } return nil } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go 
b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index b6b4010b1..7e41f7fd9 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -67,7 +67,7 @@ var ( ResourceManagerEndpoint: "https://management.azure.com/", ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", GalleryEndpoint: "https://gallery.azure.com/", - KeyVaultEndpoint: "https://vault.azure.net", + KeyVaultEndpoint: "https://vault.azure.net/", GraphEndpoint: "https://graph.windows.net/", ServiceBusEndpoint: "https://servicebus.windows.net/", BatchManagementEndpoint: "https://batch.core.windows.net/", From 91eed4da524386d43009ba983c49593de39228f9 Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Fri, 18 May 2018 00:44:25 -0700 Subject: [PATCH 045/138] trim right of the keyvault url --- builder/azure/arm/builder.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index 4575ede15..6493fa54b 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -390,7 +390,7 @@ func (b *Builder) getServicePrincipalTokens(say func(string)) (*adal.ServicePrin if err != nil { return nil, nil, err } - servicePrincipalTokenVault, err = packerAzureCommon.Authenticate(*b.config.cloudEnvironment, b.config.TenantID, say, b.config.cloudEnvironment.KeyVaultEndpoint) + servicePrincipalTokenVault, err = packerAzureCommon.Authenticate(*b.config.cloudEnvironment, b.config.TenantID, say, strings.TrimRight(b.config.cloudEnvironment.KeyVaultEndpoint, "/")) if err != nil { return nil, nil, err } From de1783240fbae8c1c69a61abe33e6b3565e9c2bd Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Fri, 18 May 2018 00:53:44 -0700 Subject: [PATCH 046/138] Updates to remove space changes --- builder/azure/arm/builder.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index 6493fa54b..f7876b463 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -85,6 +85,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe b.config.cloudEnvironment, spnCloud, spnKeyVault) + if err != nil { return nil, err } From 77fe1bffe4cfb48cd939f2219cc319e45b6e6948 Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Fri, 18 May 2018 01:25:19 -0700 Subject: [PATCH 047/138] Ensure that Device Login tests don't block general acceptance tests --- builder/azure/arm/builder_acc_test.go | 49 +++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/builder/azure/arm/builder_acc_test.go b/builder/azure/arm/builder_acc_test.go index 4f16ea229..df2cc8d9d 100644 --- a/builder/azure/arm/builder_acc_test.go +++ b/builder/azure/arm/builder_acc_test.go @@ -24,8 +24,12 @@ import ( "testing" builderT "github.com/hashicorp/packer/helper/builder/testing" + "os" + "fmt" ) +const DeviceLoginAcceptanceTest = "DEVICELOGIN_TEST" + func TestBuilderAcc_ManagedDisk_Windows(t *testing.T) { builderT.Test(t, builderT.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Builder: &Builder{}, Template: testBuilderAccManagedDiskWindows, }) } func TestBuilderAcc_ManagedDisk_Windows_DeviceLogin(t *testing.T) { + if os.Getenv(DeviceLoginAcceptanceTest) == "" { + t.Skip(fmt.Sprintf( + "Device Login Acceptance tests skipped unless env '%s' set, as it requires a manual step during execution", + DeviceLoginAcceptanceTest)) + return + } builderT.Test(t, builderT.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Builder: &Builder{}, Template: testBuilderAccManagedDiskWindowsDeviceLogin, }) } func TestBuilderAcc_ManagedDisk_Linux(t *testing.T) { builderT.Test(t, builderT.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Builder: &Builder{}, Template: testBuilderAccManagedDiskLinux, }) } +func TestBuilderAcc_ManagedDisk_Linux_DeviceLogin(t *testing.T) { + if os.Getenv(DeviceLoginAcceptanceTest) == "" { + t.Skip(fmt.Sprintf( + "Device Login Acceptance tests skipped unless env '%s' set, as it requires a manual step during execution", + DeviceLoginAcceptanceTest)) + return + } + builderT.Test(t, builderT.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Builder: &Builder{}, + Template: testBuilderAccManagedDiskLinuxDeviceLogin, + }) +} + func TestBuilderAcc_Blob_Windows(t *testing.T) { builderT.Test(t, builderT.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -160,6 +185,30 @@ const testBuilderAccManagedDiskLinux = ` }] } ` +const testBuilderAccManagedDiskLinuxDeviceLogin = ` +{ + "variables": { + "subscription_id": "{{env ` + "`ARM_SUBSCRIPTION_ID`" + `}}" + }, + "builders": [{ + "type": "test", + + "subscription_id": "{{user ` + "`subscription_id`" + `}}", + + "managed_image_resource_group_name": "packer-acceptance-test", + "managed_image_name": "testBuilderAccManagedDiskLinuxDeviceLogin-{{timestamp}}", + + "os_type": "Linux", + "image_publisher": "Canonical", + "image_offer": "UbuntuServer", + "image_sku": "16.04-LTS", + "async_resourcegroup_delete": "true", + + "location": "South Central US", + "vm_size": "Standard_DS2_v2" + }] +} +` const testBuilderAccBlobWindows = ` { From 3ca4a7208fcc4db3f9b153a14758cc12be4abba0 Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Fri, 18 May 2018 01:34:12 -0700 Subject: [PATCH 048/138] Ensure that Device Login tests don't block general acceptance tests --- builder/azure/arm/builder_acc_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/azure/arm/builder_acc_test.go b/builder/azure/arm/builder_acc_test.go index df2cc8d9d..63bfe82c0 100644 --- a/builder/azure/arm/builder_acc_test.go +++ b/builder/azure/arm/builder_acc_test.go @@ -12,7 +12,7 @@ package arm // The subscription in question should have a resource group // called "packer-acceptance-test" in "West US" region. The // storage account refered to in the above variable should -// be inside this resource group and in "West US" as well. +// be inside this resource group and in "South Central US" as well.
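
The same environment guard is now repeated verbatim in each device-login test; one way to factor it out if it grows further (a sketch, not part of these patches — t.Helper keeps skip attribution on the caller):

	func requireDeviceLoginEnv(t *testing.T) {
		t.Helper()
		if os.Getenv(DeviceLoginAcceptanceTest) == "" {
			t.Skipf("Device Login acceptance tests skipped unless env '%s' is set, as they require a manual step during execution", DeviceLoginAcceptanceTest)
		}
	}
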
// // In addition, the PACKER_ACC variable should also be set to // a non-empty value to enable Packer acceptance tests and the From 667113338a1eb98051d3a1575e51b4a62401774c Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Fri, 18 May 2018 01:41:00 -0700 Subject: [PATCH 049/138] missed formating --- builder/azure/arm/builder.go | 2 +- builder/azure/arm/builder_acc_test.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index f7876b463..1c7badb8d 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -85,7 +85,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe b.config.cloudEnvironment, spnCloud, spnKeyVault) - + if err != nil { return nil, err } diff --git a/builder/azure/arm/builder_acc_test.go b/builder/azure/arm/builder_acc_test.go index 63bfe82c0..056ee6e0b 100644 --- a/builder/azure/arm/builder_acc_test.go +++ b/builder/azure/arm/builder_acc_test.go @@ -23,9 +23,9 @@ package arm import ( "testing" + "fmt" builderT "github.com/hashicorp/packer/helper/builder/testing" "os" - "fmt" ) const DeviceLoginAcceptanceTest = "DEVICELOGIN_TEST" @@ -74,7 +74,6 @@ func TestBuilderAcc_ManagedDisk_Linux_DeviceLogin(t *testing.T) { }) } - func TestBuilderAcc_Blob_Windows(t *testing.T) { builderT.Test(t, builderT.TestCase{ PreCheck: func() { testAccPreCheck(t) }, From 3ca4a7208fcc4db3f9b153a14758cc12be4abba0 Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Fri, 18 May 2018 08:12:43 -0700 Subject: [PATCH 050/138] Updated Samples and added a windows quick start as well --- examples/azure/windows.json | 4 +-- examples/azure/windows_quickstart.json | 36 ++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 3 deletions(-) create mode 100644 examples/azure/windows_quickstart.json diff --git a/examples/azure/windows.json b/examples/azure/windows.json index b2e0e49fd..e0b03b7ae 100644 --- a/examples/azure/windows.json +++ b/examples/azure/windows.json @@ -2,8 +2,7 @@ "variables": { "client_id": "{{env `ARM_CLIENT_ID`}}", "client_secret": "{{env `ARM_CLIENT_SECRET`}}", - "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}", - "object_id": "{{env `ARM_OBJECT_ID`}}" + "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}" }, "builders": [{ "type": "azure-arm", @@ -11,7 +10,6 @@ "client_id": "{{user `client_id`}}", "client_secret": "{{user `client_secret`}}", "subscription_id": "{{user `subscription_id`}}", - "object_id": "{{user `object_id`}}", "managed_image_resource_group_name": "packertest", "managed_image_name": "MyWindowsOSImage", diff --git a/examples/azure/windows_quickstart.json b/examples/azure/windows_quickstart.json new file mode 100644 index 000000000..3f7a1e9bb --- /dev/null +++ b/examples/azure/windows_quickstart.json @@ -0,0 +1,36 @@ +{ + "variables": { + "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}" + }, + "builders": [{ + "type": "azure-arm", + + "subscription_id": "{{user `subscription_id`}}", + + "managed_image_resource_group_name": "packertest", + "managed_image_name": "MyWindowsOSImage", + + "os_type": "Windows", + "image_publisher": "MicrosoftWindowsServer", + "image_offer": "WindowsServer", + "image_sku": "2012-R2-Datacenter", + + "communicator": "winrm", + "winrm_use_ssl": "true", + "winrm_insecure": "true", + "winrm_timeout": "3m", + "winrm_username": "packer", + + "location": "South Central US", + "vm_size": "Standard_DS2_v2" + }], + "provisioners": [{ + "type": "powershell", + "inline": [ + "if( Test-Path 
$Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml ){ rm $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml -Force}", + "& $env:SystemRoot\\System32\\Sysprep\\Sysprep.exe /oobe /generalize /quiet /quit", + "while($true) { $imageState = Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Setup\\State | Select ImageState; if($imageState.ImageState -ne 'IMAGE_STATE_GENERALIZE_RESEAL_TO_OOBE') { Write-Output $imageState.ImageState; Start-Sleep -s 10 } else { break } }" + ] + }] +} + From ea9b2a8b5fdef856da8fefa4d2a591ba9928ed09 Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Fri, 18 May 2018 12:09:42 -0700 Subject: [PATCH 051/138] review feedback --- builder/azure/arm/builder.go | 26 +++++++++----- builder/azure/arm/builder_acc_test.go | 2 +- builder/azure/common/devicelogin.go | 50 +++------------------------ 3 files changed, 23 insertions(+), 55 deletions(-) diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index 1c7badb8d..7eb9e47f3 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - packerAzureCommon "github.com/hashicorp/packer/builder/azure/common" "log" "os" "runtime" @@ -15,6 +14,7 @@ import ( "github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/go-autorest/autorest/adal" "github.com/dgrijalva/jwt-go" + packerAzureCommon "github.com/hashicorp/packer/builder/azure/common" "github.com/hashicorp/packer/builder/azure/common/constants" "github.com/hashicorp/packer/builder/azure/common/lin" packerCommon "github.com/hashicorp/packer/common" @@ -53,9 +53,6 @@ func (b *Builder) Prepare(raws ...interface{}) ([]string, error) { func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) { - claims := jwt.MapClaims{} - var p jwt.Parser - ui.Say("Running builder ...") ctx, cancel := context.WithCancel(context.Background()) @@ -95,6 +92,9 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, err } + claims := jwt.MapClaims{} + var p jwt.Parser + _, _, err = p.ParseUnverified(spnCloud.OAuthToken(), claims) if err != nil { @@ -103,8 +103,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe b.config.ObjectID = claims["oid"].(string) if b.config.ObjectID == "" && b.config.OSType != constants.Target_Linux { - ui.Error("\n Got empty Object ID in the OAuth token , we need this for Key vault Access, bailing") - return nil, err + return nil, fmt.Errorf("could not determined the ObjectID for the user, which is required for Windows builds") } if b.config.isManagedImage() { @@ -403,17 +402,26 @@ func (b *Builder) getServicePrincipalTokens(say func(string)) (*adal.ServicePrin if err != nil { return nil, nil, err } - servicePrincipalToken.EnsureFresh() servicePrincipalTokenVault, err = auth.getServicePrincipalTokenWithResource( strings.TrimRight(b.config.cloudEnvironment.KeyVaultEndpoint, "/")) - if err != nil { return nil, nil, err } - servicePrincipalTokenVault.EnsureFresh() } + err = servicePrincipalToken.EnsureFresh() + + if err != nil { + return nil, nil, err + } + + err = servicePrincipalTokenVault.EnsureFresh() + + if err != nil { + return nil, nil, err + } + return servicePrincipalToken, servicePrincipalTokenVault, nil } diff --git a/builder/azure/arm/builder_acc_test.go b/builder/azure/arm/builder_acc_test.go index 056ee6e0b..3b03025d4 100644 --- a/builder/azure/arm/builder_acc_test.go +++ b/builder/azure/arm/builder_acc_test.go @@ -10,7 +10,7 
@@ package arm // * ARM_STORAGE_ACCOUNT // // The subscription in question should have a resource group -// called "packer-acceptance-test" in "West US" region. The +// called "packer-acceptance-test" in "South Central US" region. The // storage account refered to in the above variable should // be inside this resource group and in "South Central US" as well. // diff --git a/builder/azure/common/devicelogin.go b/builder/azure/common/devicelogin.go index ad4a2ce36..8d053f802 100644 --- a/builder/azure/common/devicelogin.go +++ b/builder/azure/common/devicelogin.go @@ -41,10 +41,9 @@ var ( // Authenticate fetches a token from the local file cache or initiates a consent // flow and waits for token to be obtained. -func Authenticate(env azure.Environment, tenantID string, say func(string), apiScope string) (*adal.ServicePrincipalToken, error) { +func Authenticate(env azure.Environment, tenantID string, say func(string), scope string) (*adal.ServicePrincipalToken, error) { clientID, ok := clientIDs[env.Name] var resourceid string - var endpoint string if !ok { return nil, fmt.Errorf("packer-azure application not set up for Azure environment %q", env.Name) @@ -57,14 +56,11 @@ func Authenticate(env azure.Environment, tenantID string, say func(string), apiS // for AzurePublicCloud (https://management.core.windows.net/), this old // Service Management scope covers both ASM and ARM. - //apiScope := env.ServiceManagementEndpoint - if strings.Contains(apiScope, "vault") { + if strings.Contains(scope, "vault") { resourceid = "vault" - endpoint = env.KeyVaultEndpoint } else { resourceid = "mgmt" - endpoint = env.ResourceManagerEndpoint } tokenPath := tokenCachePath(tenantID + resourceid) @@ -75,41 +71,18 @@ func Authenticate(env azure.Environment, tenantID string, say func(string), apiS } // Lookup the token cache file for an existing token. - spt, err := tokenFromFile(say, *oauthCfg, tokenPath, clientID, apiScope, saveTokenCallback) + spt, err := tokenFromFile(say, *oauthCfg, tokenPath, clientID, scope, saveTokenCallback) if err != nil { return nil, err } if spt != nil { say(fmt.Sprintf("Auth token found in file: %s", tokenPath)) - - // NOTE(ahmetalpbalkan): The token file we found may contain an - // expired access_token. In that case, the first call to Azure SDK will - // attempt to refresh the token using refresh_token, which might have - // expired[1], in that case we will get an error and we shall remove the - // token file and initiate token flow again so that the user would not - // need removing the token cache file manually. - // - // [1]: expiration date of refresh_token is not returned in AAD /token - // response, we just know it is 14 days. Therefore user’s token - // will go stale every 14 days and we will delete the token file, - // re-initiate the device flow. - say("Validating the token.") - if err = validateToken(endpoint, spt); err != nil { - say(fmt.Sprintf("Error: %v", err)) - say("Stored Azure credentials expired. 
Please reauthenticate.") - say(fmt.Sprintf("Deleting %s", tokenPath)) - if err := os.RemoveAll(tokenPath); err != nil { - return nil, fmt.Errorf("Error deleting stale token file: %v", err) - } - } else { - say("Token works.") - return spt, nil - } + return spt, nil } // Start an OAuth 2.0 device flow say(fmt.Sprintf("Initiating device flow: %s", tokenPath)) - spt, err = tokenFromDeviceFlow(say, *oauthCfg, clientID, apiScope) + spt, err = tokenFromDeviceFlow(say, *oauthCfg, clientID, scope) if err != nil { return nil, err } @@ -195,19 +168,6 @@ func mkTokenCallback(path string) adal.TokenRefreshCallback { } } -// validateToken makes a call to Azure SDK with given token, essentially making -// sure if the access_token valid, if not it uses SDK’s functionality to -// automatically refresh the token using refresh_token (which might have -// expired). This check is essentially to make sure refresh_token is good. -func validateToken(env string, token *adal.ServicePrincipalToken) error { - err := token.EnsureFresh() - - if err != nil { - return fmt.Errorf("%s token validity check failed: %v", env, err) - } - return nil -} - // FindTenantID figures out the AAD tenant ID of the subscription by making an // unauthenticated request to the Get Subscription Details endpoint and parses // the value from WWW-Authenticate header. From 00e809cb7e10944f2e2e13f292cfca57694217df Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Fri, 18 May 2018 15:21:49 -0700 Subject: [PATCH 052/138] Refactored the change into a new function --- builder/azure/arm/builder.go | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index 7eb9e47f3..3aa7bcd5b 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -92,15 +92,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, err } - claims := jwt.MapClaims{} - var p jwt.Parser - - _, _, err = p.ParseUnverified(spnCloud.OAuthToken(), claims) - - if err != nil { - return nil, err - } - b.config.ObjectID = claims["oid"].(string) + b.config.ObjectID = getObjectIdFromToken(spnCloud) if b.config.ObjectID == "" && b.config.OSType != constants.Target_Linux { return nil, fmt.Errorf("could not determined the ObjectID for the user, which is required for Windows builds") @@ -425,3 +417,18 @@ func (b *Builder) getServicePrincipalTokens(say func(string)) (*adal.ServicePrin return servicePrincipalToken, servicePrincipalTokenVault, nil } + +func getObjectIdFromToken(token *adal.ServicePrincipalToken) (oid string) { + claims := jwt.MapClaims{} + var p jwt.Parser + + var err error + + _, _, err = p.ParseUnverified(token.OAuthToken(), claims) + + if err != nil { + return "" + } + return claims["oid"].(string) + +} From 4992429e8c83f7a6189ffcdff471a85a5cb503d0 Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Fri, 18 May 2018 17:34:01 -0700 Subject: [PATCH 053/138] Minor comment fixes --- builder/azure/arm/builder.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index 3aa7bcd5b..5da549a29 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -418,7 +418,7 @@ func (b *Builder) getServicePrincipalTokens(say func(string)) (*adal.ServicePrin return servicePrincipalToken, servicePrincipalTokenVault, nil } -func getObjectIdFromToken(token *adal.ServicePrincipalToken) (oid string) { +func getObjectIdFromToken(token 
*adal.ServicePrincipalToken) string { claims := jwt.MapClaims{} var p jwt.Parser From da67df6d03a8c7f15fbf49ccfe1e64d380f5e487 Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Fri, 18 May 2018 21:17:19 -0700 Subject: [PATCH 054/138] space fix --- builder/azure/arm/builder_acc_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builder/azure/arm/builder_acc_test.go b/builder/azure/arm/builder_acc_test.go index 3b03025d4..0b1f85656 100644 --- a/builder/azure/arm/builder_acc_test.go +++ b/builder/azure/arm/builder_acc_test.go @@ -119,7 +119,7 @@ const testBuilderAccManagedDiskWindows = ` "winrm_insecure": "true", "winrm_timeout": "3m", "winrm_username": "packer", - "async_resourcegroup_delete": "true", + "async_resourcegroup_delete": "true", "location": "South Central US", "vm_size": "Standard_DS2_v2" @@ -201,7 +201,7 @@ const testBuilderAccManagedDiskLinuxDeviceLogin = ` "image_publisher": "Canonical", "image_offer": "UbuntuServer", "image_sku": "16.04-LTS", - "async_resourcegroup_delete": "true", + "async_resourcegroup_delete": "true", "location": "South Central US", "vm_size": "Standard_DS2_v2" From a54fcc9efe3840c81c8599da3612cea2a3dbbf89 Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Sat, 19 May 2018 13:16:57 -0700 Subject: [PATCH 055/138] missed doc fixes to remove reference for object_id, note keeping the command for now for how to get object ID for older releases --- website/source/docs/builders/azure-setup.html.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/website/source/docs/builders/azure-setup.html.md b/website/source/docs/builders/azure-setup.html.md index 22df6be63..07e292e24 100644 --- a/website/source/docs/builders/azure-setup.html.md +++ b/website/source/docs/builders/azure-setup.html.md @@ -17,8 +17,6 @@ In order to build VMs in Azure Packer needs 6 configuration options to be specif - `client_secret` - service principal secret / password -- `object_id` - service principal object id (OSType = Windows Only) - - `resource_group_name` - name of the resource group where your VHD(s) will be stored - `storage_account` - name of the storage account where your VHD(s) will be stored From 8a3e599cad771a59b39cb5306c982e5efebb4a71 Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Mon, 21 May 2018 11:05:59 -0700 Subject: [PATCH 056/138] Added text to point out two device auth --- builder/azure/arm/builder.go | 2 ++ website/source/docs/builders/azure-setup.html.md | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index 5da549a29..eecef1b19 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -378,10 +378,12 @@ func (b *Builder) getServicePrincipalTokens(say func(string)) (*adal.ServicePrin var err error if b.config.useDeviceLogin { + say("Getting auth token for Service management endpoint") servicePrincipalToken, err = packerAzureCommon.Authenticate(*b.config.cloudEnvironment, b.config.TenantID, say, b.config.cloudEnvironment.ServiceManagementEndpoint) if err != nil { return nil, nil, err } + say("Getting token for Vault resource") servicePrincipalTokenVault, err = packerAzureCommon.Authenticate(*b.config.cloudEnvironment, b.config.TenantID, say, strings.TrimRight(b.config.cloudEnvironment.KeyVaultEndpoint, "/")) if err != nil { return nil, nil, err } diff --git a/website/source/docs/builders/azure-setup.html.md b/website/source/docs/builders/azure-setup.html.md index 07e292e24..fe57033c4 100644 --- 
a/website/source/docs/builders/azure-setup.html.md +++ b/website/source/docs/builders/azure-setup.html.md @@ -46,7 +46,8 @@ There are three pieces of information you must provide to enable device login mo The device login flow asks that you open a web browser, navigate to , and input the supplied code. This authorizes the Packer for Azure application to act on your behalf. An OAuth token will be created, and stored in the user's home directory (~/.azure/packer/oauth-TenantID.json). This token is used if the token file exists, and it -is refreshed as necessary. The token file prevents the need to continually execute the device login flow. +is refreshed as necessary. The token file prevents the need to continually execute the device login flow. Packer will ask +for two device login authentications: one for the service management endpoint and another for accessing the temporary key vault secrets that it creates. ## Install the Azure CLI From 1fdf763d0f5ddd6e2541a0b825dfce1ed13b61c2 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Mon, 21 May 2018 11:25:51 -0700 Subject: [PATCH 057/138] fancier logging --- common/shell-local/communicator.go | 2 +- common/shell-local/run.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/common/shell-local/communicator.go b/common/shell-local/communicator.go index b51d309d9..4055c96b5 100644 --- a/common/shell-local/communicator.go +++ b/common/shell-local/communicator.go @@ -21,7 +21,7 @@ func (c *Communicator) Start(cmd *packer.RemoteCmd) error { } // Build the local command to execute - log.Printf("Executing local shell command %s", c.ExecuteCommand) + log.Printf("[INFO] (shell-local communicator): Executing local shell command %s", c.ExecuteCommand) localCmd := exec.Command(c.ExecuteCommand[0], c.ExecuteCommand[1:]...) localCmd.Stdin = cmd.Stdin localCmd.Stdout = cmd.Stdout diff --git a/common/shell-local/run.go b/common/shell-local/run.go index 6af406522..0457536fa 100644 --- a/common/shell-local/run.go +++ b/common/shell-local/run.go @@ -62,7 +62,7 @@ func Run(ui packer.Ui, config *Config) (bool, error) { // buffers and for reading the final exit status. 
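The "fancier logging" commit above tags each log line with a bracketed level and a subsystem name. A small illustration of the convention (an editor's sketch, not code from the patch) — tagged lines can then be filtered on levels such as `[DEBUG]`, `[INFO]`, and `[ERROR]`:

```go
package main

import "log"

func main() {
	// flattenedCmd stands in for the real command string built by Run.
	flattenedCmd := "echo hello"
	log.Printf("[INFO] (shell-local): starting local command: %s", flattenedCmd)
	log.Printf("[ERROR] (shell-local): example failure while running: %s", flattenedCmd)
}
```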
flattenedCmd := strings.Join(interpolatedCmds, " ") cmd := &packer.RemoteCmd{Command: flattenedCmd} - log.Printf("starting local command: %s", flattenedCmd) + log.Printf("[INFO] (shell-local): starting local command: %s", flattenedCmd) if err := cmd.StartWithUi(comm, ui); err != nil { return false, fmt.Errorf( @@ -92,7 +92,7 @@ func createInlineScriptFile(config *Config) (string, error) { writer := bufio.NewWriter(tf) if config.InlineShebang != "" { shebang := fmt.Sprintf("#!%s\n", config.InlineShebang) - log.Printf("Prepending inline script with %s", shebang) + log.Printf("[INFO] (shell-local): Prepending inline script with %s", shebang) writer.WriteString(shebang) } for _, command := range config.Inline { @@ -108,7 +108,7 @@ func createInlineScriptFile(config *Config) (string, error) { tf.Close() err = os.Chmod(tf.Name(), 0555) if err != nil { - log.Printf("error modifying permissions of temp script file: %s", err.Error()) + log.Printf("[ERROR] (shell-local): error modifying permissions of temp script file: %s", err.Error()) } return tf.Name(), nil } From 969201a2d4e60e3d26e682355417c12e0484aa9f Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Mon, 21 May 2018 14:56:44 -0700 Subject: [PATCH 058/138] handle minor shell-local PR suggestions and corrections --- common/shell-local/config.go | 12 ++++++------ common/shell-local/run.go | 8 ++++---- .../source/docs/post-processors/shell-local.html.md | 2 +- website/source/docs/provisioners/shell-local.html.md | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/common/shell-local/config.go b/common/shell-local/config.go index 846e4b4a4..9eb657ff9 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -153,7 +153,11 @@ func Validate(config *Config) error { } if config.UseLinuxPathing { for index, script := range config.Scripts { - converted, err := ConvertToLinuxPath(script) + scriptAbsPath, err := filepath.Abs(script) + if err != nil { + return fmt.Errorf("Error converting %s to absolute path: %s", script, err.Error()) + } + converted, err := ConvertToLinuxPath(scriptAbsPath) if err != nil { return err } @@ -202,12 +206,8 @@ func Validate(config *Config) error { } // C:/path/to/your/file becomes /mnt/c/path/to/your/file -func ConvertToLinuxPath(winPath string) (string, error) { +func ConvertToLinuxPath(winAbsPath string) (string, error) { // get absolute path of script, and morph it into the bash path - winAbsPath, err := filepath.Abs(winPath) - if err != nil { - return "", fmt.Errorf("Error converting %s to absolute path: %s", winPath, err.Error()) - } winAbsPath = strings.Replace(winAbsPath, "\\", "/", -1) splitPath := strings.SplitN(winAbsPath, ":/", 2) winBashPath := fmt.Sprintf("/mnt/%s/%s", strings.ToLower(splitPath[0]), splitPath[1]) diff --git a/common/shell-local/run.go b/common/shell-local/run.go index 0457536fa..7ab93e346 100644 --- a/common/shell-local/run.go +++ b/common/shell-local/run.go @@ -32,11 +32,12 @@ func Run(ui packer.Ui, config *Config) (bool, error) { } scripts = append(scripts, tempScriptFileName) - defer os.Remove(tempScriptFileName) // figure out what extension the file should have, and rename it. 
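The comment above marks the extension-rename step; the fix in the surrounding hunk is to keep the renamed path so the deferred cleanup removes the right file. A standalone sketch of that step, with an assumed extension value:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
)

func main() {
	tf, err := ioutil.TempFile("", "packer-shell")
	if err != nil {
		log.Fatal(err)
	}
	tempScriptFileName := tf.Name()
	tf.Close()

	extension := "ps1" // stand-in for config.TempfileExtension
	renamed := fmt.Sprintf("%s.%s", tempScriptFileName, extension)
	if err := os.Rename(tempScriptFileName, renamed); err != nil {
		log.Fatal(err)
	}
	// Keep the new name so the deferred removal targets the renamed file.
	tempScriptFileName = renamed
	defer os.Remove(tempScriptFileName)
}
```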
if config.TempfileExtension != "" { os.Rename(tempScriptFileName, fmt.Sprintf("%s.%s", tempScriptFileName, config.TempfileExtension)) + tempScriptFileName = fmt.Sprintf("%s.%s", tempScriptFileName, config.TempfileExtension) } + defer os.Remove(tempScriptFileName) } // Create environment variables to set before executing the command @@ -83,7 +84,7 @@ func Run(ui packer.Ui, config *Config) (bool, error) { } func createInlineScriptFile(config *Config) (string, error) { - tf, err := ioutil.TempFile(os.TempDir(), "packer-shell") + tf, err := ioutil.TempFile("", "packer-shell") if err != nil { return "", fmt.Errorf("Error preparing shell script: %s", err) } @@ -105,8 +106,7 @@ func createInlineScriptFile(config *Config) (string, error) { return "", fmt.Errorf("Error preparing shell script: %s", err) } - tf.Close() - err = os.Chmod(tf.Name(), 0555) + err = os.Chmod(tf.Name(), 0700) if err != nil { log.Printf("[ERROR] (shell-local): error modifying permissions of temp script file: %s", err.Error()) } diff --git a/website/source/docs/post-processors/shell-local.html.md b/website/source/docs/post-processors/shell-local.html.md index 6812fac2b..ac8056407 100644 --- a/website/source/docs/post-processors/shell-local.html.md +++ b/website/source/docs/post-processors/shell-local.html.md @@ -73,7 +73,7 @@ Optional parameters: choose to try to use shell-local for Powershell or other Windows commands, the environment variables will not be set properly for your environment. - For backwards compatibility, `execute_command` will accept a string insetad + For backwards compatibility, `execute_command` will accept a string instead of an array of strings. If a single string or an array of strings with only one element is provided, Packer will replicate past behavior by appending your `execute_command` to the array of strings `["sh", "-c"]`. For example, diff --git a/website/source/docs/provisioners/shell-local.html.md b/website/source/docs/provisioners/shell-local.html.md index cadb1d6a1..a7400c589 100644 --- a/website/source/docs/provisioners/shell-local.html.md +++ b/website/source/docs/provisioners/shell-local.html.md @@ -89,7 +89,7 @@ Optional parameters: these commands are not officially supported and things like environment variables may not work if you use a different shell than the default. - For backwards compatability, you may also use {{.Command}}, but it is + For backwards compatibility, you may also use {{.Command}}, but it is decoded the same way as {{.Script}}. We recommend using {{.Script}} for the sake of clarity, as even when you set only a single `command` to run, Packer writes it to a temporary file and then runs it as a script. From d1e31c0f2360f8460aae16ed934ad7a35addb3db Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Mon, 21 May 2018 15:19:27 -0700 Subject: [PATCH 059/138] use if/else to clarify code --- common/shell-local/run.go | 10 ++++---- .../Azure/azure-sdk-for-go/storage/README.md | 1 + vendor/vendor.json | 24 +++++++++---------- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/common/shell-local/run.go b/common/shell-local/run.go index 7ab93e346..b65196ea9 100644 --- a/common/shell-local/run.go +++ b/common/shell-local/run.go @@ -21,11 +21,11 @@ type ExecuteCommandTemplate struct { func Run(ui packer.Ui, config *Config) (bool, error) { scripts := make([]string, len(config.Scripts)) - copy(scripts, config.Scripts) - - // If we have an inline script, then turn that into a temporary - // shell script and use that. 
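The if/else rewrite in the next hunk makes the scripts-versus-inline choice explicit. A compact sketch of the resulting control flow, using a stand-in struct rather than the real shell-local Config:

```go
package main

import "fmt"

// config is a stand-in holding only the two fields this hunk cares about.
type config struct {
	Scripts []string
	Inline  []string
}

// selectScripts prefers explicitly listed scripts; otherwise the inline
// commands are assumed to have been written to a single temp script.
func selectScripts(c config, inlineScriptPath string) []string {
	scripts := make([]string, 0, len(c.Scripts))
	if len(c.Scripts) > 0 {
		scripts = append(scripts, c.Scripts...)
	} else if c.Inline != nil {
		scripts = append(scripts, inlineScriptPath)
	}
	return scripts
}

func main() {
	fmt.Println(selectScripts(config{Inline: []string{"echo hi"}}, "/tmp/packer-shell123"))
}
```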
- if config.Inline != nil { + if len(config.Scripts) > 0 { + copy(scripts, config.Scripts) + } else if config.Inline != nil { + // If we have an inline script, then turn that into a temporary + // shell script and use that. tempScriptFileName, err := createInlineScriptFile(config) if err != nil { return false, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md index ed90cf8bc..49e48cdf1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md @@ -15,3 +15,4 @@ at [github.com/Azure/azure-sdk-for-go/services/storage](https://github.com/Azure This package also supports the [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) (Windows only). + diff --git a/vendor/vendor.json b/vendor/vendor.json index a979c6b6f..1f08d096b 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -9,7 +9,7 @@ "revisionTime": "2016-08-11T22:04:02Z" }, { - "checksumSHA1": "XZVCJXyy79hy5KBOI6flZ6iHnHY=", + "checksumSHA1": "cJxhrzJRtddboU3S0TPyvEPBqsc=", "path": "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute", "revision": "56332fec5b308fbb6615fa1af6117394cdba186d", "revisionTime": "2018-03-26T23:29:47Z", @@ -17,7 +17,7 @@ "versionExact": "v15.0.0" }, { - "checksumSHA1": "738URn/O+S8TN9psssjK7cteZXA=", + "checksumSHA1": "VDwUBYd9RVKy09Y17al0EQ7ivYI=", "path": "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-01-01/network", "revision": "56332fec5b308fbb6615fa1af6117394cdba186d", "revisionTime": "2018-03-26T23:29:47Z", @@ -25,7 +25,7 @@ "versionExact": "v15.0.0" }, { - "checksumSHA1": "KDrlouaRfBHk+qH/yljC0JnsV4Y=", + "checksumSHA1": "woz67BK+/NdoZm4GzVYnJwzl61A=", "path": "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-06-01/subscriptions", "revision": "56332fec5b308fbb6615fa1af6117394cdba186d", "revisionTime": "2018-03-26T23:29:47Z", @@ -33,7 +33,7 @@ "versionExact": "v15.0.0" }, { - "checksumSHA1": "BMd5SfQ0KfqEUvi9zAt+QAB/JPQ=", + "checksumSHA1": "1W8UIxg6Rycuzg41FQFu35vkCEU=", "path": "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-02-01/resources", "revision": "56332fec5b308fbb6615fa1af6117394cdba186d", "revisionTime": "2018-03-26T23:29:47Z", @@ -41,7 +41,7 @@ "versionExact": "v15.0.0" }, { - "checksumSHA1": "qHMzicMTsihjgKyS/VB8oguXmmc=", + "checksumSHA1": "g9eP5AgV9yXRkY36M8h7aDW9oi8=", "path": "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage", "revision": "56332fec5b308fbb6615fa1af6117394cdba186d", "revisionTime": "2018-03-26T23:29:47Z", @@ -49,7 +49,7 @@ "versionExact": "v15.0.0" }, { - "checksumSHA1": "s/831Hsxh0h6PCHCoMOiOdh1Hwg=", + "checksumSHA1": "3N5Et8QnWsHJYN+v/0J/VSQUkJ0=", "path": "github.com/Azure/azure-sdk-for-go/storage", "revision": "56332fec5b308fbb6615fa1af6117394cdba186d", "revisionTime": "2018-03-26T23:29:47Z", @@ -57,13 +57,13 @@ "versionExact": "v15.0.0" }, { - "checksumSHA1": "kbpNrLhdZinIK0H1vsJh7eSB2JM=", + "checksumSHA1": "Fb2OanEbwZVaGHYLf9Y4FAajsOM=", "path": "github.com/Azure/azure-sdk-for-go/version", "revision": "56332fec5b308fbb6615fa1af6117394cdba186d", "revisionTime": "2018-03-26T23:29:47Z" }, { - "checksumSHA1": "LaWzRZq1p8T0iqZTD4+QL7qlJPg=", + "checksumSHA1": "+P6HOINDh/n2z4GqEkluzuGP5p0=", "comment": "v7.0.7", "path": "github.com/Azure/go-autorest/autorest", "revision": "ed4b7f5bf1ec0c9ede1fda2681d96771282f2862", @@ -72,7 +72,7 @@ "versionExact": "v10.4.0" }, { - 
"checksumSHA1": "HzA52MbMWnsR31CFrub5biN90/Q=", + "checksumSHA1": "4Z3yO++uYspufDkuaIydTpT787c=", "path": "github.com/Azure/go-autorest/autorest/adal", "revision": "ed4b7f5bf1ec0c9ede1fda2681d96771282f2862", "revisionTime": "2018-03-26T17:06:54Z", @@ -80,7 +80,7 @@ "versionExact": "v10.4.0" }, { - "checksumSHA1": "5698vgeScEFD2bOOCssAfMFP4Mg=", + "checksumSHA1": "bDFbLGwpCT8TRmqEKtPY/U1DAY8=", "comment": "v7.0.7", "path": "github.com/Azure/go-autorest/autorest/azure", "revision": "ed4b7f5bf1ec0c9ede1fda2681d96771282f2862", @@ -107,7 +107,7 @@ "versionExact": "v8.0.0" }, { - "checksumSHA1": "CdDkG+J8wqXQVQ0f0xal+eolB1w=", + "checksumSHA1": "5UH4IFIB/98iowPCzzVs4M4MXiQ=", "path": "github.com/Azure/go-autorest/autorest/validation", "revision": "ed4b7f5bf1ec0c9ede1fda2681d96771282f2862", "revisionTime": "2018-03-26T17:06:54Z", @@ -965,7 +965,7 @@ "revision": "2788f0dbd16903de03cb8186e5c7d97b69ad387b" }, { - "checksumSHA1": "9Ok54so+GJLC4rMpb7XqZzlfieI=", + "checksumSHA1": "T9E+5mKBQ/BX4wlNxgaPfetxdeI=", "path": "github.com/marstr/guid", "revision": "8bdf7d1a087ccc975cf37dd6507da50698fd19ca", "revisionTime": "2017-04-27T23:51:15Z" From 7e9a653da7d565a51ee11e20c83e7b6f016fbd7c Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Mon, 21 May 2018 15:26:57 -0700 Subject: [PATCH 060/138] use testify instead of homegrown string compare --- .../shell-local/post-processor_test.go | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/post-processor/shell-local/post-processor_test.go b/post-processor/shell-local/post-processor_test.go index ee7e27d70..515704f9d 100644 --- a/post-processor/shell-local/post-processor_test.go +++ b/post-processor/shell-local/post-processor_test.go @@ -4,10 +4,10 @@ import ( "io/ioutil" "os" "runtime" - "strings" "testing" "github.com/hashicorp/packer/packer" + "github.com/stretchr/testify/assert" ) func TestPostProcessor_ImplementsPostProcessor(t *testing.T) { @@ -116,9 +116,8 @@ func TestPostProcessorPrepare_ExecuteCommand(t *testing.T) { if err != nil { t.Fatalf("should handle backwards compatibility: %s", err) } - if strings.Compare(strings.Join(p.config.ExecuteCommand, " "), strings.Join(expected, " ")) != 0 { - t.Fatalf("Did not get expected execute_command: expected: %#v; received %#v", expected, p.config.ExecuteCommand) - } + assert.Equal(t, p.config.ExecuteCommand, expected, + "Did not get expected execute_command: expected: %#v; received %#v", expected, p.config.ExecuteCommand) // Check that passing a list will work p = new(PostProcessor) @@ -129,9 +128,8 @@ func TestPostProcessorPrepare_ExecuteCommand(t *testing.T) { t.Fatalf("should handle backwards compatibility: %s", err) } expected = []string{"foo", "bar"} - if strings.Compare(strings.Join(p.config.ExecuteCommand, " "), strings.Join(expected, " ")) != 0 { - t.Fatalf("Did not get expected execute_command: expected: %#v; received %#v", expected, p.config.ExecuteCommand) - } + assert.Equal(t, p.config.ExecuteCommand, expected, + "Did not get expected execute_command: expected: %#v; received %#v", expected, p.config.ExecuteCommand) // Check that default is as expected raws = testConfig() @@ -139,13 +137,12 @@ func TestPostProcessorPrepare_ExecuteCommand(t *testing.T) { p = new(PostProcessor) p.Configure(raws) if runtime.GOOS != "windows" { - expected = []string{"/bin/sh", "-c", "{{.Vars}}", "{{.Script}}"} + expected = []string{"/bin/sh", "-c", "{{.Vars}} {{.Script}}"} } else { expected = []string{"cmd", "/V", "/C", "{{.Vars}}", "call", "{{.Script}}"} } - if 
strings.Compare(strings.Join(p.config.ExecuteCommand, " "), strings.Join(expected, " ")) != 0 { - t.Fatalf("Did not get expected default: expected: %#v; received %#v", expected, p.config.ExecuteCommand) - } + assert.Equal(t, p.config.ExecuteCommand, expected, + "Did not get expected default: expected: %#v; received %#v", expected, p.config.ExecuteCommand) } func TestPostProcessorPrepare_ScriptAndInline(t *testing.T) { From 1bd7aa534e55aa21d1cd79d0e5b8533b15b8b29a Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Mon, 21 May 2018 21:38:41 -0700 Subject: [PATCH 061/138] Addressed PR feedback --- builder/azure/arm/builder.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index eecef1b19..bb67e5622 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -91,11 +91,14 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe if err := resolver.Resolve(b.config); err != nil { return nil, err } - - b.config.ObjectID = getObjectIdFromToken(spnCloud) + if b.config.ObjectID == "" { + b.config.ObjectID = getObjectIdFromToken(spnCloud) + } else { + ui.Message("You have provided Object_ID which is no longer needed, azure packer builder determines this dynamically from the authentication token") + } if b.config.ObjectID == "" && b.config.OSType != constants.Target_Linux { - return nil, fmt.Errorf("could not determined the ObjectID for the user, which is required for Windows builds") + return nil, fmt.Errorf("could not determine the ObjectID for the user, which is required for Windows builds") } if b.config.isManagedImage() { From a13a2511f986bddd4bdefc04e3a7555bf6faf831 Mon Sep 17 00:00:00 2001 From: Hariharan Jayaraman Date: Mon, 21 May 2018 22:20:36 -0700 Subject: [PATCH 062/138] Added additional error message if we failed to parse token --- builder/azure/arm/builder.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/builder/azure/arm/builder.go b/builder/azure/arm/builder.go index bb67e5622..23d3d2181 100644 --- a/builder/azure/arm/builder.go +++ b/builder/azure/arm/builder.go @@ -92,7 +92,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe return nil, err } if b.config.ObjectID == "" { - b.config.ObjectID = getObjectIdFromToken(spnCloud) + b.config.ObjectID = getObjectIdFromToken(ui, spnCloud) } else { ui.Message("You have provided Object_ID which is no longer needed, azure packer builder determines this dynamically from the authentication token") } @@ -423,7 +423,7 @@ func (b *Builder) getServicePrincipalTokens(say func(string)) (*adal.ServicePrin return servicePrincipalToken, servicePrincipalTokenVault, nil } -func getObjectIdFromToken(token *adal.ServicePrincipalToken) string { +func getObjectIdFromToken(ui packer.Ui, token *adal.ServicePrincipalToken) string { claims := jwt.MapClaims{} var p jwt.Parser @@ -432,6 +432,7 @@ func getObjectIdFromToken(token *adal.ServicePrincipalToken) string { _, _, err = p.ParseUnverified(token.OAuthToken(), claims) if err != nil { + ui.Error(fmt.Sprintf("Failed to parse the token,Error: %s", err.Error())) return "" } return claims["oid"].(string) From 97652b62e538a30d5a190948316521f6874cbcfe Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Tue, 22 May 2018 11:28:00 -0700 Subject: [PATCH 063/138] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 57cd1230d..8fde73f0c 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -8,6 +8,7 @@ ### IMPROVEMENTS: * builder/azure: Updated Azure SDK to v15.0.0 [GH-6224] +* builder/azure: Devicelogin Support for Windows [GH-6285] ## 1.2.3 (April 25, 2018) From e670eed315a628ed089db3f42c822e11e0bea698 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 25 Apr 2018 11:58:04 -0700 Subject: [PATCH 064/138] Add new option, nvme_device_path, so that we can properly mount nvme block devices. --- builder/amazon/chroot/builder.go | 1 + builder/amazon/chroot/step_mount_device.go | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/builder/amazon/chroot/builder.go b/builder/amazon/chroot/builder.go index 02923ce31..a8b20276a 100644 --- a/builder/amazon/chroot/builder.go +++ b/builder/amazon/chroot/builder.go @@ -33,6 +33,7 @@ type Config struct { CommandWrapper string `mapstructure:"command_wrapper"` CopyFiles []string `mapstructure:"copy_files"` DevicePath string `mapstructure:"device_path"` + NVMEDevicePath string `mapstructure:"nvme_device_path"` FromScratch bool `mapstructure:"from_scratch"` MountOptions []string `mapstructure:"mount_options"` MountPartition string `mapstructure:"mount_partition"` diff --git a/builder/amazon/chroot/step_mount_device.go b/builder/amazon/chroot/step_mount_device.go index c05ae2e77..38ec62164 100644 --- a/builder/amazon/chroot/step_mount_device.go +++ b/builder/amazon/chroot/step_mount_device.go @@ -35,6 +35,10 @@ func (s *StepMountDevice) Run(_ context.Context, state multistep.StateBag) multi config := state.Get("config").(*Config) ui := state.Get("ui").(packer.Ui) device := state.Get("device").(string) + if config.NVMEDevicePath != "" { + // customizable device path for mounting NVME block devices on c5 and m5 HVM + device = config.NVMEDevicePath + } wrappedCommand := state.Get("wrappedCommand").(CommandWrapper) var virtualizationType string @@ -47,6 +51,7 @@ func (s *StepMountDevice) Run(_ context.Context, state multistep.StateBag) multi } ctx := config.ctx + ctx.Data = &mountPathData{Device: filepath.Base(device)} mountPath, err := interpolate.Render(config.MountPath, &ctx) @@ -98,7 +103,7 @@ func (s *StepMountDevice) Run(_ context.Context, state multistep.StateBag) multi ui.Error(err.Error()) return multistep.ActionHalt } - + log.Printf("[DEBUG] (step mount) mount command is %s", mountCommand) cmd := ShellCommand(mountCommand) cmd.Stderr = stderr if err := cmd.Run(); err != nil { From b5095539a7e85b9ebc4f0cf16f0654fc8690e954 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 23 May 2018 09:58:15 -0700 Subject: [PATCH 065/138] add docs --- .../docs/builders/amazon-chroot.html.md | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/website/source/docs/builders/amazon-chroot.html.md b/website/source/docs/builders/amazon-chroot.html.md index da1752144..ce03970ee 100644 --- a/website/source/docs/builders/amazon-chroot.html.md +++ b/website/source/docs/builders/amazon-chroot.html.md @@ -225,6 +225,15 @@ each category, the available configuration keys are alphabetized. command](http://linuxcommand.org/man_pages/mount8.html) for valid file system specific options +- `nvme_device_path` (string) - When we call the mount command (by default + `mount -o device dir`), the string provided in `nvme_mount_path` will + replace `device` in that command. When this option is not set, `device` in + that command will be something like `/dev/sdf1`, mirroring the attached + device name. This assumption works for most instances but will fail with c5 + and m5 instances. 
In order to use the chroot builder with c5 and m5 + instances, you must manually set `nvme_device_path`, `device_path`, and + `mount_path`. + - `pre_mount_commands` (array of strings) - A series of commands to execute after attaching the root volume and before mounting the chroot. This is not required unless using `from_scratch`. If so, this should include any @@ -370,6 +379,7 @@ its internals such as finding an available device. ## Gotchas +### Unmounting the Filesystem One of the difficulties with using the chroot builder is that your provisioning scripts must not leave any processes running or packer will be unable to unmount the filesystem. @@ -399,6 +409,54 @@ services: } ``` +### Using Instances with NVMe block devices. +In C5, C5d, M5, and i3.metal instances, EBS volumes are exposed as NVMe block +devices [reference](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html). +In order to correctly mount these devices, you have to do some extra legwork, +involving the `nvme_device_path` option above. Read that for more information. + +A working example for mounting an NVMe device is below: + +``` +{ + "variables": { + "region" : "us-east-2" + }, + "builders": [ + { + "type": "amazon-chroot", + "region": "{{user `region`}}", + "source_ami_filter": { + "filters": { + "virtualization-type": "hvm", + "name": "amzn-ami-hvm-*", + "root-device-type": "ebs" + }, + "owners": ["137112412989"], + "most_recent": true + }, + "ena_support": true, + "ami_name": "amazon-chroot-test-{{timestamp}}", + "mount_path": "/mnt/my/mount/path/", + "nvme_device_path": "/dev/nvme1n1p", + "device_path": "/dev/sdf" + } + ], + + "provisioners": [ + { + "type": "shell", + "inline": ["echo Test > /tmp/test.txt"] + } + ] +} +``` + +Note that in the `nvme_device_path` you must end with the `p`; if you try to +define the partition in this path (e.g. "nvme_device_path": `/dev/nvme1n1p1`) +and haven't also set the `"mount_partition": 0`, a `1` will be appended to the +`nvme_device_path` and Packer will fail. + ## Building From Scratch This example demonstrates the essentials of building an image from scratch. A From 974e464f356b0446f59878b476db47a78a704063 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Wed, 23 May 2018 13:34:56 -0700 Subject: [PATCH 066/138] fix docs because we don't need to actually set the mount path --- website/source/docs/builders/amazon-chroot.html.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/website/source/docs/builders/amazon-chroot.html.md b/website/source/docs/builders/amazon-chroot.html.md index ce03970ee..9c16fc320 100644 --- a/website/source/docs/builders/amazon-chroot.html.md +++ b/website/source/docs/builders/amazon-chroot.html.md @@ -231,8 +231,7 @@ each category, the available configuration keys are alphabetized. that command will be something like `/dev/sdf1`, mirroring the attached device name. This assumption works for most instances but will fail with c5 and m5 instances. In order to use the chroot builder with c5 and m5 - instances, you must manually set `nvme_device_path`, `device_path`, and - `mount_path`. + instances, you must manually set `nvme_device_path` and `device_path`. - `pre_mount_commands` (array of strings) - A series of commands to execute after attaching the root volume and before mounting the chroot. 
This is not @@ -437,7 +436,6 @@ A working example for mounting an NVMe device is below: }, "ena_support": true, "ami_name": "amazon-chroot-test-{{timestamp}}", - "mount_path": "/mnt/my/mount/path/", "nvme_device_path": "/dev/nvme1n1p", "device_path": "/dev/sdf" } From 0a8f7f28a0de31556ede83894ff1b11c672f9b63 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 23 May 2018 15:24:18 -0700 Subject: [PATCH 067/138] add note about key interval to virtualbox docs --- website/source/docs/builders/virtualbox-iso.html.md.erb | 5 ++++- website/source/docs/builders/virtualbox-ovf.html.md.erb | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/website/source/docs/builders/virtualbox-iso.html.md.erb b/website/source/docs/builders/virtualbox-iso.html.md.erb index 78de79af8..788f5e709 100644 --- a/website/source/docs/builders/virtualbox-iso.html.md.erb +++ b/website/source/docs/builders/virtualbox-iso.html.md.erb @@ -335,7 +335,10 @@ all typed in sequence. It is an array only to improve readability within the template. The boot command is sent to the VM through the `VBoxManage` utility in as few -invocations as possible. +invocations as possible. We send each character in groups of 25, with a default +delay of 100ms between groups. The delay alleviates issues with latency and CPU +contention. If you notice missing keys, you can tune this delay by specifying e.g. +`PACKER_KEY_INTERVAL=500ms` to wait longer between each group of characters. <%= partial "partials/builders/boot-command" %> diff --git a/website/source/docs/builders/virtualbox-ovf.html.md.erb b/website/source/docs/builders/virtualbox-ovf.html.md.erb index 89edcd89f..ae4317240 100644 --- a/website/source/docs/builders/virtualbox-ovf.html.md.erb +++ b/website/source/docs/builders/virtualbox-ovf.html.md.erb @@ -298,7 +298,10 @@ all typed in sequence. It is an array only to improve readability within the template. The boot command is sent to the VM through the `VBoxManage` utility in as few -invocations as possible. +invocations as possible. We send each character in groups of 25, with a default +delay of 100ms between groups. The delay alleviates issues with latency and CPU +contention. If you notice missing keys, you can tune this delay by specifying e.g. +`PACKER_KEY_INTERVAL=500ms` to wait longer between each group of characters. <%= partial "partials/builders/boot-command" %> From 846f94c96487cf2a3fac245b4a4f5bda7c35e331 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 8 May 2018 10:35:09 -0700 Subject: [PATCH 068/138] implement template variable for accessing WinRM password in either environemnt variables or directly in execute_command or inline commands. 
--- common/shell-local/config.go | 5 ++ common/shell-local/config_test.go | 16 ++++++ common/shell-local/run.go | 56 ++++++++++++++++--- .../docs/provisioners/shell-local.html.md | 25 ++++++--- 4 files changed, 85 insertions(+), 17 deletions(-) create mode 100644 common/shell-local/config_test.go diff --git a/common/shell-local/config.go b/common/shell-local/config.go index 9eb657ff9..82a41a192 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -56,6 +56,11 @@ type Config struct { } func Decode(config *Config, raws ...interface{}) error { + //Create passthrough for winrm password so we can fill it in once we know it + config.Ctx.Data = &EnvVarsTemplate{ + WinRMPassword: `{{.WinRMPassword}}`, + } + err := configHelper.Decode(&config, &configHelper.DecodeOpts{ Interpolate: true, InterpolateContext: &config.Ctx, diff --git a/common/shell-local/config_test.go b/common/shell-local/config_test.go new file mode 100644 index 000000000..7c74581ae --- /dev/null +++ b/common/shell-local/config_test.go @@ -0,0 +1,16 @@ +package shell_local + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestConvertToLinuxPath(t *testing.T) { + winPath := "C:/path/to/your/file" + winBashPath := "/mnt/c/path/to/your/file" + converted, _ := ConvertToLinuxPath(winPath) + assert.Equal(t, winBashPath, converted, + "Should have converted %s to %s -- not %s", winPath, winBashPath, converted) + +} diff --git a/common/shell-local/run.go b/common/shell-local/run.go index b65196ea9..705a5ff90 100644 --- a/common/shell-local/run.go +++ b/common/shell-local/run.go @@ -9,14 +9,20 @@ import ( "sort" "strings" + commonhelper "github.com/hashicorp/packer/helper/common" "github.com/hashicorp/packer/packer" "github.com/hashicorp/packer/template/interpolate" ) type ExecuteCommandTemplate struct { - Vars string - Script string - Command string + Vars string + Script string + Command string + WinRMPassword string +} + +type EnvVarsTemplate struct { + WinRMPassword string } func Run(ui packer.Ui, config *Config) (bool, error) { @@ -63,8 +69,12 @@ func Run(ui packer.Ui, config *Config) (bool, error) { // buffers and for reading the final exit status. flattenedCmd := strings.Join(interpolatedCmds, " ") cmd := &packer.RemoteCmd{Command: flattenedCmd} - log.Printf("[INFO] (shell-local): starting local command: %s", flattenedCmd) - + sanitized := flattenedCmd + if len(getWinRMPassword(config.PackerBuildName)) > 0 { + sanitized = strings.Replace(flattenedCmd, + getWinRMPassword(config.PackerBuildName), "*****", -1) + } + log.Printf("[INFO] (shell-local): starting local command: %s", sanitized) if err := cmd.StartWithUi(comm, ui); err != nil { return false, fmt.Errorf( "Error executing script: %s\n\n"+ @@ -96,7 +106,22 @@ func createInlineScriptFile(config *Config) (string, error) { log.Printf("[INFO] (shell-local): Prepending inline script with %s", shebang) writer.WriteString(shebang) } + config.Ctx.Data = &EnvVarsTemplate{ + WinRMPassword: getWinRMPassword(config.PackerBuildName), + } + + // generate context so you can interpolate the command + config.Ctx.Data = &EnvVarsTemplate{ + WinRMPassword: getWinRMPassword(config.PackerBuildName), + } + for _, command := range config.Inline { + // interpolate command to check for template variables. 
+ command, err := interpolate.Render(command, &config.Ctx) + if err != nil { + return "", err + } + if _, err := writer.WriteString(command + "\n"); err != nil { return "", fmt.Errorf("Error preparing shell script: %s", err) } @@ -118,9 +143,10 @@ func createInlineScriptFile(config *Config) (string, error) { // the host OS func createInterpolatedCommands(config *Config, script string, flattenedEnvVars string) ([]string, error) { config.Ctx.Data = &ExecuteCommandTemplate{ - Vars: flattenedEnvVars, - Script: script, - Command: script, + Vars: flattenedEnvVars, + Script: script, + Command: script, + WinRMPassword: getWinRMPassword(config.PackerBuildName), } interpolatedCmds := make([]string, len(config.ExecuteCommand)) @@ -142,8 +168,17 @@ func createFlattenedEnvVars(config *Config) (string, error) { envVars["PACKER_BUILD_NAME"] = fmt.Sprintf("%s", config.PackerBuildName) envVars["PACKER_BUILDER_TYPE"] = fmt.Sprintf("%s", config.PackerBuilderType) + // interpolate environment variables + config.Ctx.Data = &EnvVarsTemplate{ + WinRMPassword: getWinRMPassword(config.PackerBuildName), + } // Split vars into key/value components for _, envVar := range config.Vars { + envVar, err := interpolate.Render(envVar, &config.Ctx) + if err != nil { + return "", err + } + // Split vars into key/value components keyValue := strings.SplitN(envVar, "=", 2) // Store pair, replacing any single quotes in value so they parse // correctly with required environment variable format @@ -162,3 +197,8 @@ func createFlattenedEnvVars(config *Config) (string, error) { } return flattened, nil } + +func getWinRMPassword(buildName string) string { + winRMPass, _ := commonhelper.RetrieveSharedState("winrm_password", buildName) + return winRMPass +} diff --git a/website/source/docs/provisioners/shell-local.html.md b/website/source/docs/provisioners/shell-local.html.md index a7400c589..6a04272ad 100644 --- a/website/source/docs/provisioners/shell-local.html.md +++ b/website/source/docs/provisioners/shell-local.html.md @@ -41,6 +41,10 @@ Exactly *one* of the following is required: - `command` (string) - This is a single command to execute. It will be written to a temporary file and run using the `execute_command` call below. + If you are building a windows vm on AWS, Azure or Google Compute and would + like to access the generated password that Packer uses to connect to the + instance via WinRM, you can use the template variable `{{.WinRMPassword}}` + to set this as an environment variable. - `inline` (array of strings) - This is an array of commands to execute. The commands are concatenated by newlines and turned into a single file, so they @@ -60,24 +64,22 @@ Exactly *one* of the following is required: Optional parameters: -- `execute_command` (array of strings) - The command to use to execute - the script. By default this is `["/bin/sh", "-c", "{{.Command}}"]`. The value - is an array of arguments executed directly by the OS. The value of this is - treated as [configuration - template](/docs/templates/engine.html). The only available - variable is `Command` which is the command to execute. - - `environment_vars` (array of strings) - An array of key/value pairs to inject prior to the `execute_command`. The format should be `key=value`. Packer injects some environmental variables by default into the environment, - as well, which are covered in the section below. + as well, which are covered in the section below. 
If you are building a + windows vm on AWS, Azure or Google Compute and would like to access the + generated password that Packer uses to connect to the instance via WinRM, + you can use the template variable `{{.WinRMPassword}}` to set this as an + environment variable. For example: + `"environment_vars": "WINRMPASS={{.WinRMPassword}}"` - `execute_command` (array of strings) - The command used to execute the script. By default this is `["/bin/sh", "-c", "{{.Vars}}, "{{.Script}}"]` on unix and `["cmd", "/c", "{{.Vars}}", "{{.Script}}"]` on windows. This is treated as a [template engine](/docs/templates/engine.html). There are two available variables: `Script`, which is the path to the script - to run, and `Vars`, which is the list of `environment_vars`, if configured + to run, and `Vars`, which is the list of `environment_vars`, if configured. If you choose to set this option, make sure that the first element in the array is the shell program you want to use (for example, "sh"), and a later @@ -94,6 +96,11 @@ Optional parameters: sake of clarity, as even when you set only a single `command` to run, Packer writes it to a temporary file and then runs it as a script. + If you are building a windows vm on AWS, Azure or Google Compute and would + like to access the generated password that Packer uses to connect to the + instance via WinRM, you can use the template variable `{{.WinRMPassword}}` + to set this as an environment variable. + - `inline_shebang` (string) - The [shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when running commands specified by `inline`. By default, this is `/bin/sh -e`. If From 08218222dc17c7c1da4d738e9ad71bcd6f2f6080 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 24 May 2018 10:47:37 -0700 Subject: [PATCH 069/138] add get windows default credentials to oci builder --- builder/oracle/oci/builder.go | 5 ++ builder/oracle/oci/driver_oci.go | 11 ++++ .../oci/step_get_default_credentials.go | 62 +++++++++++++++++++ 3 files changed, 78 insertions(+) create mode 100644 builder/oracle/oci/step_get_default_credentials.go diff --git a/builder/oracle/oci/builder.go b/builder/oracle/oci/builder.go index 6ceaaf2d2..1554f6013 100644 --- a/builder/oracle/oci/builder.go +++ b/builder/oracle/oci/builder.go @@ -58,6 +58,11 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }, &stepCreateInstance{}, &stepInstanceInfo{}, + &stepGetDefaultCredentials{ + Debug: b.config.PackerDebug, + Comm: &b.config.Comm, + BuildName: b.config.PackerBuildName, + }, &communicator.StepConnect{ Config: &b.config.Comm, Host: ocommon.CommHost, diff --git a/builder/oracle/oci/driver_oci.go b/builder/oracle/oci/driver_oci.go index 90d05ea0a..ffb3732c1 100644 --- a/builder/oracle/oci/driver_oci.go +++ b/builder/oracle/oci/driver_oci.go @@ -112,6 +112,17 @@ func (d *driverOCI) GetInstanceIP(id string) (string, error) { return *vnic.PublicIp, nil } +func (d *driverOCI) GetInstanceInitialCredentials(id string) (string, string, error) { + credentials, err := d.computeClient.GetWindowsInstanceInitialCredentials(context.TODO(), core.GetWindowsInstanceInitialCredentialsRequest{ + InstanceId: &id, + }) + if err != nil { + return "", "", err + } + + return *credentials.InstanceCredentials.Username, *credentials.InstanceCredentials.Password, err +} + // TerminateInstance terminates a compute instance. 
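// Editor's sketch (not part of the patch): GetWindowsInstanceInitialCredentials
// above wraps the OCI SDK call of the same name, and stepGetDefaultCredentials
// consumes it roughly like this — names mirror the diff, error handling trimmed:
//
//	username, password, err := driver.GetInstanceInitialCredentials(id)
//	if err == nil {
//		s.Comm.WinRMUser, s.Comm.WinRMPassword = username, password
//	}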
func (d *driverOCI) TerminateInstance(id string) error { _, err := d.computeClient.TerminateInstance(context.TODO(), core.TerminateInstanceRequest{ diff --git a/builder/oracle/oci/step_get_default_credentials.go b/builder/oracle/oci/step_get_default_credentials.go new file mode 100644 index 000000000..7503b853c --- /dev/null +++ b/builder/oracle/oci/step_get_default_credentials.go @@ -0,0 +1,62 @@ +package oci + +import ( + "context" + "fmt" + "log" + + commonhelper "github.com/hashicorp/packer/helper/common" + "github.com/hashicorp/packer/helper/communicator" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" +) + +type stepGetDefaultCredentials struct { + Debug bool + Comm *communicator.Config + BuildName string +} + +func (s *stepGetDefaultCredentials) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { + var ( + driver = state.Get("driver").(*driverOCI) + ui = state.Get("ui").(packer.Ui) + id = state.Get("instance_id").(string) + ) + + // Skip if we're not using winrm + if s.Comm.Type != "winrm" { + log.Printf("[INFO] Not using winrm communicator, skipping get password...") + return multistep.ActionContinue + } + + // If we already have a password, skip it + if s.Comm.WinRMPassword != "" { + ui.Say("Skipping waiting for password since WinRM password set...") + return multistep.ActionContinue + } + + username, password, err := driver.GetInstanceInitialCredentials(id) + if err != nil { + err = fmt.Errorf("Error getting instance's credentials: %s", err) + ui.Error(err.Error()) + state.Put("error", err) + return multistep.ActionHalt + } + s.Comm.WinRMPassword = password + s.Comm.WinRMUser = username + + if s.Debug { + ui.Message(fmt.Sprintf( + "[DEBUG] (OCI default credentials): Credentials (since debug is enabled): %s", password)) + } + + // store so that we can access this later during provisioning + commonhelper.SetSharedState("winrm_password", s.Comm.WinRMPassword, s.BuildName) + + return multistep.ActionContinue +} + +func (s *stepGetDefaultCredentials) Cleanup(state multistep.StateBag) { + // no cleanup +} From af7f7f2ce9149e2d7b853e6d91547316c959f92b Mon Sep 17 00:00:00 2001 From: DanHam Date: Thu, 24 May 2018 21:05:03 +0100 Subject: [PATCH 070/138] Fix redundant Sprintf --- builder/amazon/common/run_config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index bc596e580..129d4d541 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -85,7 +85,7 @@ func (c *RunConfig) Prepare(ctx *interpolate.Context) []error { c.SSHInterface != "public_dns" && c.SSHInterface != "private_dns" && c.SSHInterface != "" { - errs = append(errs, fmt.Errorf(fmt.Sprintf("Unknown interface type: %s", c.SSHInterface))) + errs = append(errs, fmt.Errorf("Unknown interface type: %s", c.SSHInterface)) } if c.SSHKeyPairName != "" { From 494e8f9d82d2640a6ad2c4b725d8e602d407a3d4 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Thu, 24 May 2018 21:27:24 +0100 Subject: [PATCH 071/138] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8fde73f0c..be9b5ceff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ ### IMPROVEMENTS: +* builder/amazon: Amazon builders other than `chroot` now support T2 unlimited instances [GH-6265] * builder/azure: Updated Azure SDK to v15.0.0 [GH-6224] * builder/azure: Devicelogin Support for Windows [GH-6285] From 
4dc074e786bf84583a8eac30f23ee100121a2ae8 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Thu, 24 May 2018 16:26:00 -0700 Subject: [PATCH 072/138] add qemu templates --- website/source/community-tools.html.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/community-tools.html.md b/website/source/community-tools.html.md index 6dcf6445b..cffed03da 100644 --- a/website/source/community-tools.html.md +++ b/website/source/community-tools.html.md @@ -38,6 +38,9 @@ power of Packer templates. * [geerlingguy/packer-ubuntu-1604](https://github.com/geerlingguy/packer-ubuntu-1604) \- Ubuntu 16.04 minimal Vagrant Box using Ansible provisioner +* [jakobadam/packer-qemu-templates](https://github.com/jakobadam/packer-qemu-templates) + - QEMU templates for various operating systems + ## Wrappers - [packer-config](https://github.com/ianchesal/packer-config) - a Ruby model that lets you build Packer configurations in Ruby From cd6390ca171db4598764042c5a5c54e9f4a673af Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Thu, 24 May 2018 17:09:17 -0700 Subject: [PATCH 073/138] update changelog pre- 1.2.4 release --- CHANGELOG.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index be9b5ceff..abfd31afb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,12 +4,34 @@ * builder/vmware-esxi: Remove floppy files from the remote server on cleanup. [GH-6206] * core: When using `-on-error=[abort|ask]`, output the error to the user. [GH-6252] +* builder/amazon: Can now force the chroot builder to mount an entire block device instead of a partition [GH-6194] +* builder/chroot: A new template option, `nvme_device_path` has been added to provide a workaround for users who need the amazon-chroot builder to mount a NVMe volume on their instances. [GH-6295] +* communicator/winrm: Updated dependencies to fix a race condition [GH-6261] +* builder/hyper-v: Fix command for mounting multiple disks [GH-6267] +* provisioner/shell: Remove file stat that was causing problems uploading files [GH-6239] +* provisioner/puppet: Extra-Arguments are no longer prematurely interpolated.[GH-6215] +* builder/azure: windows-sql-cloud is now in the default list of projects to check for provided images. [GH-6210] +* builder/hyperv: Enable IP retrieval for Server 2008 R2 hosts. [GH-6219] +* builder/hyperv: Fix bug in MAC address specification on Hyper-V. [GH-6187] +* builder/parallels-pvm: Add missing disk compaction step. [GH-6202] ### IMPROVEMENTS: * builder/amazon: Amazon builders other than `chroot` now support T2 unlimited instances [GH-6265] * builder/azure: Updated Azure SDK to v15.0.0 [GH-6224] * builder/azure: Devicelogin Support for Windows [GH-6285] +* builder/hyper-v: Hyper-V builds now connect to vnc display by default when building [GH-6243] +* provisoner/shell-local: New options have been added to create feature parity with the shell-local post-processor. This feature now works on Windows hosts. [GH-5956] +* post-processor/shell-local: New options have been added to create feature parity with the shell-local provisioner. This feature now works on Windows hosts. [GH-5956] +* builder/hyper-v: New `use_fixed_vhd_format` allows vm export in an Azure-compatible format [GH-6101] +* builder/azure: Faster deletion of Azure Resource Groups. [GH-6269] +* builder/hyperv: New config option for specifying what secure boot template to use, allowing secure boot of linux vms. 
[GH-5883] +* provisioner/chef: New config option allows user to skip cleanup of chef client staging directory. [GH-4300] +* builder/azure: Allow device login for US government cloud. [GH-6105] +* builder/qemu: Add support for hvf accelerator. [GH-6193] +* builder/azure: Enable simultaneous builds within one resource group. [GH-6231] +* builder/scaleway: Fix SSH communicator connection issue. [GH-6238] +* core: Add opt-in Packer top-level command autocomplete [GH-5454] ## 1.2.3 (April 25, 2018) From 788418cff20a047a9f0124a02d465c8d2eeda2c7 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 25 May 2018 03:42:40 -0700 Subject: [PATCH 074/138] Add unit test to show that we handle tars safely --- .../decompress-tar/outside_parent.tar | Bin 0 -> 10240 bytes post-processor/vagrant/virtualbox.go | 8 ++++++++ post-processor/vagrant/virtualbox_test.go | 18 ++++++++++++++++++ 3 files changed, 26 insertions(+) create mode 100644 common/test-fixtures/decompress-tar/outside_parent.tar diff --git a/common/test-fixtures/decompress-tar/outside_parent.tar b/common/test-fixtures/decompress-tar/outside_parent.tar new file mode 100644 index 0000000000000000000000000000000000000000..f08df1e6560b1137d6103ab0968518339bb3566c GIT binary patch literal 10240 zcmeIxOAdlC5P;#XIYo_oC@t12cmvnSBanbKg7>#*OppZ|Bq8RX&9t;>r(c*bjPjy& z;mu`Ytx!I!QewzQdw6A~lXo6I8Y5z(4^|s(wG}E({6- Date: Sat, 26 May 2018 21:58:08 +0200 Subject: [PATCH 075/138] Revert "Unify handling PlaybookFile and PlaybookFiles." This reverts commit 81db142c8a29a45479324e2830cbdb08a5508574. --- provisioner/ansible-local/provisioner.go | 18 +++++- provisioner/ansible-local/provisioner_test.go | 55 +------------------ 2 files changed, 17 insertions(+), 56 deletions(-) diff --git a/provisioner/ansible-local/provisioner.go b/provisioner/ansible-local/provisioner.go index 098f67ef1..bd4559388 100644 --- a/provisioner/ansible-local/provisioner.go +++ b/provisioner/ansible-local/provisioner.go @@ -114,8 +114,6 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { err = validateFileConfig(p.config.PlaybookFile, "playbook_file", true) if err != nil { errs = packer.MultiErrorAppend(errs, err) - } else { - p.playbookFiles = append(p.playbookFiles, p.config.PlaybookFile) } } @@ -202,7 +200,14 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } } - if err := p.provisionPlaybookFiles(ui, comm); err != nil { + if p.config.PlaybookFile != "" { + ui.Message("Uploading main Playbook file...") + src := p.config.PlaybookFile + dst := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(src))) + if err := p.uploadFile(ui, comm, dst, src); err != nil { + return fmt.Errorf("Error uploading main playbook: %s", err) + } + } else if err := p.provisionPlaybookFiles(ui, comm); err != nil { return err } @@ -384,6 +389,13 @@ func (p *Provisioner) executeAnsible(ui packer.Ui, comm packer.Communicator) err } } + if p.config.PlaybookFile != "" { + playbookFile := filepath.ToSlash(filepath.Join(p.config.StagingDir, filepath.Base(p.config.PlaybookFile))) + if err := p.executeAnsiblePlaybook(ui, comm, playbookFile, extraArgs, inventory); err != nil { + return err + } + } + for _, playbookFile := range p.playbookFiles { playbookFile = filepath.ToSlash(filepath.Join(p.config.StagingDir, playbookFile)) if err := p.executeAnsiblePlaybook(ui, comm, playbookFile, extraArgs, inventory); err != nil { diff --git a/provisioner/ansible-local/provisioner_test.go b/provisioner/ansible-local/provisioner_test.go index 4370b060c..2f6174f3e 100644 --- 
a/provisioner/ansible-local/provisioner_test.go +++ b/provisioner/ansible-local/provisioner_test.go @@ -118,58 +118,6 @@ func TestProvisionerPrepare_PlaybookFiles(t *testing.T) { } } -func TestProvisionerProvision_PlaybookFile(t *testing.T) { - var p Provisioner - config := testConfig() - - playbook := createTempFile("") - defer os.Remove(playbook) - - config["playbook_file"] = playbook - err := p.Prepare(config) - if err != nil { - t.Fatalf("err: %s", err) - } - - comm := &communicatorMock{} - if err := p.Provision(&uiStub{}, comm); err != nil { - t.Fatalf("err: %s", err) - } - - assertPlaybooksUploaded(comm, []string{playbook}) - assertPlaybooksExecuted(comm, []string{playbook}) -} - -func TestProvisionerProvision_PlaybookFileWithPlaybookDir(t *testing.T) { - var p Provisioner - config := testConfig() - - playbook_dir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("Failed to create playbook_dir: %s", err) - } - defer os.RemoveAll(playbook_dir) - playbook := createTempFile(playbook_dir) - - playbookName := filepath.Base(playbook) - playbookInPlaybookDir := strings.TrimPrefix(playbook, playbook_dir) - - config["playbook_file"] = playbook - config["playbook_dir"] = playbook_dir - err = p.Prepare(config) - if err != nil { - t.Fatalf("err: %s", err) - } - - comm := &communicatorMock{} - if err := p.Provision(&uiStub{}, comm); err != nil { - t.Fatalf("err: %s", err) - } - - assertPlaybooksNotUploaded(comm, []string{playbookName}) - assertPlaybooksExecuted(comm, []string{playbookInPlaybookDir}) -} - func TestProvisionerProvision_PlaybookFiles(t *testing.T) { var p Provisioner config := testConfig() @@ -413,6 +361,7 @@ func testProvisionerProvisionDockerWithPlaybookFiles(t *testing.T, templateStrin if err != nil { t.Fatalf("Error preparing download: %s", err) } + defer os.Remove("hello_world") // Add hooks so the provisioners run during the build hooks := map[string][]packer.Hook{} @@ -431,7 +380,6 @@ func testProvisionerProvisionDockerWithPlaybookFiles(t *testing.T, templateStrin t.Fatalf("Error running build %s", err) } defer artifact.Destroy() - defer os.Remove("hello_world") actualContent, err := ioutil.ReadFile("hello_world") if err != nil { @@ -461,6 +409,7 @@ func assertPlaybooksExecuted(comm *communicatorMock, playbooks []string) { } func assertPlaybooksUploaded(comm *communicatorMock, playbooks []string) { + fmt.Println(comm.uploadDestination) uploadIndex := 0 for _, playbook := range playbooks { playbook = filepath.ToSlash(playbook) From 263a3c6910a943cd159d41318a89b544e036852b Mon Sep 17 00:00:00 2001 From: localghost Date: Sat, 26 May 2018 21:59:43 +0200 Subject: [PATCH 076/138] Include some of the refactoring reverted by previous revert commit. 
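The hunk below moves the `defer os.Remove("hello_world")` cleanup so that it is only registered once the build has actually run. A minimal standalone sketch of that ordering concern follows; the `build` helper and its failure path are invented for illustration, and only the `hello_world` file name mirrors the test fixture:

```go
package main

import (
	"fmt"
	"os"
)

// build stands in for the Packer build in the test; it only creates the
// output file when it succeeds. (Hypothetical helper, for illustration.)
func build(succeed bool) error {
	if !succeed {
		return fmt.Errorf("build failed before producing output")
	}
	return os.WriteFile("hello_world", []byte("hi\n"), 0644)
}

func main() {
	// A defer is only scheduled when its statement executes, so registering
	// cleanup before the build would try to remove a file that a failed
	// build never wrote. Registering it after a successful build scopes
	// cleanup to the case where the artifact actually exists.
	if err := build(true); err != nil {
		fmt.Println("err:", err)
		return
	}
	defer os.Remove("hello_world")

	fmt.Println("artifact produced; cleanup runs when main returns")
}
```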
--- provisioner/ansible-local/provisioner_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/provisioner/ansible-local/provisioner_test.go b/provisioner/ansible-local/provisioner_test.go index 2f6174f3e..f3dec49cb 100644 --- a/provisioner/ansible-local/provisioner_test.go +++ b/provisioner/ansible-local/provisioner_test.go @@ -361,7 +361,6 @@ func testProvisionerProvisionDockerWithPlaybookFiles(t *testing.T, templateStrin if err != nil { t.Fatalf("Error preparing download: %s", err) } - defer os.Remove("hello_world") // Add hooks so the provisioners run during the build hooks := map[string][]packer.Hook{} @@ -379,6 +378,7 @@ func testProvisionerProvisionDockerWithPlaybookFiles(t *testing.T, templateStrin if err != nil { t.Fatalf("Error running build %s", err) } + defer os.Remove("hello_world") defer artifact.Destroy() actualContent, err := ioutil.ReadFile("hello_world") @@ -409,7 +409,6 @@ func assertPlaybooksExecuted(comm *communicatorMock, playbooks []string) { } func assertPlaybooksUploaded(comm *communicatorMock, playbooks []string) { - fmt.Println(comm.uploadDestination) uploadIndex := 0 for _, playbook := range playbooks { playbook = filepath.ToSlash(playbook) From 67185f61a53fffe79d6e4251f2faecb12ff2dd11 Mon Sep 17 00:00:00 2001 From: Johann Queuniet Date: Mon, 28 May 2018 15:21:02 +0200 Subject: [PATCH 077/138] Clarify scaleway token definitions in doc Clarify the definition of both tokens, the previous one was a bit confusing and not consistent with Terraform where "organization" designates the access key instead of the authentication tokens. --- website/source/docs/builders/scaleway.html.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/website/source/docs/builders/scaleway.html.md b/website/source/docs/builders/scaleway.html.md index c712ce2c6..9788cd918 100644 --- a/website/source/docs/builders/scaleway.html.md +++ b/website/source/docs/builders/scaleway.html.md @@ -35,17 +35,19 @@ In addition to the options listed here, a builder. ### Required: + -- `api_access_key` (string) - The api\_access\_key to use to access your - account. It can also be specified via environment variable +- `api_access_key` (string) - The organization access key to use to identify your + organization. It can also be specified via environment variable `SCALEWAY_API_ACCESS_KEY`. Your access key is available in the ["Credentials" section](https://cloud.scaleway.com/#/credentials) of the control panel. -- `api_token` (string) - The organization TOKEN to use to access your +- `api_token` (string) - The token to use to authenticate with your account. It can also be specified via environment variable - `SCALEWAY_API_TOKEN`. Your tokens are available in the ["Credentials" - section](https://cloud.scaleway.com/#/credentials) of the control panel. + `SCALEWAY_API_TOKEN`. You can see and generate tokens in the + ["Credentials" section](https://cloud.scaleway.com/#/credentials) of the + control panel. - `image` (string) - The UUID of the base image to use. This is the image that will be used to launch a new server and provision it. 
See From dba9f1b01535623a505b12ebdfcb17b46c3da374 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20L=C3=A9one?= Date: Mon, 28 May 2018 16:18:59 +0200 Subject: [PATCH 078/138] Remove the VC product line --- builder/scaleway/builder_test.go | 4 ++-- website/source/docs/builders/scaleway.html.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/scaleway/builder_test.go b/builder/scaleway/builder_test.go index 230c9c0c6..1d42ed7e9 100644 --- a/builder/scaleway/builder_test.go +++ b/builder/scaleway/builder_test.go @@ -12,7 +12,7 @@ func testConfig() map[string]interface{} { "api_access_key": "foo", "api_token": "bar", "region": "ams1", - "commercial_type": "VC1S", + "commercial_type": "START1-S", "ssh_username": "root", "image": "image-uuid", } @@ -98,7 +98,7 @@ func TestBuilderPrepare_CommercialType(t *testing.T) { t.Fatalf("should error") } - expected := "VC1S" + expected := "START1-S" config["commercial_type"] = expected b = Builder{} diff --git a/website/source/docs/builders/scaleway.html.md b/website/source/docs/builders/scaleway.html.md index c712ce2c6..081871643 100644 --- a/website/source/docs/builders/scaleway.html.md +++ b/website/source/docs/builders/scaleway.html.md @@ -58,7 +58,7 @@ builder. - `commercial_type` (string) - The name of the server commercial type: `ARM64-128GB`,`ARM64-16GB`,`ARM64-2GB`,`ARM64-32GB`,`ARM64-4GB`, `ARM64-64GB`, - `ARM64-8GB`,`C1`,`C2L`,`C2M`,`C2S`,`VC1L`,`VC1M`,`VC1S`, + `ARM64-8GB`,`C1`,`C2L`,`C2M`,`C2S`,`START1-L`,`START1-M`,`START1-S`,`START1-XS`, `X64-120GB`,`X64-15GB`,`X64-30GB`,`X64-60GB` ### Optional: @@ -84,7 +84,7 @@ access tokens: "api_token": "YOUR TOKEN", "image": "UUID OF THE BASE IMAGE", "region": "par1", - "commercial_type": "VC1S", + "commercial_type": "START1-S", "ssh_username": "root", "ssh_private_key_file": "~/.ssh/id_rsa" } From 6a769f08d87442ab0c95585429e5b000bcb8a9c7 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 29 May 2018 11:17:51 -0700 Subject: [PATCH 079/138] regenerate boot command code --- common/bootcommand/boot_command.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/common/bootcommand/boot_command.go b/common/bootcommand/boot_command.go index 18578a42f..95133f334 100644 --- a/common/bootcommand/boot_command.go +++ b/common/bootcommand/boot_command.go @@ -1,3 +1,5 @@ +// Code generated by pigeon; DO NOT EDIT. 
+
 package bootcommand

 import (

From 3afca6905b7a8631f4049c33bfb8a3e742b101d2 Mon Sep 17 00:00:00 2001
From: Megan Marsh
Date: Tue, 29 May 2018 11:47:27 -0700
Subject: [PATCH 080/138] remove duplicate assignment

---
 common/shell-local/run.go | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/common/shell-local/run.go b/common/shell-local/run.go
index 705a5ff90..51bb047b0 100644
--- a/common/shell-local/run.go
+++ b/common/shell-local/run.go
@@ -106,9 +106,6 @@ func createInlineScriptFile(config *Config) (string, error) {
 		log.Printf("[INFO] (shell-local): Prepending inline script with %s", shebang)
 		writer.WriteString(shebang)
 	}
-	config.Ctx.Data = &EnvVarsTemplate{
-		WinRMPassword: getWinRMPassword(config.PackerBuildName),
-	}

 	// generate context so you can interpolate the command
 	config.Ctx.Data = &EnvVarsTemplate{

From 03889610e526ff814b74292dd1f416674d04aa6e Mon Sep 17 00:00:00 2001
From: Megan Marsh
Date: Tue, 29 May 2018 12:18:42 -0700
Subject: [PATCH 081/138] update and format changelog

---
 CHANGELOG.md | 60 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 40 insertions(+), 20 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index abfd31afb..5c37764f7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,37 +1,57 @@
-## (UNRELEASED)
+## 1.2.4 (May 29, 2018)

 ### BUG FIXES:

-* builder/vmware-esxi: Remove floppy files from the remote server on cleanup. [GH-6206]
-* core: When using `-on-error=[abort|ask]`, output the error to the user. [GH-6252]
+* builder/amazon: Can now force the chroot builder to mount an entire block
+  device instead of a partition [GH-6194]
+* builder/azure: windows-sql-cloud is now in the default list of projects to
+  check for provided images. [GH-6210]
+* builder/chroot: A new template option, `nvme_device_path` has been added to
+  provide a workaround for users who need the amazon-chroot builder to mount
+  a NVMe volume on their instances. [GH-6295]
+* builder/hyper-v: Fix command for mounting multiple disks [GH-6267]
 * builder/hyperv: Enable IP retrieval for Server 2008 R2 hosts. [GH-6219]
 * builder/hyperv: Fix bug in MAC address specification on Hyper-V. [GH-6187]
 * builder/parallels-pvm: Add missing disk compaction step. [GH-6202]
+* builder/vmware-esxi: Remove floppy files from the remote server on cleanup.
+  [GH-6206]
+* communicator/winrm: Updated dependencies to fix a race condition [GH-6261]
+* core: When using `-on-error=[abort|ask]`, output the error to the user.
+ [GH-6252] +* provisioner/puppet: Extra-Arguments are no longer prematurely + interpolated.[GH-6215] +* provisioner/shell: Remove file stat that was causing problems uploading files + [GH-6239] ### IMPROVEMENTS: -* builder/amazon: Amazon builders other than `chroot` now support T2 unlimited instances [GH-6265] -* builder/azure: Updated Azure SDK to v15.0.0 [GH-6224] -* builder/azure: Devicelogin Support for Windows [GH-6285] -* builder/hyper-v: Hyper-V builds now connect to vnc display by default when building [GH-6243] -* provisoner/shell-local: New options have been added to create feature parity with the shell-local post-processor. This feature now works on Windows hosts. [GH-5956] -* post-processor/shell-local: New options have been added to create feature parity with the shell-local provisioner. This feature now works on Windows hosts. [GH-5956] -* builder/hyper-v: New `use_fixed_vhd_format` allows vm export in an Azure-compatible format [GH-6101] -* builder/azure: Faster deletion of Azure Resource Groups. [GH-6269] -* builder/hyperv: New config option for specifying what secure boot template to use, allowing secure boot of linux vms. [GH-5883] -* provisioner/chef: New config option allows user to skip cleanup of chef client staging directory. [GH-4300] +* builder/amazon: Amazon builders other than `chroot` now support T2 unlimited + instances [GH-6265] * builder/azure: Allow device login for US government cloud. [GH-6105] +* builder/azure: Devicelogin Support for Windows [GH-6285] +* builder/azure: Enable simultaneous builds within one resource group. + [GH-6231] +* builder/azure: Faster deletion of Azure Resource Groups. [GH-6269] +* builder/azure: Updated Azure SDK to v15.0.0 [GH-6224] +* builder/hyper-v: Hyper-V builds now connect to vnc display by default when + building [GH-6243] +* builder/hyper-v: New `use_fixed_vhd_format` allows vm export in an Azure- + compatible format [GH-6101] +* builder/hyperv: New config option for specifying what secure boot template to + use, allowing secure boot of linux vms. [GH-5883] * builder/qemu: Add support for hvf accelerator. [GH-6193] -* builder/azure: Enable simultaneous builds within one resource group. [GH-6231] * builder/scaleway: Fix SSH communicator connection issue. [GH-6238] * core: Add opt-in Packer top-level command autocomplete [GH-5454] +* post-processor/shell-local: New options have been added to create feature + parity with the shell-local provisioner. This feature now works on Windows + hosts. [GH-5956] +* provisioner/chef: New config option allows user to skip cleanup of chef + client staging directory. [GH-4300] +* provisioner/shell-local: Can now access automatically-generated WinRM + password as variable [GH-6251] +* provisoner/shell-local: New options have been added to create feature parity + with the shell-local post-processor. This feature now works on Windows + hosts. [GH-5956] ## 1.2.3 (April 25, 2018) From e3b615e2adec70aa99d19a450592e8bb5b71fad8 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 29 May 2018 12:20:22 -0700 Subject: [PATCH 082/138] update version in website and version.go --- version/version.go | 2 +- website/config.rb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/version/version.go b/version/version.go index baaac8187..efcb6380f 100644 --- a/version/version.go +++ b/version/version.go @@ -14,7 +14,7 @@ const Version = "1.2.4" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. 
Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" +const VersionPrerelease = "" func FormattedVersion() string { var versionString bytes.Buffer diff --git a/website/config.rb b/website/config.rb index 7601f6b78..1a0f09c95 100644 --- a/website/config.rb +++ b/website/config.rb @@ -2,7 +2,7 @@ set :base_url, "https://www.packer.io/" activate :hashicorp do |h| h.name = "packer" - h.version = "1.2.3" + h.version = "1.2.4" h.github_slug = "hashicorp/packer" h.website_root = "website" end From 4078cfe1622de705333994d037ac93e181a7c884 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 29 May 2018 12:26:52 -0700 Subject: [PATCH 083/138] Cut version 1.2.4 From a70a304911cc01a29e89df48a5ab596ca141075f Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 29 May 2018 12:30:58 -0700 Subject: [PATCH 084/138] update version to 1.3.0 dev --- version/version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version/version.go b/version/version.go index efcb6380f..76bf8120b 100644 --- a/version/version.go +++ b/version/version.go @@ -9,12 +9,12 @@ import ( var GitCommit string // The main version number that is being run at the moment. -const Version = "1.2.4" +const Version = "1.3.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" +const VersionPrerelease = "dev" func FormattedVersion() string { var versionString bytes.Buffer From 10f118b14cfec14d002d716ad39ad8d73dd38060 Mon Sep 17 00:00:00 2001 From: deepuashokan85 Date: Thu, 31 May 2018 11:29:57 +0530 Subject: [PATCH 085/138] Update driver_gce.go Added "rhel-sap-cloud" Projects_ID. --- builder/googlecompute/driver_gce.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/googlecompute/driver_gce.go b/builder/googlecompute/driver_gce.go index e24c4c768..971d0f95f 100644 --- a/builder/googlecompute/driver_gce.go +++ b/builder/googlecompute/driver_gce.go @@ -168,7 +168,7 @@ func (d *driverGCE) DeleteDisk(zone, name string) (<-chan error, error) { } func (d *driverGCE) GetImage(name string, fromFamily bool) (*Image, error) { - projects := []string{d.projectId, "centos-cloud", "coreos-cloud", "cos-cloud", "debian-cloud", "google-containers", "opensuse-cloud", "rhel-cloud", "suse-cloud", "ubuntu-os-cloud", "windows-cloud", "gce-nvme", "windows-sql-cloud"} + projects := []string{d.projectId, "centos-cloud", "coreos-cloud", "cos-cloud", "debian-cloud", "google-containers", "opensuse-cloud", "rhel-cloud", "suse-cloud", "ubuntu-os-cloud", "windows-cloud", "gce-nvme", "windows-sql-cloud", "rhel-sap-cloud"} var errs error for _, project := range projects { image, err := d.GetImageFromProject(project, name, fromFamily) From c0ae1b77905b74cf282d5f22ca76a4e865716670 Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Sun, 3 Jun 2018 16:37:38 +0200 Subject: [PATCH 086/138] digitalocean: artifact should include build region and fixed docs - Add the build region to the list of regions for the artefact - Corrected docs: `snapshot_name` doesn't have to be unique. 
- Updated example to use Ubuntu 16.04
---
 builder/digitalocean/step_snapshot.go             | 1 +
 website/source/docs/builders/digitalocean.html.md | 7 +++----
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/builder/digitalocean/step_snapshot.go b/builder/digitalocean/step_snapshot.go
index ec978c8bc..0ccec359d 100644
--- a/builder/digitalocean/step_snapshot.go
+++ b/builder/digitalocean/step_snapshot.go
@@ -111,6 +111,7 @@ func (s *stepSnapshot) Run(_ context.Context, state multistep.StateBag) multiste
 		ui.Error(err.Error())
 		return multistep.ActionHalt
 	}
+	snapshotRegions = append(snapshotRegions, c.Region)

 	log.Printf("Snapshot image ID: %d", imageId)
 	state.Put("snapshot_image_id", imageId)
diff --git a/website/source/docs/builders/digitalocean.html.md b/website/source/docs/builders/digitalocean.html.md
index 595c1a62f..5683491df 100644
--- a/website/source/docs/builders/digitalocean.html.md
+++ b/website/source/docs/builders/digitalocean.html.md
@@ -73,9 +73,8 @@ builder.
     for the droplet being created. This defaults to `false`, or not enabled.

 -   `snapshot_name` (string) - The name of the resulting snapshot that will
-    appear in your account. This must be unique. To help make this unique, use a
-    function like `timestamp` (see [configuration
-    templates](/docs/templates/engine.html) for more info)
+    appear in your account. Defaults to "packer-{{timestamp}}" (see
+    [configuration templates](/docs/templates/engine.html) for more info).

 -   `snapshot_regions` (array of strings) - The regions of the resulting
     snapshot that will appear in your account.
@@ -98,7 +97,7 @@ access tokens:
 {
   "type": "digitalocean",
   "api_token": "YOUR API KEY",
-  "image": "ubuntu-14-04-x64",
+  "image": "ubuntu-16-04-x64",
   "region": "nyc3",
   "size": "512mb",
   "ssh_username": "root"

From b7b25ec7b23b9581b7d2906cb90c0649c3cc2005 Mon Sep 17 00:00:00 2001
From: PascalB
Date: Mon, 4 Jun 2018 10:04:55 +0200
Subject: [PATCH 087/138] avoid name conflict

On CentOS /usr/sbin/packer (from cracklib-dicts RPM) is already present
and is required by systemd. This is a symlink to cracklib-packer.

To avoid trouble, two solutions are available:
- put packer in /usr/local/bin
- remove /usr/sbin from the PATH
---
 website/source/docs/install/index.html.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/source/docs/install/index.html.md b/website/source/docs/install/index.html.md
index aefe52650..ffda91a05 100644
--- a/website/source/docs/install/index.html.md
+++ b/website/source/docs/install/index.html.md
@@ -30,7 +30,7 @@ inside is all that is necessary to run Packer (or `packer.exe` for Windows). Any
 additional files, if any, aren't required to run Packer.

 Copy the binary to anywhere on your system. If you intend to access it from the
-command-line, make sure to place it somewhere on your `PATH`.
+command-line, make sure to place it somewhere on your `PATH` before /usr/sbin.
## Compiling from Source

From 852973a001e9d323fea491840744ca10cf4076cf Mon Sep 17 00:00:00 2001
From: Robert Neumayer
Date: Sun, 3 Jun 2018 22:29:56 +0200
Subject: [PATCH 088/138] Update puppet provisioner docs

* replace options with extra_arguments so the example works
---
 website/source/docs/provisioners/puppet-server.html.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/source/docs/provisioners/puppet-server.html.md b/website/source/docs/provisioners/puppet-server.html.md
index b8528822a..56461fc62 100644
--- a/website/source/docs/provisioners/puppet-server.html.md
+++ b/website/source/docs/provisioners/puppet-server.html.md
@@ -28,7 +28,7 @@ accessible from your network.
 ``` json
 {
   "type": "puppet-server",
-  "options": "--test --pluginsync",
+  "extra_arguments": "--test --pluginsync",
   "facter": {
     "server_role": "webserver"
   }

From b23d6c9589e4917617facae9ebcaafe15186a102 Mon Sep 17 00:00:00 2001
From: Hariharan Jayaraman
Date: Wed, 6 Jun 2018 15:25:19 -0700
Subject: [PATCH 089/138] fixing infinite loop issue deleting existing
 resources if we have an empty target resource

---
 builder/azure/arm/builder_acc_test.go       | 42 +++++++++++++++++++
 .../azure/arm/step_delete_resource_group.go |  1 +
 builder/azure/arm/step_deploy_template.go   |  1 +
 3 files changed, 44 insertions(+)

diff --git a/builder/azure/arm/builder_acc_test.go b/builder/azure/arm/builder_acc_test.go
index 0b1f85656..8f0c605f9 100644
--- a/builder/azure/arm/builder_acc_test.go
+++ b/builder/azure/arm/builder_acc_test.go
@@ -38,6 +38,14 @@ func TestBuilderAcc_ManagedDisk_Windows(t *testing.T) {
 	})
 }

+func TestBuilderAcc_ManagedDisk_Windows_Build_Resource_Group(t *testing.T) {
+	builderT.Test(t, builderT.TestCase{
+		PreCheck: func() { testAccPreCheck(t) },
+		Builder:  &Builder{},
+		Template: testBuilderAccManagedDiskWindowsBuildResourceGroup,
+	})
+}
+
 func TestBuilderAcc_ManagedDisk_Windows_DeviceLogin(t *testing.T) {
 	if os.Getenv(DeviceLoginAcceptanceTest) == "" {
 		t.Skip(fmt.Sprintf(
@@ -126,6 +134,40 @@ const testBuilderAccManagedDiskWindows = `
 	}]
 }
 `
+const testBuilderAccManagedDiskWindowsBuildResourceGroup = `
+{
+	"variables": {
+	  "client_id": "{{env ` + "`ARM_CLIENT_ID`" + `}}",
+	  "client_secret": "{{env ` + "`ARM_CLIENT_SECRET`" + `}}",
+	  "subscription_id": "{{env ` + "`ARM_SUBSCRIPTION_ID`" + `}}"
+	},
+	"builders": [{
+	  "type": "test",
+
+	  "client_id": "{{user ` + "`client_id`" + `}}",
+	  "client_secret": "{{user ` + "`client_secret`" + `}}",
+	  "subscription_id": "{{user ` + "`subscription_id`" + `}}",
+
+	  "build_resource_group_name" : "packer-acceptance-test",
+	  "managed_image_resource_group_name": "packer-acceptance-test",
+	  "managed_image_name": "testBuilderAccManagedDiskWindows-{{timestamp}}",
+
+	  "os_type": "Windows",
+	  "image_publisher": "MicrosoftWindowsServer",
+	  "image_offer": "WindowsServer",
+	  "image_sku": "2012-R2-Datacenter",
+
+	  "communicator": "winrm",
+	  "winrm_use_ssl": "true",
+	  "winrm_insecure": "true",
+	  "winrm_timeout": "3m",
+	  "winrm_username": "packer",
+	  "async_resourcegroup_delete": "true",
+
+	  "vm_size": "Standard_DS2_v2"
+	}]
+}
+`

 const testBuilderAccManagedDiskWindowsDeviceLogin = `
 {
diff --git a/builder/azure/arm/step_delete_resource_group.go b/builder/azure/arm/step_delete_resource_group.go
index 56d2f15c5..c5b92fcf1 100644
--- a/builder/azure/arm/step_delete_resource_group.go
+++ b/builder/azure/arm/step_delete_resource_group.go
@@ -82,6 +82,7 @@ func (s *StepDeleteResourceGroup) deleteDeploymentResources(ctx context.Context,
 		deploymentOperation := deploymentOperations.Value()
 		// Sometimes an empty operation is added to the list by Azure
 		if deploymentOperation.Properties.TargetResource == nil {
+			deploymentOperations.Next()
 			continue
 		}
diff --git a/builder/azure/arm/step_deploy_template.go b/builder/azure/arm/step_deploy_template.go
index e089ad4bf..83590afd7 100644
--- a/builder/azure/arm/step_deploy_template.go
+++ b/builder/azure/arm/step_deploy_template.go
@@ -185,6 +185,7 @@ func (s *StepDeployTemplate) Cleanup(state multistep.StateBag) {
 		deploymentOperation := deploymentOperations.Value()
 		// Sometimes an empty operation is added to the list by Azure
 		if deploymentOperation.Properties.TargetResource == nil {
+			deploymentOperations.Next()
 			continue
 		}
 		ui.Say(fmt.Sprintf(" -> %s : '%s'",

From 93f5bbbf45d6107c96e8950be1a52ae03058942e Mon Sep 17 00:00:00 2001
From: Marc Carmier
Date: Sun, 12 Mar 2017 23:46:35 +0100
Subject: [PATCH 090/138] Add waiting for key press between provisioners

---
 packer/core.go        |  1 +
 packer/provisioner.go | 88 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 89 insertions(+)

diff --git a/packer/core.go b/packer/core.go
index c8ac2cfb7..e6d82aaed 100644
--- a/packer/core.go
+++ b/packer/core.go
@@ -152,6 +152,7 @@ func (c *Core) Build(n string) (Build, error) {
 				Provisioner: provisioner,
 			}
 		}
+		if config.PackerDebug

 	provisioners = append(provisioners, coreBuildProvisioner{
 		pType: rawP.Type,
diff --git a/packer/provisioner.go b/packer/provisioner.go
index 93411808d..c9af06f54 100644
--- a/packer/provisioner.go
+++ b/packer/provisioner.go
@@ -2,6 +2,7 @@ package packer

 import (
 	"fmt"
+	"log"
 	"sync"
 	"time"
 )
@@ -168,3 +169,90 @@ func (p *PausedProvisioner) provision(result chan<- error, ui Ui, comm Communicator) {
 	result <- p.Provisioner.Provision(ui, comm)
 }
+
+// DebuggedProvisioner is a Provisioner implementation that wait key press before
+// the provisioner is actually run.
+type DebuggedProvisioner struct {
+	Provisioner Provisioner
+
+	cancelCh chan struct{}
+	doneCh   chan struct{}
+	lock     sync.Mutex
+}
+
+func (p *DebuggedProvisioner) Prepare(raws ...interface{}) error {
+	return p.Provisioner.Prepare(raws...)
+}
+
+func (p *DebuggedProvisioner) Provision(ui Ui, comm Communicator) error {
+	p.lock.Lock()
+	cancelCh := make(chan struct{})
+	p.cancelCh = cancelCh
+
+	// Setup the done channel, which is triggered when we're done
+	doneCh := make(chan struct{})
+	defer close(doneCh)
+	p.doneCh = doneCh
+	p.lock.Unlock()
+
+	defer func() {
+		p.lock.Lock()
+		defer p.lock.Unlock()
+		if p.cancelCh == cancelCh {
+			p.cancelCh = nil
+		}
+		if p.doneCh == doneCh {
+			p.doneCh = nil
+		}
+	}()
+
+	// Use a select to determine if we get cancelled during the wait
+	message := "Pausing before the next provisioner. Press enter to continue."
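	// Annotation (not part of the original patch): ui.Ask below runs in its
	// own goroutine so the select that follows can race the user's keypress
	// against cancelCh; a blocking Ask on this goroutine would make the
	// debug pause impossible to cancel.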
+
+	result := make(chan string, 1)
+	go func() {
+		line, err := ui.Ask(message)
+		if err != nil {
+			log.Printf("Error asking for input: %s", err)
+		}
+
+		result <- line
+	}()
+
+	select {
+	case <-result:
+	case <-cancelCh:
+		return nil
+	}
+
+	provDoneCh := make(chan error, 1)
+	go p.provision(provDoneCh, ui, comm)
+
+	select {
+	case err := <-provDoneCh:
+		return err
+	case <-cancelCh:
+		p.Provisioner.Cancel()
+		return <-provDoneCh
+	}
+}
+
+func (p *DebuggedProvisioner) Cancel() {
+	var doneCh chan struct{}
+
+	p.lock.Lock()
+	if p.cancelCh != nil {
+		close(p.cancelCh)
+		p.cancelCh = nil
+	}
+	if p.doneCh != nil {
+		doneCh = p.doneCh
+	}
+	p.lock.Unlock()
+
+	<-doneCh
+}
+
+func (p *DebuggedProvisioner) provision(result chan<- error, ui Ui, comm Communicator) {
+	result <- p.Provisioner.Provision(ui, comm)
+}

From a5f802ec01e75dcf49db495794a2c8ac71d4f6a7 Mon Sep 17 00:00:00 2001
From: Marc Carmier
Date: Thu, 16 Mar 2017 23:03:30 +0100
Subject: [PATCH 091/138] Add unit tests for DebuggedProvisioner

---
 packer/provisioner_test.go | 64 ++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)

diff --git a/packer/provisioner_test.go b/packer/provisioner_test.go
index 22d34806f..4d370ef39 100644
--- a/packer/provisioner_test.go
+++ b/packer/provisioner_test.go
@@ -197,3 +197,67 @@ func TestPausedProvisionerCancel(t *testing.T) {
 		t.Fatal("cancel should be called")
 	}
 }
+
+func TestDebuggedProvisioner_impl(t *testing.T) {
+	var _ Provisioner = new(DebuggedProvisioner)
+}
+
+func TestDebuggedProvisionerPrepare(t *testing.T) {
+	mock := new(MockProvisioner)
+	prov := &DebuggedProvisioner{
+		Provisioner: mock,
+	}
+
+	prov.Prepare(42)
+	if !mock.PrepCalled {
+		t.Fatal("prepare should be called")
+	}
+	if mock.PrepConfigs[0] != 42 {
+		t.Fatal("should have proper configs")
+	}
+}
+
+func TestDebuggedProvisionerProvision(t *testing.T) {
+	mock := new(MockProvisioner)
+	prov := &DebuggedProvisioner{
+		Provisioner: mock,
+	}
+
+	ui := testUi()
+	comm := new(MockCommunicator)
+	writeReader(ui, "\n")
+	prov.Provision(ui, comm)
+	if !mock.ProvCalled {
+		t.Fatal("prov should be called")
+	}
+	if mock.ProvUi != ui {
+		t.Fatal("should have proper ui")
+	}
+	if mock.ProvCommunicator != comm {
+		t.Fatal("should have proper comm")
+	}
+}
+
+func TestDebuggedProvisionerCancel(t *testing.T) {
+	mock := new(MockProvisioner)
+	prov := &DebuggedProvisioner{
+		Provisioner: mock,
+	}
+
+	provCh := make(chan struct{})
+	mock.ProvFunc = func() error {
+		close(provCh)
+		time.Sleep(10 * time.Millisecond)
+		return nil
+	}
+
+	// Start provisioning and wait for it to start
+	go prov.Provision(testUi(), new(MockCommunicator))
+	<-provCh
+
+	// Cancel it
+	prov.Cancel()
+	if !mock.CancelCalled {
+		t.Fatal("cancel should be called")
+	}
+}

From 91aa5f8bbbdbe0ac75ffa012e572d9ca19ee4416 Mon Sep 17 00:00:00 2001
From: Megan Marsh
Date: Thu, 7 Jun 2018 15:35:36 -0700
Subject: [PATCH 092/138] resolve conflicts; fix to work with HookedProvisioner
 which has been added since PR was made

---
 packer/build.go       | 16 ++++++++++++----
 packer/core.go        |  1 -
 packer/provisioner.go |  4 ++--
 3 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/packer/build.go b/packer/build.go
index 1187e49d7..bb441b522 100644
--- a/packer/build.go
+++ b/packer/build.go
@@ -200,10 +200,18 @@ func (b *coreBuild) Run(originalUi Ui, cache Cache) ([]Artifact, error) {
 		if len(p.config) > 0 {
 			pConfig = p.config[0]
 		}
-		hookedProvisioners[i] = &HookedProvisioner{
-			p.provisioner,
-			pConfig,
-			p.pType,
+		if b.debug {
+			hookedProvisioners[i] = &HookedProvisioner{
+				&DebuggedProvisioner{Provisioner: p.provisioner},
+				pConfig,
+				p.pType,
+			}
+		} else {
+			hookedProvisioners[i] = &HookedProvisioner{
+				p.provisioner,
+				pConfig,
+				p.pType,
+			}
 		}
 	}
diff --git a/packer/core.go b/packer/core.go
index e6d82aaed..c8ac2cfb7 100644
--- a/packer/core.go
+++ b/packer/core.go
@@ -152,7 +152,6 @@ func (c *Core) Build(n string) (Build, error) {
 				Provisioner: provisioner,
 			}
 		}
-		if config.PackerDebug

 	provisioners = append(provisioners, coreBuildProvisioner{
 		pType: rawP.Type,
diff --git a/packer/provisioner.go b/packer/provisioner.go
index c9af06f54..a565c30e2 100644
--- a/packer/provisioner.go
+++ b/packer/provisioner.go
@@ -170,8 +170,8 @@ func (p *PausedProvisioner) provision(result chan<- error, ui Ui, comm Communica
 	result <- p.Provisioner.Provision(ui, comm)
 }

-// DebuggedProvisioner is a Provisioner implementation that wait key press before
-// the provisioner is actually run.
+// DebuggedProvisioner is a Provisioner implementation that waits until a key
+// press before the provisioner is actually run.
 type DebuggedProvisioner struct {
 	Provisioner Provisioner

From 1bc471073f1ad96faa2ee27d68977a164ab3f541 Mon Sep 17 00:00:00 2001
From: stack72
Date: Fri, 8 Jun 2018 03:04:22 +0300
Subject: [PATCH 093/138] docs/aws: Update documentation for AWS Spot Instances

The docs didn't specify that `ec2:DescribeSpotInstanceRequests` was
required. This causes an error as follows:

```
Error waiting for spot request (sir-yg6866gj) to become ready: UnauthorizedOperation: You are not authorized to perform this operation.
```

This is because the permission to describe instance state is not
available.
---
 website/source/docs/builders/amazon.html.md | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/website/source/docs/builders/amazon.html.md b/website/source/docs/builders/amazon.html.md
index 9bff6561f..2072d5312 100644
--- a/website/source/docs/builders/amazon.html.md
+++ b/website/source/docs/builders/amazon.html.md
@@ -168,10 +168,15 @@ for Packer to work:
       "Resource" : "*"
   }]
 }
-```
+```

-Note that if you'd like to create a spot instance, you must also add
-`ec2:RequestSpotInstances` and `ec2:CancelSpotInstanceRequests`
+Note that if you'd like to create a spot instance, you must also add:
+
+``` json
+ec2:RequestSpotInstances,
+ec2:CancelSpotInstanceRequests,
+ec2:DescribeSpotInstanceRequests
+```

 ## Troubleshooting

From 57b8d58d7e124d8d03a83f822d0e8064be1e0787 Mon Sep 17 00:00:00 2001
From: Rickard von Essen
Date: Tue, 5 Jun 2018 10:40:26 +0200
Subject: [PATCH 094/138] Doc fixes

---
 website/source/docs/builders/openstack.html.md | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/website/source/docs/builders/openstack.html.md b/website/source/docs/builders/openstack.html.md
index 37cc0de71..cbdea1f2a 100644
--- a/website/source/docs/builders/openstack.html.md
+++ b/website/source/docs/builders/openstack.html.md
@@ -33,7 +33,8 @@ builder with OpenStack Liberty (Oct 2015) or later you need to have OpenSSL
 installed *if you are using temporary key pairs*, i.e. don't use
 [`ssh_keypair_name`](openstack.html#ssh_keypair_name) nor
 [`ssh_password`](/docs/templates/communicator.html#ssh_password). All major
-OS'es have OpenSSL installed by default except Windows.
+OS'es have OpenSSL installed by default except Windows. This has been
+resolved in OpenStack Ocata (Feb 2017).

 ## Configuration Reference

@@ -177,8 +178,8 @@ builder.

 -   `tenant_id` or `tenant_name` (string) - The tenant ID or name to boot the
     instance into.
Some OpenStack installations require this. If not specified, - Packer will use the environment variable `OS_TENANT_NAME`, if set. Tenant - is also called Project in later versions of OpenStack. + Packer will use the environment variable `OS_TENANT_NAME` or `OS_TENANT_ID`, + if set. Tenant is also called Project in later versions of OpenStack. - `use_floating_ip` (boolean) - *Deprecated* use `floating_ip` or `floating_ip_pool` instead. From dc78b30467a085bea6a543bc4f5e2f70076ca5e0 Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Tue, 5 Jun 2018 10:17:19 +0200 Subject: [PATCH 095/138] Updated gophercloud to 7112fcd50da --- .../github.com/gophercloud/gophercloud/FAQ.md | 148 ---------- .../gophercloud/gophercloud/MIGRATING.md | 32 --- .../gophercloud/gophercloud/README.md | 18 +- .../gophercloud/gophercloud/STYLEGUIDE.md | 74 ----- .../gophercloud/gophercloud/auth_options.go | 112 +++++--- .../github.com/gophercloud/gophercloud/doc.go | 34 ++- .../gophercloud/endpoint_search.go | 2 +- .../gophercloud/gophercloud/errors.go | 63 +++- .../gophercloud/openstack/auth_env.go | 47 ++- .../gophercloud/openstack/client.go | 201 ++++++++++--- .../openstack/common/extensions/doc.go | 15 - .../openstack/common/extensions/errors.go | 1 - .../openstack/common/extensions/requests.go | 20 -- .../openstack/common/extensions/results.go | 53 ---- .../openstack/common/extensions/urls.go | 13 - .../compute/v2/extensions/delegate.go | 23 -- .../openstack/compute/v2/extensions/doc.go | 3 - .../compute/v2/extensions/floatingips/doc.go | 69 ++++- .../v2/extensions/floatingips/requests.go | 38 +-- .../v2/extensions/floatingips/results.go | 36 ++- .../compute/v2/extensions/keypairs/doc.go | 72 ++++- .../v2/extensions/keypairs/requests.go | 20 +- .../compute/v2/extensions/keypairs/results.go | 39 +-- .../compute/v2/extensions/startstop/doc.go | 14 + .../v2/extensions/startstop/requests.go | 4 +- .../v2/extensions/startstop/results.go | 15 + .../openstack/compute/v2/flavors/doc.go | 142 +++++++++- .../openstack/compute/v2/flavors/requests.go | 268 +++++++++++++++--- .../openstack/compute/v2/flavors/results.go | 169 +++++++++-- .../openstack/compute/v2/flavors/urls.go | 32 +++ .../openstack/compute/v2/images/doc.go | 37 ++- .../openstack/compute/v2/images/requests.go | 31 +- .../openstack/compute/v2/images/results.go | 40 ++- .../openstack/compute/v2/servers/doc.go | 119 +++++++- .../openstack/compute/v2/servers/requests.go | 234 +++++++++------ .../openstack/compute/v2/servers/results.go | 156 +++++++--- .../openstack/compute/v2/servers/util.go | 5 +- .../gophercloud/gophercloud/openstack/doc.go | 14 + .../openstack/endpoint_location.go | 34 ++- .../openstack/identity/v2/tenants/doc.go | 70 ++++- .../openstack/identity/v2/tenants/requests.go | 19 +- .../openstack/identity/v2/tenants/results.go | 17 +- .../openstack/identity/v2/tokens/doc.go | 49 +++- .../openstack/identity/v2/tokens/requests.go | 28 +- .../openstack/identity/v2/tokens/results.go | 67 +++-- .../openstack/identity/v3/tokens/doc.go | 112 +++++++- .../openstack/identity/v3/tokens/requests.go | 95 ++----- .../openstack/identity/v3/tokens/results.go | 55 ++-- .../openstack/imageservice/v2/images/doc.go | 60 ++++ .../imageservice/v2/images/requests.go | 141 +++++++-- .../imageservice/v2/images/results.go | 59 ++-- .../openstack/imageservice/v2/images/types.go | 45 ++- .../openstack/imageservice/v2/members/doc.go | 58 ++++ .../imageservice/v2/members/requests.go | 44 +-- .../imageservice/v2/members/results.go | 21 +- .../openstack/utils/base_endpoint.go | 
29 ++ .../openstack/utils/choose_version.go | 31 +- .../gophercloud/pagination/pager.go | 1 - .../gophercloud/gophercloud/params.go | 56 +++- .../gophercloud/provider_client.go | 108 ++++++- .../gophercloud/gophercloud/results.go | 73 ++++- .../gophercloud/gophercloud/service_client.go | 28 ++ vendor/vendor.json | 106 +++---- 63 files changed, 2657 insertions(+), 1162 deletions(-) delete mode 100644 vendor/github.com/gophercloud/gophercloud/FAQ.md delete mode 100644 vendor/github.com/gophercloud/gophercloud/MIGRATING.md delete mode 100644 vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/doc.go delete mode 100755 vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/errors.go delete mode 100755 vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/requests.go delete mode 100755 vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/results.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/urls.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/delegate.go delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go diff --git a/vendor/github.com/gophercloud/gophercloud/FAQ.md b/vendor/github.com/gophercloud/gophercloud/FAQ.md deleted file mode 100644 index 88a366a28..000000000 --- a/vendor/github.com/gophercloud/gophercloud/FAQ.md +++ /dev/null @@ -1,148 +0,0 @@ -# Tips - -## Implementing default logging and re-authentication attempts - -You can implement custom logging and/or limit re-auth attempts by creating a custom HTTP client -like the following and setting it as the provider client's HTTP Client (via the -`gophercloud.ProviderClient.HTTPClient` field): - -```go -//... - -// LogRoundTripper satisfies the http.RoundTripper interface and is used to -// customize the default Gophercloud RoundTripper to allow for logging. -type LogRoundTripper struct { - rt http.RoundTripper - numReauthAttempts int -} - -// newHTTPClient return a custom HTTP client that allows for logging relevant -// information before and after the HTTP request. -func newHTTPClient() http.Client { - return http.Client{ - Transport: &LogRoundTripper{ - rt: http.DefaultTransport, - }, - } -} - -// RoundTrip performs a round-trip HTTP request and logs relevant information about it. 
-func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { - glog.Infof("Request URL: %s\n", request.URL) - - response, err := lrt.rt.RoundTrip(request) - if response == nil { - return nil, err - } - - if response.StatusCode == http.StatusUnauthorized { - if lrt.numReauthAttempts == 3 { - return response, fmt.Errorf("Tried to re-authenticate 3 times with no success.") - } - lrt.numReauthAttempts++ - } - - glog.Debugf("Response Status: %s\n", response.Status) - - return response, nil -} - -endpoint := "https://127.0.0.1/auth" -pc := openstack.NewClient(endpoint) -pc.HTTPClient = newHTTPClient() - -//... -``` - - -## Implementing custom objects - -OpenStack request/response objects may differ among variable names or types. - -### Custom request objects - -To pass custom options to a request, implement the desired `OptsBuilder` interface. For -example, to pass in - -```go -type MyCreateServerOpts struct { - Name string - Size int -} -``` - -to `servers.Create`, simply implement the `servers.CreateOptsBuilder` interface: - -```go -func (o MyCreateServeropts) ToServerCreateMap() (map[string]interface{}, error) { - return map[string]interface{}{ - "name": o.Name, - "size": o.Size, - }, nil -} -``` - -create an instance of your custom options object, and pass it to `servers.Create`: - -```go -// ... -myOpts := MyCreateServerOpts{ - Name: "s1", - Size: "100", -} -server, err := servers.Create(computeClient, myOpts).Extract() -// ... -``` - -### Custom response objects - -Some OpenStack services have extensions. Extensions that are supported in Gophercloud can be -combined to create a custom object: - -```go -// ... -type MyVolume struct { - volumes.Volume - tenantattr.VolumeExt -} - -var v struct { - MyVolume `json:"volume"` -} - -err := volumes.Get(client, volID).ExtractInto(&v) -// ... -``` - -## Overriding default `UnmarshalJSON` method - -For some response objects, a field may be a custom type or may be allowed to take on -different types. In these cases, overriding the default `UnmarshalJSON` method may be -necessary. To do this, declare the JSON `struct` field tag as "-" and create an `UnmarshalJSON` -method on the type: - -```go -// ... -type MyVolume struct { - ID string `json: "id"` - TimeCreated time.Time `json: "-"` -} - -func (r *MyVolume) UnmarshalJSON(b []byte) error { - type tmp MyVolume - var s struct { - tmp - TimeCreated gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Volume(s.tmp) - - r.TimeCreated = time.Time(s.CreatedAt) - - return err -} -// ... -``` diff --git a/vendor/github.com/gophercloud/gophercloud/MIGRATING.md b/vendor/github.com/gophercloud/gophercloud/MIGRATING.md deleted file mode 100644 index aa383c9cc..000000000 --- a/vendor/github.com/gophercloud/gophercloud/MIGRATING.md +++ /dev/null @@ -1,32 +0,0 @@ -# Compute - -## Floating IPs - -* `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingip` is now `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips` -* `floatingips.Associate` and `floatingips.Disassociate` have been removed. -* `floatingips.DisassociateOpts` is now required to disassociate a Floating IP. - -## Security Groups - -* `secgroups.AddServerToGroup` is now `secgroups.AddServer`. -* `secgroups.RemoveServerFromGroup` is now `secgroups.RemoveServer`. 
- -## Servers - -* `servers.Reboot` now requires a `servers.RebootOpts` struct: - - ```golang - rebootOpts := &servers.RebootOpts{ - Type: servers.SoftReboot, - } - res := servers.Reboot(client, server.ID, rebootOpts) - ``` - -# Identity - -## V3 - -### Tokens - -* `Token.ExpiresAt` is now of type `gophercloud.JSONRFC3339Milli` instead of - `time.Time` diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md index 60ca479de..8c5bfce79 100644 --- a/vendor/github.com/gophercloud/gophercloud/README.md +++ b/vendor/github.com/gophercloud/gophercloud/README.md @@ -127,7 +127,7 @@ new resource in the `server` variable (a ## Advanced Usage -Have a look at the [FAQ](./FAQ.md) for some tips on customizing the way Gophercloud works. +Have a look at the [FAQ](./docs/FAQ.md) for some tips on customizing the way Gophercloud works. ## Backwards-Compatibility Guarantees @@ -141,3 +141,19 @@ See the [contributing guide](./.github/CONTRIBUTING.md). If you're struggling with something or have spotted a potential bug, feel free to submit an issue to our [bug tracker](/issues). + +## Thank You + +We'd like to extend special thanks and appreciation to the following: + +### OpenLab + + + +OpenLab is providing a full CI environment to test each PR and merge for a variety of OpenStack releases. + +### VEXXHOST + + + +VEXXHOST is providing their services to assist with the development and testing of Gophercloud. diff --git a/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md b/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md deleted file mode 100644 index e7531a83d..000000000 --- a/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md +++ /dev/null @@ -1,74 +0,0 @@ - -## On Pull Requests - -- Before you start a PR there needs to be a Github issue and a discussion about it - on that issue with a core contributor, even if it's just a 'SGTM'. - -- A PR's description must reference the issue it closes with a `For ` (e.g. For #293). - -- A PR's description must contain link(s) to the line(s) in the OpenStack - source code (on Github) that prove(s) the PR code to be valid. Links to documentation - are not good enough. The link(s) should be to a non-`master` branch. For example, - a pull request implementing the creation of a Neutron v2 subnet might put the - following link in the description: - - https://github.com/openstack/neutron/blob/stable/mitaka/neutron/api/v2/attributes.py#L749 - - From that link, a reviewer (or user) can verify the fields in the request/response - objects in the PR. - -- A PR that is in-progress should have `[wip]` in front of the PR's title. When - ready for review, remove the `[wip]` and ping a core contributor with an `@`. - -- Forcing PRs to be small can have the effect of users submitting PRs in a hierarchical chain, with - one depending on the next. If a PR depends on another one, it should have a [Pending #PRNUM] - prefix in the PR title. In addition, it will be the PR submitter's responsibility to remove the - [Pending #PRNUM] tag once the PR has been updated with the merged, dependent PR. That will - let reviewers know it is ready to review. - -- A PR should be small. Even if you intend on implementing an entire - service, a PR should only be one route of that service - (e.g. create server or get server, but not both). - -- Unless explicitly asked, do not squash commits in the middle of a review; only - append. It makes it difficult for the reviewer to see what's changed from one - review to the next. 
- -## On Code - -- In re design: follow as closely as is reasonable the code already in the library. - Most operations (e.g. create, delete) admit the same design. - -- Unit tests and acceptance (integration) tests must be written to cover each PR. - Tests for operations with several options (e.g. list, create) should include all - the options in the tests. This will allow users to verify an operation on their - own infrastructure and see an example of usage. - -- If in doubt, ask in-line on the PR. - -### File Structure - -- The following should be used in most cases: - - - `requests.go`: contains all the functions that make HTTP requests and the - types associated with the HTTP request (parameters for URL, body, etc) - - `results.go`: contains all the response objects and their methods - - `urls.go`: contains the endpoints to which the requests are made - -### Naming - -- For methods on a type in `results.go`, the receiver should be named `r` and the - variable into which it will be unmarshalled `s`. - -- Functions in `requests.go`, with the exception of functions that return a - `pagination.Pager`, should be named returns of the name `r`. - -- Functions in `requests.go` that accept request bodies should accept as their - last parameter an `interface` named `OptsBuilder` (eg `CreateOptsBuilder`). - This `interface` should have at the least a method named `ToMap` - (eg `ToPortCreateMap`). - -- Functions in `requests.go` that accept query strings should accept as their - last parameter an `interface` named `OptsBuilder` (eg `ListOptsBuilder`). - This `interface` should have at the least a method named `ToQuery` - (eg `ToServerListQuery`). diff --git a/vendor/github.com/gophercloud/gophercloud/auth_options.go b/vendor/github.com/gophercloud/gophercloud/auth_options.go index 19c08341a..5e693585c 100644 --- a/vendor/github.com/gophercloud/gophercloud/auth_options.go +++ b/vendor/github.com/gophercloud/gophercloud/auth_options.go @@ -9,12 +9,32 @@ ProviderClient representing an active session on that provider. Its fields are the union of those recognized by each identity implementation and provider. + +An example of manually providing authentication information: + + opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + Username: "{username}", + Password: "{password}", + TenantID: "{tenant_id}", + } + + provider, err := openstack.AuthenticatedClient(opts) + +An example of using AuthOptionsFromEnv(), where the environment variables can +be read from a file, such as a standard openrc file: + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) */ type AuthOptions struct { // IdentityEndpoint specifies the HTTP endpoint that is required to work with // the Identity API of the appropriate version. While it's ultimately needed by // all of the identity services, it will often be populated by a provider-level // function. + // + // The IdentityEndpoint is typically referred to as the "auth_url" or + // "OS_AUTH_URL" in the information provided by the cloud operator. IdentityEndpoint string `json:"-"` // Username is required if using Identity V2 API. Consult with your provider's @@ -39,7 +59,7 @@ type AuthOptions struct { // If DomainID or DomainName are provided, they will also apply to TenantName. // It is not currently possible to authenticate with Username and a Domain // and scope to a Project in a different Domain by using TenantName. 
To - // accomplish that, the ProjectID will need to be provided to the TenantID + // accomplish that, the ProjectID will need to be provided as the TenantID // option. TenantID string `json:"tenantId,omitempty"` TenantName string `json:"tenantName,omitempty"` @@ -50,15 +70,28 @@ type AuthOptions struct { // false, it will not cache these settings, but re-authentication will not be // possible. This setting defaults to false. // - // NOTE: The reauth function will try to re-authenticate endlessly if left unchecked. - // The way to limit the number of attempts is to provide a custom HTTP client to the provider client - // and provide a transport that implements the RoundTripper interface and stores the number of failed retries. - // For an example of this, see here: https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311 + // NOTE: The reauth function will try to re-authenticate endlessly if left + // unchecked. The way to limit the number of attempts is to provide a custom + // HTTP client to the provider client and provide a transport that implements + // the RoundTripper interface and stores the number of failed retries. For an + // example of this, see here: + // https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311 AllowReauth bool `json:"-"` // TokenID allows users to authenticate (possibly as another user) with an // authentication token ID. TokenID string `json:"-"` + + // Scope determines the scoping of the authentication request. + Scope *AuthScope `json:"-"` +} + +// AuthScope allows a created token to be limited to a specific domain or project. +type AuthScope struct { + ProjectID string + ProjectName string + DomainID string + DomainName string } // ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder @@ -241,82 +274,85 @@ func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[s } func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - - var scope struct { - ProjectID string - ProjectName string - DomainID string - DomainName string - } - - if opts.TenantID != "" { - scope.ProjectID = opts.TenantID - } else { - if opts.TenantName != "" { - scope.ProjectName = opts.TenantName - scope.DomainID = opts.DomainID - scope.DomainName = opts.DomainName + // For backwards compatibility. + // If AuthOptions.Scope was not set, try to determine it. + // This works well for common scenarios. + if opts.Scope == nil { + opts.Scope = new(AuthScope) + if opts.TenantID != "" { + opts.Scope.ProjectID = opts.TenantID + } else { + if opts.TenantName != "" { + opts.Scope.ProjectName = opts.TenantName + opts.Scope.DomainID = opts.DomainID + opts.Scope.DomainName = opts.DomainName + } } } - if scope.ProjectName != "" { + if opts.Scope.ProjectName != "" { // ProjectName provided: either DomainID or DomainName must also be supplied. // ProjectID may not be supplied. 
- if scope.DomainID == "" && scope.DomainName == "" { + if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { return nil, ErrScopeDomainIDOrDomainName{} } - if scope.ProjectID != "" { + if opts.Scope.ProjectID != "" { return nil, ErrScopeProjectIDOrProjectName{} } - if scope.DomainID != "" { + if opts.Scope.DomainID != "" { // ProjectName + DomainID return map[string]interface{}{ "project": map[string]interface{}{ - "name": &scope.ProjectName, - "domain": map[string]interface{}{"id": &scope.DomainID}, + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, }, }, nil } - if scope.DomainName != "" { + if opts.Scope.DomainName != "" { // ProjectName + DomainName return map[string]interface{}{ "project": map[string]interface{}{ - "name": &scope.ProjectName, - "domain": map[string]interface{}{"name": &scope.DomainName}, + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, }, }, nil } - } else if scope.ProjectID != "" { + } else if opts.Scope.ProjectID != "" { // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. - if scope.DomainID != "" { + if opts.Scope.DomainID != "" { return nil, ErrScopeProjectIDAlone{} } - if scope.DomainName != "" { + if opts.Scope.DomainName != "" { return nil, ErrScopeProjectIDAlone{} } // ProjectID return map[string]interface{}{ "project": map[string]interface{}{ - "id": &scope.ProjectID, + "id": &opts.Scope.ProjectID, }, }, nil - } else if scope.DomainID != "" { + } else if opts.Scope.DomainID != "" { // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. - if scope.DomainName != "" { + if opts.Scope.DomainName != "" { return nil, ErrScopeDomainIDOrDomainName{} } // DomainID return map[string]interface{}{ "domain": map[string]interface{}{ - "id": &scope.DomainID, + "id": &opts.Scope.DomainID, + }, + }, nil + } else if opts.Scope.DomainName != "" { + // DomainName + return map[string]interface{}{ + "domain": map[string]interface{}{ + "name": &opts.Scope.DomainName, }, }, nil - } else if scope.DomainName != "" { - return nil, ErrScopeDomainName{} } return nil, nil diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go index b559516f9..30067aa35 100644 --- a/vendor/github.com/gophercloud/gophercloud/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/doc.go @@ -3,11 +3,17 @@ Package gophercloud provides a multi-vendor interface to OpenStack-compatible clouds. The library has a three-level hierarchy: providers, services, and resources. -Provider structs represent the service providers that offer and manage a -collection of services. The IdentityEndpoint is typically refered to as -"auth_url" in information provided by the cloud operator. Additionally, -the cloud may refer to TenantID or TenantName as project_id and project_name. -These are defined like so: +Authenticating with Providers + +Provider structs represent the cloud providers that offer and manage a +collection of services. You will generally want to create one Provider +client per OpenStack cloud. + +Use your OpenStack credentials to create a Provider client. The +IdentityEndpoint is typically refered to as "auth_url" or "OS_AUTH_URL" in +information provided by the cloud operator. Additionally, the cloud may refer to +TenantID or TenantName as project_id and project_name. 
Credentials are +specified like so: opts := gophercloud.AuthOptions{ IdentityEndpoint: "https://openstack.example.com:5000/v2.0", @@ -18,6 +24,16 @@ These are defined like so: provider, err := openstack.AuthenticatedClient(opts) +You may also use the openstack.AuthOptionsFromEnv() helper function. This +function reads in standard environment variables frequently found in an +OpenStack `openrc` file. Again note that Gophercloud currently uses "tenant" +instead of "project". + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) + +Service Clients + Service structs are specific to a provider and handle all of the logic and operations for a particular OpenStack service. Examples of services include: Compute, Object Storage, Block Storage. In order to define one, you need to @@ -27,6 +43,8 @@ pass in the parent provider, like so: client := openstack.NewComputeV2(provider, opts) +Resources + Resource structs are the domain models that services make use of in order to work with and represent the state of API resources: @@ -62,6 +80,12 @@ of results: return true, nil }) +If you want to obtain the entire collection of pages without doing any +intermediary processing on each page, you can use the AllPages method: + + allPages, err := servers.List(client, nil).AllPages() + allServers, err := servers.ExtractServers(allPages) + This top-level package contains utility functions and data types that are used throughout the provider and service packages. Of particular note for end users are the AuthOptions and EndpointOpts structs. diff --git a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go index 9887947f6..2fbc3c97f 100644 --- a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go +++ b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go @@ -27,7 +27,7 @@ const ( // unambiguously identify one, and only one, endpoint within the catalog. // // Usually, these are passed to service client factory functions in a provider -// package, like "rackspace.NewComputeV2()". +// package, like "openstack.NewComputeV2()". type EndpointOpts struct { // Type [required] is the service type for the client (e.g., "compute", // "object-store"). Generally, this will be supplied by the service client diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go index e0fe7c1e0..a5fa68d6d 100644 --- a/vendor/github.com/gophercloud/gophercloud/errors.go +++ b/vendor/github.com/gophercloud/gophercloud/errors.go @@ -1,6 +1,9 @@ package gophercloud -import "fmt" +import ( + "fmt" + "strings" +) // BaseError is an error type that all other error types embed. 
type BaseError struct { @@ -43,6 +46,33 @@ func (e ErrInvalidInput) Error() string { return e.choseErrString() } +// ErrMissingEnvironmentVariable is the error when environment variable is required +// in a particular situation but not provided by the user +type ErrMissingEnvironmentVariable struct { + BaseError + EnvironmentVariable string +} + +func (e ErrMissingEnvironmentVariable) Error() string { + e.DefaultErrString = fmt.Sprintf("Missing environment variable [%s]", e.EnvironmentVariable) + return e.choseErrString() +} + +// ErrMissingAnyoneOfEnvironmentVariables is the error when anyone of the environment variables +// is required in a particular situation but not provided by the user +type ErrMissingAnyoneOfEnvironmentVariables struct { + BaseError + EnvironmentVariables []string +} + +func (e ErrMissingAnyoneOfEnvironmentVariables) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Missing one of the following environment variables [%s]", + strings.Join(e.EnvironmentVariables, ", "), + ) + return e.choseErrString() +} + // ErrUnexpectedResponseCode is returned by the Request method when a response code other than // those listed in OkCodes is encountered. type ErrUnexpectedResponseCode struct { @@ -72,6 +102,11 @@ type ErrDefault401 struct { ErrUnexpectedResponseCode } +// ErrDefault403 is the default error type returned on a 403 HTTP response code. +type ErrDefault403 struct { + ErrUnexpectedResponseCode +} + // ErrDefault404 is the default error type returned on a 404 HTTP response code. type ErrDefault404 struct { ErrUnexpectedResponseCode @@ -103,11 +138,22 @@ type ErrDefault503 struct { } func (e ErrDefault400) Error() string { - return "Invalid request due to incorrect syntax or missing required parameters." + e.DefaultErrString = fmt.Sprintf( + "Bad request with: [%s %s], error message: %s", + e.Method, e.URL, e.Body, + ) + return e.choseErrString() } func (e ErrDefault401) Error() string { return "Authentication failed" } +func (e ErrDefault403) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Request forbidden: [%s %s], error message: %s", + e.Method, e.URL, e.Body, + ) + return e.choseErrString() +} func (e ErrDefault404) Error() string { return "Resource not found" } @@ -141,6 +187,12 @@ type Err401er interface { Error401(ErrUnexpectedResponseCode) error } +// Err403er is the interface resource error types implement to override the error message +// from a 403 error. +type Err403er interface { + Error403(ErrUnexpectedResponseCode) error +} + // Err404er is the interface resource error types implement to override the error message // from a 404 error. type Err404er interface { @@ -393,13 +445,6 @@ func (e ErrScopeProjectIDAlone) Error() string { return "ProjectID must be supplied alone in a Scope" } -// ErrScopeDomainName indicates that a DomainName was provided alone in a Scope. -type ErrScopeDomainName struct{ BaseError } - -func (e ErrScopeDomainName) Error() string { - return "DomainName must be supplied with a ProjectName or ProjectID in a Scope" -} - // ErrScopeEmpty indicates that no credentials were provided in a Scope. 
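// Editor's note (not part of the patch): a hedged sketch of branching on the
// default error types above, including the new ErrDefault403; the compute
// "servers" package and the server ID are assumptions for illustration only.
//
//	server, err := servers.Get(computeClient, "server-id").Extract()
//	if err != nil {
//		switch err.(type) {
//		case gophercloud.ErrDefault403:
//			// The token was valid but lacked permission for this resource.
//		case gophercloud.ErrDefault404:
//			// The server does not exist.
//		}
//	}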
type ErrScopeEmpty struct{ BaseError } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go index f6d2eb194..994b5550c 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go @@ -8,10 +8,27 @@ import ( var nilOptions = gophercloud.AuthOptions{} -// AuthOptionsFromEnv fills out an identity.AuthOptions structure with the settings found on the various OpenStack -// OS_* environment variables. The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME, -// OS_PASSWORD, OS_TENANT_ID, and OS_TENANT_NAME. Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must -// have settings, or an error will result. OS_TENANT_ID and OS_TENANT_NAME are optional. +/* +AuthOptionsFromEnv fills out an identity.AuthOptions structure with the +settings found on the various OpenStack OS_* environment variables. + +The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME, +OS_PASSWORD, OS_TENANT_ID, and OS_TENANT_NAME. + +Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must have settings, +or an error will result. OS_TENANT_ID, OS_TENANT_NAME, OS_PROJECT_ID, and +OS_PROJECT_NAME are optional. + +OS_TENANT_ID and OS_TENANT_NAME are mutually exclusive to OS_PROJECT_ID and +OS_PROJECT_NAME. If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will +still be referred as "tenant" in Gophercloud. + +To use this function, first set the OS_* environment variables (for example, +by sourcing an `openrc` file), then: + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) +*/ func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { authURL := os.Getenv("OS_AUTH_URL") username := os.Getenv("OS_USERNAME") @@ -22,18 +39,34 @@ func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { domainID := os.Getenv("OS_DOMAIN_ID") domainName := os.Getenv("OS_DOMAIN_NAME") + // If OS_PROJECT_ID is set, overwrite tenantID with the value. + if v := os.Getenv("OS_PROJECT_ID"); v != "" { + tenantID = v + } + + // If OS_PROJECT_NAME is set, overwrite tenantName with the value. + if v := os.Getenv("OS_PROJECT_NAME"); v != "" { + tenantName = v + } + if authURL == "" { - err := gophercloud.ErrMissingInput{Argument: "authURL"} + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_AUTH_URL", + } return nilOptions, err } if username == "" && userID == "" { - err := gophercloud.ErrMissingInput{Argument: "username"} + err := gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ + EnvironmentVariables: []string{"OS_USERNAME", "OS_USERID"}, + } return nilOptions, err } if password == "" { - err := gophercloud.ErrMissingInput{Argument: "password"} + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_PASSWORD", + } return nilOptions, err } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go index 09120e8fa..e554b7bc3 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/client.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/client.go @@ -2,7 +2,6 @@ package openstack import ( "fmt" - "net/url" "reflect" "github.com/gophercloud/gophercloud" @@ -12,43 +11,66 @@ import ( ) const ( - v20 = "v2.0" - v30 = "v3.0" + // v2 represents Keystone v2. + // It should never increase beyond 2.0. + v2 = "v2.0" + + // v3 represents Keystone v3. 
+	// The version can be anything from v3 to v3.x.
+	v3 = "v3"
 )
 
-// NewClient prepares an unauthenticated ProviderClient instance.
-// Most users will probably prefer using the AuthenticatedClient function instead.
-// This is useful if you wish to explicitly control the version of the identity service that's used for authentication explicitly,
-// for example.
+/*
+NewClient prepares an unauthenticated ProviderClient instance.
+Most users will probably prefer using the AuthenticatedClient function
+instead.
+
+This is useful if you wish to explicitly control the version of the identity
+service that's used for authentication, for example.
+
+A basic example of using this would be:
+
+	ao, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.NewClient(ao.IdentityEndpoint)
+	client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{})
+*/
 func NewClient(endpoint string) (*gophercloud.ProviderClient, error) {
-	u, err := url.Parse(endpoint)
+	base, err := utils.BaseEndpoint(endpoint)
 	if err != nil {
 		return nil, err
 	}
-	hadPath := u.Path != ""
-	u.Path, u.RawQuery, u.Fragment = "", "", ""
-	base := u.String()
+
 	endpoint = gophercloud.NormalizeURL(endpoint)
 	base = gophercloud.NormalizeURL(base)
 
-	if hadPath {
-		return &gophercloud.ProviderClient{
-			IdentityBase:     base,
-			IdentityEndpoint: endpoint,
-		}, nil
-	}
+	p := new(gophercloud.ProviderClient)
+	p.IdentityBase = base
+	p.IdentityEndpoint = endpoint
+	p.UseTokenLock()
 
-	return &gophercloud.ProviderClient{
-		IdentityBase:     base,
-		IdentityEndpoint: "",
-	}, nil
+	return p, nil
 }
 
-// AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint specified by options, acquires a token, and
-// returns a Client instance that's ready to operate.
-// It first queries the root identity endpoint to determine which versions of the identity service are supported, then chooses
-// the most recent identity service available to proceed.
+/*
+AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint
+specified by the options, acquires a token, and returns a Provider Client
+instance that's ready to operate.
+
+If the full path to a versioned identity endpoint was specified (example:
+http://example.com:5000/v3), that path will be used as the endpoint to query.
+
+If a versionless endpoint was specified (example: http://example.com:5000/),
+the endpoint will be queried to determine which versions of the identity service
+are available, and the most recent or most supported version will be chosen.
+
+Example:
+
+	ao, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.AuthenticatedClient(ao)
+	client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+*/
 func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) {
 	client, err := NewClient(options.IdentityEndpoint)
 	if err != nil {
@@ -62,11 +84,12 @@ func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.Provider
 	return client, nil
 }
 
-// Authenticate or re-authenticate against the most recent identity service supported at the provided endpoint.
+// Authenticate or re-authenticate against the most recent identity service
+// supported at the provided endpoint.
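// Editor's note (not part of the patch): a minimal sketch of pinning
// authentication to Keystone v3 by handing NewClient a versioned endpoint and
// then authenticating manually; the URL is a placeholder and most error
// handling is elided.
//
//	provider, err := openstack.NewClient("https://openstack.example.com:5000/v3")
//	if err == nil {
//		err = openstack.Authenticate(provider, opts)
//	}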
func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { versions := []*utils.Version{ - {ID: v20, Priority: 20, Suffix: "/v2.0/"}, - {ID: v30, Priority: 30, Suffix: "/v3/"}, + {ID: v2, Priority: 20, Suffix: "/v2.0/"}, + {ID: v3, Priority: 30, Suffix: "/v3/"}, } chosen, endpoint, err := utils.ChooseVersion(client, versions) @@ -75,9 +98,9 @@ func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOp } switch chosen.ID { - case v20: + case v2: return v2auth(client, endpoint, options, gophercloud.EndpointOpts{}) - case v30: + case v3: return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{}) default: // The switch statement must be out of date from the versions list. @@ -123,9 +146,21 @@ func v2auth(client *gophercloud.ProviderClient, endpoint string, options gopherc } if options.AllowReauth { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.ReauthFunc = nil + tac.TokenID = "" + tao := options + tao.AllowReauth = false client.ReauthFunc = func() error { - client.TokenID = "" - return v2auth(client, endpoint, options, eo) + err := v2auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.TokenID = tac.TokenID + return nil } } client.TokenID = token.ID @@ -167,9 +202,32 @@ func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.Au client.TokenID = token.ID if opts.CanReauth() { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.ReauthFunc = nil + tac.TokenID = "" + var tao tokens3.AuthOptionsBuilder + switch ot := opts.(type) { + case *gophercloud.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + case *tokens3.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + default: + tao = opts + } client.ReauthFunc = func() error { - client.TokenID = "" - return v3auth(client, endpoint, opts, eo) + err := v3auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.TokenID = tac.TokenID + return nil } } client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { @@ -179,7 +237,8 @@ func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.Au return nil } -// NewIdentityV2 creates a ServiceClient that may be used to interact with the v2 identity service. +// NewIdentityV2 creates a ServiceClient that may be used to interact with the +// v2 identity service. func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { endpoint := client.IdentityBase + "v2.0/" clientType := "identity" @@ -199,7 +258,8 @@ func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOp }, nil } -// NewIdentityV3 creates a ServiceClient that may be used to access the v3 identity service. +// NewIdentityV3 creates a ServiceClient that may be used to access the v3 +// identity service. 
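// Editor's note (not part of the patch): the throw-away client pattern above
// means a client created with AllowReauth retries authentication only once
// per expired token instead of endlessly. A hedged usage sketch, reading
// credentials from the environment:
//
//	opts, err := openstack.AuthOptionsFromEnv()
//	opts.AllowReauth = true
//	provider, err := openstack.AuthenticatedClient(opts)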
func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { endpoint := client.IdentityBase + "v3/" clientType := "identity" @@ -212,6 +272,19 @@ func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOp } } + // Ensure endpoint still has a suffix of v3. + // This is because EndpointLocator might have found a versionless + // endpoint or the published endpoint is still /v2.0. In both + // cases, we need to fix the endpoint to point to /v3. + base, err := utils.BaseEndpoint(endpoint) + if err != nil { + return nil, err + } + + base = gophercloud.NormalizeURL(base) + + endpoint = base + "v3/" + return &gophercloud.ServiceClient{ ProviderClient: client, Endpoint: endpoint, @@ -232,33 +305,43 @@ func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointO return sc, nil } -// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 object storage package. +// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 +// object storage package. func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { return initClientOpts(client, eo, "object-store") } -// NewComputeV2 creates a ServiceClient that may be used with the v2 compute package. +// NewComputeV2 creates a ServiceClient that may be used with the v2 compute +// package. func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { return initClientOpts(client, eo, "compute") } -// NewNetworkV2 creates a ServiceClient that may be used with the v2 network package. +// NewNetworkV2 creates a ServiceClient that may be used with the v2 network +// package. func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { sc, err := initClientOpts(client, eo, "network") sc.ResourceBase = sc.Endpoint + "v2.0/" return sc, err } -// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 block storage service. +// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 +// block storage service. func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { return initClientOpts(client, eo, "volume") } -// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 block storage service. +// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 +// block storage service. func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { return initClientOpts(client, eo, "volumev2") } +// NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. +func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volumev3") +} + // NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. 
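// Editor's note (not part of the patch): a hedged sketch of creating one of
// the newly added service clients from an authenticated provider; the region
// environment variable is an assumption.
//
//	blockClient, err := openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{
//		Region: os.Getenv("OS_REGION_NAME"),
//	})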
func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { return initClientOpts(client, eo, "sharev2") @@ -270,7 +353,8 @@ func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) ( return initClientOpts(client, eo, "cdn") } -// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 orchestration service. +// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 +// orchestration service. func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { return initClientOpts(client, eo, "orchestration") } @@ -280,16 +364,45 @@ func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (* return initClientOpts(client, eo, "database") } -// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS service. +// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS +// service. func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { sc, err := initClientOpts(client, eo, "dns") sc.ResourceBase = sc.Endpoint + "v2/" return sc, err } -// NewImageServiceV2 creates a ServiceClient that may be used to access the v2 image service. +// NewImageServiceV2 creates a ServiceClient that may be used to access the v2 +// image service. func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { sc, err := initClientOpts(client, eo, "image") sc.ResourceBase = sc.Endpoint + "v2/" return sc, err } + +// NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2 +// load balancer service. +func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "load-balancer") + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc, err +} + +// NewClusteringV1 creates a ServiceClient that may be used with the v1 clustering +// package. +func NewClusteringV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "clustering") +} + +// NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging +// service. +func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "messaging") + sc.MoreHeaders = map[string]string{"Client-ID": clientID} + return sc, err +} + +// NewContainerV1 creates a ServiceClient that may be used with v1 container package +func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "container") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/doc.go deleted file mode 100644 index 4a168f4b2..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Package extensions provides information and interaction with the different extensions available -// for an OpenStack service. -// -// The purpose of OpenStack API extensions is to: -// -// - Introduce new features in the API without requiring a version change. 
-// - Introduce vendor-specific niche functionality. -// - Act as a proving ground for experimental functionalities that might be included in a future -// version of the API. -// -// Extensions usually have tags that prevent conflicts with other extensions that define attributes -// or resources with the same names, and with core resources and attributes. -// Because an extension might not be supported by all plug-ins, its availability varies with deployments -// and the specific plug-in. -package extensions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/errors.go deleted file mode 100755 index aeec0fa75..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/errors.go +++ /dev/null @@ -1 +0,0 @@ -package extensions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/requests.go deleted file mode 100755 index 46b7d60cd..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/requests.go +++ /dev/null @@ -1,20 +0,0 @@ -package extensions - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Get retrieves information for a specific extension using its alias. -func Get(c *gophercloud.ServiceClient, alias string) (r GetResult) { - _, r.Err = c.Get(ExtensionURL(c, alias), &r.Body, nil) - return -} - -// List returns a Pager which allows you to iterate over the full collection of extensions. -// It does not accept query parameters. -func List(c *gophercloud.ServiceClient) pagination.Pager { - return pagination.NewPager(c, ListExtensionURL(c), func(r pagination.PageResult) pagination.Page { - return ExtensionPage{pagination.SinglePageBase(r)} - }) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/results.go deleted file mode 100755 index d5f865091..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/results.go +++ /dev/null @@ -1,53 +0,0 @@ -package extensions - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// GetResult temporarily stores the result of a Get call. -// Use its Extract() method to interpret it as an Extension. -type GetResult struct { - gophercloud.Result -} - -// Extract interprets a GetResult as an Extension. -func (r GetResult) Extract() (*Extension, error) { - var s struct { - Extension *Extension `json:"extension"` - } - err := r.ExtractInto(&s) - return s.Extension, err -} - -// Extension is a struct that represents an OpenStack extension. -type Extension struct { - Updated string `json:"updated"` - Name string `json:"name"` - Links []interface{} `json:"links"` - Namespace string `json:"namespace"` - Alias string `json:"alias"` - Description string `json:"description"` -} - -// ExtensionPage is the page returned by a pager when traversing over a collection of extensions. -type ExtensionPage struct { - pagination.SinglePageBase -} - -// IsEmpty checks whether an ExtensionPage struct is empty. 
-func (r ExtensionPage) IsEmpty() (bool, error) { - is, err := ExtractExtensions(r) - return len(is) == 0, err -} - -// ExtractExtensions accepts a Page struct, specifically an ExtensionPage struct, and extracts the -// elements into a slice of Extension structs. -// In other words, a generic collection is mapped into a relevant slice. -func ExtractExtensions(r pagination.Page) ([]Extension, error) { - var s struct { - Extensions []Extension `json:"extensions"` - } - err := (r.(ExtensionPage)).ExtractInto(&s) - return s.Extensions, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/urls.go deleted file mode 100644 index eaf38b2d1..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/urls.go +++ /dev/null @@ -1,13 +0,0 @@ -package extensions - -import "github.com/gophercloud/gophercloud" - -// ExtensionURL generates the URL for an extension resource by name. -func ExtensionURL(c *gophercloud.ServiceClient, name string) string { - return c.ServiceURL("extensions", name) -} - -// ListExtensionURL generates the URL for the extensions resource collection. -func ListExtensionURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("extensions") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/delegate.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/delegate.go deleted file mode 100644 index 00e7c3bec..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/delegate.go +++ /dev/null @@ -1,23 +0,0 @@ -package extensions - -import ( - "github.com/gophercloud/gophercloud" - common "github.com/gophercloud/gophercloud/openstack/common/extensions" - "github.com/gophercloud/gophercloud/pagination" -) - -// ExtractExtensions interprets a Page as a slice of Extensions. -func ExtractExtensions(page pagination.Page) ([]common.Extension, error) { - return common.ExtractExtensions(page) -} - -// Get retrieves information for a specific extension using its alias. -func Get(c *gophercloud.ServiceClient, alias string) common.GetResult { - return common.Get(c, alias) -} - -// List returns a Pager which allows you to iterate over the full collection of extensions. -// It does not accept query parameters. -func List(c *gophercloud.ServiceClient) pagination.Pager { - return common.List(c) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/doc.go deleted file mode 100644 index 2b447da1d..000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package extensions provides information and interaction with the -// different extensions available for the OpenStack Compute service. 
-package extensions
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go
index 6682fa629..f5dbdbf8b 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go
@@ -1,3 +1,68 @@
-// Package floatingips provides the ability to manage floating ips through
-// nova-network
+/*
+Package floatingips provides the ability to manage floating ips through the
+Nova API.
+
+This API has been deprecated and will be removed from a future release of the
+Nova API service.
+
+For environments that support this extension, this package can be used
+regardless of whether Neutron or nova-network is used as the cloud's network
+service.
+
+Example to List Floating IPs
+
+	allPages, err := floatingips.List(computeClient).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allFloatingIPs, err := floatingips.ExtractFloatingIPs(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, fip := range allFloatingIPs {
+		fmt.Printf("%+v\n", fip)
+	}
+
+Example to Create a Floating IP
+
+	createOpts := floatingips.CreateOpts{
+		Pool: "nova",
+	}
+
+	fip, err := floatingips.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Floating IP
+
+	err := floatingips.Delete(computeClient, "floatingip-id").ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Associate a Floating IP With a Server
+
+	associateOpts := floatingips.AssociateOpts{
+		FloatingIP: "10.10.10.2",
+	}
+
+	err := floatingips.AssociateInstance(computeClient, "server-id", associateOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Disassociate a Floating IP From a Server
+
+	disassociateOpts := floatingips.DisassociateOpts{
+		FloatingIP: "10.10.10.2",
+	}
+
+	err := floatingips.DisassociateInstance(computeClient, "server-id", disassociateOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
 package floatingips
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go
index b36aeba59..a922639de 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go
@@ -12,15 +12,15 @@ func List(client *gophercloud.ServiceClient) pagination.Pager {
 	})
 }
 
-// CreateOptsBuilder describes struct types that can be accepted by the Create call. Notable, the
-// CreateOpts struct in this package does.
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
 type CreateOptsBuilder interface {
 	ToFloatingIPCreateMap() (map[string]interface{}, error)
 }
 
-// CreateOpts specifies a Floating IP allocation request
+// CreateOpts specifies a Floating IP allocation request.
 type CreateOpts struct {
-	// Pool is the pool of floating IPs to allocate one from
+	// Pool is the pool of Floating IPs to allocate one from.
Pool string `json:"pool" required:"true"` } @@ -29,7 +29,7 @@ func (opts CreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) { return gophercloud.BuildRequestBody(opts, "") } -// Create requests the creation of a new floating IP +// Create requests the creation of a new Floating IP. func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { b, err := opts.ToFloatingIPCreateMap() if err != nil { @@ -42,29 +42,30 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r Create return } -// Get returns data about a previously created FloatingIP. +// Get returns data about a previously created Floating IP. func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { _, r.Err = client.Get(getURL(client, id), &r.Body, nil) return } -// Delete requests the deletion of a previous allocated FloatingIP. +// Delete requests the deletion of a previous allocated Floating IP. func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { _, r.Err = client.Delete(deleteURL(client, id), nil) return } -// AssociateOptsBuilder is the interface types must satfisfy to be used as -// Associate options +// AssociateOptsBuilder allows extensions to add additional parameters to the +// Associate request. type AssociateOptsBuilder interface { ToFloatingIPAssociateMap() (map[string]interface{}, error) } -// AssociateOpts specifies the required information to associate a floating IP with an instance +// AssociateOpts specifies the required information to associate a Floating IP with an instance type AssociateOpts struct { - // FloatingIP is the floating IP to associate with an instance + // FloatingIP is the Floating IP to associate with an instance. FloatingIP string `json:"address" required:"true"` - // FixedIP is an optional fixed IP address of the server + + // FixedIP is an optional fixed IP address of the server. FixedIP string `json:"fixed_address,omitempty"` } @@ -73,7 +74,7 @@ func (opts AssociateOpts) ToFloatingIPAssociateMap() (map[string]interface{}, er return gophercloud.BuildRequestBody(opts, "addFloatingIp") } -// AssociateInstance pairs an allocated floating IP with an instance. +// AssociateInstance pairs an allocated Floating IP with a server. func AssociateInstance(client *gophercloud.ServiceClient, serverID string, opts AssociateOptsBuilder) (r AssociateResult) { b, err := opts.ToFloatingIPAssociateMap() if err != nil { @@ -84,23 +85,24 @@ func AssociateInstance(client *gophercloud.ServiceClient, serverID string, opts return } -// DisassociateOptsBuilder is the interface types must satfisfy to be used as -// Disassociate options +// DisassociateOptsBuilder allows extensions to add additional parameters to +// the Disassociate request. type DisassociateOptsBuilder interface { ToFloatingIPDisassociateMap() (map[string]interface{}, error) } -// DisassociateOpts specifies the required information to disassociate a floating IP with an instance +// DisassociateOpts specifies the required information to disassociate a +// Floating IP with a server. type DisassociateOpts struct { FloatingIP string `json:"address" required:"true"` } -// ToFloatingIPDisassociateMap constructs a request body from AssociateOpts. +// ToFloatingIPDisassociateMap constructs a request body from DisassociateOpts. 
func (opts DisassociateOpts) ToFloatingIPDisassociateMap() (map[string]interface{}, error) { return gophercloud.BuildRequestBody(opts, "removeFloatingIp") } -// DisassociateInstance decouples an allocated floating IP from an instance +// DisassociateInstance decouples an allocated Floating IP from an instance func DisassociateInstance(client *gophercloud.ServiceClient, serverID string, opts DisassociateOptsBuilder) (r DisassociateResult) { b, err := opts.ToFloatingIPDisassociateMap() if err != nil { diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go index 2f5b33844..da4e9da0e 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go @@ -8,21 +8,21 @@ import ( "github.com/gophercloud/gophercloud/pagination" ) -// A FloatingIP is an IP that can be associated with an instance +// A FloatingIP is an IP that can be associated with a server. type FloatingIP struct { // ID is a unique ID of the Floating IP ID string `json:"-"` - // FixedIP is the IP of the instance related to the Floating IP + // FixedIP is a specific IP on the server to pair the Floating IP with. FixedIP string `json:"fixed_ip,omitempty"` - // InstanceID is the ID of the instance that is using the Floating IP + // InstanceID is the ID of the server that is using the Floating IP. InstanceID string `json:"instance_id"` - // IP is the actual Floating IP + // IP is the actual Floating IP. IP string `json:"ip"` - // Pool is the pool of floating IPs that this floating IP belongs to + // Pool is the pool of Floating IPs that this Floating IP belongs to. Pool string `json:"pool"` } @@ -49,8 +49,7 @@ func (r *FloatingIP) UnmarshalJSON(b []byte) error { return err } -// FloatingIPPage stores a single, only page of FloatingIPs -// results from a List call. +// FloatingIPPage stores a single page of FloatingIPs from a List call. type FloatingIPPage struct { pagination.SinglePageBase } @@ -61,8 +60,7 @@ func (page FloatingIPPage) IsEmpty() (bool, error) { return len(va) == 0, err } -// ExtractFloatingIPs interprets a page of results as a slice of -// FloatingIPs. +// ExtractFloatingIPs interprets a page of results as a slice of FloatingIPs. func ExtractFloatingIPs(r pagination.Page) ([]FloatingIP, error) { var s struct { FloatingIPs []FloatingIP `json:"floating_ips"` @@ -86,32 +84,32 @@ func (r FloatingIPResult) Extract() (*FloatingIP, error) { return s.FloatingIP, err } -// CreateResult is the response from a Create operation. Call its Extract method to interpret it -// as a FloatingIP. +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a FloatingIP. type CreateResult struct { FloatingIPResult } -// GetResult is the response from a Get operation. Call its Extract method to interpret it -// as a FloatingIP. +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a FloatingIP. type GetResult struct { FloatingIPResult } -// DeleteResult is the response from a Delete operation. Call its Extract method to determine if -// the call succeeded or failed. +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. 
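// Editor's note (not part of the patch): a hedged sketch of the optional
// FixedIP field documented above, which pins the Floating IP to a specific
// address on a multi-homed server; all addresses and IDs are placeholders.
//
//	associateOpts := floatingips.AssociateOpts{
//		FloatingIP: "10.10.10.2",
//		FixedIP:    "192.168.1.10",
//	}
//
//	err := floatingips.AssociateInstance(computeClient, "server-id", associateOpts).ExtractErr()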
 type DeleteResult struct {
 	gophercloud.ErrResult
 }
 
-// AssociateResult is the response from a Delete operation. Call its Extract method to determine if
-// the call succeeded or failed.
+// AssociateResult is the response from an Associate operation. Call its
+// ExtractErr method to determine if the call succeeded or failed.
 type AssociateResult struct {
 	gophercloud.ErrResult
 }
 
-// DisassociateResult is the response from a Delete operation. Call its Extract method to determine if
-// the call succeeded or failed.
+// DisassociateResult is the response from a Disassociate operation. Call its
+// ExtractErr method to determine if the call succeeded or failed.
 type DisassociateResult struct {
 	gophercloud.ErrResult
 }
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go
index 856f41bac..24c460772 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go
@@ -1,3 +1,71 @@
-// Package keypairs provides information and interaction with the Keypairs
-// extension for the OpenStack Compute service.
+/*
+Package keypairs provides the ability to manage key pairs as well as create
+servers with a specified key pair.
+
+Example to List Key Pairs
+
+	allPages, err := keypairs.List(computeClient).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allKeyPairs, err := keypairs.ExtractKeyPairs(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, kp := range allKeyPairs {
+		fmt.Printf("%+v\n", kp)
+	}
+
+Example to Create a Key Pair
+
+	createOpts := keypairs.CreateOpts{
+		Name: "keypair-name",
+	}
+
+	keypair, err := keypairs.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v", keypair)
+
+Example to Import a Key Pair
+
+	createOpts := keypairs.CreateOpts{
+		Name:      "keypair-name",
+		PublicKey: "public-key",
+	}
+
+	keypair, err := keypairs.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Key Pair
+
+	err := keypairs.Delete(computeClient, "keypair-name").ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Server With a Key Pair
+
+	serverCreateOpts := servers.CreateOpts{
+		Name:      "server_name",
+		ImageRef:  "image-uuid",
+		FlavorRef: "flavor-uuid",
+	}
+
+	createOpts := keypairs.CreateOptsExt{
+		CreateOptsBuilder: serverCreateOpts,
+		KeyName:           "keypair-name",
+	}
+
+	server, err := servers.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
 package keypairs
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go
index adf1e5596..4e5e499e3 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go
@@ -9,11 +9,12 @@ import (
 // CreateOptsExt adds a KeyPair option to the base CreateOpts.
 type CreateOptsExt struct {
 	servers.CreateOptsBuilder
+
+	// KeyName is the name of the key pair.
 	KeyName string `json:"key_name,omitempty"`
 }
 
-// ToServerCreateMap adds the key_name and, optionally, key_data options to
-// the base server creation options.
+// ToServerCreateMap adds the key_name to the base server creation options. func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { base, err := opts.CreateOptsBuilder.ToServerCreateMap() if err != nil { @@ -37,18 +38,19 @@ func List(client *gophercloud.ServiceClient) pagination.Pager { }) } -// CreateOptsBuilder describes struct types that can be accepted by the Create call. Notable, the -// CreateOpts struct in this package does. +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. type CreateOptsBuilder interface { ToKeyPairCreateMap() (map[string]interface{}, error) } -// CreateOpts specifies keypair creation or import parameters. +// CreateOpts specifies KeyPair creation or import parameters. type CreateOpts struct { // Name is a friendly name to refer to this KeyPair in other services. Name string `json:"name" required:"true"` - // PublicKey [optional] is a pregenerated OpenSSH-formatted public key. If provided, this key - // will be imported and no new key will be created. + + // PublicKey [optional] is a pregenerated OpenSSH-formatted public key. + // If provided, this key will be imported and no new key will be created. PublicKey string `json:"public_key,omitempty"` } @@ -57,8 +59,8 @@ func (opts CreateOpts) ToKeyPairCreateMap() (map[string]interface{}, error) { return gophercloud.BuildRequestBody(opts, "keypair") } -// Create requests the creation of a new keypair on the server, or to import a pre-existing -// keypair. +// Create requests the creation of a new KeyPair on the server, or to import a +// pre-existing keypair. func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { b, err := opts.ToKeyPairCreateMap() if err != nil { diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go index 4c785a24c..2d71034b1 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go @@ -5,29 +5,33 @@ import ( "github.com/gophercloud/gophercloud/pagination" ) -// KeyPair is an SSH key known to the OpenStack Cloud that is available to be injected into -// servers. +// KeyPair is an SSH key known to the OpenStack Cloud that is available to be +// injected into servers. type KeyPair struct { - // Name is used to refer to this keypair from other services within this region. + // Name is used to refer to this keypair from other services within this + // region. Name string `json:"name"` - // Fingerprint is a short sequence of bytes that can be used to authenticate or validate a longer - // public key. + // Fingerprint is a short sequence of bytes that can be used to authenticate + // or validate a longer public key. Fingerprint string `json:"fingerprint"` - // PublicKey is the public key from this pair, in OpenSSH format. "ssh-rsa AAAAB3Nz..." + // PublicKey is the public key from this pair, in OpenSSH format. + // "ssh-rsa AAAAB3Nz..." PublicKey string `json:"public_key"` // PrivateKey is the private key from this pair, in PEM format. - // "-----BEGIN RSA PRIVATE KEY-----\nMIICXA..." It is only present if this keypair was just - // returned from a Create call + // "-----BEGIN RSA PRIVATE KEY-----\nMIICXA..." + // It is only present if this KeyPair was just returned from a Create call. 
PrivateKey string `json:"private_key"` - // UserID is the user who owns this keypair. + // UserID is the user who owns this KeyPair. UserID string `json:"user_id"` } -// KeyPairPage stores a single, only page of KeyPair results from a List call. +// KeyPairPage stores a single page of all KeyPair results from a List call. +// Use the ExtractKeyPairs function to convert the results to a slice of +// KeyPairs. type KeyPairPage struct { pagination.SinglePageBase } @@ -58,7 +62,8 @@ type keyPairResult struct { gophercloud.Result } -// Extract is a method that attempts to interpret any KeyPair resource response as a KeyPair struct. +// Extract is a method that attempts to interpret any KeyPair resource response +// as a KeyPair struct. func (r keyPairResult) Extract() (*KeyPair, error) { var s struct { KeyPair *KeyPair `json:"keypair"` @@ -67,20 +72,20 @@ func (r keyPairResult) Extract() (*KeyPair, error) { return s.KeyPair, err } -// CreateResult is the response from a Create operation. Call its Extract method to interpret it -// as a KeyPair. +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a KeyPair. type CreateResult struct { keyPairResult } -// GetResult is the response from a Get operation. Call its Extract method to interpret it -// as a KeyPair. +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a KeyPair. type GetResult struct { keyPairResult } -// DeleteResult is the response from a Delete operation. Call its Extract method to determine if -// the call succeeded or failed. +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. type DeleteResult struct { gophercloud.ErrResult } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go index d2729f874..ab97edb77 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go @@ -1,5 +1,19 @@ /* Package startstop provides functionality to start and stop servers that have been provisioned by the OpenStack Compute service. + +Example to Stop and Start a Server + + serverID := "47b6b7b7-568d-40e4-868c-d5c41735532e" + + err := startstop.Stop(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + + err := startstop.Start(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } */ package startstop diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go index 1d8a593b9..5b4f3f39d 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go @@ -7,13 +7,13 @@ func actionURL(client *gophercloud.ServiceClient, id string) string { } // Start is the operation responsible for starting a Compute server. 
-func Start(client *gophercloud.ServiceClient, id string) (r gophercloud.ErrResult) { +func Start(client *gophercloud.ServiceClient, id string) (r StartResult) { _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-start": nil}, nil, nil) return } // Stop is the operation responsible for stopping a Compute server. -func Stop(client *gophercloud.ServiceClient, id string) (r gophercloud.ErrResult) { +func Stop(client *gophercloud.ServiceClient, id string) (r StopResult) { _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-stop": nil}, nil, nil) return } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go new file mode 100644 index 000000000..834968933 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go @@ -0,0 +1,15 @@ +package startstop + +import "github.com/gophercloud/gophercloud" + +// StartResult is the response from a Start operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type StartResult struct { + gophercloud.ErrResult +} + +// StopResult is the response from Stop operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type StopResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go index 5822e1bcf..34d8764fa 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go @@ -1,7 +1,137 @@ -// Package flavors provides information and interaction with the flavor API -// resource in the OpenStack Compute service. -// -// A flavor is an available hardware configuration for a server. Each flavor -// has a unique combination of disk space, memory capacity and priority for CPU -// time. +/* +Package flavors provides information and interaction with the flavor API +in the OpenStack Compute service. + +A flavor is an available hardware configuration for a server. Each flavor +has a unique combination of disk space, memory capacity and priority for CPU +time. 
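Editor's note (not part of the patch): the examples below are upstream's; for
completeness, a plain Get of a single flavor, using the existing Get call in
requests.go, would look like this hedged sketch with a placeholder ID:

	flavor, err := flavors.Get(computeClient, "flavor-id").Extract()
	if err != nil {
		panic(err)
	}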
+
+Example to List Flavors
+
+	listOpts := flavors.ListOpts{
+		AccessType: flavors.PublicAccess,
+	}
+
+	allPages, err := flavors.ListDetail(computeClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allFlavors, err := flavors.ExtractFlavors(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, flavor := range allFlavors {
+		fmt.Printf("%+v\n", flavor)
+	}
+
+Example to Create a Flavor
+
+	createOpts := flavors.CreateOpts{
+		ID:         "1",
+		Name:       "m1.tiny",
+		Disk:       gophercloud.IntToPointer(1),
+		RAM:        512,
+		VCPUs:      1,
+		RxTxFactor: 1.0,
+	}
+
+	flavor, err := flavors.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List Flavor Access
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+
+	allPages, err := flavors.ListAccesses(computeClient, flavorID).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allAccesses, err := flavors.ExtractAccesses(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, access := range allAccesses {
+		fmt.Printf("%+v", access)
+	}
+
+Example to Grant Access to a Flavor
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+
+	accessOpts := flavors.AddAccessOpts{
+		Tenant: "15153a0979884b59b0592248ef947921",
+	}
+
+	accessList, err := flavors.AddAccess(computeClient, flavorID, accessOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Remove/Revoke Access to a Flavor
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+
+	accessOpts := flavors.RemoveAccessOpts{
+		Tenant: "15153a0979884b59b0592248ef947921",
+	}
+
+	accessList, err := flavors.RemoveAccess(computeClient, flavorID, accessOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create Extra Specs for a Flavor
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+
+	createOpts := flavors.ExtraSpecsOpts{
+		"hw:cpu_policy":        "CPU-POLICY",
+		"hw:cpu_thread_policy": "CPU-THREAD-POLICY",
+	}
+	createdExtraSpecs, err := flavors.CreateExtraSpecs(computeClient, flavorID, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v", createdExtraSpecs)
+
+Example to Get Extra Specs for a Flavor
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+
+	extraSpecs, err := flavors.ListExtraSpecs(computeClient, flavorID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v", extraSpecs)
+
+Example to Update Extra Specs for a Flavor
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+
+	updateOpts := flavors.ExtraSpecsOpts{
+		"hw:cpu_thread_policy": "CPU-THREAD-POLICY-UPDATED",
+	}
+	updatedExtraSpec, err := flavors.UpdateExtraSpec(computeClient, flavorID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v", updatedExtraSpec)
+
+Example to Delete an Extra Spec for a Flavor
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+	err := flavors.DeleteExtraSpec(computeClient, flavorID, "hw:cpu_thread_policy").ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
 package flavors
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go
index 03d7e8724..4b406df95 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go
@@ -11,33 +11,44 @@ type ListOptsBuilder interface {
 	ToFlavorListQuery() (string, error)
 }
 
-// AccessType maps to OpenStack's Flavor.is_public field. Although the is_public field is boolean, the
Although the is_public field is boolean, the
-// request options are ternary, which is why AccessType is a string. The following values are
-// allowed:
-//
-// PublicAccess (the default): Returns public flavors and private flavors associated with that project.
-// PrivateAccess (admin only): Returns private flavors, across all projects.
-// AllAccess (admin only): Returns public and private flavors across all projects.
-//
-// The AccessType arguement is optional, and if it is not supplied, OpenStack returns the PublicAccess
-// flavors.
+/*
+	AccessType maps to OpenStack's Flavor.is_public field. Although the is_public
+	field is boolean, the request options are ternary, which is why AccessType is
+	a string. The allowed values are documented on the constants below.
+
+	The AccessType argument is optional, and if it is not supplied, OpenStack
+	returns the PublicAccess flavors.
+*/
 type AccessType string
 
 const (
-	PublicAccess  AccessType = "true"
+	// PublicAccess returns public flavors and private flavors associated with
+	// that project.
+	PublicAccess AccessType = "true"
+
+	// PrivateAccess (admin only) returns private flavors, across all projects.
 	PrivateAccess AccessType = "false"
-	AllAccess     AccessType = "None"
+
+	// AllAccess (admin only) returns public and private flavors across all
+	// projects.
+	AllAccess AccessType = "None"
 )
 
-// ListOpts helps control the results returned by the List() function.
-// For example, a flavor with a minDisk field of 10 will not be returned if you specify MinDisk set to 20.
-// Typically, software will use the last ID of the previous call to List to set the Marker for the current call.
-type ListOpts struct {
+/*
+	ListOpts filters the results returned by the List() function.
+	For example, a flavor with a minDisk field of 10 will not be returned if you
+	specify MinDisk set to 20.
 
-	// ChangesSince, if provided, instructs List to return only those things which have changed since the timestamp provided.
+	Typically, software will use the last ID of the previous call to List to set
+	the Marker for the current call.
+*/
+type ListOpts struct {
+	// ChangesSince, if provided, instructs List to return only those things which
+	// have changed since the timestamp provided.
 	ChangesSince string `q:"changes-since"`
 
-	// MinDisk and MinRAM, if provided, elides flavors which do not meet your criteria.
+	// MinDisk and MinRAM, if provided, elide flavors which do not meet your
+	// criteria.
 	MinDisk int `q:"minDisk"`
 	MinRAM  int `q:"minRam"`
 
@@ -45,11 +56,12 @@ type ListOpts struct {
 	// Marker instructs List where to start listing from.
 	Marker string `q:"marker"`
 
-	// Limit instructs List to refrain from sending excessively large lists of flavors.
+	// Limit instructs List to refrain from sending excessively large lists of
+	// flavors.
 	Limit int `q:"limit"`
 
-	// AccessType, if provided, instructs List which set of flavors to return. If IsPublic not provided,
-	// flavors for the current project are returned.
+	// AccessType, if provided, instructs List which set of flavors to return.
+	// If IsPublic is not provided, flavors for the current project are returned.
 	AccessType AccessType `q:"is_public"`
 }
 
@@ -60,8 +72,8 @@ func (opts ListOpts) ToFlavorListQuery() (string, error) {
 }
 
 // ListDetail instructs OpenStack to provide a list of flavors.
-// You may provide criteria by which List curtails its results for easier processing.
-// See ListOpts for more details.
+// You may provide criteria by which List curtails its results for easier
+// processing.
 func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
 	url := listURL(client)
 	if opts != nil {
@@ -80,31 +92,42 @@ type CreateOptsBuilder interface {
 	ToFlavorCreateMap() (map[string]interface{}, error)
 }
 
-// CreateOpts is passed to Create to create a flavor
-// Source:
-// https://github.com/openstack/nova/blob/stable/newton/nova/api/openstack/compute/schemas/flavor_manage.py#L20
+// CreateOpts specifies parameters used for creating a flavor.
 type CreateOpts struct {
+	// Name is the name of the flavor.
 	Name string `json:"name" required:"true"`
-	// memory size, in MBs
-	RAM   int `json:"ram" required:"true"`
+
+	// RAM is the memory of the flavor, measured in MB.
+	RAM int `json:"ram" required:"true"`
+
+	// VCPUs is the number of vcpus for the flavor.
 	VCPUs int `json:"vcpus" required:"true"`
-	// disk size, in GBs
-	Disk *int   `json:"disk" required:"true"`
-	ID   string `json:"id,omitempty"`
-	// non-zero, positive
-	Swap       *int    `json:"swap,omitempty"`
+
+	// Disk is the amount of root disk space, measured in GB.
+	Disk *int `json:"disk" required:"true"`
+
+	// ID is a unique ID for the flavor.
+	ID string `json:"id,omitempty"`
+
+	// Swap is the amount of swap space for the flavor, measured in MB.
+	Swap *int `json:"swap,omitempty"`
+
+	// RxTxFactor alters the network bandwidth of a flavor.
 	RxTxFactor float64 `json:"rxtx_factor,omitempty"`
-	IsPublic   *bool   `json:"os-flavor-access:is_public,omitempty"`
-	// ephemeral disk size, in GBs, non-zero, positive
+
+	// IsPublic flags a flavor as being available to all projects or not.
+	IsPublic *bool `json:"os-flavor-access:is_public,omitempty"`
+
+	// Ephemeral is the amount of ephemeral disk space, measured in GB.
 	Ephemeral *int `json:"OS-FLV-EXT-DATA:ephemeral,omitempty"`
 }
 
-// ToFlavorCreateMap satisfies the CreateOptsBuilder interface
-func (opts *CreateOpts) ToFlavorCreateMap() (map[string]interface{}, error) {
+// ToFlavorCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToFlavorCreateMap() (map[string]interface{}, error) {
 	return gophercloud.BuildRequestBody(opts, "flavor")
 }
 
-// Create a flavor
+// Create requests the creation of a new flavor.
 func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
 	b, err := opts.ToFlavorCreateMap()
 	if err != nil {
@@ -117,14 +140,177 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r Create
 	return
 }
 
-// Get instructs OpenStack to provide details on a single flavor, identified by its ID.
-// Use ExtractFlavor to convert its result into a Flavor.
+// Get retrieves details of a single flavor. Use ExtractFlavor to convert its
+// result into a Flavor.
 func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
 	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
 	return
 }
 
-// IDFromName is a convienience function that returns a flavor's ID given its name.
+// Delete deletes the specified flavor ID.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
+
+// ListAccesses retrieves the tenants which have access to a flavor.
+func ListAccesses(client *gophercloud.ServiceClient, id string) pagination.Pager {
+	url := accessURL(client, id)
+
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return AccessPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// AddAccessOptsBuilder allows extensions to add additional parameters to the
+// AddAccess requests.
+type AddAccessOptsBuilder interface {
+	ToFlavorAddAccessMap() (map[string]interface{}, error)
+}
+
+// AddAccessOpts represents options for adding access to a flavor.
+type AddAccessOpts struct {
+	// Tenant is the project/tenant ID to grant access.
+	Tenant string `json:"tenant"`
+}
+
+// ToFlavorAddAccessMap constructs a request body from AddAccessOpts.
+func (opts AddAccessOpts) ToFlavorAddAccessMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "addTenantAccess")
+}
+
+// AddAccess grants a tenant/project access to a flavor.
+func AddAccess(client *gophercloud.ServiceClient, id string, opts AddAccessOptsBuilder) (r AddAccessResult) {
+	b, err := opts.ToFlavorAddAccessMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(accessActionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// RemoveAccessOptsBuilder allows extensions to add additional parameters to the
+// RemoveAccess requests.
+type RemoveAccessOptsBuilder interface {
+	ToFlavorRemoveAccessMap() (map[string]interface{}, error)
+}
+
+// RemoveAccessOpts represents options for removing access to a flavor.
+type RemoveAccessOpts struct {
+	// Tenant is the project/tenant ID to revoke access for.
+	Tenant string `json:"tenant"`
+}
+
+// ToFlavorRemoveAccessMap constructs a request body from RemoveAccessOpts.
+func (opts RemoveAccessOpts) ToFlavorRemoveAccessMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "removeTenantAccess")
+}
+
+// RemoveAccess removes/revokes a tenant/project access to a flavor.
+func RemoveAccess(client *gophercloud.ServiceClient, id string, opts RemoveAccessOptsBuilder) (r RemoveAccessResult) {
+	b, err := opts.ToFlavorRemoveAccessMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(accessActionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// ListExtraSpecs requests all the extra-specs for the given flavor ID.
+func ListExtraSpecs(client *gophercloud.ServiceClient, flavorID string) (r ListExtraSpecsResult) {
+	_, r.Err = client.Get(extraSpecsListURL(client, flavorID), &r.Body, nil)
+	return
+}
+
+// GetExtraSpec requests the extra-spec with the given key for the given
+// flavor ID.
+func GetExtraSpec(client *gophercloud.ServiceClient, flavorID string, key string) (r GetExtraSpecResult) {
+	_, r.Err = client.Get(extraSpecsGetURL(client, flavorID, key), &r.Body, nil)
+	return
+}
+
+// CreateExtraSpecsOptsBuilder allows extensions to add additional parameters to the
+// CreateExtraSpecs requests.
+type CreateExtraSpecsOptsBuilder interface {
+	ToFlavorExtraSpecsCreateMap() (map[string]interface{}, error)
+}
+
+// ExtraSpecsOpts is a map that contains key-value pairs.
+type ExtraSpecsOpts map[string]string
+
+// ToFlavorExtraSpecsCreateMap assembles a body for a Create request based on
+// the contents of ExtraSpecsOpts.
+func (opts ExtraSpecsOpts) ToFlavorExtraSpecsCreateMap() (map[string]interface{}, error) {
+	return map[string]interface{}{"extra_specs": opts}, nil
+}
+
+// CreateExtraSpecs will create or update the extra-specs key-value pairs for
+// the specified Flavor.
+func CreateExtraSpecs(client *gophercloud.ServiceClient, flavorID string, opts CreateExtraSpecsOptsBuilder) (r CreateExtraSpecsResult) {
+	b, err := opts.ToFlavorExtraSpecsCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(extraSpecsCreateURL(client, flavorID), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// UpdateExtraSpecOptsBuilder allows extensions to add additional parameters to
+// the Update request.
+type UpdateExtraSpecOptsBuilder interface {
+	ToFlavorExtraSpecUpdateMap() (map[string]string, string, error)
+}
+
+// ToFlavorExtraSpecUpdateMap assembles a body for an Update request based on
+// the contents of an ExtraSpecsOpts.
+func (opts ExtraSpecsOpts) ToFlavorExtraSpecUpdateMap() (map[string]string, string, error) {
+	if len(opts) != 1 {
+		err := gophercloud.ErrInvalidInput{}
+		err.Argument = "flavors.ExtraSpecsOpts"
+		err.Info = "Must have one and only one key-value pair"
+		return nil, "", err
+	}
+
+	var key string
+	for k := range opts {
+		key = k
+	}
+
+	return opts, key, nil
+}
+
+// UpdateExtraSpec will update the value of the specified flavor's extra spec
+// for the key in opts.
+func UpdateExtraSpec(client *gophercloud.ServiceClient, flavorID string, opts UpdateExtraSpecOptsBuilder) (r UpdateExtraSpecResult) {
+	b, key, err := opts.ToFlavorExtraSpecUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(extraSpecUpdateURL(client, flavorID, key), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// DeleteExtraSpec will delete the key-value pair with the given key for the given
+// flavor ID.
+func DeleteExtraSpec(client *gophercloud.ServiceClient, flavorID, key string) (r DeleteExtraSpecResult) {
+	_, r.Err = client.Delete(extraSpecDeleteURL(client, flavorID, key), &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// IDFromName is a convenience function that returns a flavor's ID given its
+// name.
 func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
 	count := 0
 	id := ""
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go
index 121abbb8d..525cddaea 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go
@@ -12,16 +12,26 @@ type commonResult struct {
 	gophercloud.Result
 }
 
+// CreateResult is the response of a Create operation. Call its Extract method to
+// interpret it as a Flavor.
 type CreateResult struct {
 	commonResult
 }
 
-// GetResult temporarily holds the response from a Get call.
+// GetResult is the response of a Get operation. Call its Extract method to
+// interpret it as a Flavor.
 type GetResult struct {
 	commonResult
 }
 
-// Extract provides access to the individual Flavor returned by the Get and Create functions.
+// DeleteResult is the result from a Delete operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// Extract provides access to the individual Flavor returned by the Get and
+// Create functions.
 func (r commonResult) Extract() (*Flavor, error) {
 	var s struct {
 		Flavor *Flavor `json:"flavor"`
@@ -30,24 +40,35 @@ func (r commonResult) Extract() (*Flavor, error) {
 	return s.Flavor, err
 }
 
-// Flavor records represent (virtual) hardware configurations for server resources in a region.
+// Flavor represents (virtual) hardware configurations for server resources
+// in a region.
 type Flavor struct {
-	// The Id field contains the flavor's unique identifier.
-	// For example, this identifier will be useful when specifying which hardware configuration to use for a new server instance.
+	// ID is the flavor's unique ID.
 	ID string `json:"id"`
-	// The Disk and RA< fields provide a measure of storage space offered by the flavor, in GB and MB, respectively.
+
+	// Disk is the amount of root disk, measured in GB.
 	Disk int `json:"disk"`
-	RAM  int `json:"ram"`
-	// The Name field provides a human-readable moniker for the flavor.
-	Name       string  `json:"name"`
+
+	// RAM is the amount of memory, measured in MB.
+	RAM int `json:"ram"`
+
+	// Name is the name of the flavor.
+	Name string `json:"name"`
+
+	// RxTxFactor describes bandwidth alterations of the flavor.
 	RxTxFactor float64 `json:"rxtx_factor"`
-	// Swap indicates how much space is reserved for swap.
-	// If not provided, this field will be set to 0.
+
+	// Swap is the amount of swap space, measured in MB.
 	Swap int `json:"swap"`
+
 	// VCPUs indicates how many (virtual) CPUs are available for this flavor.
 	VCPUs int `json:"vcpus"`
+
 	// IsPublic indicates whether the flavor is public.
-	IsPublic bool `json:"is_public"`
+	IsPublic bool `json:"os-flavor-access:is_public"`
+
+	// Ephemeral is the amount of ephemeral disk space, measured in GB.
+	Ephemeral int `json:"OS-FLV-EXT-DATA:ephemeral"`
 }
 
 func (r *Flavor) UnmarshalJSON(b []byte) error {
@@ -82,18 +103,19 @@ func (r *Flavor) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
-// FlavorPage contains a single page of the response from a List call.
+// FlavorPage contains a single page of all flavors from a ListDetail call.
 type FlavorPage struct {
 	pagination.LinkedPageBase
 }
 
-// IsEmpty determines if a page contains any results.
+// IsEmpty determines if a FlavorPage contains any results.
 func (page FlavorPage) IsEmpty() (bool, error) {
 	flavors, err := ExtractFlavors(page)
 	return len(flavors) == 0, err
 }
 
-// NextPageURL uses the response's embedded link reference to navigate to the next page of results.
+// NextPageURL uses the response's embedded link reference to navigate to the
+// next page of results.
 func (page FlavorPage) NextPageURL() (string, error) {
 	var s struct {
 		Links []gophercloud.Link `json:"flavors_links"`
@@ -105,7 +127,8 @@ func (page FlavorPage) NextPageURL() (string, error) {
 	return gophercloud.ExtractNextURL(s.Links)
 }
 
-// ExtractFlavors provides access to the list of flavors in a page acquired from the List operation.
+// ExtractFlavors provides access to the list of flavors in a page acquired
+// from the ListDetail operation.
 func ExtractFlavors(r pagination.Page) ([]Flavor, error) {
 	var s struct {
 		Flavors []Flavor `json:"flavors"`
@@ -113,3 +136,117 @@ func ExtractFlavors(r pagination.Page) ([]Flavor, error) {
 	err := (r.(FlavorPage)).ExtractInto(&s)
 	return s.Flavors, err
 }
+
+// AccessPage contains a single page of all FlavorAccess entries for a flavor.
+type AccessPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty indicates whether an AccessPage is empty.
+func (page AccessPage) IsEmpty() (bool, error) {
+	v, err := ExtractAccesses(page)
+	return len(v) == 0, err
+}
+
+// ExtractAccesses interprets a page of results as a slice of FlavorAccess.
+func ExtractAccesses(r pagination.Page) ([]FlavorAccess, error) {
+	var s struct {
+		FlavorAccesses []FlavorAccess `json:"flavor_access"`
+	}
+	err := (r.(AccessPage)).ExtractInto(&s)
+	return s.FlavorAccesses, err
+}
+
+type accessResult struct {
+	gophercloud.Result
+}
+
+// AddAccessResult is the response of an AddAccess operation. Call its
+// Extract method to interpret it as a slice of FlavorAccess.
+type AddAccessResult struct {
+	accessResult
+}
+
+// RemoveAccessResult is the response of a RemoveAccess operation. Call its
+// Extract method to interpret it as a slice of FlavorAccess.
+type RemoveAccessResult struct {
+	accessResult
+}
+
+// Extract provides access to the result of an access create or delete.
+// The result will be all accesses that the flavor has.
+func (r accessResult) Extract() ([]FlavorAccess, error) {
+	var s struct {
+		FlavorAccesses []FlavorAccess `json:"flavor_access"`
+	}
+	err := r.ExtractInto(&s)
+	return s.FlavorAccesses, err
+}
+
+// FlavorAccess represents an ACL of tenant access to a specific Flavor.
+type FlavorAccess struct {
+	// FlavorID is the unique ID of the flavor.
+	FlavorID string `json:"flavor_id"`
+
+	// TenantID is the unique ID of the tenant.
+	TenantID string `json:"tenant_id"`
+}
+
+// Extract interprets any extraSpecsResult as ExtraSpecs, if possible.
+func (r extraSpecsResult) Extract() (map[string]string, error) {
+	var s struct {
+		ExtraSpecs map[string]string `json:"extra_specs"`
+	}
+	err := r.ExtractInto(&s)
+	return s.ExtraSpecs, err
+}
+
+// extraSpecsResult contains the result of a call for (potentially) multiple
+// key-value pairs. Call its Extract method to interpret it as a
+// map[string]string.
+type extraSpecsResult struct {
+	gophercloud.Result
+}
+
+// ListExtraSpecsResult contains the result of a ListExtraSpecs operation. Call
+// its Extract method to interpret it as a map[string]string.
+type ListExtraSpecsResult struct {
+	extraSpecsResult
+}
+
+// CreateExtraSpecsResult contains the result of a Create operation. Call its
+// Extract method to interpret it as a map[string]string.
+type CreateExtraSpecsResult struct {
+	extraSpecsResult
+}
+
+// extraSpecResult contains the result of a call for an individual
+// key-value pair.
+type extraSpecResult struct {
+	gophercloud.Result
+}
+
+// GetExtraSpecResult contains the result of a Get operation. Call its Extract
+// method to interpret it as a map[string]string.
+type GetExtraSpecResult struct {
+	extraSpecResult
+}
+
+// UpdateExtraSpecResult contains the result of an Update operation. Call its
+// Extract method to interpret it as a map[string]string.
+type UpdateExtraSpecResult struct {
+	extraSpecResult
+}
+
+// DeleteExtraSpecResult contains the result of a Delete operation. Call its
+// ExtractErr method to determine if the call succeeded or failed.
+type DeleteExtraSpecResult struct {
+	gophercloud.ErrResult
+}
+
+// Extract interprets any extraSpecResult as an ExtraSpec, if possible.
+func (r extraSpecResult) Extract() (map[string]string, error) {
+	var s map[string]string
+	err := r.ExtractInto(&s)
+	return s, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go
index 2fc21796f..8620dd78a 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go
@@ -15,3 +15,35 @@ func listURL(client *gophercloud.ServiceClient) string {
 func createURL(client *gophercloud.ServiceClient) string {
 	return client.ServiceURL("flavors")
 }
+
+func deleteURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("flavors", id)
+}
+
+func accessURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("flavors", id, "os-flavor-access")
+}
+
+func accessActionURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("flavors", id, "action")
+}
+
+func extraSpecsListURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("flavors", id, "os-extra_specs")
+}
+
+func extraSpecsGetURL(client *gophercloud.ServiceClient, id, key string) string {
+	return client.ServiceURL("flavors", id, "os-extra_specs", key)
+}
+
+func extraSpecsCreateURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("flavors", id, "os-extra_specs")
+}
+
+func extraSpecUpdateURL(client *gophercloud.ServiceClient, id, key string) string {
+	return client.ServiceURL("flavors", id, "os-extra_specs", key)
+}
+
+func extraSpecDeleteURL(client *gophercloud.ServiceClient, id, key string) string {
+	return client.ServiceURL("flavors", id, "os-extra_specs", key)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go
index 0edaa3f02..22410a79a 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go
@@ -1,7 +1,32 @@
-// Package images provides information and interaction with the image API
-// resource in the OpenStack Compute service.
-//
-// An image is a collection of files used to create or rebuild a server.
-// Operators provide a number of pre-built OS images by default. You may also
-// create custom images from cloud servers you have launched.
+/*
+Package images provides information and interaction with the image resource
+through the OpenStack Compute service.
+
+This API is deprecated and will be removed from a future version of the Nova
+API service.
+
+An image is a collection of files used to create or rebuild a server.
+Operators provide a number of pre-built OS images by default. You may also
+create custom images from cloud servers you have launched.
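GetExtraSpec, added above without a doc-comment example, reads a single key through the same extra-spec URLs. A short sketch, assuming computeClient is an authenticated compute-service client; the flavor ID and key are illustrative:

	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"

	// Extract returns the single key-value pair as a map[string]string.
	extraSpec, err := flavors.GetExtraSpec(computeClient, flavorID, "hw:cpu_policy").Extract()
	if err != nil {
		panic(err)
	}

	fmt.Printf("%+v", extraSpec)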
+
+Example to List Images
+
+	listOpts := images.ListOpts{
+		Limit: 2,
+	}
+
+	allPages, err := images.ListDetail(computeClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allImages, err := images.ExtractImages(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, image := range allImages {
+		fmt.Printf("%+v\n", image)
+	}
+*/
 package images
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go
index df9f1da8f..558b481b9 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go
@@ -6,26 +6,33 @@ import (
 )
 
 // ListOptsBuilder allows extensions to add additional parameters to the
-// List request.
+// ListDetail request.
 type ListOptsBuilder interface {
 	ToImageListQuery() (string, error)
 }
 
-// ListOpts contain options for limiting the number of Images returned from a call to ListDetail.
+// ListOpts contains options for filtering Images returned from a call to ListDetail.
 type ListOpts struct {
-	// When the image last changed status (in date-time format).
+	// ChangesSince filters Images based on the last changed status (in date-time
+	// format).
 	ChangesSince string `q:"changes-since"`
-	// The number of Images to return.
+
+	// Limit limits the number of Images to return.
 	Limit int `q:"limit"`
-	// UUID of the Image at which to set a marker.
+
+	// Marker is an Image UUID at which to set a marker.
 	Marker string `q:"marker"`
-	// The name of the Image.
+
+	// Name is the name of the Image.
	Name string `q:"name"`
-	// The name of the Server (in URL format).
+
+	// Server is the name of the Server (in URL format).
 	Server string `q:"server"`
-	// The current status of the Image.
+
+	// Status is the current status of the Image.
 	Status string `q:"status"`
-	// The value of the type of image (e.g. BASE, SERVER, ALL)
+
+	// Type is the type of image (e.g. BASE, SERVER, ALL).
 	Type string `q:"type"`
 }
 
@@ -50,8 +57,7 @@ func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) paginat
 	})
 }
 
-// Get acquires additional detail about a specific image by ID.
-// Use ExtractImage() to interpret the result as an openstack Image.
+// Get returns data about a specific image by its ID.
 func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
 	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
 	return
@@ -63,7 +69,8 @@ func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
 	return
 }
 
-// IDFromName is a convienience function that returns an image's ID given its name.
+// IDFromName is a convenience function that returns an image's ID given its
+// name.
 func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
 	count := 0
 	id := ""
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go
index f9ebc69e9..70d1018c7 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go
@@ -5,12 +5,14 @@ import (
 	"github.com/gophercloud/gophercloud/pagination"
 )
 
-// GetResult temporarily stores a Get response.
+// GetResult is the response from a Get operation. Call its Extract method to
+// interpret it as an Image.
 type GetResult struct {
 	gophercloud.Result
 }
 
-// DeleteResult represents the result of an image.Delete operation.
+// DeleteResult is the result from a Delete operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
 type DeleteResult struct {
 	gophercloud.ErrResult
 }
@@ -24,44 +26,53 @@ func (r GetResult) Extract() (*Image, error) {
 	return s.Image, err
 }
 
-// Image is used for JSON (un)marshalling.
-// It provides a description of an OS image.
+// Image represents an Image returned by the Compute API.
 type Image struct {
-	// ID contains the image's unique identifier.
+	// ID is the unique ID of an image.
 	ID string
 
+	// Created is the date when the image was created.
 	Created string
 
-	// MinDisk and MinRAM specify the minimum resources a server must provide to be able to install the image.
+	// MinDisk is the minimum amount of disk a flavor must have to be able
+	// to create a server based on the image, measured in GB.
 	MinDisk int
-	MinRAM  int
+
+	// MinRAM is the minimum amount of RAM a flavor must have to be able
+	// to create a server based on the image, measured in MB.
	MinRAM int
 
 	// Name provides a human-readable moniker for the OS image.
 	Name string
 
 	// The Progress and Status fields indicate image-creation status.
-	// Any usable image will have 100% progress.
 	Progress int
-	Status   string
 
+	// Status is the current status of the image.
	Status string
+
+	// Updated is the date when the image was updated.
 	Updated string
 
+	// Metadata provides free-form key/value pairs that further describe the
+	// image.
 	Metadata map[string]interface{}
 }
 
-// ImagePage contains a single page of results from a List operation.
-// Use ExtractImages to convert it into a slice of usable structs.
+// ImagePage contains a single page of all Images returned from a ListDetail
+// operation. Use ExtractImages to convert it into a slice of usable structs.
 type ImagePage struct {
 	pagination.LinkedPageBase
 }
 
-// IsEmpty returns true if a page contains no Image results.
+// IsEmpty returns true if an ImagePage contains no Image results.
 func (page ImagePage) IsEmpty() (bool, error) {
 	images, err := ExtractImages(page)
 	return len(images) == 0, err
 }
 
-// NextPageURL uses the response's embedded link reference to navigate to the next page of results.
+// NextPageURL uses the response's embedded link reference to navigate to the
+// next page of results.
 func (page ImagePage) NextPageURL() (string, error) {
 	var s struct {
 		Links []gophercloud.Link `json:"images_links"`
@@ -73,7 +84,8 @@ func (page ImagePage) NextPageURL() (string, error) {
 	return gophercloud.ExtractNextURL(s.Links)
 }
 
-// ExtractImages converts a page of List results into a slice of usable Image structs.
+// ExtractImages converts a page of List results into a slice of usable Image
+// structs.
 func ExtractImages(r pagination.Page) ([]Image, error) {
 	var s struct {
 		Images []Image `json:"images"`
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go
index fe4567120..3b0ab7836 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go
@@ -1,6 +1,115 @@
-// Package servers provides information and interaction with the server API
-// resource in the OpenStack Compute service.
-//
-// A server is a virtual machine instance in the compute system.
In order for
-// one to be provisioned, a valid flavor and image are required.
+/*
+Package servers provides information and interaction with the server API
+resource in the OpenStack Compute service.
+
+A server is a virtual machine instance in the compute system. In order for
+one to be provisioned, a valid flavor and image are required.
+
+Example to List Servers
+
+	listOpts := servers.ListOpts{
+		AllTenants: true,
+	}
+
+	allPages, err := servers.List(computeClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allServers, err := servers.ExtractServers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, server := range allServers {
+		fmt.Printf("%+v\n", server)
+	}
+
+Example to Create a Server
+
+	createOpts := servers.CreateOpts{
+		Name:      "server_name",
+		ImageRef:  "image-uuid",
+		FlavorRef: "flavor-uuid",
+	}
+
+	server, err := servers.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Server
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+	err := servers.Delete(computeClient, serverID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Force Delete a Server
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+	err := servers.ForceDelete(computeClient, serverID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Reboot a Server
+
+	rebootOpts := servers.RebootOpts{
+		Type: servers.SoftReboot,
+	}
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+
+	err := servers.Reboot(computeClient, serverID, rebootOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Rebuild a Server
+
+	rebuildOpts := servers.RebuildOpts{
+		Name:    "new_name",
+		ImageID: "image-uuid",
+	}
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+
+	server, err := servers.Rebuild(computeClient, serverID, rebuildOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Resize a Server
+
+	resizeOpts := servers.ResizeOpts{
+		FlavorRef: "flavor-uuid",
+	}
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+
+	err := servers.Resize(computeClient, serverID, resizeOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+	err = servers.ConfirmResize(computeClient, serverID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Snapshot a Server
+
+	snapshotOpts := servers.CreateImageOpts{
+		Name: "snapshot_name",
+	}
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+
+	image, err := servers.CreateImage(computeClient, serverID, snapshotOpts).ExtractImageID()
+	if err != nil {
+		panic(err)
+	}
+*/
 package servers
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go
index 961863731..626eb63e9 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go
@@ -21,13 +21,13 @@ type ListOptsBuilder interface {
 // the server attributes you want to see returned. Marker and Limit are used
 // for pagination.
 type ListOpts struct {
-	// A time/date stamp for when the server last changed status.
+	// ChangesSince is a time/date stamp for when the server last changed status.
 	ChangesSince string `q:"changes-since"`
 
-	// Name of the image in URL format.
+	// Image is the name of the image in URL format.
 	Image string `q:"image"`
 
-	// Name of the flavor in URL format.
+	// Flavor is the name of the flavor in URL format.
Flavor string `q:"flavor"` // Name of the server as a string; can be queried with regular expressions. @@ -36,20 +36,25 @@ type ListOpts struct { // underlying database server implemented for Compute. Name string `q:"name"` - // Value of the status of the server so that you can filter on "ACTIVE" for example. + // Status is the value of the status of the server so that you can filter on + // "ACTIVE" for example. Status string `q:"status"` - // Name of the host as a string. + // Host is the name of the host as a string. Host string `q:"host"` - // UUID of the server at which you want to set a marker. + // Marker is a UUID of the server at which you want to set a marker. Marker string `q:"marker"` - // Integer value for the limit of values to return. + // Limit is an integer value for the limit of values to return. Limit int `q:"limit"` - // Bool to show all tenants + // AllTenants is a bool to show all tenants. AllTenants bool `q:"all_tenants"` + + // TenantID lists servers for a particular tenant. + // Setting "AllTenants = true" is required. + TenantID string `q:"tenant_id"` } // ToServerListQuery formats a ListOpts into a query string. @@ -73,15 +78,16 @@ func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pa }) } -// CreateOptsBuilder describes struct types that can be accepted by the Create call. -// The CreateOpts struct in this package does. +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. type CreateOptsBuilder interface { ToServerCreateMap() (map[string]interface{}, error) } -// Network is used within CreateOpts to control a new server's network attachments. +// Network is used within CreateOpts to control a new server's network +// attachments. type Network struct { - // UUID of a nova-network to attach to the newly provisioned server. + // UUID of a network to attach to the newly provisioned server. // Required unless Port is provided. UUID string @@ -89,19 +95,21 @@ type Network struct { // Required unless UUID is provided. Port string - // FixedIP [optional] specifies a fixed IPv4 address to be used on this network. + // FixedIP specifies a fixed IPv4 address to be used on this network. FixedIP string } // Personality is an array of files that are injected into the server at launch. type Personality []*File -// File is used within CreateOpts and RebuildOpts to inject a file into the server at launch. -// File implements the json.Marshaler interface, so when a Create or Rebuild operation is requested, -// json.Marshal will call File's MarshalJSON method. +// File is used within CreateOpts and RebuildOpts to inject a file into the +// server at launch. +// File implements the json.Marshaler interface, so when a Create or Rebuild +// operation is requested, json.Marshal will call File's MarshalJSON method. type File struct { - // Path of the file + // Path of the file. Path string + // Contents of the file. Maximum content size is 255 bytes. Contents []byte } @@ -123,13 +131,13 @@ type CreateOpts struct { // Name is the name to assign to the newly launched server. Name string `json:"name" required:"true"` - // ImageRef [optional; required if ImageName is not provided] is the ID or full - // URL to the image that contains the server's OS and initial state. + // ImageRef [optional; required if ImageName is not provided] is the ID or + // full URL to the image that contains the server's OS and initial state. // Also optional if using the boot-from-volume extension. 
ImageRef string `json:"imageRef"` - // ImageName [optional; required if ImageRef is not provided] is the name of the - // image that contains the server's OS and initial state. + // ImageName [optional; required if ImageRef is not provided] is the name of + // the image that contains the server's OS and initial state. // Also optional if using the boot-from-volume extension. ImageName string `json:"-"` @@ -141,7 +149,8 @@ type CreateOpts struct { // the flavor that describes the server's specs. FlavorName string `json:"-"` - // SecurityGroups lists the names of the security groups to which this server should belong. + // SecurityGroups lists the names of the security groups to which this server + // should belong. SecurityGroups []string `json:"-"` // UserData contains configuration information or scripts to use upon launch. @@ -152,10 +161,12 @@ type CreateOpts struct { AvailabilityZone string `json:"availability_zone,omitempty"` // Networks dictates how this server will be attached to available networks. - // By default, the server will be attached to all isolated networks for the tenant. + // By default, the server will be attached to all isolated networks for the + // tenant. Networks []Network `json:"-"` - // Metadata contains key-value pairs (up to 255 bytes each) to attach to the server. + // Metadata contains key-value pairs (up to 255 bytes each) to attach to the + // server. Metadata map[string]string `json:"metadata,omitempty"` // Personality includes files to inject into the server at launch. @@ -166,7 +177,7 @@ type CreateOpts struct { ConfigDrive *bool `json:"config_drive,omitempty"` // AdminPass sets the root user password. If not set, a randomly-generated - // password will be created and returned in the rponse. + // password will be created and returned in the response. AdminPass string `json:"adminPass,omitempty"` // AccessIPv4 specifies an IPv4 address for the instance. @@ -180,7 +191,8 @@ type CreateOpts struct { ServiceClient *gophercloud.ServiceClient `json:"-"` } -// ToServerCreateMap assembles a request body based on the contents of a CreateOpts. +// ToServerCreateMap assembles a request body based on the contents of a +// CreateOpts. func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) { sc := opts.ServiceClient opts.ServiceClient = nil @@ -274,13 +286,14 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r Create return } -// Delete requests that a server previously provisioned be removed from your account. +// Delete requests that a server previously provisioned be removed from your +// account. func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { _, r.Err = client.Delete(deleteURL(client, id), nil) return } -// ForceDelete forces the deletion of a server +// ForceDelete forces the deletion of a server. func ForceDelete(client *gophercloud.ServiceClient, id string) (r ActionResult) { _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"forceDelete": ""}, nil, nil) return @@ -294,12 +307,14 @@ func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { return } -// UpdateOptsBuilder allows extensions to add additional attributes to the Update request. +// UpdateOptsBuilder allows extensions to add additional attributes to the +// Update request. type UpdateOptsBuilder interface { ToServerUpdateMap() (map[string]interface{}, error) } -// UpdateOpts specifies the base attributes that may be updated on an existing server. 
+// UpdateOpts specifies the base attributes that may be updated on an existing
+// server.
 type UpdateOpts struct {
 	// Name changes the displayed name of the server.
 	// The server host name will *not* change.
@@ -331,7 +346,8 @@ func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder
 	return
 }
 
-// ChangeAdminPassword alters the administrator or root password for a specified server.
+// ChangeAdminPassword alters the administrator or root password for a specified
+// server.
 func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword string) (r ActionResult) {
 	b := map[string]interface{}{
 		"changePassword": map[string]string{
@@ -354,33 +370,38 @@ const (
 	PowerCycle = HardReboot
 )
 
-// RebootOptsBuilder is an interface that options must satisfy in order to be
-// used when rebooting a server instance
+// RebootOptsBuilder allows extensions to add additional parameters to the
+// reboot request.
 type RebootOptsBuilder interface {
 	ToServerRebootMap() (map[string]interface{}, error)
 }
 
-// RebootOpts satisfies the RebootOptsBuilder interface
+// RebootOpts provides options to the reboot request.
 type RebootOpts struct {
+	// Type is the type of reboot to perform on the server.
 	Type RebootMethod `json:"type" required:"true"`
 }
 
-// ToServerRebootMap allows RebootOpts to satisfiy the RebootOptsBuilder
-// interface
+// ToServerRebootMap builds a body for the reboot request.
 func (opts *RebootOpts) ToServerRebootMap() (map[string]interface{}, error) {
 	return gophercloud.BuildRequestBody(opts, "reboot")
 }
 
-// Reboot requests that a given server reboot.
-// Two methods exist for rebooting a server:
-//
-// HardReboot (aka PowerCycle) starts the server instance by physically cutting power to the machine, or if a VM,
-// terminating it at the hypervisor level.
-// It's done. Caput. Full stop.
-// Then, after a brief while, power is rtored or the VM instance rtarted.
-//
-// SoftReboot (aka OSReboot) simply tells the OS to rtart under its own procedur.
-// E.g., in Linux, asking it to enter runlevel 6, or executing "sudo shutdown -r now", or by asking Windows to rtart the machine.
+/*
+	Reboot requests that a given server reboot.
+
+	Two methods exist for rebooting a server:
+
+	HardReboot (aka PowerCycle) starts the server instance by physically cutting
+	power to the machine, or if a VM, terminating it at the hypervisor level.
+	It's done. Caput. Full stop.
+	Then, after a brief while, power is restored or the VM instance restarted.
+
+	SoftReboot (aka OSReboot) simply tells the OS to restart under its own
+	procedure.
+	E.g., in Linux, asking it to enter runlevel 6, or executing
+	"sudo shutdown -r now", or by asking Windows to restart the machine.
+*/
 func Reboot(client *gophercloud.ServiceClient, id string, opts RebootOptsBuilder) (r ActionResult) {
 	b, err := opts.ToServerRebootMap()
 	if err != nil {
@@ -391,31 +412,43 @@ func Reboot(client *gophercloud.ServiceClient, id string, opts RebootOptsBuilder
 	return
 }
 
-// RebuildOptsBuilder is an interface that allows extensions to override the
-// default behaviour of rebuild options
+// RebuildOptsBuilder allows extensions to provide additional parameters to the
+// rebuild request.
 type RebuildOptsBuilder interface {
 	ToServerRebuildMap() (map[string]interface{}, error)
 }
 
 // RebuildOpts represents the configuration options used in a server rebuild
-// operation
+// operation.
 type RebuildOpts struct {
-	// The server's admin password
+	// AdminPass is the server's admin password.
 	AdminPass string `json:"adminPass,omitempty"`
-	// The ID of the image you want your server to be provisioned on
-	ImageID   string `json:"imageRef"`
+
+	// ImageID is the ID of the image you want your server to be provisioned on.
+	ImageID string `json:"imageRef"`
+
+	// ImageName is the readable name of an image.
 	ImageName string `json:"-"`
 
+	// Name to set the server to.
 	Name string `json:"name,omitempty"`
 
+	// AccessIPv4 [optional] provides a new IPv4 address for the instance.
 	AccessIPv4 string `json:"accessIPv4,omitempty"`
 
+	// AccessIPv6 [optional] provides a new IPv6 address for the instance.
 	AccessIPv6 string `json:"accessIPv6,omitempty"`
-	// Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server.
+
+	// Metadata [optional] contains key-value pairs (up to 255 bytes each)
+	// to attach to the server.
 	Metadata map[string]string `json:"metadata,omitempty"`
 
+	// Personality [optional] includes files to inject into the server at launch.
 	// Rebuild will base64-encode file contents for you.
-	Personality   Personality `json:"personality,omitempty"`
+	Personality Personality `json:"personality,omitempty"`
+
+	// ServiceClient will allow calls to be made to retrieve an image or
+	// flavor ID by name.
 	ServiceClient *gophercloud.ServiceClient `json:"-"`
 }
 
@@ -458,31 +491,34 @@ func Rebuild(client *gophercloud.ServiceClient, id string, opts RebuildOptsBuild
 	return
 }
 
-// ResizeOptsBuilder is an interface that allows extensions to override the default structure of
-// a Resize request.
+// ResizeOptsBuilder allows extensions to add additional parameters to the
+// resize request.
 type ResizeOptsBuilder interface {
 	ToServerResizeMap() (map[string]interface{}, error)
 }
 
-// ResizeOpts represents the configuration options used to control a Resize operation.
+// ResizeOpts represents the configuration options used to control a Resize
+// operation.
 type ResizeOpts struct {
 	// FlavorRef is the ID of the flavor you wish your server to become.
 	FlavorRef string `json:"flavorRef" required:"true"`
 }
 
-// ToServerResizeMap formats a ResizeOpts as a map that can be used as a JSON request body for the
-// Resize request.
+// ToServerResizeMap formats a ResizeOpts as a map that can be used as a JSON
+// request body for the Resize request.
 func (opts ResizeOpts) ToServerResizeMap() (map[string]interface{}, error) {
 	return gophercloud.BuildRequestBody(opts, "resize")
 }
 
 // Resize instructs the provider to change the flavor of the server.
+//
 // Note that this implies rebuilding it.
+//
 // Unfortunately, one cannot pass rebuild parameters to the resize function.
-// When the resize completes, the server will be in RESIZE_VERIFY state.
-// While in this state, you can explore the use of the new server's configuration.
-// If you like it, call ConfirmResize() to commit the resize permanently.
-// Otherwise, call RevertResize() to restore the old configuration.
+// When the resize completes, the server will be in VERIFY_RESIZE state.
+// While in this state, you can explore the use of the new server's
+// configuration. If you like it, call ConfirmResize() to commit the resize
+// permanently. Otherwise, call RevertResize() to restore the old configuration.
 func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder) (r ActionResult) {
 	b, err := opts.ToServerResizeMap()
 	if err != nil {
@@ -542,8 +578,8 @@ func Rescue(client *gophercloud.ServiceClient, id string, opts RescueOptsBuilder
 	return
 }
 
-// ResetMetadataOptsBuilder allows extensions to add additional parameters to the
-// Reset request.
+// ResetMetadataOptsBuilder allows extensions to add additional parameters to
+// the Reset request.
 type ResetMetadataOptsBuilder interface {
 	ToMetadataResetMap() (map[string]interface{}, error)
 }
@@ -551,20 +587,23 @@ type ResetMetadataOptsBuilder interface {
 // MetadataOpts is a map that contains key-value pairs.
 type MetadataOpts map[string]string
 
-// ToMetadataResetMap assembles a body for a Reset request based on the contents of a MetadataOpts.
+// ToMetadataResetMap assembles a body for a Reset request based on the contents
+// of a MetadataOpts.
 func (opts MetadataOpts) ToMetadataResetMap() (map[string]interface{}, error) {
 	return map[string]interface{}{"metadata": opts}, nil
 }
 
-// ToMetadataUpdateMap assembles a body for an Update request based on the contents of a MetadataOpts.
+// ToMetadataUpdateMap assembles a body for an Update request based on the
+// contents of a MetadataOpts.
 func (opts MetadataOpts) ToMetadataUpdateMap() (map[string]interface{}, error) {
 	return map[string]interface{}{"metadata": opts}, nil
 }
 
-// ResetMetadata will create multiple new key-value pairs for the given server ID.
-// Note: Using this operation will erase any already-existing metadata and create
-// the new metadata provided. To keep any already-existing metadata, use the
-// UpdateMetadatas or UpdateMetadata function.
+// ResetMetadata will create multiple new key-value pairs for the given server
+// ID.
+// Note: Using this operation will erase any already-existing metadata and
+// create the new metadata provided. To keep any already-existing metadata,
+// use the UpdateMetadatas or UpdateMetadata function.
 func ResetMetadata(client *gophercloud.ServiceClient, id string, opts ResetMetadataOptsBuilder) (r ResetMetadataResult) {
 	b, err := opts.ToMetadataResetMap()
 	if err != nil {
@@ -583,15 +622,15 @@ func Metadata(client *gophercloud.ServiceClient, id string) (r GetMetadataResult
 	return
 }
 
-// UpdateMetadataOptsBuilder allows extensions to add additional parameters to the
-// Create request.
+// UpdateMetadataOptsBuilder allows extensions to add additional parameters to
+// the Create request.
 type UpdateMetadataOptsBuilder interface {
 	ToMetadataUpdateMap() (map[string]interface{}, error)
 }
 
-// UpdateMetadata updates (or creates) all the metadata specified by opts for the given server ID.
-// This operation does not affect already-existing metadata that is not specified
-// by opts.
+// UpdateMetadata updates (or creates) all the metadata specified by opts for
+// the given server ID. This operation does not affect already-existing metadata
+// that is not specified by opts.
 func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) (r UpdateMetadataResult) {
 	b, err := opts.ToMetadataUpdateMap()
 	if err != nil {
@@ -613,7 +652,8 @@ type MetadatumOptsBuilder interface {
 // MetadatumOpts is a map of length one that contains a key-value pair.
 type MetadatumOpts map[string]string
 
-// ToMetadatumCreateMap assembles a body for a Create request based on the contents of a MetadataumOpts.
+// ToMetadatumCreateMap assembles a body for a Create request based on the
+// contents of a MetadatumOpts.
 func (opts MetadatumOpts) ToMetadatumCreateMap() (map[string]interface{}, string, error) {
 	if len(opts) != 1 {
 		err := gophercloud.ErrInvalidInput{}
@@ -629,7 +669,8 @@ func (opts MetadatumOpts) ToMetadatumCreateMap() (map[string]interface{}, string
 	return metadatum, key, nil
 }
 
-// CreateMetadatum will create or update the key-value pair with the given key for the given server ID.
+// CreateMetadatum will create or update the key-value pair with the given key
+// for the given server ID.
 func CreateMetadatum(client *gophercloud.ServiceClient, id string, opts MetadatumOptsBuilder) (r CreateMetadatumResult) {
 	b, key, err := opts.ToMetadatumCreateMap()
 	if err != nil {
@@ -642,53 +683,60 @@ func CreateMetadatum(client *gophercloud.ServiceClient, id string, opts Metadatu
 	return
 }
 
-// Metadatum requests the key-value pair with the given key for the given server ID.
+// Metadatum requests the key-value pair with the given key for the given
+// server ID.
 func Metadatum(client *gophercloud.ServiceClient, id, key string) (r GetMetadatumResult) {
 	_, r.Err = client.Get(metadatumURL(client, id, key), &r.Body, nil)
 	return
 }
 
-// DeleteMetadatum will delete the key-value pair with the given key for the given server ID.
+// DeleteMetadatum will delete the key-value pair with the given key for the
+// given server ID.
 func DeleteMetadatum(client *gophercloud.ServiceClient, id, key string) (r DeleteMetadatumResult) {
 	_, r.Err = client.Delete(metadatumURL(client, id, key), nil)
 	return
 }
 
-// ListAddresses makes a request against the API to list the servers IP addresses.
+// ListAddresses makes a request against the API to list the server's IP
+// addresses.
 func ListAddresses(client *gophercloud.ServiceClient, id string) pagination.Pager {
 	return pagination.NewPager(client, listAddressesURL(client, id), func(r pagination.PageResult) pagination.Page {
 		return AddressPage{pagination.SinglePageBase(r)}
 	})
 }
 
-// ListAddressesByNetwork makes a request against the API to list the servers IP addresses
-// for the given network.
+// ListAddressesByNetwork makes a request against the API to list the server's IP
+// addresses for the given network.
 func ListAddressesByNetwork(client *gophercloud.ServiceClient, id, network string) pagination.Pager {
 	return pagination.NewPager(client, listAddressesByNetworkURL(client, id, network), func(r pagination.PageResult) pagination.Page {
 		return NetworkAddressPage{pagination.SinglePageBase(r)}
 	})
 }
 
-// CreateImageOptsBuilder is the interface types must satisfy in order to be
-// used as CreateImage options
+// CreateImageOptsBuilder allows extensions to add additional parameters to the
+// CreateImage request.
 type CreateImageOptsBuilder interface {
 	ToServerCreateImageMap() (map[string]interface{}, error)
 }
 
-// CreateImageOpts satisfies the CreateImageOptsBuilder
+// CreateImageOpts provides options to pass to the CreateImage request.
 type CreateImageOpts struct {
-	// Name of the image/snapshot
+	// Name of the image/snapshot.
 	Name string `json:"name" required:"true"`
-	// Metadata contains key-value pairs (up to 255 bytes each) to attach to the created image.
+
+	// Metadata contains key-value pairs (up to 255 bytes each) to attach to
+	// the created image.
 	Metadata map[string]string `json:"metadata,omitempty"`
 }
 
-// ToServerCreateImageMap formats a CreateImageOpts structure into a request body.
+// ToServerCreateImageMap formats a CreateImageOpts structure into a request
+// body.
 func (opts CreateImageOpts) ToServerCreateImageMap() (map[string]interface{}, error) {
 	return gophercloud.BuildRequestBody(opts, "createImage")
 }
 
-// CreateImage makes a request against the nova API to schedule an image to be created of the server
+// CreateImage makes a request against the nova API to schedule an image of
+// the server to be created.
 func CreateImage(client *gophercloud.ServiceClient, id string, opts CreateImageOptsBuilder) (r CreateImageResult) {
 	b, err := opts.ToServerCreateImageMap()
 	if err != nil {
@@ -703,7 +751,8 @@ func CreateImage(client *gophercloud.ServiceClient, id string, opts CreateImageO
 	return
 }
 
-// IDFromName is a convienience function that returns a server's ID given its name.
+// IDFromName is a convenience function that returns a server's ID given its
+// name.
 func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
 	count := 0
 	id := ""
@@ -734,7 +783,8 @@ func IDFromName(client *gophercloud.ServiceClient, name string) (string, error)
 	}
 }
 
-// GetPassword makes a request against the nova API to get the encrypted administrative password.
+// GetPassword makes a request against the nova API to get the encrypted
+// administrative password.
 func GetPassword(client *gophercloud.ServiceClient, serverId string) (r GetPasswordResult) {
 	_, r.Err = client.Get(passwordURL(client, serverId), &r.Body, nil)
 	return
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go
index 1ae1e91c7..c6c1ff43f 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go
@@ -32,54 +32,64 @@ func ExtractServersInto(r pagination.Page, v interface{}) error {
 	return r.(ServerPage).Result.ExtractIntoSlicePtr(v, "servers")
 }
 
-// CreateResult temporarily contains the response from a Create call.
+// CreateResult is the response from a Create operation. Call its Extract
+// method to interpret it as a Server.
 type CreateResult struct {
 	serverResult
 }
 
-// GetResult temporarily contains the response from a Get call.
+// GetResult is the response from a Get operation. Call its Extract
+// method to interpret it as a Server.
 type GetResult struct {
 	serverResult
 }
 
-// UpdateResult temporarily contains the response from an Update call.
+// UpdateResult is the response from an Update operation. Call its Extract
+// method to interpret it as a Server.
 type UpdateResult struct {
 	serverResult
 }
 
-// DeleteResult temporarily contains the response from a Delete call.
+// DeleteResult is the response from a Delete operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
 type DeleteResult struct {
 	gophercloud.ErrResult
 }
 
-// RebuildResult temporarily contains the response from a Rebuild call.
+// RebuildResult is the response from a Rebuild operation. Call its Extract
+// method to interpret it as a Server.
 type RebuildResult struct {
 	serverResult
 }
 
-// ActionResult represents the result of server action operations, like reboot
+// ActionResult represents the result of server action operations, like reboot.
+// Call its ExtractErr method to determine if the action succeeded or failed.
 type ActionResult struct {
 	gophercloud.ErrResult
 }
 
-// RescueResult represents the result of a server rescue operation
+// RescueResult is the response from a Rescue operation.
Call its ExtractErr +// method to determine if the call succeeded or failed. type RescueResult struct { ActionResult } -// CreateImageResult represents the result of an image creation operation +// CreateImageResult is the response from a CreateImage operation. Call its +// ExtractImageID method to retrieve the ID of the newly created image. type CreateImageResult struct { gophercloud.Result } // GetPasswordResult represent the result of a get os-server-password operation. +// Call its ExtractPassword method to retrieve the password. type GetPasswordResult struct { gophercloud.Result } // ExtractPassword gets the encrypted password. // If privateKey != nil the password is decrypted with the private key. -// If privateKey == nil the encrypted password is returned and can be decrypted with: +// If privateKey == nil the encrypted password is returned and can be decrypted +// with: // echo '' | base64 -D | openssl rsautl -decrypt -inkey func (r GetPasswordResult) ExtractPassword(privateKey *rsa.PrivateKey) (string, error) { var s struct { @@ -107,7 +117,7 @@ func decryptPassword(encryptedPassword string, privateKey *rsa.PrivateKey) (stri return string(password), nil } -// ExtractImageID gets the ID of the newly created server image from the header +// ExtractImageID gets the ID of the newly created server image from the header. func (r CreateImageResult) ExtractImageID() (string, error) { if r.Err != nil { return "", r.Err @@ -133,45 +143,84 @@ func (r RescueResult) Extract() (string, error) { return s.AdminPass, err } -// Server exposes only the standard OpenStack fields corresponding to a given server on the user's account. +// Server represents a server/instance in the OpenStack cloud. type Server struct { - // ID uniquely identifies this server amongst all other servers, including those not accessible to the current tenant. + // ID uniquely identifies this server amongst all other servers, + // including those not accessible to the current tenant. ID string `json:"id"` + // TenantID identifies the tenant owning this server resource. TenantID string `json:"tenant_id"` + // UserID uniquely identifies the user account owning the tenant. UserID string `json:"user_id"` + // Name contains the human-readable name for the server. Name string `json:"name"` - // Updated and Created contain ISO-8601 timestamps of when the state of the server last changed, and when it was created. + + // Updated and Created contain ISO-8601 timestamps of when the state of the + // server last changed, and when it was created. Updated time.Time `json:"updated"` Created time.Time `json:"created"` - HostID string `json:"hostid"` - // Status contains the current operational status of the server, such as IN_PROGRESS or ACTIVE. + + // HostID is the host where the server is located in the cloud. + HostID string `json:"hostid"` + + // Status contains the current operational status of the server, + // such as IN_PROGRESS or ACTIVE. Status string `json:"status"` + // Progress ranges from 0..100. // A request made against the server completes only once Progress reaches 100. Progress int `json:"progress"` - // AccessIPv4 and AccessIPv6 contain the IP addresses of the server, suitable for remote access for administration. + + // AccessIPv4 and AccessIPv6 contain the IP addresses of the server, + // suitable for remote access for administration. AccessIPv4 string `json:"accessIPv4"` AccessIPv6 string `json:"accessIPv6"` - // Image refers to a JSON object, which itself indicates the OS image used to deploy the server. 
+
+	// Image refers to a JSON object, which itself indicates the OS image used to
+	// deploy the server.
 	Image map[string]interface{} `json:"-"`
-	// Flavor refers to a JSON object, which itself indicates the hardware configuration of the deployed server.
+
+	// Flavor refers to a JSON object, which itself indicates the hardware
+	// configuration of the deployed server.
 	Flavor map[string]interface{} `json:"flavor"`
-	// Addresses includes a list of all IP addresses assigned to the server, keyed by pool.
+
+	// Addresses includes a list of all IP addresses assigned to the server,
+	// keyed by pool.
 	Addresses map[string]interface{} `json:"addresses"`
-	// Metadata includes a list of all user-specified key-value pairs attached to the server.
+
+	// Metadata includes a list of all user-specified key-value pairs attached
+	// to the server.
 	Metadata map[string]string `json:"metadata"`
-	// Links includes HTTP references to the itself, useful for passing along to other APIs that might want a server reference.
+
+	// Links includes HTTP references to the server itself, useful for passing
+	// along to other APIs that might want a server reference.
 	Links []interface{} `json:"links"`
+
 	// KeyName indicates which public key was injected into the server on launch.
 	KeyName string `json:"key_name"`
-	// AdminPass will generally be empty (""). However, it will contain the administrative password chosen when provisioning a new server without a set AdminPass setting in the first place.
+
+	// AdminPass will generally be empty (""). However, it will contain the
+	// administrative password chosen when provisioning a new server without a
+	// set AdminPass setting in the first place.
 	// Note that this is the ONLY time this field will be valid.
 	AdminPass string `json:"adminPass"`
-	// SecurityGroups includes the security groups that this instance has applied to it
+
+	// SecurityGroups includes the security groups that this instance has applied
+	// to it.
 	SecurityGroups []map[string]interface{} `json:"security_groups"`
+
+	// Fault contains failure information about a server.
+	Fault Fault `json:"fault"`
+}
+
+type Fault struct {
+	Code    int       `json:"code"`
+	Created time.Time `json:"created"`
+	Details string    `json:"details"`
+	Message string    `json:"message"`
 }
 
 func (r *Server) UnmarshalJSON(b []byte) error {
@@ -200,9 +249,10 @@ func (r *Server) UnmarshalJSON(b []byte) error {
 	return err
 }
 
-// ServerPage abstracts the raw results of making a List() request against the API.
-// As OpenStack extensions may freely alter the response bodies of structures returned to the client, you may only safely access the
-// data provided through the ExtractServers call.
+// ServerPage abstracts the raw results of making a List() request against
+// the API. As OpenStack extensions may freely alter the response bodies of
+// structures returned to the client, you may only safely access the data
+// provided through the ExtractServers call.
 type ServerPage struct {
 	pagination.LinkedPageBase
 }
 
@@ -213,7 +263,8 @@ func (r ServerPage) IsEmpty() (bool, error) {
 	return len(s) == 0, err
 }
 
-// NextPageURL uses the response's embedded link reference to navigate to the next page of results.
+// NextPageURL uses the response's embedded link reference to navigate to the
+// next page of results.
func (r ServerPage) NextPageURL() (string, error) {
 	var s struct {
 		Links []gophercloud.Link `json:"servers_links"`
@@ -225,49 +276,59 @@ func (r ServerPage) NextPageURL() (string, error) {
 	return gophercloud.ExtractNextURL(s.Links)
 }
 
-// ExtractServers interprets the results of a single page from a List() call, producing a slice of Server entities.
+// ExtractServers interprets the results of a single page from a List() call,
+// producing a slice of Server entities.
 func ExtractServers(r pagination.Page) ([]Server, error) {
 	var s []Server
 	err := ExtractServersInto(r, &s)
 	return s, err
 }
 
-// MetadataResult contains the result of a call for (potentially) multiple key-value pairs.
+// MetadataResult contains the result of a call for (potentially) multiple
+// key-value pairs. Call its Extract method to interpret it as a
+// map[string]interface{}.
 type MetadataResult struct {
 	gophercloud.Result
 }
 
-// GetMetadataResult temporarily contains the response from a metadata Get call.
+// GetMetadataResult contains the result of a Get operation. Call its Extract
+// method to interpret it as a map[string]interface{}.
 type GetMetadataResult struct {
 	MetadataResult
 }
 
-// ResetMetadataResult temporarily contains the response from a metadata Reset call.
+// ResetMetadataResult contains the result of a Reset operation. Call its
+// Extract method to interpret it as a map[string]interface{}.
 type ResetMetadataResult struct {
 	MetadataResult
 }
 
-// UpdateMetadataResult temporarily contains the response from a metadata Update call.
+// UpdateMetadataResult contains the result of an Update operation. Call its
+// Extract method to interpret it as a map[string]interface{}.
 type UpdateMetadataResult struct {
 	MetadataResult
 }
 
-// MetadatumResult contains the result of a call for individual a single key-value pair.
+// MetadatumResult contains the result of a call for a single key-value pair.
 type MetadatumResult struct {
 	gophercloud.Result
 }
 
-// GetMetadatumResult temporarily contains the response from a metadatum Get call.
+// GetMetadatumResult contains the result of a Get operation. Call its Extract
+// method to interpret it as a map[string]interface{}.
 type GetMetadatumResult struct {
 	MetadatumResult
 }
 
-// CreateMetadatumResult temporarily contains the response from a metadatum Create call.
+// CreateMetadatumResult contains the result of a Create operation. Call its
+// Extract method to interpret it as a map[string]interface{}.
 type CreateMetadatumResult struct {
 	MetadatumResult
 }
 
-// DeleteMetadatumResult temporarily contains the response from a metadatum Delete call.
+// DeleteMetadatumResult contains the result of a Delete operation. Call its
+// ExtractErr method to determine if the call succeeded or failed.
 type DeleteMetadatumResult struct {
 	gophercloud.ErrResult
 }
 
@@ -296,9 +357,10 @@ type Address struct {
 	Address string `json:"addr"`
 }
 
-// AddressPage abstracts the raw results of making a ListAddresses() request against the API.
-// As OpenStack extensions may freely alter the response bodies of structures returned
-// to the client, you may only safely access the data provided through the ExtractAddresses call.
+// AddressPage abstracts the raw results of making a ListAddresses() request
+// against the API. As OpenStack extensions may freely alter the response bodies
+// of structures returned to the client, you may only safely access the data
+// provided through the ExtractAddresses call.
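A minimal usage sketch of the address-listing API documented above, written in
the style of this package's own doc examples; computeClient and serverID are
assumed placeholders, not part of the patch:

	// Drain the pager returned by ListAddresses into a single page set.
	allPages, err := servers.ListAddresses(computeClient, serverID).AllPages()
	if err != nil {
		panic(err)
	}

	// Interpret the page as a map of address pools.
	addressPools, err := servers.ExtractAddresses(allPages)
	if err != nil {
		panic(err)
	}

	for pool, addresses := range addressPools {
		fmt.Printf("%s: %+v\n", pool, addresses)
	}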
type AddressPage struct { pagination.SinglePageBase } @@ -309,8 +371,8 @@ func (r AddressPage) IsEmpty() (bool, error) { return len(addresses) == 0, err } -// ExtractAddresses interprets the results of a single page from a ListAddresses() call, -// producing a map of addresses. +// ExtractAddresses interprets the results of a single page from a +// ListAddresses() call, producing a map of addresses. func ExtractAddresses(r pagination.Page) (map[string][]Address, error) { var s struct { Addresses map[string][]Address `json:"addresses"` @@ -319,9 +381,11 @@ func ExtractAddresses(r pagination.Page) (map[string][]Address, error) { return s.Addresses, err } -// NetworkAddressPage abstracts the raw results of making a ListAddressesByNetwork() request against the API. -// As OpenStack extensions may freely alter the response bodies of structures returned -// to the client, you may only safely access the data provided through the ExtractAddresses call. +// NetworkAddressPage abstracts the raw results of making a +// ListAddressesByNetwork() request against the API. +// As OpenStack extensions may freely alter the response bodies of structures +// returned to the client, you may only safely access the data provided through +// the ExtractAddresses call. type NetworkAddressPage struct { pagination.SinglePageBase } @@ -332,8 +396,8 @@ func (r NetworkAddressPage) IsEmpty() (bool, error) { return len(addresses) == 0, err } -// ExtractNetworkAddresses interprets the results of a single page from a ListAddressesByNetwork() call, -// producing a slice of addresses. +// ExtractNetworkAddresses interprets the results of a single page from a +// ListAddressesByNetwork() call, producing a slice of addresses. func ExtractNetworkAddresses(r pagination.Page) ([]Address, error) { var s map[string][]Address err := (r.(NetworkAddressPage)).ExtractInto(&s) diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go index 494a0e4dc..cadef0545 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go @@ -2,8 +2,9 @@ package servers import "github.com/gophercloud/gophercloud" -// WaitForStatus will continually poll a server until it successfully transitions to a specified -// status. It will do this for at most the number of seconds specified. +// WaitForStatus will continually poll a server until it successfully +// transitions to a specified status. It will do this for at most the number +// of seconds specified. func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { return gophercloud.WaitFor(secs, func() (bool, error) { current, err := Get(c, id).Extract() diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go new file mode 100644 index 000000000..cedf1f4d3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go @@ -0,0 +1,14 @@ +/* +Package openstack contains resources for the individual OpenStack projects +supported in Gophercloud. It also includes functions to authenticate to an +OpenStack cloud and for provisioning various service-level clients. 
+
+Example of Creating a Service Client
+
+	ao, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.AuthenticatedClient(ao)
+	client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+*/
+package openstack
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go
index ea37f5b27..12c8aebcf 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go
@@ -6,12 +6,16 @@ import (
 	tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
 )
 
-// V2EndpointURL discovers the endpoint URL for a specific service from a ServiceCatalog acquired
-// during the v2 identity service. The specified EndpointOpts are used to identify a unique,
-// unambiguous endpoint to return. It's an error both when multiple endpoints match the provided
-// criteria and when none do. The minimum that can be specified is a Type, but you will also often
-// need to specify a Name and/or a Region depending on what's available on your OpenStack
-// deployment.
+/*
+V2EndpointURL discovers the endpoint URL for a specific service from a
+ServiceCatalog acquired during the v2 identity service.
+
+The specified EndpointOpts are used to identify a unique, unambiguous endpoint
+to return. It's an error both when multiple endpoints match the provided
+criteria and when none do. The minimum that can be specified is a Type, but you
+will also often need to specify a Name and/or a Region depending on what's
+available on your OpenStack deployment.
+*/
 func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) {
 	// Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided.
 	var endpoints = make([]tokens2.Endpoint, 0, 1)
@@ -54,12 +58,16 @@ func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpt
 	return "", err
 }
 
-// V3EndpointURL discovers the endpoint URL for a specific service from a Catalog acquired
-// during the v3 identity service. The specified EndpointOpts are used to identify a unique,
-// unambiguous endpoint to return. It's an error both when multiple endpoints match the provided
-// criteria and when none do. The minimum that can be specified is a Type, but you will also often
-// need to specify a Name and/or a Region depending on what's available on your OpenStack
-// deployment.
+/*
+V3EndpointURL discovers the endpoint URL for a specific service from a Catalog
+acquired during the v3 identity service.
+
+The specified EndpointOpts are used to identify a unique, unambiguous endpoint
+to return. It's an error both when multiple endpoints match the provided
+criteria and when none do. The minimum that can be specified is a Type, but you
+will also often need to specify a Name and/or a Region depending on what's
+available on your OpenStack deployment.
+*/
 func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) {
 	// Extract Endpoints from the catalog entries that match the requested Type, Interface,
 	// Name if provided, and Region if provided.
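A hedged sketch of resolving an endpoint with V3EndpointURL: identityClient and
authOptions are assumed to already exist, and the service type and region values
are illustrative only:

	// Extract the service catalog from a freshly created v3 token.
	catalog, err := tokens.Create(identityClient, &authOptions).ExtractServiceCatalog()
	if err != nil {
		panic(err)
	}

	// Resolve a single, unambiguous endpoint from the catalog.
	endpointURL, err := openstack.V3EndpointURL(catalog, gophercloud.EndpointOpts{
		Type:         "compute",
		Region:       "RegionOne",
		Availability: gophercloud.AvailabilityPublic,
	})
	if err != nil {
		panic(err)
	}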
@@ -76,7 +84,7 @@ func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpt
 			return "", err
 		}
 		if (opts.Availability == gophercloud.Availability(endpoint.Interface)) &&
-			(opts.Region == "" || endpoint.Region == opts.Region) {
+			(opts.Region == "" || endpoint.Region == opts.Region || endpoint.RegionID == opts.Region) {
 			endpoints = append(endpoints, endpoint)
 		}
 	}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go
index 0c2d49d56..45623369e 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go
@@ -1,7 +1,65 @@
-// Package tenants provides information and interaction with the
-// tenants API resource for the OpenStack Identity service.
-//
-// See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
-// and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants
-// for more information.
+/*
+Package tenants provides information and interaction with the
+tenants API resource for the OpenStack Identity service.
+
+See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
+and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants
+for more information.
+
+Example to List Tenants
+
+	listOpts := tenants.ListOpts{
+		Limit: 2,
+	}
+
+	allPages, err := tenants.List(identityClient, &listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allTenants, err := tenants.ExtractTenants(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, tenant := range allTenants {
+		fmt.Printf("%+v\n", tenant)
+	}
+
+Example to Create a Tenant
+
+	createOpts := tenants.CreateOpts{
+		Name:        "tenant_name",
+		Description: "this is a tenant",
+		Enabled:     gophercloud.Enabled,
+	}
+
+	tenant, err := tenants.Create(identityClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Tenant
+
+	tenantID := "e6db6ed6277c461a853458589063b295"
+
+	updateOpts := tenants.UpdateOpts{
+		Description: "this is a new description",
+		Enabled:     gophercloud.Disabled,
+	}
+
+	tenant, err := tenants.Update(identityClient, tenantID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Tenant
+
+	tenantID := "e6db6ed6277c461a853458589063b295"
+
+	err := tenants.Delete(identityClient, tenantID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
 package tenants
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go
index b6550ce60..60f58c8ce 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go
@@ -9,6 +9,7 @@ import (
 type ListOpts struct {
 	// Marker is the ID of the last Tenant on the previous page.
 	Marker string `q:"marker"`
+
 	// Limit specifies the page size.
 	Limit int `q:"limit"`
 }
@@ -32,18 +33,22 @@ func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager {
 type CreateOpts struct {
 	// Name is the name of the tenant.
 	Name string `json:"name" required:"true"`
+
 	// Description is the description of the tenant.
 	Description string `json:"description,omitempty"`
+
 	// Enabled sets the tenant status to enabled or disabled.
Enabled *bool `json:"enabled,omitempty"` } -// CreateOptsBuilder describes struct types that can be accepted by the Create call. +// CreateOptsBuilder enables extensions to add additional parameters to the +// Create request. type CreateOptsBuilder interface { ToTenantCreateMap() (map[string]interface{}, error) } -// ToTenantCreateMap assembles a request body based on the contents of a CreateOpts. +// ToTenantCreateMap assembles a request body based on the contents of +// a CreateOpts. func (opts CreateOpts) ToTenantCreateMap() (map[string]interface{}, error) { return gophercloud.BuildRequestBody(opts, "tenant") } @@ -67,17 +72,21 @@ func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { return } -// UpdateOptsBuilder allows extensions to add additional attributes to the Update request. +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. type UpdateOptsBuilder interface { ToTenantUpdateMap() (map[string]interface{}, error) } -// UpdateOpts specifies the base attributes that may be updated on an existing server. +// UpdateOpts specifies the base attributes that may be updated on an existing +// tenant. type UpdateOpts struct { // Name is the name of the tenant. Name string `json:"name,omitempty"` + // Description is the description of the tenant. Description string `json:"description,omitempty"` + // Enabled sets the tenant status to enabled or disabled. Enabled *bool `json:"enabled,omitempty"` } @@ -100,7 +109,7 @@ func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder return } -// Delete is the operation responsible for permanently deleting an API tenant. +// Delete is the operation responsible for permanently deleting a tenant. func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { _, r.Err = client.Delete(deleteURL(client, id), nil) return diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go index 5a319de5c..bb6c2c6b0 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go @@ -43,7 +43,8 @@ func (r TenantPage) NextPageURL() (string, error) { return gophercloud.ExtractNextURL(s.Links) } -// ExtractTenants returns a slice of Tenants contained in a single page of results. +// ExtractTenants returns a slice of Tenants contained in a single page of +// results. func ExtractTenants(r pagination.Page) ([]Tenant, error) { var s struct { Tenants []Tenant `json:"tenants"` @@ -56,7 +57,7 @@ type tenantResult struct { gophercloud.Result } -// Extract interprets any tenantResults as a tenant. +// Extract interprets any tenantResults as a Tenant. func (r tenantResult) Extract() (*Tenant, error) { var s struct { Tenant *Tenant `json:"tenant"` @@ -65,22 +66,26 @@ func (r tenantResult) Extract() (*Tenant, error) { return s.Tenant, err } -// GetResult temporarily contains the response from the Get call. +// GetResult is the response from a Get request. Call its Extract method to +// interpret it as a Tenant. type GetResult struct { tenantResult } -// CreateResult temporarily contains the reponse from the Create call. +// CreateResult is the response from a Create request. Call its Extract method +// to interpret it as a Tenant. type CreateResult struct { tenantResult } -// DeleteResult temporarily contains the response from the Delete call. 
+// DeleteResult is the response from a Delete request. Call its ExtractErr
+// method to determine if the call succeeded or failed.
 type DeleteResult struct {
 	gophercloud.ErrResult
 }
 
-// UpdateResult temporarily contains the response from the Update call.
+// UpdateResult is the response from an Update request. Call its Extract method
+// to interpret it as a Tenant.
 type UpdateResult struct {
 	tenantResult
 }
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go
index 31cacc5e1..5375eea87 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go
@@ -1,5 +1,46 @@
-// Package tokens provides information and interaction with the token API
-// resource for the OpenStack Identity service.
-// For more information, see:
-// http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
+/*
+Package tokens provides information and interaction with the token API
+resource for the OpenStack Identity service.
+
+For more information, see:
+http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
+
+Example to Create an Unscoped Token from a Password
+
+	authOpts := gophercloud.AuthOptions{
+		Username: "user",
+		Password: "pass",
+	}
+
+	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token from a Tenant ID and Password
+
+	authOpts := gophercloud.AuthOptions{
+		Username: "user",
+		Password: "password",
+		TenantID: "fc394f2ab2df4114bde39905f800dc57",
+	}
+
+	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token from a Tenant Name and Password
+
+	authOpts := gophercloud.AuthOptions{
+		Username:   "user",
+		Password:   "password",
+		TenantName: "tenantname",
+	}
+
+	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+*/
 package tokens
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go
index 4983031e7..ab32368cc 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go
@@ -2,17 +2,21 @@ package tokens
 
 import "github.com/gophercloud/gophercloud"
 
+// PasswordCredentialsV2 represents the required options to authenticate
+// with a username and password.
 type PasswordCredentialsV2 struct {
 	Username string `json:"username" required:"true"`
 	Password string `json:"password" required:"true"`
 }
 
+// TokenCredentialsV2 represents the required options to authenticate
+// with a token.
 type TokenCredentialsV2 struct {
 	ID string `json:"id,omitempty" required:"true"`
 }
 
-// AuthOptionsV2 wraps a gophercloud AuthOptions in order to adhere to the AuthOptionsBuilder
-// interface.
+// AuthOptionsV2 wraps a gophercloud AuthOptions in order to adhere to the
+// AuthOptionsBuilder interface.
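As a usage sketch only: the CreateResult produced by this package's Create call
(documented further below) yields both the token and the service catalog. The
identityClient and the credential values here are assumed placeholders:

	// Create a v2 token once, then extract both views of the result.
	result := tokens.Create(identityClient, gophercloud.AuthOptions{
		Username: "user",
		Password: "password",
		TenantID: "tenant_id",
	})

	token, err := result.ExtractToken()
	if err != nil {
		panic(err)
	}

	catalog, err := result.ExtractServiceCatalog()
	if err != nil {
		panic(err)
	}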
type AuthOptionsV2 struct { PasswordCredentials *PasswordCredentialsV2 `json:"passwordCredentials,omitempty" xor:"TokenCredentials"` @@ -23,15 +27,16 @@ type AuthOptionsV2 struct { TenantID string `json:"tenantId,omitempty"` TenantName string `json:"tenantName,omitempty"` - // TokenCredentials allows users to authenticate (possibly as another user) with an - // authentication token ID. + // TokenCredentials allows users to authenticate (possibly as another user) + // with an authentication token ID. TokenCredentials *TokenCredentialsV2 `json:"token,omitempty" xor:"PasswordCredentials"` } -// AuthOptionsBuilder describes any argument that may be passed to the Create call. +// AuthOptionsBuilder allows extensions to add additional parameters to the +// token create request. type AuthOptionsBuilder interface { - // ToTokenCreateMap assembles the Create request body, returning an error if parameters are - // missing or inconsistent. + // ToTokenCreateMap assembles the Create request body, returning an error + // if parameters are missing or inconsistent. ToTokenV2CreateMap() (map[string]interface{}, error) } @@ -47,8 +52,7 @@ type AuthOptions struct { TokenID string } -// ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder -// interface in the v2 tokens package +// ToTokenV2CreateMap builds a token request body from the given AuthOptions. func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { v2Opts := AuthOptionsV2{ TenantID: opts.TenantID, @@ -74,9 +78,9 @@ func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { } // Create authenticates to the identity service and attempts to acquire a Token. -// If successful, the CreateResult -// Generally, rather than interact with this call directly, end users should call openstack.AuthenticatedClient(), -// which abstracts all of the gory details about navigating service catalogs and such. +// Generally, rather than interact with this call directly, end users should +// call openstack.AuthenticatedClient(), which abstracts all of the gory details +// about navigating service catalogs and such. func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) (r CreateResult) { b, err := auth.ToTokenV2CreateMap() if err != nil { diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go index 6b3649370..b11326772 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go @@ -7,20 +7,24 @@ import ( "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" ) -// Token provides only the most basic information related to an authentication token. +// Token provides only the most basic information related to an authentication +// token. type Token struct { // ID provides the primary means of identifying a user to the OpenStack API. - // OpenStack defines this field as an opaque value, so do not depend on its content. - // It is safe, however, to compare for equality. + // OpenStack defines this field as an opaque value, so do not depend on its + // content. It is safe, however, to compare for equality. ID string - // ExpiresAt provides a timestamp in ISO 8601 format, indicating when the authentication token becomes invalid. - // After this point in time, future API requests made using this authentication token will respond with errors. 
- // Either the caller will need to reauthenticate manually, or more preferably, the caller should exploit automatic re-authentication. + // ExpiresAt provides a timestamp in ISO 8601 format, indicating when the + // authentication token becomes invalid. After this point in time, future + // API requests made using this authentication token will respond with + // errors. Either the caller will need to reauthenticate manually, or more + // preferably, the caller should exploit automatic re-authentication. // See the AuthOptions structure for more details. ExpiresAt time.Time - // Tenant provides information about the tenant to which this token grants access. + // Tenant provides information about the tenant to which this token grants + // access. Tenant tenants.Tenant } @@ -38,13 +42,17 @@ type User struct { } // Endpoint represents a single API endpoint offered by a service. -// It provides the public and internal URLs, if supported, along with a region specifier, again if provided. +// It provides the public and internal URLs, if supported, along with a region +// specifier, again if provided. +// // The significance of the Region field will depend upon your provider. // -// In addition, the interface offered by the service will have version information associated with it -// through the VersionId, VersionInfo, and VersionList fields, if provided or supported. +// In addition, the interface offered by the service will have version +// information associated with it through the VersionId, VersionInfo, and +// VersionList fields, if provided or supported. // -// In all cases, fields which aren't supported by the provider and service combined will assume a zero-value (""). +// In all cases, fields which aren't supported by the provider and service +// combined will assume a zero-value (""). type Endpoint struct { TenantID string `json:"tenantId"` PublicURL string `json:"publicURL"` @@ -56,38 +64,44 @@ type Endpoint struct { VersionList string `json:"versionList"` } -// CatalogEntry provides a type-safe interface to an Identity API V2 service catalog listing. -// Each class of service, such as cloud DNS or block storage services, will have a single -// CatalogEntry representing it. +// CatalogEntry provides a type-safe interface to an Identity API V2 service +// catalog listing. // -// Note: when looking for the desired service, try, whenever possible, to key off the type field. -// Otherwise, you'll tie the representation of the service to a specific provider. +// Each class of service, such as cloud DNS or block storage services, will have +// a single CatalogEntry representing it. +// +// Note: when looking for the desired service, try, whenever possible, to key +// off the type field. Otherwise, you'll tie the representation of the service +// to a specific provider. type CatalogEntry struct { // Name will contain the provider-specified name for the service. Name string `json:"name"` - // Type will contain a type string if OpenStack defines a type for the service. - // Otherwise, for provider-specific services, the provider may assign their own type strings. + // Type will contain a type string if OpenStack defines a type for the + // service. Otherwise, for provider-specific services, the provider may assign + // their own type strings. Type string `json:"type"` - // Endpoints will let the caller iterate over all the different endpoints that may exist for - // the service. + // Endpoints will let the caller iterate over all the different endpoints that + // may exist for the service. 
 	Endpoints []Endpoint `json:"endpoints"`
 }
 
-// ServiceCatalog provides a view into the service catalog from a previous, successful authentication.
+// ServiceCatalog provides a view into the service catalog from a previous,
+// successful authentication.
 type ServiceCatalog struct {
 	Entries []CatalogEntry
 }
 
-// CreateResult defers the interpretation of a created token.
-// Use ExtractToken() to interpret it as a Token, or ExtractServiceCatalog() to interpret it as a service catalog.
+// CreateResult is the response from a Create request. Use ExtractToken() to
+// interpret it as a Token, or ExtractServiceCatalog() to interpret it as a
+// service catalog.
 type CreateResult struct {
 	gophercloud.Result
 }
 
-// GetResult is the deferred response from a Get call, which is the same with a Created token.
-// Use ExtractUser() to interpret it as a User.
+// GetResult is the deferred response from a Get call, which has the same
+// format as a created token. Use ExtractUser() to interpret it as a User.
 type GetResult struct {
 	CreateResult
 }
@@ -121,7 +135,8 @@ func (r CreateResult) ExtractToken() (*Token, error) {
 	}, nil
 }
 
-// ExtractServiceCatalog returns the ServiceCatalog that was generated along with the user's Token.
+// ExtractServiceCatalog returns the ServiceCatalog that was generated along
+// with the user's Token.
 func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) {
 	var s struct {
 		Access struct {
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go
index 76ff5f473..966e128f1 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go
@@ -1,6 +1,108 @@
-// Package tokens provides information and interaction with the token API
-// resource for the OpenStack Identity service.
-//
-// For more information, see:
-// http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3
+/*
+Package tokens provides information and interaction with the token API
+resource for the OpenStack Identity service.
+
+For more information, see:
+http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3
+
+Example to Create a Token From a Username and Password
+
+	authOptions := tokens.AuthOptions{
+		UserID:   "username",
+		Password: "password",
+	}
+
+	token, err := tokens.Create(identityClient, &authOptions).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token From a Username, Password, and Domain
+
+	authOptions := tokens.AuthOptions{
+		UserID:   "username",
+		Password: "password",
+		DomainID: "default",
+	}
+
+	token, err := tokens.Create(identityClient, &authOptions).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+	authOptions = tokens.AuthOptions{
+		UserID:     "username",
+		Password:   "password",
+		DomainName: "default",
+	}
+
+	token, err = tokens.Create(identityClient, &authOptions).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token From a Token
+
+	authOptions := tokens.AuthOptions{
+		TokenID: "token_id",
+	}
+
+	token, err := tokens.Create(identityClient, &authOptions).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token from a Username and Password with Project ID Scope
+
+	scope := tokens.Scope{
+		ProjectID: "0fe36e73809d46aeae6705c39077b1b3",
+	}
+
+	authOptions := tokens.AuthOptions{
+		Scope:    scope,
+		UserID:   "username",
+		Password: "password",
+	}
+
+	token, err := tokens.Create(identityClient, &authOptions).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token from a Username and Password with Domain ID Scope
+
+	scope := tokens.Scope{
+		DomainID: "default",
+	}
+
+	authOptions := tokens.AuthOptions{
+		Scope:    scope,
+		UserID:   "username",
+		Password: "password",
+	}
+
+	token, err := tokens.Create(identityClient, &authOptions).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token from a Username and Password with Project Name Scope
+
+	scope := tokens.Scope{
+		ProjectName: "project_name",
+		DomainID:    "default",
+	}
+
+	authOptions := tokens.AuthOptions{
+		Scope:    scope,
+		UserID:   "username",
+		Password: "password",
+	}
+
+	token, err := tokens.Create(identityClient, &authOptions).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+*/
 package tokens
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go
index 39c19aee3..6e99a793c 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go
@@ -10,20 +10,22 @@ type Scope struct {
 	DomainName string
 }
 
-// AuthOptionsBuilder describes any argument that may be passed to the Create call.
+// AuthOptionsBuilder provides the ability for extensions to add additional
+// parameters to AuthOptions. Extensions must satisfy all required methods.
 type AuthOptionsBuilder interface {
-	// ToTokenV3CreateMap assembles the Create request body, returning an error if parameters are
-	// missing or inconsistent.
+	// ToTokenV3CreateMap assembles the Create request body, returning an error
+	// if parameters are missing or inconsistent.
 	ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error)
 	ToTokenV3ScopeMap() (map[string]interface{}, error)
 	CanReauth() bool
 }
 
+// AuthOptions represents options for authenticating a user.
type AuthOptions struct {
 	// IdentityEndpoint specifies the HTTP endpoint that is required to work with
-	// the Identity API of the appropriate version. While it's ultimately needed by
-	// all of the identity services, it will often be populated by a provider-level
-	// function.
+	// the Identity API of the appropriate version. While it's ultimately needed
+	// by all of the identity services, it will often be populated by a
+	// provider-level function.
 	IdentityEndpoint string `json:"-"`
 
 	// Username is required if using Identity V2 API. Consult with your provider's
@@ -39,11 +41,11 @@ type AuthOptions struct {
 	DomainID   string `json:"-"`
 	DomainName string `json:"name,omitempty"`
 
-	// AllowReauth should be set to true if you grant permission for Gophercloud to
-	// cache your credentials in memory, and to allow Gophercloud to attempt to
-	// re-authenticate automatically if/when your token expires. If you set it to
-	// false, it will not cache these settings, but re-authentication will not be
-	// possible. This setting defaults to false.
+	// AllowReauth should be set to true if you grant permission for Gophercloud
+	// to cache your credentials in memory, and to allow Gophercloud to attempt
+	// to re-authenticate automatically if/when your token expires. If you set
+	// it to false, it will not cache these settings, but re-authentication will
+	// not be possible. This setting defaults to false.
 	AllowReauth bool `json:"-"`
 
 	// TokenID allows users to authenticate (possibly as another user) with an
@@ -53,6 +55,7 @@ type AuthOptions struct {
 	Scope Scope `json:"-"`
 }
 
+// ToTokenV3CreateMap builds a request body from AuthOptions.
 func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) {
 	gophercloudAuthOpts := gophercloud.AuthOptions{
 		Username:   opts.Username,
@@ -67,68 +70,17 @@ func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[s
 	return gophercloudAuthOpts.ToTokenV3CreateMap(scope)
 }
 
+// ToTokenV3ScopeMap builds a scope request body from AuthOptions.
 func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) {
-	if opts.Scope.ProjectName != "" {
-		// ProjectName provided: either DomainID or DomainName must also be supplied.
-		// ProjectID may not be supplied.
-		if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" {
-			return nil, gophercloud.ErrScopeDomainIDOrDomainName{}
-		}
-		if opts.Scope.ProjectID != "" {
-			return nil, gophercloud.ErrScopeProjectIDOrProjectName{}
-		}
+	scope := gophercloud.AuthScope(opts.Scope)
 
-		if opts.Scope.DomainID != "" {
-			// ProjectName + DomainID
-			return map[string]interface{}{
-				"project": map[string]interface{}{
-					"name":   &opts.Scope.ProjectName,
-					"domain": map[string]interface{}{"id": &opts.Scope.DomainID},
-				},
-			}, nil
-		}
-
-		if opts.Scope.DomainName != "" {
-			// ProjectName + DomainName
-			return map[string]interface{}{
-				"project": map[string]interface{}{
-					"name":   &opts.Scope.ProjectName,
-					"domain": map[string]interface{}{"name": &opts.Scope.DomainName},
-				},
-			}, nil
-		}
-	} else if opts.Scope.ProjectID != "" {
-		// ProjectID provided. ProjectName, DomainID, and DomainName may not be provided.
-		if opts.Scope.DomainID != "" {
-			return nil, gophercloud.ErrScopeProjectIDAlone{}
-		}
-		if opts.Scope.DomainName != "" {
-			return nil, gophercloud.ErrScopeProjectIDAlone{}
-		}
-
-		// ProjectID
-		return map[string]interface{}{
-			"project": map[string]interface{}{
-				"id": &opts.Scope.ProjectID,
-			},
-		}, nil
-	} else if opts.Scope.DomainID != "" {
-		// DomainID provided. ProjectID, ProjectName, and DomainName may not be provided.
-		if opts.Scope.DomainName != "" {
-			return nil, gophercloud.ErrScopeDomainIDOrDomainName{}
-		}
-
-		// DomainID
-		return map[string]interface{}{
-			"domain": map[string]interface{}{
-				"id": &opts.Scope.DomainID,
-			},
-		}, nil
-	} else if opts.Scope.DomainName != "" {
-		return nil, gophercloud.ErrScopeDomainName{}
+	gophercloudAuthOpts := gophercloud.AuthOptions{
+		Scope:      &scope,
+		DomainID:   opts.DomainID,
+		DomainName: opts.DomainName,
 	}
 
-	return nil, nil
+	return gophercloudAuthOpts.ToTokenV3ScopeMap()
 }
 
 func (opts *AuthOptions) CanReauth() bool {
@@ -141,7 +93,8 @@ func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[
 	}
 }
 
-// Create authenticates and either generates a new token, or changes the Scope of an existing token.
+// Create authenticates and either generates a new token, or changes the Scope
+// of an existing token.
 func Create(c *gophercloud.ServiceClient, opts AuthOptionsBuilder) (r CreateResult) {
 	scope, err := opts.ToTokenV3ScopeMap()
 	if err != nil {
@@ -180,7 +133,7 @@ func Get(c *gophercloud.ServiceClient, token string) (r GetResult) {
 
 // Validate determines if a specified token is valid or not.
 func Validate(c *gophercloud.ServiceClient, token string) (bool, error) {
-	resp, err := c.Request("HEAD", tokenURL(c), &gophercloud.RequestOpts{
+	resp, err := c.Head(tokenURL(c), &gophercloud.RequestOpts{
 		MoreHeaders: subjectTokenHeaders(c, token),
 		OkCodes:     []int{200, 204, 404},
 	})
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go
index 7c306e83f..ebdca58f6 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go
@@ -13,41 +13,50 @@ import (
 type Endpoint struct {
 	ID        string `json:"id"`
 	Region    string `json:"region"`
+	RegionID  string `json:"region_id"`
 	Interface string `json:"interface"`
 	URL       string `json:"url"`
 }
 
-// CatalogEntry provides a type-safe interface to an Identity API V3 service catalog listing.
-// Each class of service, such as cloud DNS or block storage services, could have multiple
-// CatalogEntry representing it (one by interface type, e.g public, admin or internal).
+// CatalogEntry provides a type-safe interface to an Identity API V3 service
+// catalog listing. Each class of service, such as cloud DNS or block storage
+// services, could have multiple CatalogEntry representing it (one by interface
+// type, e.g. public, admin or internal).
 //
-// Note: when looking for the desired service, try, whenever possible, to key off the type field.
-// Otherwise, you'll tie the representation of the service to a specific provider.
+// Note: when looking for the desired service, try, whenever possible, to key
+// off the type field. Otherwise, you'll tie the representation of the service
+// to a specific provider.
 type CatalogEntry struct {
 	// Service ID
 	ID string `json:"id"`
+
 	// Name will contain the provider-specified name for the service.
 	Name string `json:"name"`
-	// Type will contain a type string if OpenStack defines a type for the service.
-	// Otherwise, for provider-specific services, the provider may assign their own type strings.
+
+	// Type will contain a type string if OpenStack defines a type for the
+	// service. Otherwise, for provider-specific services, the provider may
+	// assign their own type strings.
 	Type string `json:"type"`
-	// Endpoints will let the caller iterate over all the different endpoints that may exist for
-	// the service.
+
+	// Endpoints will let the caller iterate over all the different endpoints that
+	// may exist for the service.
 	Endpoints []Endpoint `json:"endpoints"`
 }
 
-// ServiceCatalog provides a view into the service catalog from a previous, successful authentication.
+// ServiceCatalog provides a view into the service catalog from a previous,
+// successful authentication.
 type ServiceCatalog struct {
 	Entries []CatalogEntry `json:"catalog"`
 }
 
-// Domain provides information about the domain to which this token grants access.
+// Domain provides information about the domain to which this token grants
+// access.
 type Domain struct {
 	ID   string `json:"id"`
 	Name string `json:"name"`
 }
 
-// User represents a user resource that exists on the API.
+// User represents a user resource that exists in the Identity Service.
 type User struct {
 	Domain Domain `json:"domain"`
 	ID     string `json:"id"`
@@ -67,7 +76,8 @@ type Project struct {
 	Name string `json:"name"`
 }
 
-// commonResult is the deferred result of a Create or a Get call.
+// commonResult is the response from a request. A commonResult has various
+// methods which can be used to extract different details about the result.
 type commonResult struct {
 	gophercloud.Result
 }
@@ -92,7 +102,8 @@ func (r commonResult) ExtractToken() (*Token, error) {
 	return &s, err
 }
 
-// ExtractServiceCatalog returns the ServiceCatalog that was generated along with the user's Token.
+// ExtractServiceCatalog returns the ServiceCatalog that was generated along
+// with the user's Token.
 func (r commonResult) ExtractServiceCatalog() (*ServiceCatalog, error) {
 	var s ServiceCatalog
 	err := r.ExtractInto(&s)
@@ -126,27 +137,31 @@ func (r commonResult) ExtractProject() (*Project, error) {
 	return s.Project, err
 }
 
-// CreateResult defers the interpretation of a created token.
-// Use ExtractToken() to interpret it as a Token, or ExtractServiceCatalog() to interpret it as a service catalog.
+// CreateResult is the response from a Create request. Use ExtractToken()
+// to interpret it as a Token, or ExtractServiceCatalog() to interpret it
+// as a service catalog.
 type CreateResult struct {
 	commonResult
 }
 
-// GetResult is the deferred response from a Get call.
+// GetResult is the response from a Get request. Use ExtractToken()
+// to interpret it as a Token, or ExtractServiceCatalog() to interpret it
+// as a service catalog.
 type GetResult struct {
 	commonResult
 }
 
-// RevokeResult is the deferred response from a Revoke call.
+// RevokeResult is the response from a Revoke request.
 type RevokeResult struct {
 	commonResult
 }
 
-// Token is a string that grants a user access to a controlled set of services in an OpenStack provider.
-// Each Token is valid for a set length of time.
+// Token is a string that grants a user access to a controlled set of services
+// in an OpenStack provider. Each Token is valid for a set length of time.
 type Token struct {
 	// ID is the issued token.
 	ID string `json:"id"`
+
 	// ExpiresAt is the timestamp at which this token will no longer be accepted.
 	ExpiresAt time.Time `json:"expires_at"`
 }
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go
new file mode 100644
index 000000000..14da9ac90
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go
@@ -0,0 +1,60 @@
+/*
+Package images enables management and retrieval of images from the OpenStack
+Image Service.
+
+Example to List Images
+
+	listOpts := images.ListOpts{
+		Owner: "a7509e1ae65945fda83f3e52c6296017",
+	}
+
+	allPages, err := images.List(imagesClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allImages, err := images.ExtractImages(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, image := range allImages {
+		fmt.Printf("%+v\n", image)
+	}
+
+Example to Create an Image
+
+	createOpts := images.CreateOpts{
+		Name:       "image_name",
+		Visibility: images.ImageVisibilityPrivate,
+	}
+
+	image, err := images.Create(imageClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update an Image
+
+	imageID := "1bea47ed-f6a9-463b-b423-14b9cca9ad27"
+
+	updateOpts := images.UpdateOpts{
+		images.ReplaceImageName{
+			NewName: "new_name",
+		},
+	}
+
+	image, err := images.Update(imageClient, imageID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete an Image
+
+	imageID := "1bea47ed-f6a9-463b-b423-14b9cca9ad27"
+	err := images.Delete(imageClient, imageID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package images
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go
index 044b5cb95..88cd4d265 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go
@@ -1,6 +1,10 @@
 package images
 
 import (
+	"fmt"
+	"net/url"
+	"time"
+
 	"github.com/gophercloud/gophercloud"
 	"github.com/gophercloud/gophercloud/pagination"
 )
@@ -15,33 +19,106 @@ type ListOptsBuilder interface {
 // the API. Filtering is achieved by passing in struct field values that map to
 // the server attributes you want to see returned. Marker and Limit are used
 // for pagination.
-//http://developer.openstack.org/api-ref-image-v2.html
+//
+// http://developer.openstack.org/api-ref-image-v2.html
 type ListOpts struct {
+	// ID is the ID of the image.
+	// Multiple IDs can be specified by constructing a string
+	// such as "in:uuid1,uuid2,uuid3".
+	ID string `q:"id"`
+
 	// Integer value for the limit of values to return.
 	Limit int `q:"limit"`
 
 	// UUID of the server at which you want to set a marker.
 	Marker string `q:"marker"`
 
-	Name         string            `q:"name"`
-	Visibility   ImageVisibility   `q:"visibility"`
+	// Name filters on the name of the image.
+	// Multiple names can be specified by constructing a string
+	// such as "in:name1,name2,name3".
+	Name string `q:"name"`
+
+	// Visibility filters on the visibility of the image.
+	Visibility ImageVisibility `q:"visibility"`
+
+	// MemberStatus filters on the member status of the image.
 	MemberStatus ImageMemberStatus `q:"member_status"`
-	Owner        string            `q:"owner"`
-	Status       ImageStatus       `q:"status"`
-	SizeMin      int64             `q:"size_min"`
-	SizeMax      int64             `q:"size_max"`
-	SortKey      string            `q:"sort_key"`
-	SortDir      string            `q:"sort_dir"`
-	Tag          string            `q:"tag"`
+
+	// Owner filters on the project ID of the image.
+	Owner string `q:"owner"`
+
+	// Status filters on the status of the image.
+	// Multiple statuses can be specified by constructing a string
+	// such as "in:saving,queued".
+	Status ImageStatus `q:"status"`
+
+	// SizeMin filters on the size_min image property.
+	SizeMin int64 `q:"size_min"`
+
+	// SizeMax filters on the size_max image property.
+	SizeMax int64 `q:"size_max"`
+
+	// Sort sorts the results using the new style of sorting. See the OpenStack
+	// Image API reference for the exact syntax.
+	//
+	// Sort cannot be used with the classic sort options (sort_key and sort_dir).
+	Sort string `q:"sort"`
+
+	// SortKey will sort the results based on a specified image property.
+	SortKey string `q:"sort_key"`
+
+	// SortDir will sort the list results either ascending or descending.
+	SortDir string `q:"sort_dir"`
+
+	// Tags filters on specific image tags.
+	Tags []string `q:"tag"`
+
+	// CreatedAtQuery filters images based on their creation date.
+	CreatedAtQuery *ImageDateQuery
+
+	// UpdatedAtQuery filters images based on their updated date.
+	UpdatedAtQuery *ImageDateQuery
+
+	// ContainerFormat filters images based on the container_format.
+	// Multiple container formats can be specified by constructing a
+	// string such as "in:bare,ami".
+	ContainerFormat string `q:"container_format"`
+
+	// DiskFormat filters images based on the disk_format.
+	// Multiple disk formats can be specified by constructing a string
+	// such as "in:qcow2,iso".
+	DiskFormat string `q:"disk_format"`
 }
 
 // ToImageListQuery formats a ListOpts into a query string.
 func (opts ListOpts) ToImageListQuery() (string, error) {
 	q, err := gophercloud.BuildQueryString(opts)
+	params := q.Query()
+
+	if opts.CreatedAtQuery != nil {
+		createdAt := opts.CreatedAtQuery.Date.Format(time.RFC3339)
+		if v := opts.CreatedAtQuery.Filter; v != "" {
+			createdAt = fmt.Sprintf("%s:%s", v, createdAt)
+		}
+
+		params.Add("created_at", createdAt)
+	}
+
+	if opts.UpdatedAtQuery != nil {
+		updatedAt := opts.UpdatedAtQuery.Date.Format(time.RFC3339)
+		if v := opts.UpdatedAtQuery.Filter; v != "" {
+			updatedAt = fmt.Sprintf("%s:%s", v, updatedAt)
+		}
+
+		params.Add("updated_at", updatedAt)
+	}
+
+	q = &url.URL{RawQuery: params.Encode()}
+
 	return q.String(), err
 }
 
-// List implements image list request
+// List implements image list request.
 func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
 	url := listURL(c)
 	if opts != nil {
@@ -56,14 +133,13 @@ func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
 	})
 }
 
-// CreateOptsBuilder describes struct types that can be accepted by the Create call.
-// The CreateOpts struct in this package does.
+// CreateOptsBuilder allows extensions to add parameters to the Create request.
 type CreateOptsBuilder interface {
 	// Returns value that can be passed to json.Marshal
 	ToImageCreateMap() (map[string]interface{}, error)
 }
 
-// CreateOpts implements CreateOptsBuilder
+// CreateOpts represents options used to create an image.
 type CreateOpts struct {
 	// Name is the name of the new image.
 	Name string `json:"name" required:"true"`
@@ -118,7 +194,7 @@ func (opts CreateOpts) ToImageCreateMap() (map[string]interface{}, error) {
 	return b, nil
 }
 
-// Create implements create image request
+// Create implements create image request.
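A minimal filtering sketch using the new date-query fields handled by
ToImageListQuery above. The imagesClient value is an assumed placeholder, and
the Filter constant is an assumption: ToImageListQuery only requires a string
prefixed to the date, shown here with an ImageDateFilter constant assumed to
encode "gte":

	// List images created on or after 2017-01-01, owned by one project.
	listOpts := images.ListOpts{
		Owner: "a7509e1ae65945fda83f3e52c6296017",
		CreatedAtQuery: &images.ImageDateQuery{
			Date:   time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC),
			Filter: images.FilterGTE, // assumed constant; encodes as "gte:<date>"
		},
	}

	allPages, err := images.List(imagesClient, listOpts).AllPages()
	if err != nil {
		panic(err)
	}

	allImages, err := images.ExtractImages(allPages)
	if err != nil {
		panic(err)
	}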
func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
 	b, err := opts.ToImageCreateMap()
 	if err != nil {
@@ -129,19 +205,19 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r Create
 	return
 }
 
-// Delete implements image delete request
+// Delete implements image delete request.
 func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
 	_, r.Err = client.Delete(deleteURL(client, id), nil)
 	return
 }
 
-// Get implements image get request
+// Get implements image get request.
 func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
 	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
 	return
 }
 
-// Update implements image updated request
+// Update implements image update request.
 func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
 	b, err := opts.ToImageUpdateMap()
 	if err != nil {
@@ -155,9 +231,11 @@ func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder
 	return
 }
 
-// UpdateOptsBuilder implements UpdateOptsBuilder
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
 type UpdateOptsBuilder interface {
-	// returns value implementing json.Marshaler which when marshaled matches the patch schema:
+	// returns value implementing json.Marshaler which when marshaled matches
+	// the patch schema:
 	// http://specs.openstack.org/openstack/glance-specs/specs/api/v2/http-patch-image-api-v2.html
 	ToImageUpdateMap() ([]interface{}, error)
 }
@@ -165,7 +243,8 @@ type UpdateOptsBuilder interface {
 // UpdateOpts implements UpdateOptsBuilder
 type UpdateOpts []Patch
 
-// ToImageUpdateMap builder
+// ToImageUpdateMap assembles a request body based on the contents of
+// UpdateOpts.
 func (opts UpdateOpts) ToImageUpdateMap() ([]interface{}, error) {
 	m := make([]interface{}, len(opts))
 	for i, patch := range opts {
@@ -175,18 +254,18 @@ func (opts UpdateOpts) ToImageUpdateMap() ([]interface{}, error) {
 	return m, nil
 }
 
-// Patch represents a single update to an existing image. Multiple updates to an image can be
-// submitted at the same time.
+// Patch represents a single update to an existing image. Multiple updates
+// to an image can be submitted at the same time.
 type Patch interface {
 	ToImagePatchMap() map[string]interface{}
 }
 
-// UpdateVisibility updated visibility
+// UpdateVisibility represents an updated visibility property request.
 type UpdateVisibility struct {
 	Visibility ImageVisibility
 }
 
-// ToImagePatchMap builder
+// ToImagePatchMap assembles a request body based on UpdateVisibility.
 func (u UpdateVisibility) ToImagePatchMap() map[string]interface{} {
 	return map[string]interface{}{
 		"op":    "replace",
@@ -195,12 +274,12 @@ func (u UpdateVisibility) ToImagePatchMap() map[string]interface{} {
 	}
 }
 
-// ReplaceImageName implements Patch
+// ReplaceImageName represents an updated image_name property request.
 type ReplaceImageName struct {
 	NewName string
 }
 
-// ToImagePatchMap builder
+// ToImagePatchMap assembles a request body based on ReplaceImageName.
 func (r ReplaceImageName) ToImagePatchMap() map[string]interface{} {
 	return map[string]interface{}{
 		"op":    "replace",
@@ -209,12 +288,12 @@ func (r ReplaceImageName) ToImagePatchMap() map[string]interface{} {
 	}
 }
 
-// ReplaceImageChecksum implements Patch
+// ReplaceImageChecksum represents an updated checksum property request.
type ReplaceImageChecksum struct {
 	Checksum string
 }
 
-// ReplaceImageChecksum builder
+// ToImagePatchMap assembles a request body based on ReplaceImageChecksum.
 func (rc ReplaceImageChecksum) ToImagePatchMap() map[string]interface{} {
 	return map[string]interface{}{
 		"op":    "replace",
@@ -223,12 +302,12 @@ func (rc ReplaceImageChecksum) ToImagePatchMap() map[string]interface{} {
 	}
 }
 
-// ReplaceImageTags implements Patch
+// ReplaceImageTags represents an updated tags property request.
 type ReplaceImageTags struct {
 	NewTags []string
 }
 
-// ToImagePatchMap builder
+// ToImagePatchMap assembles a request body based on ReplaceImageTags.
 func (r ReplaceImageTags) ToImagePatchMap() map[string]interface{} {
 	return map[string]interface{}{
 		"op":    "replace",
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go
index 632186b72..e25606884 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go
@@ -11,11 +11,9 @@ import (
 	"github.com/gophercloud/gophercloud/pagination"
 )
 
-// Image model
-// Does not include the literal image data; just metadata.
-// returned by listing images, and by fetching a specific image.
+// Image represents an image found in the OpenStack Image service.
 type Image struct {
-	// ID is the image UUID
+	// ID is the image UUID.
 	ID string `json:"id"`
 
 	// Name is the human-readable display name for the image.
@@ -34,16 +32,19 @@ type Image struct {
 	ContainerFormat string `json:"container_format"`
 
 	// DiskFormat is the format of the disk.
-	// If set, valid values are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso.
+	// If set, valid values are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi,
+	// and iso.
 	DiskFormat string `json:"disk_format"`
 
-	// MinDiskGigabytes is the amount of disk space in GB that is required to boot the image.
+	// MinDiskGigabytes is the amount of disk space in GB that is required to
+	// boot the image.
 	MinDiskGigabytes int `json:"min_disk"`
 
-	// MinRAMMegabytes [optional] is the amount of RAM in MB that is required to boot the image.
+	// MinRAMMegabytes [optional] is the amount of RAM in MB that is required to
+	// boot the image.
 	MinRAMMegabytes int `json:"min_ram"`
 
-	// Owner is the tenant the image belongs to.
+	// Owner is the tenant ID the image belongs to.
 	Owner string `json:"owner"`
 
 	// Protected is whether the image is deletable or not.
@@ -52,7 +53,7 @@ type Image struct {
 	// Visibility defines who can see/use the image.
 	Visibility ImageVisibility `json:"visibility"`
 
-	// Checksum is the checksum of the data that's associated with the image
+	// Checksum is the checksum of the data that's associated with the image.
 	Checksum string `json:"checksum"`
 
 	// SizeBytes is the size of the data that's associated with the image.
@@ -60,23 +61,27 @@ type Image struct {
 
 	// Metadata is a set of metadata associated with the image.
 	// Image metadata allows for meaningfully defining the image properties
-	// and tags. See http://docs.openstack.org/developer/glance/metadefs-concepts.html.
+	// and tags.
+	// See http://docs.openstack.org/developer/glance/metadefs-concepts.html.
 	Metadata map[string]string `json:"metadata"`
 
-	// Properties is a set of key-value pairs, if any, that are associated with the image.
+	// Properties is a set of key-value pairs, if any, that are associated with
+	// the image.
Properties map[string]interface{} `json:"-"`
 
 	// CreatedAt is the date when the image has been created.
 	CreatedAt time.Time `json:"created_at"`
 
-	// UpdatedAt is the date when the last change has been made to the image or it's properties.
+	// UpdatedAt is the date when the last change has been made to the image or
+	// its properties.
 	UpdatedAt time.Time `json:"updated_at"`
 
-	// File is the trailing path after the glance endpoint that represent the location
-	// of the image or the path to retrieve it.
+	// File is the trailing path after the glance endpoint that represents the
+	// location of the image or the path to retrieve it.
 	File string `json:"file"`
 
-	// Schema is the path to the JSON-schema that represent the image or image entity.
+	// Schema is the path to the JSON-schema that represents the image or image
+	// entity.
 	Schema string `json:"schema"`
 
 	// VirtualSize is the virtual size of the image
@@ -97,7 +102,7 @@ func (r *Image) UnmarshalJSON(b []byte) error {
 
 	switch t := s.SizeBytes.(type) {
 	case nil:
-		return nil
+		r.SizeBytes = 0
 	case float32:
 		r.SizeBytes = int64(t)
 	case float64:
@@ -131,38 +136,43 @@ func (r commonResult) Extract() (*Image, error) {
 	return s, err
 }
 
-// CreateResult represents the result of a Create operation
+// CreateResult represents the result of a Create operation. Call its Extract
+// method to interpret it as an Image.
 type CreateResult struct {
 	commonResult
 }
 
-// UpdateResult represents the result of an Update operation
+// UpdateResult represents the result of an Update operation. Call its Extract
+// method to interpret it as an Image.
 type UpdateResult struct {
 	commonResult
 }
 
-// GetResult represents the result of a Get operation
+// GetResult represents the result of a Get operation. Call its Extract
+// method to interpret it as an Image.
 type GetResult struct {
 	commonResult
 }
 
-//DeleteResult model
+// DeleteResult represents the result of a Delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
 type DeleteResult struct {
 	gophercloud.ErrResult
 }
 
-// ImagePage represents page
+// ImagePage represents the results of a List request.
 type ImagePage struct {
 	pagination.LinkedPageBase
 }
 
-// IsEmpty returns true if a page contains no Images results.
+// IsEmpty returns true if an ImagePage contains no Image results.
 func (r ImagePage) IsEmpty() (bool, error) {
 	images, err := ExtractImages(r)
 	return len(images) == 0, err
 }
 
-// NextPageURL uses the response's embedded link reference to navigate to the next page of results.
+// NextPageURL uses the response's embedded link reference to navigate to
+// the next page of results.
 func (r ImagePage) NextPageURL() (string, error) {
 	var s struct {
 		Next string `json:"next"`
@@ -179,7 +189,8 @@ func (r ImagePage) NextPageURL() (string, error) {
 	return nextPageURL(r.URL.String(), s.Next)
 }
 
-// ExtractImages interprets the results of a single page from a List() call, producing a slice of Image entities.
+// ExtractImages interprets the results of a single page from a List() call,
+// producing a slice of Image entities.
func ExtractImages(r pagination.Page) ([]Image, error) {
 	var s struct {
 		Images []Image `json:"images"`
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go
index 086e7e5d5..d2f9cbd3b 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go
@@ -1,5 +1,9 @@
 package images
 
+import (
+	"time"
+)
+
 // ImageStatus image statuses
 // http://docs.openstack.org/developer/glance/statuses.html
 type ImageStatus string
@@ -9,7 +13,8 @@ const (
 	// been reserved for an image in the image registry.
 	ImageStatusQueued ImageStatus = "queued"
 
-	// ImageStatusSaving denotes that an image’s raw data is currently being uploaded to Glance
+	// ImageStatusSaving denotes that an image’s raw data is currently being
+	// uploaded to Glance.
 	ImageStatusSaving ImageStatus = "saving"
 
 	// ImageStatusActive denotes an image that is fully available in Glance.
@@ -23,16 +28,18 @@ const (
 	// The image information is retained in the image registry.
 	ImageStatusDeleted ImageStatus = "deleted"
 
-	// ImageStatusPendingDelete is similar to Delete, but the image is not yet deleted.
+	// ImageStatusPendingDelete is similar to Delete, but the image is not yet
+	// deleted.
 	ImageStatusPendingDelete ImageStatus = "pending_delete"
 
-	// ImageStatusDeactivated denotes that access to image data is not allowed to any non-admin user.
+	// ImageStatusDeactivated denotes that access to image data is not allowed to
+	// any non-admin user.
 	ImageStatusDeactivated ImageStatus = "deactivated"
 )
 
-// ImageVisibility denotes an image that is fully available in Glance.
-// This occurs when the image data is uploaded, or the image size
-// is explicitly set to zero on creation.
+// ImageVisibility denotes who can see and use an image.
 // According to design
 // https://wiki.openstack.org/wiki/Glance-v2-community-image-visibility-design
 type ImageVisibility string
@@ -52,12 +59,13 @@ const (
 
 	// ImageVisibilityCommunity images:
 	//   - all users can see and boot it
-	//   - users with tenantId in the member-list of the image with member_status == 'accepted'
-	//     have this image in their default image-list
+	//   - users with tenantId in the member-list of the image with
+	//     member_status == 'accepted' have this image in their default image-list.
 	ImageVisibilityCommunity ImageVisibility = "community"
 )
 
-// MemberStatus is a status for adding a new member (tenant) to an image member list.
+// ImageMemberStatus is a status for adding a new member (tenant) to an image
+// member list.
 type ImageMemberStatus string
 
 const (
@@ -73,3 +81,24 @@ const (
 	// ImageMemberStatusAll
 	ImageMemberStatusAll ImageMemberStatus = "all"
 )
+
+// ImageDateFilter represents a valid filter to use for filtering
+// images by their date during a List.
+type ImageDateFilter string
+
+const (
+	FilterGT  ImageDateFilter = "gt"
+	FilterGTE ImageDateFilter = "gte"
+	FilterLT  ImageDateFilter = "lt"
+	FilterLTE ImageDateFilter = "lte"
+	FilterNEQ ImageDateFilter = "neq"
+	FilterEQ  ImageDateFilter = "eq"
+)
+
+// ImageDateQuery represents a date field to be used for listing images.
+// If no filter is specified, the query will act as though FilterEQ was
+// set.
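+//
+// For example, to list only images created on or after the start of 2018
+// (a sketch assuming an initialized imageClient):
+//
+//	listOpts := images.ListOpts{
+//		CreatedAtQuery: &images.ImageDateQuery{
+//			Date:   time.Date(2018, time.January, 1, 0, 0, 0, 0, time.UTC),
+//			Filter: images.FilterGTE,
+//		},
+//	}
+//
+//	allPages, err := images.List(imageClient, listOpts).AllPages()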
+type ImageDateQuery struct {
+	Date   time.Time
+	Filter ImageDateFilter
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/doc.go
new file mode 100644
index 000000000..1a7132045
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/doc.go
@@ -0,0 +1,58 @@
+/*
+Package members enables management and retrieval of image members.
+
+Members are projects other than the image owner who have access to the image.
+
+Example to List Members of an Image
+
+	imageID := "2b6cacd4-cfd6-4b95-8302-4c04ccf0be3f"
+
+	allPages, err := members.List(imageID).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allMembers, err := members.ExtractMembers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, member := range allMembers {
+		fmt.Printf("%+v\n", member)
+	}
+
+Example to Add a Member to an Image
+
+	imageID := "2b6cacd4-cfd6-4b95-8302-4c04ccf0be3f"
+	projectID := "fc404778935a4cebaddcb4788fb3ff2c"
+
+	member, err := members.Create(imageClient, imageID, projectID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update the Status of a Member
+
+	imageID := "2b6cacd4-cfd6-4b95-8302-4c04ccf0be3f"
+	projectID := "fc404778935a4cebaddcb4788fb3ff2c"
+
+	updateOpts := members.UpdateOpts{
+		Status: "accepted",
+	}
+
+	member, err := members.Update(imageClient, imageID, projectID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Member from an Image
+
+	imageID := "2b6cacd4-cfd6-4b95-8302-4c04ccf0be3f"
+	projectID := "fc404778935a4cebaddcb4788fb3ff2c"
+
+	err := members.Delete(imageClient, imageID, projectID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package members
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/requests.go
index b16fb82d4..b80e54e83 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/requests.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/requests.go
@@ -5,16 +5,24 @@ import (
 	"github.com/gophercloud/gophercloud/pagination"
 )
 
-// Create member for specific image
-//
-// Preconditions
-// The specified images must exist.
-// You can only add a new member to an image which 'visibility' attribute is private.
-// You must be the owner of the specified image.
-// Synchronous Postconditions
-// With correct permissions, you can see the member status of the image as pending through API calls.
-//
-// More details here: http://developer.openstack.org/api-ref-image-v2.html#createImageMember-v2
+/*
+	Create a member for a specific image
+
+	Preconditions
+
+	* The specified image must exist.
+	* You can only add a new member to an image whose 'visibility' attribute is
+	  private.
+	* You must be the owner of the specified image.
+
+	Synchronous Postconditions
+
+	With correct permissions, you can see the member status of the image as
+	pending through API calls.
+
+	More details here:
+	http://developer.openstack.org/api-ref-image-v2.html#createImageMember-v2
+*/
 func Create(client *gophercloud.ServiceClient, id string, member string) (r CreateResult) {
 	b := map[string]interface{}{"member": member}
 	_, r.Err = client.Post(createMemberURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
@@ -23,8 +31,7 @@ func Create(client *gophercloud.ServiceClient, id string, member string) (r Crea
 	return
 }
 
-// List members returns list of members for specifed image id
-// More details: http://developer.openstack.org/api-ref-image-v2.html#listImageMembers-v2
+// List returns the list of members for the specified image ID.
 func List(client *gophercloud.ServiceClient, id string) pagination.Pager {
 	return pagination.NewPager(client, listMembersURL(client, id), func(r pagination.PageResult) pagination.Page {
 		return MemberPage{pagination.SinglePageBase(r)}
@@ -32,26 +39,24 @@ func List(client *gophercloud.ServiceClient, id string) pagination.Pager {
 }
 
 // Get image member details.
-// More details: http://developer.openstack.org/api-ref-image-v2.html#getImageMember-v2
 func Get(client *gophercloud.ServiceClient, imageID string, memberID string) (r DetailsResult) {
 	_, r.Err = client.Get(getMemberURL(client, imageID, memberID), &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}})
 	return
 }
 
-// Delete membership for given image.
-// Callee should be image owner
-// More details: http://developer.openstack.org/api-ref-image-v2.html#deleteImageMember-v2
+// Delete membership for the given image. The caller must be the image owner.
 func Delete(client *gophercloud.ServiceClient, imageID string, memberID string) (r DeleteResult) {
 	_, r.Err = client.Delete(deleteMemberURL(client, imageID, memberID), &gophercloud.RequestOpts{OkCodes: []int{204}})
 	return
 }
 
-// UpdateOptsBuilder allows extensions to add additional attributes to the Update request.
+// UpdateOptsBuilder allows extensions to add additional attributes to the
+// Update request.
 type UpdateOptsBuilder interface {
 	ToImageMemberUpdateMap() (map[string]interface{}, error)
 }
 
-// UpdateOpts implements UpdateOptsBuilder
+// UpdateOpts represents options to an Update request.
 type UpdateOpts struct {
 	Status string
 }
@@ -63,8 +68,7 @@ func (opts UpdateOpts) ToImageMemberUpdateMap() (map[string]interface{}, error)
 	}, nil
 }
 
-// Update function updates member
-// More details: http://developer.openstack.org/api-ref-image-v2.html#updateImageMember-v2
+// Update updates an image member.
 func Update(client *gophercloud.ServiceClient, imageID string, memberID string, opts UpdateOptsBuilder) (r UpdateResult) {
 	b, err := opts.ToImageMemberUpdateMap()
 	if err != nil {
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/results.go
index d3cc1ceaf..ab694bdc0 100644
--- a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/results.go
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/results.go
@@ -7,7 +7,7 @@ import (
 	"github.com/gophercloud/gophercloud/pagination"
 )
 
-// Member model
+// Member represents a member of an Image.
 type Member struct {
 	CreatedAt time.Time `json:"created_at"`
 	ImageID   string    `json:"image_id"`
@@ -17,7 +17,7 @@ type Member struct {
 	UpdatedAt time.Time `json:"updated_at"`
 }
 
-// Extract Member model from request if possible
+// Extract returns a Member from the result.
func (r commonResult) Extract() (*Member, error) { var s *Member err := r.ExtractInto(&s) @@ -29,7 +29,8 @@ type MemberPage struct { pagination.SinglePageBase } -// ExtractMembers returns a slice of Members contained in a single page of results. +// ExtractMembers returns a slice of Members contained in a single page +// of results. func ExtractMembers(r pagination.Page) ([]Member, error) { var s struct { Members []Member `json:"members"` @@ -38,7 +39,7 @@ func ExtractMembers(r pagination.Page) ([]Member, error) { return s.Members, err } -// IsEmpty determines whether or not a page of Members contains any results. +// IsEmpty determines whether or not a MemberPage contains any results. func (r MemberPage) IsEmpty() (bool, error) { members, err := ExtractMembers(r) return len(members) == 0, err @@ -48,22 +49,26 @@ type commonResult struct { gophercloud.Result } -// CreateResult result model +// CreateResult represents the result of a Create operation. Call its Extract +// method to interpret it as a Member. type CreateResult struct { commonResult } -// DetailsResult model +// DetailsResult represents the result of a Get operation. Call its Extract +// method to interpret it as a Member. type DetailsResult struct { commonResult } -// UpdateResult model +// UpdateResult represents the result of an Update operation. Call its Extract +// method to interpret it as a Member. type UpdateResult struct { commonResult } -// DeleteResult model +// DeleteResult represents the result of a Delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. type DeleteResult struct { gophercloud.ErrResult } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go new file mode 100644 index 000000000..d6f9e34ea --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go @@ -0,0 +1,29 @@ +package utils + +import ( + "net/url" + "regexp" + "strings" +) + +// BaseEndpoint will return a URL without the /vX.Y +// portion of the URL. +func BaseEndpoint(endpoint string) (string, error) { + var base string + + u, err := url.Parse(endpoint) + if err != nil { + return base, err + } + + u.RawQuery, u.Fragment = "", "" + + versionRe := regexp.MustCompile("v[0-9.]+/?") + if version := versionRe.FindString(u.Path); version != "" { + base = strings.Replace(u.String(), version, "", -1) + } else { + base = u.String() + } + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go index c605d0844..27da19f91 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go @@ -68,11 +68,6 @@ func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (* return nil, "", err } - byID := make(map[string]*Version) - for _, version := range recognized { - byID[version.ID] = version - } - var highest *Version var endpoint string @@ -84,20 +79,22 @@ func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (* } } - if matching, ok := byID[value.ID]; ok { - // Prefer a version that exactly matches the provided endpoint. 
-		if href == identityEndpoint {
-			if href == "" {
-				return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase)
+	for _, version := range recognized {
+		if strings.Contains(value.ID, version.ID) {
+			// Prefer a version that exactly matches the provided endpoint.
+			if href == identityEndpoint {
+				if href == "" {
+					return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase)
+				}
+				return version, href, nil
 			}
-			return matching, href, nil
-		}
 
-		// Otherwise, find the highest-priority version with a whitelisted status.
-		if goodStatus[strings.ToLower(value.Status)] {
-			if highest == nil || matching.Priority > highest.Priority {
-				highest = matching
-				endpoint = href
+			// Otherwise, find the highest-priority version with a whitelisted status.
+			if goodStatus[strings.ToLower(value.Status)] {
+				if highest == nil || version.Priority > highest.Priority {
+					highest = version
+					endpoint = href
+				}
 			}
 		}
 	}
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go
index 6f1609ef2..7c65926b7 100644
--- a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go
+++ b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go
@@ -22,7 +22,6 @@ var (
 // Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type
 // will need to implement.
 type Page interface {
-
 	// NextPageURL generates the URL for the page of data that follows this collection.
 	// Return "" if no such page exists.
 	NextPageURL() (string, error)
diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go
index e484fe1c1..19b8cf7bf 100644
--- a/vendor/github.com/gophercloud/gophercloud/params.go
+++ b/vendor/github.com/gophercloud/gophercloud/params.go
@@ -10,10 +10,28 @@ import (
 	"time"
 )
 
-// BuildRequestBody builds a map[string]interface from the given `struct`. If
-// parent is not the empty string, the final map[string]interface returned will
-// encapsulate the built one
-//
+/*
+BuildRequestBody builds a map[string]interface{} from the given `struct`. If
+parent is not an empty string, the final map[string]interface{} returned will
+encapsulate the built one. For example:
+
+	disk := 1
+	createOpts := flavors.CreateOpts{
+		ID:         "1",
+		Name:       "m1.tiny",
+		Disk:       &disk,
+		RAM:        512,
+		VCPUs:      1,
+		RxTxFactor: 1.0,
+	}
+
+	body, err := gophercloud.BuildRequestBody(createOpts, "flavor")
+
+The above example can be run as-is; however, it is recommended to look at how
+BuildRequestBody is used within Gophercloud to more fully understand how it
+fits within the request process as a whole rather than use it directly as shown
+above.
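+
+With parent set to "flavor" as above, the returned body nests the struct's
+fields under a single top-level "flavor" key, roughly of this shape (keys
+illustrative, derived from the struct's json tags):
+
+	map[string]interface{}{
+		"flavor": map[string]interface{}{
+			"id":   "1",
+			"name": "m1.tiny",
+			...
+		},
+	}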
+*/ func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, error) { optsValue := reflect.ValueOf(opts) if optsValue.Kind() == reflect.Ptr { @@ -97,10 +115,15 @@ func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, } } + jsonTag := f.Tag.Get("json") + if jsonTag == "-" { + continue + } + if v.Kind() == reflect.Struct || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct) { if zero { //fmt.Printf("value before change: %+v\n", optsValue.Field(i)) - if jsonTag := f.Tag.Get("json"); jsonTag != "" { + if jsonTag != "" { jsonTagPieces := strings.Split(jsonTag, ",") if len(jsonTagPieces) > 1 && jsonTagPieces[1] == "omitempty" { if v.CanSet() { @@ -329,12 +352,20 @@ func BuildQueryString(opts interface{}) (*url.URL, error) { params.Add(tags[0], v.Index(i).String()) } } + case reflect.Map: + if v.Type().Key().Kind() == reflect.String && v.Type().Elem().Kind() == reflect.String { + var s []string + for _, k := range v.MapKeys() { + value := v.MapIndex(k).String() + s = append(s, fmt.Sprintf("'%s':'%s'", k.String(), value)) + } + params.Add(tags[0], fmt.Sprintf("{%s}", strings.Join(s, ", "))) + } } } else { - // Otherwise, the field is not set. - if len(tags) == 2 && tags[1] == "required" { - // And the field is required. Return an error. - return nil, fmt.Errorf("Required query parameter [%s] not set.", f.Name) + // if the field has a 'required' tag, it can't have a zero-value + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + return &url.URL{}, fmt.Errorf("Required query parameter [%s] not set.", f.Name) } } } @@ -407,10 +438,9 @@ func BuildHeaders(opts interface{}) (map[string]string, error) { optsMap[tags[0]] = strconv.FormatBool(v.Bool()) } } else { - // Otherwise, the field is not set. - if len(tags) == 2 && tags[1] == "required" { - // And the field is required. Return an error. - return optsMap, fmt.Errorf("Required header not set.") + // if the field has a 'required' tag, it can't have a zero-value + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + return optsMap, fmt.Errorf("Required header [%s] not set.", f.Name) } } } diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go index f88682381..17e451274 100644 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "net/http" "strings" + "sync" ) // DefaultUserAgent is the default User-Agent string set in the request header. @@ -51,6 +52,8 @@ type ProviderClient struct { IdentityEndpoint string // TokenID is the ID of the most recently issued valid token. + // NOTE: Aside from within a custom ReauthFunc, this field shouldn't be set by an application. + // To safely read or write this value, call `Token` or `SetToken`, respectively TokenID string // EndpointLocator describes how this provider discovers the endpoints for @@ -68,16 +71,89 @@ type ProviderClient struct { // authentication functions for different Identity service versions. ReauthFunc func() error - Debug bool + mut *sync.RWMutex + + reauthmut *reauthlock +} + +type reauthlock struct { + sync.RWMutex + reauthing bool } // AuthenticatedHeaders returns a map of HTTP headers that are common for all // authenticated service requests. 
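+// When a token is available, this currently amounts to the X-Auth-Token
+// header.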
-func (client *ProviderClient) AuthenticatedHeaders() map[string]string {
-	if client.TokenID == "" {
-		return map[string]string{}
+func (client *ProviderClient) AuthenticatedHeaders() (m map[string]string) {
+	if client.reauthmut != nil {
+		client.reauthmut.RLock()
+		if client.reauthmut.reauthing {
+			client.reauthmut.RUnlock()
+			return
+		}
+		client.reauthmut.RUnlock()
 	}
-	return map[string]string{"X-Auth-Token": client.TokenID}
+	t := client.Token()
+	if t == "" {
+		return
+	}
+	return map[string]string{"X-Auth-Token": t}
+}
+
+// UseTokenLock creates a mutex that is used to allow safe concurrent access to the auth token.
+// If the application's ProviderClient is not used concurrently, this doesn't need to be called.
+func (client *ProviderClient) UseTokenLock() {
+	client.mut = new(sync.RWMutex)
+	client.reauthmut = new(reauthlock)
+}
+
+// Token safely reads the value of the auth token from the ProviderClient. Applications should
+// call this method to access the token instead of the TokenID field.
+func (client *ProviderClient) Token() string {
+	if client.mut != nil {
+		client.mut.RLock()
+		defer client.mut.RUnlock()
+	}
+	return client.TokenID
+}
+
+// SetToken safely sets the value of the auth token in the ProviderClient. Applications may
+// use this method in a custom ReauthFunc.
+func (client *ProviderClient) SetToken(t string) {
+	if client.mut != nil {
+		client.mut.Lock()
+		defer client.mut.Unlock()
+	}
+	client.TokenID = t
+}
+
+// Reauthenticate calls client.ReauthFunc in a thread-safe way. If this is
+// called because of a 401 response, the caller may pass the previous token. In
+// this case, the reauthentication can be skipped if another thread has already
+// reauthenticated in the meantime. If no previous token is known, an empty
+// string should be passed instead to force unconditional reauthentication.
+func (client *ProviderClient) Reauthenticate(previousToken string) (err error) {
+	if client.ReauthFunc == nil {
+		return nil
+	}
+
+	if client.mut == nil {
+		return client.ReauthFunc()
+	}
+	client.mut.Lock()
+	defer client.mut.Unlock()
+
+	client.reauthmut.Lock()
+	client.reauthmut.reauthing = true
+	client.reauthmut.Unlock()
+
+	if previousToken == "" || client.TokenID == previousToken {
+		err = client.ReauthFunc()
+	}
+
+	client.reauthmut.Lock()
+	client.reauthmut.reauthing = false
+	client.reauthmut.Unlock()
+	return
 }
 
 // RequestOpts customizes the behavior of the provider.Request() method.
@@ -145,10 +221,6 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts)
 	}
 
 	req.Header.Set("Accept", applicationJSON)
 
-	for k, v := range client.AuthenticatedHeaders() {
-		req.Header.Add(k, v)
-	}
-
 	// Set the User-Agent header
 	req.Header.Set("User-Agent", client.UserAgent.Join())
 
@@ -162,9 +234,16 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts)
 		}
 	}
 
+	// get latest token from client
+	for k, v := range client.AuthenticatedHeaders() {
+		req.Header.Set(k, v)
+	}
+
 	// Set connection parameter to close the connection immediately when we've got the response
 	req.Close = true
 
+	prereqtok := req.Header.Get("X-Auth-Token")
+
 	// Issue the request.
resp, err := client.HTTPClient.Do(req) if err != nil { @@ -188,9 +267,6 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) if !ok { body, _ := ioutil.ReadAll(resp.Body) resp.Body.Close() - //pc := make([]uintptr, 1) - //runtime.Callers(2, pc) - //f := runtime.FuncForPC(pc[0]) respErr := ErrUnexpectedResponseCode{ URL: url, Method: method, @@ -198,7 +274,6 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) Actual: resp.StatusCode, Body: body, } - //respErr.Function = "gophercloud.ProviderClient.Request" errType := options.ErrorContext switch resp.StatusCode { @@ -209,7 +284,7 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) } case http.StatusUnauthorized: if client.ReauthFunc != nil { - err = client.ReauthFunc() + err = client.Reauthenticate(prereqtok) if err != nil { e := &ErrUnableToReauthenticate{} e.ErrOriginal = respErr @@ -239,6 +314,11 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) if error401er, ok := errType.(Err401er); ok { err = error401er.Error401(respErr) } + case http.StatusForbidden: + err = ErrDefault403{respErr} + if error403er, ok := errType.(Err403er); ok { + err = error403er.Error403(respErr) + } case http.StatusNotFound: err = ErrDefault404{respErr} if error404er, ok := errType.(Err404er); ok { diff --git a/vendor/github.com/gophercloud/gophercloud/results.go b/vendor/github.com/gophercloud/gophercloud/results.go index 76c16ef8f..fdd4830ec 100644 --- a/vendor/github.com/gophercloud/gophercloud/results.go +++ b/vendor/github.com/gophercloud/gophercloud/results.go @@ -78,6 +78,53 @@ func (r Result) extractIntoPtr(to interface{}, label string) error { return err } + toValue := reflect.ValueOf(to) + if toValue.Kind() == reflect.Ptr { + toValue = toValue.Elem() + } + + switch toValue.Kind() { + case reflect.Slice: + typeOfV := toValue.Type().Elem() + if typeOfV.Kind() == reflect.Struct { + if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { + newSlice := reflect.MakeSlice(reflect.SliceOf(typeOfV), 0, 0) + newType := reflect.New(typeOfV).Elem() + + for _, v := range m[label].([]interface{}) { + b, err := json.Marshal(v) + if err != nil { + return err + } + + for i := 0; i < newType.NumField(); i++ { + s := newType.Field(i).Addr().Interface() + err = json.NewDecoder(bytes.NewReader(b)).Decode(s) + if err != nil { + return err + } + } + newSlice = reflect.Append(newSlice, newType) + } + toValue.Set(newSlice) + } + } + case reflect.Struct: + typeOfV := toValue.Type() + if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { + for i := 0; i < toValue.NumField(); i++ { + toField := toValue.Field(i) + if toField.Kind() == reflect.Struct { + s := toField.Addr().Interface() + err = json.NewDecoder(bytes.NewReader(b)).Decode(s) + if err != nil { + return err + } + } + } + } + } + err = json.Unmarshal(b, &to) return err } @@ -177,9 +224,8 @@ type HeaderResult struct { Result } -// ExtractHeader will return the http.Header and error from the HeaderResult. -// -// header, err := objects.Create(client, "my_container", objects.CreateOpts{}).ExtractHeader() +// ExtractInto allows users to provide an object into which `Extract` will +// extract the http.Header headers of the result. func (r HeaderResult) ExtractInto(to interface{}) error { if r.Err != nil { return r.Err @@ -299,6 +345,27 @@ func (jt *JSONRFC3339NoZ) UnmarshalJSON(data []byte) error { return nil } +// RFC3339ZNoT is the time format used in Zun (Containers Service). 
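+// For example: "2018-05-31 02:06:30+00:00".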
+const RFC3339ZNoT = "2006-01-02 15:04:05-07:00"
+
+type JSONRFC3339ZNoT time.Time
+
+func (jt *JSONRFC3339ZNoT) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	if s == "" {
+		return nil
+	}
+	t, err := time.Parse(RFC3339ZNoT, s)
+	if err != nil {
+		return err
+	}
+	*jt = JSONRFC3339ZNoT(t)
+	return nil
+}
+
 /*
 Link is an internal type to be used in packages of collection resources that are
 paginated in a certain way.
diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go
index 1160fefa7..2734510e1 100644
--- a/vendor/github.com/gophercloud/gophercloud/service_client.go
+++ b/vendor/github.com/gophercloud/gophercloud/service_client.go
@@ -28,6 +28,10 @@ type ServiceClient struct {
 
 	// The microversion of the service to use. Set this to use a particular microversion.
 	Microversion string
+
+	// MoreHeaders allows users (or Gophercloud) to set service-wide headers on requests. Put another way,
+	// values set in this field will be set on all the HTTP requests the service client sends.
+	MoreHeaders map[string]string
 }
 
 // ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /.
@@ -108,15 +112,39 @@ func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Respon
 	return client.Request("DELETE", url, opts)
 }
 
+// Head calls `Request` with the "HEAD" HTTP verb.
+func (client *ServiceClient) Head(url string, opts *RequestOpts) (*http.Response, error) {
+	if opts == nil {
+		opts = new(RequestOpts)
+	}
+	client.initReqOpts(url, nil, nil, opts)
+	return client.Request("HEAD", url, opts)
+}
+
 func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) {
 	switch client.Type {
 	case "compute":
 		opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion
 	case "sharev2":
 		opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion
+	case "volume":
+		opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion
 	}
 
 	if client.Type != "" {
 		opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion
 	}
 }
+
+// Request carries out the HTTP operation for the service client.
+func (client *ServiceClient) Request(method, url string, options *RequestOpts) (*http.Response, error) {
+	if len(client.MoreHeaders) > 0 {
+		if options == nil {
+			options = new(RequestOpts)
+		}
+		// Guard against writing to a nil map when the caller passed bare options.
+		if options.MoreHeaders == nil {
+			options.MoreHeaders = make(map[string]string)
+		}
+		for k, v := range client.MoreHeaders {
+			options.MoreHeaders[k] = v
+		}
+	}
+	return client.ProviderClient.Request(method, url, options)
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 7412caaae..89fbc937d 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -694,112 +694,100 @@
 		"revisionTime": "2015-01-27T13:39:51Z"
 	},
 	{
-		"checksumSHA1": "QTqcF26Y2e0SHe2Z+2wj+fedud4=",
+		"checksumSHA1": "qduT9GZUhXc00XoHEwLx16Xn9gM=",
 		"path": "github.com/gophercloud/gophercloud",
-		"revision": "95a28eb606def6aaaed082b6b82d3244b0552184",
-		"revisionTime": "2017-06-23T01:44:30Z"
+		"revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2",
+		"revisionTime": "2018-05-31T02:06:30Z"
 	},
 	{
 		"checksumSHA1": "b7g9TcU1OmW7e2UySYeOAmcfHpY=",
 		"path": "github.com/gophercloud/gophercloud/internal",
-		"revision": "95a28eb606def6aaaed082b6b82d3244b0552184",
-		"revisionTime": "2017-06-23T01:44:30Z"
+		"revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2",
+		"revisionTime": "2018-05-31T02:06:30Z"
 	},
 	{
-		"checksumSHA1": "24DO5BEQdFKNl1rfWgI2b4+ry5U=",
+		"checksumSHA1": "YDNvjFNQS+UxqZMLm/shFs7aLNU=",
"path": "github.com/gophercloud/gophercloud/openstack", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "Au6MAsI90lewLByg9n+Yjtdqdh8=", - "path": "github.com/gophercloud/gophercloud/openstack/common/extensions", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" - }, - { - "checksumSHA1": "4XWDCGMYqipwJymi9xJo9UffD7g=", - "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" - }, - { - "checksumSHA1": "e7AW3YDVYJPKUjpqsB4AL9RRlTw=", + "checksumSHA1": "vFS5BwnCdQIfKm1nNWrR+ijsAZA=", "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "bx6QnHtpgB6nKmN4QRVKa5PszqY=", + "checksumSHA1": "lAQuKIqTuQ9JuMgN0pPkNtRH2RM=", "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "qBpGbX7LQMPATdO8XyQmU7IXDiI=", + "checksumSHA1": "qfVZltu1fYTYXS97WbjeLuLPgUc=", "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "vTyXSR+Znw7/o/70UBOWG0F09r8=", + "checksumSHA1": "1/1G6O0CUVYyTFF/IqzWThGyuPQ=", "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "Rnzx2YgOD41k8KoPA08tR992PxQ=", + "checksumSHA1": "CSnfH01hSas0bdc/3m/f5Rt6SFY=", "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/images", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "IjCvcaNnRW++hclt21WUkMYinaA=", + "checksumSHA1": "FKVrvZnE/223fpKVGPqaIX4JP9I=", "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/servers", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "S8bHmOP+NjtlYioJC89zIBVvhYc=", + "checksumSHA1": "oOJkelRgWx0NzUmxuI3kTS27gM0=", "path": "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "AvUU5En9YpG25iLlcAPDgcQODjI=", + "checksumSHA1": "z5NsqMZX3TLMzpmwzOOXE4M5D9w=", "path": 
"github.com/gophercloud/gophercloud/openstack/identity/v2/tokens", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "rqE0NwmQ9qhXADXxg3DcuZ4A3wk=", + "checksumSHA1": "2PYxD2MOrbp4JCWy5794sEtDYD4=", "path": "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "p2ivHupXGBmyHkusnob2NsbsCQk=", + "checksumSHA1": "Pop8rylL583hCZ0RjO+9TkrScCo=", "path": "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "KA5YKF9TwIsTy9KssO27y+wk/6U=", + "checksumSHA1": "GFqX1Y5SpZvvyx0LPaP9D9Xp5k0=", "path": "github.com/gophercloud/gophercloud/openstack/imageservice/v2/members", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "TDOZnaS0TO0NirpxV1QwPerAQTY=", + "checksumSHA1": "8KE4bJzhbFZKsYMxcRg6xLqqfTg=", "path": "github.com/gophercloud/gophercloud/openstack/utils", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { - "checksumSHA1": "YspETi3tOMvawKIT91HyuqaA5lM=", + "checksumSHA1": "jWdt1lN75UC0GrLG5Tmng+qP+ZI=", "path": "github.com/gophercloud/gophercloud/pagination", - "revision": "95a28eb606def6aaaed082b6b82d3244b0552184", - "revisionTime": "2017-06-23T01:44:30Z" + "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", + "revisionTime": "2018-05-31T02:06:30Z" }, { "checksumSHA1": "xSmii71kfQASGNG2C8ttmHx9KTE=", From 8f2fa9c8ecf32a24996504a80a0a1450d01b40cf Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Tue, 5 Jun 2018 14:59:59 +0200 Subject: [PATCH 096/138] Vendored github.com/gophercloud/utils and gopkg.in/yaml.v2 --- vendor/github.com/gophercloud/utils/LICENSE | 201 ++ .../utils/openstack/clientconfig/doc.go | 49 + .../utils/openstack/clientconfig/requests.go | 596 ++++ .../utils/openstack/clientconfig/results.go | 88 + .../utils/openstack/clientconfig/utils.go | 67 + vendor/gopkg.in/yaml.v2/LICENSE | 201 ++ vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + vendor/gopkg.in/yaml.v2/NOTICE | 13 + vendor/gopkg.in/yaml.v2/README.md | 133 + vendor/gopkg.in/yaml.v2/apic.go | 739 +++++ vendor/gopkg.in/yaml.v2/decode.go | 775 +++++ vendor/gopkg.in/yaml.v2/emitterc.go | 1685 +++++++++++ vendor/gopkg.in/yaml.v2/encode.go | 362 +++ vendor/gopkg.in/yaml.v2/go.mod | 5 + vendor/gopkg.in/yaml.v2/parserc.go | 1095 +++++++ vendor/gopkg.in/yaml.v2/readerc.go | 412 +++ vendor/gopkg.in/yaml.v2/resolve.go | 258 ++ vendor/gopkg.in/yaml.v2/scannerc.go | 2696 +++++++++++++++++ vendor/gopkg.in/yaml.v2/sorter.go | 113 + vendor/gopkg.in/yaml.v2/writerc.go | 26 + vendor/gopkg.in/yaml.v2/yaml.go | 466 +++ vendor/gopkg.in/yaml.v2/yamlh.go | 738 +++++ vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 ++ vendor/vendor.json | 12 + 24 files changed, 10934 insertions(+) create mode 
100644 vendor/github.com/gophercloud/utils/LICENSE create mode 100644 vendor/github.com/gophercloud/utils/openstack/clientconfig/doc.go create mode 100644 vendor/github.com/gophercloud/utils/openstack/clientconfig/requests.go create mode 100644 vendor/github.com/gophercloud/utils/openstack/clientconfig/results.go create mode 100644 vendor/github.com/gophercloud/utils/openstack/clientconfig/utils.go create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 vendor/gopkg.in/yaml.v2/NOTICE create mode 100644 vendor/gopkg.in/yaml.v2/README.md create mode 100644 vendor/gopkg.in/yaml.v2/apic.go create mode 100644 vendor/gopkg.in/yaml.v2/decode.go create mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v2/encode.go create mode 100644 vendor/gopkg.in/yaml.v2/go.mod create mode 100644 vendor/gopkg.in/yaml.v2/parserc.go create mode 100644 vendor/gopkg.in/yaml.v2/readerc.go create mode 100644 vendor/gopkg.in/yaml.v2/resolve.go create mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v2/sorter.go create mode 100644 vendor/gopkg.in/yaml.v2/writerc.go create mode 100644 vendor/gopkg.in/yaml.v2/yaml.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go diff --git a/vendor/github.com/gophercloud/utils/LICENSE b/vendor/github.com/gophercloud/utils/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/gophercloud/utils/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/gophercloud/utils/openstack/clientconfig/doc.go b/vendor/github.com/gophercloud/utils/openstack/clientconfig/doc.go
new file mode 100644
index 000000000..1f3be2128
--- /dev/null
+++ b/vendor/github.com/gophercloud/utils/openstack/clientconfig/doc.go
@@ -0,0 +1,49 @@
+/*
+Package clientconfig provides convenient functions for creating OpenStack
+clients. It is based on the Python os-client-config library.
+
+See https://docs.openstack.org/os-client-config/latest for details.
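+
+A minimal clouds.yaml entry for the examples below might look like this
+(values illustrative, mirroring the manual example further down):
+
+	clouds:
+	  hawaii:
+	    auth:
+	      auth_url: https://hi.example.com:5000/v3
+	      username: jdoe
+	      password: password
+	      project_name: Some Project
+	      domain_name: default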
+
+Example to Create a Provider Client from clouds.yaml
+
+    opts := &clientconfig.ClientOpts{
+        Cloud: "hawaii",
+    }
+
+    pClient, err := clientconfig.AuthenticatedClient(opts)
+    if err != nil {
+        panic(err)
+    }
+
+
+Example to Manually Create a Provider Client
+
+    opts := &clientconfig.ClientOpts{
+        AuthInfo: &clientconfig.AuthInfo{
+            AuthURL:     "https://hi.example.com:5000/v3",
+            Username:    "jdoe",
+            Password:    "password",
+            ProjectName: "Some Project",
+            DomainName:  "default",
+        },
+    }
+
+    pClient, err := clientconfig.AuthenticatedClient(opts)
+    if err != nil {
+        panic(err)
+    }
+
+
+Example to Create a Service Client from clouds.yaml
+
+    opts := &clientconfig.ClientOpts{
+        Cloud: "hawaii",
+    }
+
+    computeClient, err := clientconfig.NewServiceClient("compute", opts)
+    if err != nil {
+        panic(err)
+    }
+
+*/
+package clientconfig
diff --git a/vendor/github.com/gophercloud/utils/openstack/clientconfig/requests.go b/vendor/github.com/gophercloud/utils/openstack/clientconfig/requests.go
new file mode 100644
index 000000000..254f72776
--- /dev/null
+++ b/vendor/github.com/gophercloud/utils/openstack/clientconfig/requests.go
@@ -0,0 +1,596 @@
+package clientconfig
+
+import (
+    "fmt"
+    "os"
+    "strings"
+
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/openstack"
+
+    "gopkg.in/yaml.v2"
+)
+
+// AuthType represents a valid method of authentication.
+type AuthType string
+
+const (
+    AuthPassword AuthType = "password"
+    AuthToken    AuthType = "token"
+
+    AuthV2Password AuthType = "v2password"
+    AuthV2Token    AuthType = "v2token"
+
+    AuthV3Password AuthType = "v3password"
+    AuthV3Token    AuthType = "v3token"
+)
+
+// ClientOpts represents options to customize the way a client is
+// configured.
+type ClientOpts struct {
+    // Cloud is the cloud entry in clouds.yaml to use.
+    Cloud string
+
+    // EnvPrefix allows a custom environment variable prefix to be used.
+    EnvPrefix string
+
+    // AuthType specifies the type of authentication to use.
+    // By default, this is "password".
+    AuthType AuthType
+
+    // AuthInfo defines the authentication information needed to
+    // authenticate to a cloud when clouds.yaml isn't used.
+    AuthInfo *AuthInfo
+}
+
+// LoadYAML will load a clouds.yaml file and return the full config.
+func LoadYAML() (map[string]Cloud, error) {
+    content, err := findAndReadYAML()
+    if err != nil {
+        return nil, err
+    }
+
+    var clouds Clouds
+    err = yaml.Unmarshal(content, &clouds)
+    if err != nil {
+        return nil, fmt.Errorf("failed to unmarshal yaml: %v", err)
+    }
+
+    return clouds.Clouds, nil
+}
+
+// GetCloudFromYAML will return a cloud entry from a clouds.yaml file.
+func GetCloudFromYAML(opts *ClientOpts) (*Cloud, error) {
+    clouds, err := LoadYAML()
+    if err != nil {
+        return nil, fmt.Errorf("unable to load clouds.yaml: %s", err)
+    }
+
+    // Determine which cloud to use.
+    // First see if a cloud name was explicitly set in opts.
+    var cloudName string
+    if opts != nil && opts.Cloud != "" {
+        cloudName = opts.Cloud
+    }
+
+    // Next see if a cloud name was specified as an environment variable.
+    // The environment variable overrides an explicit opts setting.
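+    // For example, with OS_CLOUD=hawaii in the environment, the "hawaii"
+    // entry is selected even if opts.Cloud names a different cloud.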
+    envPrefix := "OS_"
+    if opts != nil && opts.EnvPrefix != "" {
+        envPrefix = opts.EnvPrefix
+    }
+
+    if v := os.Getenv(envPrefix + "CLOUD"); v != "" {
+        cloudName = v
+    }
+
+    var cloud *Cloud
+    if cloudName != "" {
+        v, ok := clouds[cloudName]
+        if !ok {
+            return nil, fmt.Errorf("cloud %s does not exist in clouds.yaml", cloudName)
+        }
+        cloud = &v
+    }
+
+    // If a cloud was not specified, and clouds only contains
+    // a single entry, use that entry.
+    if cloudName == "" && len(clouds) == 1 {
+        for _, v := range clouds {
+            cloud = &v
+        }
+    }
+
+    if cloud == nil {
+        return nil, fmt.Errorf("Unable to determine a valid entry in clouds.yaml")
+    }
+
+    return cloud, nil
+}
+
+// AuthOptions creates a gophercloud.AuthOptions structure with the
+// settings found in a specific cloud entry of a clouds.yaml file or
+// based on authentication settings given in ClientOpts.
+//
+// This attempts to be a single point of entry for all OpenStack authentication.
+//
+// See http://docs.openstack.org/developer/os-client-config and
+// https://github.com/openstack/os-client-config/blob/master/os_client_config/config.py.
+func AuthOptions(opts *ClientOpts) (*gophercloud.AuthOptions, error) {
+    cloud := new(Cloud)
+
+    // If no opts were passed in, create an empty ClientOpts.
+    if opts == nil {
+        opts = new(ClientOpts)
+    }
+
+    // Determine if a clouds.yaml entry should be retrieved.
+    // Start by figuring out the cloud name.
+    // First check if one was explicitly specified in opts.
+    var cloudName string
+    if opts.Cloud != "" {
+        cloudName = opts.Cloud
+    }
+
+    // Next see if a cloud name was specified as an environment variable.
+    envPrefix := "OS_"
+    if opts.EnvPrefix != "" {
+        envPrefix = opts.EnvPrefix
+    }
+
+    if v := os.Getenv(envPrefix + "CLOUD"); v != "" {
+        cloudName = v
+    }
+
+    // If a cloud name was determined, try to look it up in clouds.yaml.
+    if cloudName != "" {
+        // Get the requested cloud.
+        var err error
+        cloud, err = GetCloudFromYAML(opts)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    // If cloud.AuthInfo is nil, then no cloud was specified.
+    if cloud.AuthInfo == nil {
+        // If opts.AuthInfo is not nil, then try using the auth settings from it.
+        if opts.AuthInfo != nil {
+            cloud.AuthInfo = opts.AuthInfo
+        }
+
+        // If cloud.AuthInfo is still nil, then set it to an empty AuthInfo struct
+        // and rely on environment variables to do the authentication.
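+        // The v2auth and v3auth helpers below then fill that struct from
+        // variables such as OS_AUTH_URL, OS_USERNAME, and OS_PASSWORD (or
+        // their equivalents under a custom EnvPrefix).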
+ if cloud.AuthInfo == nil { + cloud.AuthInfo = new(AuthInfo) + } + } + + identityAPI := determineIdentityAPI(cloud, opts) + switch identityAPI { + case "2.0", "2": + return v2auth(cloud, opts) + case "3": + return v3auth(cloud, opts) + } + + return nil, fmt.Errorf("Unable to build AuthOptions") +} + +func determineIdentityAPI(cloud *Cloud, opts *ClientOpts) string { + var identityAPI string + if cloud.IdentityAPIVersion != "" { + identityAPI = cloud.IdentityAPIVersion + } + + envPrefix := "OS_" + if opts != nil && opts.EnvPrefix != "" { + envPrefix = opts.EnvPrefix + } + + if v := os.Getenv(envPrefix + "IDENTITY_API_VERSION"); v != "" { + identityAPI = v + } + + if identityAPI == "" { + if cloud.AuthInfo != nil { + if strings.Contains(cloud.AuthInfo.AuthURL, "v2.0") { + identityAPI = "2.0" + } + + if strings.Contains(cloud.AuthInfo.AuthURL, "v3") { + identityAPI = "3" + } + } + } + + if identityAPI == "" { + switch cloud.AuthType { + case AuthV2Password: + identityAPI = "2.0" + case AuthV2Token: + identityAPI = "2.0" + case AuthV3Password: + identityAPI = "3" + case AuthV3Token: + identityAPI = "3" + } + } + + // If an Identity API version could not be determined, + // default to v3. + if identityAPI == "" { + identityAPI = "3" + } + + return identityAPI +} + +// v2auth creates a v2-compatible gophercloud.AuthOptions struct. +func v2auth(cloud *Cloud, opts *ClientOpts) (*gophercloud.AuthOptions, error) { + // Environment variable overrides. + envPrefix := "OS_" + if opts != nil && opts.EnvPrefix != "" { + envPrefix = opts.EnvPrefix + } + + if v := os.Getenv(envPrefix + "AUTH_URL"); v != "" { + cloud.AuthInfo.AuthURL = v + } + + if v := os.Getenv(envPrefix + "TOKEN"); v != "" { + cloud.AuthInfo.Token = v + } + + if v := os.Getenv(envPrefix + "AUTH_TOKEN"); v != "" { + cloud.AuthInfo.Token = v + } + + if v := os.Getenv(envPrefix + "USERNAME"); v != "" { + cloud.AuthInfo.Username = v + } + + if v := os.Getenv(envPrefix + "PASSWORD"); v != "" { + cloud.AuthInfo.Password = v + } + + if v := os.Getenv(envPrefix + "TENANT_ID"); v != "" { + cloud.AuthInfo.ProjectID = v + } + + if v := os.Getenv(envPrefix + "PROJECT_ID"); v != "" { + cloud.AuthInfo.ProjectID = v + } + + if v := os.Getenv(envPrefix + "TENANT_NAME"); v != "" { + cloud.AuthInfo.ProjectName = v + } + + if v := os.Getenv(envPrefix + "PROJECT_NAME"); v != "" { + cloud.AuthInfo.ProjectName = v + } + + ao := &gophercloud.AuthOptions{ + IdentityEndpoint: cloud.AuthInfo.AuthURL, + TokenID: cloud.AuthInfo.Token, + Username: cloud.AuthInfo.Username, + Password: cloud.AuthInfo.Password, + TenantID: cloud.AuthInfo.ProjectID, + TenantName: cloud.AuthInfo.ProjectName, + } + + return ao, nil +} + +// v3auth creates a v3-compatible gophercloud.AuthOptions struct. +func v3auth(cloud *Cloud, opts *ClientOpts) (*gophercloud.AuthOptions, error) { + // Environment variable overrides. 
+ envPrefix := "OS_" + if opts != nil && opts.EnvPrefix != "" { + envPrefix = opts.EnvPrefix + } + + if v := os.Getenv(envPrefix + "AUTH_URL"); v != "" { + cloud.AuthInfo.AuthURL = v + } + + if v := os.Getenv(envPrefix + "TOKEN"); v != "" { + cloud.AuthInfo.Token = v + } + + if v := os.Getenv(envPrefix + "AUTH_TOKEN"); v != "" { + cloud.AuthInfo.Token = v + } + + if v := os.Getenv(envPrefix + "USERNAME"); v != "" { + cloud.AuthInfo.Username = v + } + + if v := os.Getenv(envPrefix + "USER_ID"); v != "" { + cloud.AuthInfo.UserID = v + } + + if v := os.Getenv(envPrefix + "PASSWORD"); v != "" { + cloud.AuthInfo.Password = v + } + + if v := os.Getenv(envPrefix + "TENANT_ID"); v != "" { + cloud.AuthInfo.ProjectID = v + } + + if v := os.Getenv(envPrefix + "PROJECT_ID"); v != "" { + cloud.AuthInfo.ProjectID = v + } + + if v := os.Getenv(envPrefix + "TENANT_NAME"); v != "" { + cloud.AuthInfo.ProjectName = v + } + + if v := os.Getenv(envPrefix + "PROJECT_NAME"); v != "" { + cloud.AuthInfo.ProjectName = v + } + + if v := os.Getenv(envPrefix + "DOMAIN_ID"); v != "" { + cloud.AuthInfo.DomainID = v + } + + if v := os.Getenv(envPrefix + "DOMAIN_NAME"); v != "" { + cloud.AuthInfo.DomainName = v + } + + if v := os.Getenv(envPrefix + "DEFAULT_DOMAIN"); v != "" { + cloud.AuthInfo.DefaultDomain = v + } + + if v := os.Getenv(envPrefix + "PROJECT_DOMAIN_ID"); v != "" { + cloud.AuthInfo.ProjectDomainID = v + } + + if v := os.Getenv(envPrefix + "PROJECT_DOMAIN_NAME"); v != "" { + cloud.AuthInfo.ProjectDomainName = v + } + + if v := os.Getenv(envPrefix + "USER_DOMAIN_ID"); v != "" { + cloud.AuthInfo.UserDomainID = v + } + + if v := os.Getenv(envPrefix + "USER_DOMAIN_NAME"); v != "" { + cloud.AuthInfo.UserDomainName = v + } + + // Build a scope and try to do it correctly. + // https://github.com/openstack/os-client-config/blob/master/os_client_config/config.py#L595 + scope := new(gophercloud.AuthScope) + + if !isProjectScoped(cloud.AuthInfo) { + if cloud.AuthInfo.DomainID != "" { + scope.DomainID = cloud.AuthInfo.DomainID + } else if cloud.AuthInfo.DomainName != "" { + scope.DomainName = cloud.AuthInfo.DomainName + } + } else { + // If Domain* is set, but UserDomain* or ProjectDomain* aren't, + // then use Domain* as the default setting. + cloud = setDomainIfNeeded(cloud) + + if cloud.AuthInfo.ProjectID != "" { + scope.ProjectID = cloud.AuthInfo.ProjectID + } else { + scope.ProjectName = cloud.AuthInfo.ProjectName + scope.DomainID = cloud.AuthInfo.ProjectDomainID + scope.DomainName = cloud.AuthInfo.ProjectDomainName + } + } + + ao := &gophercloud.AuthOptions{ + Scope: scope, + IdentityEndpoint: cloud.AuthInfo.AuthURL, + TokenID: cloud.AuthInfo.Token, + Username: cloud.AuthInfo.Username, + UserID: cloud.AuthInfo.UserID, + Password: cloud.AuthInfo.Password, + TenantID: cloud.AuthInfo.ProjectID, + TenantName: cloud.AuthInfo.ProjectName, + DomainID: cloud.AuthInfo.UserDomainID, + DomainName: cloud.AuthInfo.UserDomainName, + } + + // If an auth_type of "token" was specified, then make sure + // Gophercloud properly authenticates with a token. This involves + // unsetting a few other auth options. The reason this is done + // here is to wait until all auth settings (both in clouds.yaml + // and via environment variables) are set and then unset them. + if strings.Contains(string(cloud.AuthType), "token") || ao.TokenID != "" { + ao.Username = "" + ao.Password = "" + ao.UserID = "" + ao.DomainID = "" + ao.DomainName = "" + } + + // Check for absolute minimum requirements. 
+ if ao.IdentityEndpoint == "" { + err := gophercloud.ErrMissingInput{Argument: "auth_url"} + return nil, err + } + + return ao, nil +} + +// AuthenticatedClient is a convenience function to get a new provider client +// based on a clouds.yaml entry. +func AuthenticatedClient(opts *ClientOpts) (*gophercloud.ProviderClient, error) { + ao, err := AuthOptions(opts) + if err != nil { + return nil, err + } + + return openstack.AuthenticatedClient(*ao) +} + +// NewServiceClient is a convenience function to get a new service client. +func NewServiceClient(service string, opts *ClientOpts) (*gophercloud.ServiceClient, error) { + cloud := new(Cloud) + + // If no opts were passed in, create an empty ClientOpts. + if opts == nil { + opts = new(ClientOpts) + } + + // Determine if a clouds.yaml entry should be retrieved. + // Start by figuring out the cloud name. + // First check if one was explicitly specified in opts. + var cloudName string + if opts.Cloud != "" { + cloudName = opts.Cloud + } + + // Next see if a cloud name was specified as an environment variable. + envPrefix := "OS_" + if opts.EnvPrefix != "" { + envPrefix = opts.EnvPrefix + } + + if v := os.Getenv(envPrefix + "CLOUD"); v != "" { + cloudName = v + } + + // If a cloud name was determined, try to look it up in clouds.yaml. + if cloudName != "" { + // Get the requested cloud. + var err error + cloud, err = GetCloudFromYAML(opts) + if err != nil { + return nil, err + } + } + + // Get a Provider Client + pClient, err := AuthenticatedClient(opts) + if err != nil { + return nil, err + } + + // Determine the region to use. + var region string + if v := cloud.RegionName; v != "" { + region = cloud.RegionName + } + + if v := os.Getenv(envPrefix + "REGION_NAME"); v != "" { + region = v + } + + eo := gophercloud.EndpointOpts{ + Region: region, + } + + switch service { + case "clustering": + return openstack.NewClusteringV1(pClient, eo) + case "compute": + return openstack.NewComputeV2(pClient, eo) + case "container": + return openstack.NewContainerV1(pClient, eo) + case "database": + return openstack.NewDBV1(pClient, eo) + case "dns": + return openstack.NewDNSV2(pClient, eo) + case "identity": + identityVersion := "3" + if v := cloud.IdentityAPIVersion; v != "" { + identityVersion = v + } + + switch identityVersion { + case "v2", "2", "2.0": + return openstack.NewIdentityV2(pClient, eo) + case "v3", "3": + return openstack.NewIdentityV3(pClient, eo) + default: + return nil, fmt.Errorf("invalid identity API version") + } + case "image": + return openstack.NewImageServiceV2(pClient, eo) + case "load-balancer": + return openstack.NewLoadBalancerV2(pClient, eo) + case "network": + return openstack.NewNetworkV2(pClient, eo) + case "object-store": + return openstack.NewObjectStorageV1(pClient, eo) + case "orchestration": + return openstack.NewOrchestrationV1(pClient, eo) + case "sharev2": + return openstack.NewSharedFileSystemV2(pClient, eo) + case "volume": + volumeVersion := "2" + if v := cloud.VolumeAPIVersion; v != "" { + volumeVersion = v + } + + switch volumeVersion { + case "v1", "1": + return openstack.NewBlockStorageV1(pClient, eo) + case "v2", "2": + return openstack.NewBlockStorageV2(pClient, eo) + case "v3", "3": + return openstack.NewBlockStorageV3(pClient, eo) + default: + return nil, fmt.Errorf("invalid volume API version") + } + } + + return nil, fmt.Errorf("unable to create a service client for %s", service) +} + +// isProjectScoped determines if an auth struct is project scoped. 
+func isProjectScoped(authInfo *AuthInfo) bool { + if authInfo.ProjectID == "" && authInfo.ProjectName == "" { + return false + } + + return true +} + +// setDomainIfNeeded will set a DomainID and DomainName +// to ProjectDomain* and UserDomain* if not already set. +func setDomainIfNeeded(cloud *Cloud) *Cloud { + if cloud.AuthInfo.DomainID != "" { + if cloud.AuthInfo.UserDomainID == "" { + cloud.AuthInfo.UserDomainID = cloud.AuthInfo.DomainID + } + + if cloud.AuthInfo.ProjectDomainID == "" { + cloud.AuthInfo.ProjectDomainID = cloud.AuthInfo.DomainID + } + + cloud.AuthInfo.DomainID = "" + } + + if cloud.AuthInfo.DomainName != "" { + if cloud.AuthInfo.UserDomainName == "" { + cloud.AuthInfo.UserDomainName = cloud.AuthInfo.DomainName + } + + if cloud.AuthInfo.ProjectDomainName == "" { + cloud.AuthInfo.ProjectDomainName = cloud.AuthInfo.DomainName + } + + cloud.AuthInfo.DomainName = "" + } + + // If Domain fields are still not set, and if DefaultDomain has a value, + // set UserDomainID and ProjectDomainID to DefaultDomain. + // https://github.com/openstack/osc-lib/blob/86129e6f88289ef14bfaa3f7c9cdfbea8d9fc944/osc_lib/cli/client_config.py#L117-L146 + if cloud.AuthInfo.DefaultDomain != "" { + if cloud.AuthInfo.UserDomainName == "" && cloud.AuthInfo.UserDomainID == "" { + cloud.AuthInfo.UserDomainID = cloud.AuthInfo.DefaultDomain + } + + if cloud.AuthInfo.ProjectDomainName == "" && cloud.AuthInfo.ProjectDomainID == "" { + cloud.AuthInfo.ProjectDomainID = cloud.AuthInfo.DefaultDomain + } + } + + return cloud +} diff --git a/vendor/github.com/gophercloud/utils/openstack/clientconfig/results.go b/vendor/github.com/gophercloud/utils/openstack/clientconfig/results.go new file mode 100644 index 000000000..cb7603700 --- /dev/null +++ b/vendor/github.com/gophercloud/utils/openstack/clientconfig/results.go @@ -0,0 +1,88 @@ +package clientconfig + +// Clouds represents a collection of Cloud entries in a clouds.yaml file. +// The format of clouds.yaml is documented at +// https://docs.openstack.org/os-client-config/latest/user/configuration.html. +type Clouds struct { + Clouds map[string]Cloud `yaml:"clouds"` +} + +// Cloud represents an entry in a clouds.yaml file. +type Cloud struct { + AuthInfo *AuthInfo `yaml:"auth"` + AuthType AuthType `yaml:"auth_type"` + RegionName string `yaml:"region_name"` + Regions []interface{} `yaml:"regions"` + + // API Version overrides. + IdentityAPIVersion string `yaml:"identity_api_version"` + VolumeAPIVersion string `yaml:"volume_api_version"` +} + +// Auth represents the auth section of a cloud entry or +// auth options entered explicitly in ClientOpts. +type AuthInfo struct { + // AuthURL is the keystone/identity endpoint URL. + AuthURL string `yaml:"auth_url"` + + // Token is a pre-generated authentication token. + Token string `yaml:"token"` + + // Username is the username of the user. + Username string `yaml:"username"` + + // UserID is the unique ID of a user. + UserID string `yaml:"user_id"` + + // Password is the password of the user. + Password string `yaml:"password"` + + // ProjectName is the common/human-readable name of a project. + // Users can be scoped to a project. + // ProjectName on its own is not enough to ensure a unique scope. It must + // also be combined with either a ProjectDomainName or ProjectDomainID. + // ProjectName cannot be combined with ProjectID in a scope. + ProjectName string `yaml:"project_name"` + + // ProjectID is the unique ID of a project. + // It can be used to scope a user to a specific project. 
+    ProjectID string `yaml:"project_id"`
+
+    // UserDomainName is the name of the domain where a user resides.
+    // It is used to identify the source domain of a user.
+    UserDomainName string `yaml:"user_domain_name"`
+
+    // UserDomainID is the unique ID of the domain where a user resides.
+    // It is used to identify the source domain of a user.
+    UserDomainID string `yaml:"user_domain_id"`
+
+    // ProjectDomainName is the name of the domain where a project resides.
+    // It is used to identify the source domain of a project.
+    // ProjectDomainName can be used in addition to a ProjectName when scoping
+    // a user to a specific project.
+    ProjectDomainName string `yaml:"project_domain_name"`
+
+    // ProjectDomainID is the unique ID of the domain where a project resides.
+    // It is used to identify the source domain of a project.
+    // ProjectDomainID can be used in addition to a ProjectName when scoping
+    // a user to a specific project.
+    ProjectDomainID string `yaml:"project_domain_id"`
+
+    // DomainName is the name of a domain which can be used to identify the
+    // source domain of either a user or a project.
+    // If UserDomainName and ProjectDomainName are not specified, then DomainName
+    // is used as a default choice.
+    // It can also be used to specify a domain-only scope.
+    DomainName string `yaml:"domain_name"`
+
+    // DomainID is the unique ID of a domain which can be used to identify the
+    // source domain of either a user or a project.
+    // If UserDomainID and ProjectDomainID are not specified, then DomainID is
+    // used as a default choice.
+    // It can also be used to specify a domain-only scope.
+    DomainID string `yaml:"domain_id"`
+
+    // DefaultDomain is the domain ID to fall back on if no other domain has
+    // been specified and a domain is required for scope.
+    DefaultDomain string `yaml:"default_domain"`
+}
diff --git a/vendor/github.com/gophercloud/utils/openstack/clientconfig/utils.go b/vendor/github.com/gophercloud/utils/openstack/clientconfig/utils.go
new file mode 100644
index 000000000..34518c6ed
--- /dev/null
+++ b/vendor/github.com/gophercloud/utils/openstack/clientconfig/utils.go
@@ -0,0 +1,67 @@
+package clientconfig
+
+import (
+    "fmt"
+    "io/ioutil"
+    "os"
+    "os/user"
+    "path/filepath"
+)
+
+// findAndReadYAML attempts to locate a clouds.yaml file in the following
+// locations:
+//
+// 1. OS_CLIENT_CONFIG_FILE
+// 2. Current directory.
+// 3. unix-specific user_config_dir (~/.config/openstack/clouds.yaml)
+// 4. unix-specific site_config_dir (/etc/openstack/clouds.yaml)
+//
+// If found, the contents of the file are returned.
+func findAndReadYAML() ([]byte, error) {
+    // OS_CLIENT_CONFIG_FILE
+    if v := os.Getenv("OS_CLIENT_CONFIG_FILE"); v != "" {
+        if ok := fileExists(v); ok {
+            return ioutil.ReadFile(v)
+        }
+    }
+
+    // current directory
+    cwd, err := os.Getwd()
+    if err != nil {
+        return nil, fmt.Errorf("unable to determine working directory: %s", err)
+    }
+
+    filename := filepath.Join(cwd, "clouds.yaml")
+    if ok := fileExists(filename); ok {
+        return ioutil.ReadFile(filename)
+    }
+
+    // unix user config directory: ~/.config/openstack.
+    currentUser, err := user.Current()
+    if err != nil {
+        return nil, fmt.Errorf("unable to get current user: %s", err)
+    }
+
+    homeDir := currentUser.HomeDir
+    if homeDir != "" {
+        filename := filepath.Join(homeDir, ".config/openstack/clouds.yaml")
+        if ok := fileExists(filename); ok {
+            return ioutil.ReadFile(filename)
+        }
+    }
+
+    // unix-specific site config directory: /etc/openstack.
+ if ok := fileExists("/etc/openstack/clouds.yaml"); ok { + return ioutil.ReadFile("/etc/openstack/clouds.yaml") + } + + return nil, fmt.Errorf("no clouds.yaml file found") +} + +// fileExists checks for the existence of a file at a given location. +func fileExists(filename string) bool { + if _, err := os.Stat(filename); err == nil { + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 000000000..8da58fbf6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 000000000..866d74a7a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 000000000..b50c6e877 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 000000000..1f7e87e67 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. 
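+	// The token queue lives in a plain slice: tokens_head marks how many
+	// tokens at the front have already been consumed. When the slice is
+	// full, the unconsumed tail is copied back to the start so existing
+	// capacity is reused instead of growing the slice.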
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. 
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. 
+func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. 
+// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. 
+// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 000000000..e4e56e28e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,775 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
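+//
+// Each document node carries an anchors map so that alias events can be
+// resolved back to the node they reference during parsing.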
+
+type parser struct {
+	parser yaml_parser_t
+	event yaml_event_t
+	doc *node
+	doneInit bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + good = d.unmarshal(n.alias, out) + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. 
+ text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. + out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? 
+ elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := 
getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 000000000..a1c2cc526 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. 
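+//
+// Note (a reading of the code that follows): the break character comes from
+// emitter.line_break, so the same event stream can be rendered with CR, LF,
+// or CRLF endings; yaml_emitter_emit_stream_start further below defaults
+// yaml_ANY_BREAK to yaml_LN_BREAK.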
+func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. 
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
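+//
+// A usage sketch (assuming the package's public Marshal helper, which is not
+// defined in this file): from the caller's side this whole state machine is
+// driven by the events produced for yaml.Marshal:
+//
+//	out, _ := yaml.Marshal(map[string]int{"a": 1})
+//	// out == []byte("a: 1\n")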
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
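+//
+// A streaming sketch (assuming the package's Encoder API, also not part of
+// this file): each Encode call emits one document, and this handler writes
+// the "---" separator between them:
+//
+//	var buf bytes.Buffer
+//	enc := yaml.NewEncoder(&buf)
+//	_ = enc.Encode(map[string]int{"a": 1})
+//	_ = enc.Encode(map[string]int{"b": 2})
+//	enc.Close()
+//	// buf now holds roughly "a: 1\n---\nb: 2\n"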
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
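+//
+// A usage sketch (assuming the package's ",flow" struct-tag option): flow
+// collections reach these handlers when a field asks for flow style, or when
+// a collection is empty:
+//
+//	type T struct {
+//		Ports []int `yaml:"ports,flow"`
+//	}
+//	out, _ := yaml.Marshal(T{Ports: []int{80, 443}})
+//	// out == []byte("ports: [80, 443]\n")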
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
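+//
+// Note: every node reaching this dispatcher is an alias, scalar,
+// sequence-start, or mapping-start event. The same four kinds surface on the
+// decode side, where untyped mappings default to
+// map[interface{}]interface{} (see defaultMapType in decode.go above).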
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
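+//
+// A usage sketch: these emptiness checks are why empty collections are
+// forced into flow style and render as "[]" and "{}":
+//
+//	out, _ := yaml.Marshal(map[string][]int{"a": {}})
+//	// out == []byte("a: []\n")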
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
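+//
+// Note: when no %TAG directive prefix matches in yaml_emitter_analyze_tag,
+// only tag_data.suffix is set, and the tag is then written below in the
+// verbatim "!<...>" form.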
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
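+//
+// A decoding sketch: anchors are plain alphanumeric names, and on the decode
+// side (decode.go above) an alias simply re-unmarshals the anchored node, so
+//
+//	base: &b {a: 1}
+//	copy: *b
+//
+// yields the same mapping content for "copy" as for "base".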
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
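+//
+// A usage sketch: the scalar analysis above is what forces quoting whenever
+// a plain rendering would be ambiguous, e.g.:
+//
+//	out, _ := yaml.Marshal(map[string]string{"a": "- not a list"})
+//	// out == []byte("a: '- not a list'\n")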
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 000000000..a14435e82 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,362 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
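+		// For illustration: because this case body is empty, control simply
+		// leaves the type switch, and the reflect.Ptr/reflect.Struct branches
+		// below route the value to e.timev, which emits a plain RFC3339
+		// scalar rather than a quoted string. Assuming this package's public
+		// Marshal entry point, the rendered output would look roughly like:
+		//
+		//	out, _ := Marshal(map[string]time.Time{"t": t})
+		//	// t: 2017-06-28T12:33:53Z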
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod new file mode 100644 index 000000000..1934e8769 --- /dev/null +++ 
b/vendor/gopkg.in/yaml.v2/go.mod @@ -0,0 +1,5 @@ +module "gopkg.in/yaml.v2" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 000000000..81d05dfe5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
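+// Each case below consumes tokens for a single production, fills in exactly
+// one event, and leaves parser.state (with parser.states as a return stack
+// for nested collections) pointing at the follow-up production. A minimal
+// sketch of pumping the machine through yaml_parser_parse above, assuming a
+// parser whose input and read handler are already configured:
+//
+//	var event yaml_event_t
+//	for {
+//		if !yaml_parser_parse(&parser, &event) {
+//			break // parser.problem and parser.problem_mark describe the error
+//		}
+//		if event.typ == yaml_STREAM_END_EVENT {
+//			break
+//		}
+//		// ... consume the event ...
+//	}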
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
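+// An empty scalar stands in for a node that the grammar allows to be
+// omitted, so consumers always receive a complete event sequence. For
+// example, given the block mapping
+//
+//	foo:
+//	bar: 1
+//
+// the VALUE production for "foo" finds no node content and this helper
+// synthesizes a zero-length plain SCALAR event at the supplied mark, which
+// the resolver later turns into !!null.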
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 000000000..7c1f5fac3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
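+// For example, a UTF-16LE stream holding "a: 1" begins with the bytes
+//
+//	ff fe 61 00 3a 00 20 00 31 00
+//
+// where ff fe is the BOM matched below; yaml_parser_determine_encoding
+// consumes it (advancing raw_buffer_pos and offset by two) so the mark
+// never reaches the decoded character buffer.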
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
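+	// Detection is deferred to this first fill so the caller can install
+	// any read handler beforehand; from here on, the raw bytes are
+	// transcoded into the UTF-8 kept in parser.buffer. A stream opening
+	// with fe ff, for instance, selects the UTF-16BE path below.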
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
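+				// Each width has a minimum decoded value, so overlong forms
+				// are caught here: the two-byte sequence c0 80, an overlong
+				// encoding of NUL with value 0x00 < 0x80, fails the
+				// width == 2 arm and is rejected as invalid.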
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
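+			// width counts raw input bytes, so a UTF-16 surrogate pair
+			// advances the raw buffer by four bytes for one character:
+			// U+1F600, for example, arrives on a UTF-16BE stream as
+			// d8 3d de 00 and is written out below as UTF-8 f0 9f 98 80.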
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Otherwise, every single
+	// check that calls this function to ensure the buffer has a given length
+	// would either panic (in Go) or access invalid memory (in C). This
+	// happens here due to the EOF above breaking early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 000000000..6c151db6f
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,258 @@
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", yaml_MERGE_TAG, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	// TODO This can easily be made faster and produce less garbage.
+	if strings.HasPrefix(tag, longTagPrefix) {
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+		return true
+	}
+	return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+			return
+		case yaml_FLOAT_TAG:
+			if rtag == yaml_INT_TAG {
+				switch v := out.(type) {
+				case int64:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				case int:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				}
+			}
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+		// Handle things we can look up in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or
+			// there's an explicit !!timestamp tag.
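+			// For example, the plain scalar 2001-12-14 resolves to a
+			// !!timestamp below, while 0x1A parses as the integer 26 and
+			// 1_000 as 1000 once the underscores are stripped.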
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 000000000..077fd1dd2
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2696 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...'
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? 
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. 
+ if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? 
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
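+// For example, when scanning "a: 1", the plain scalar "a" is first saved as a
+// potential simple key; once the following ':' is seen, a KEY token is
+// inserted retroactively at the saved token position (see
+// yaml_parser_fetch_value below).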
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		}
+		simple_key.mark = parser.mark
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append a BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
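+		// For example, in
+		//     key:
+		//       nested: value
+		//     other: value
+		// reaching "other" at the outer column pops the inner mapping's
+		// indentation level and emits its BLOCK-END token here.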
+ parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. 
+ token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. 
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. 
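+	// For example, the tagged scalar `!!str foo` produces a TAG("!!","str")
+	// token here; the following SCALAR("foo",plain) is fetched separately.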
+ var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! 
tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. 
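+// For example, given the line `%YAML 1.1`, this consumes " 1.1" and yields
+// major == 1 and minor == 1.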
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
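+	// For example, scanning the directive `%TAG !yaml! tag:yaml.org,2002:`
+	// yields the handle "!yaml!" and the prefix "tag:yaml.org,2002:".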
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
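+ // For example, the escaped pair "%C3%A9" contributes the two octets
+ // 0xC3 0xA9, which form the UTF-8 encoding of 'é'.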
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
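+ // Either order yields the same header semantics; e.g. the illustrative
+ // headers "|2-" and "|-2" both request a literal scalar with an
+ // indentation indicator of 2 and strip chomping.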
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. 
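+ // Strip chomping (-1) drops the trailing line break, clip (the default)
+ // keeps it, and keep (+1) also retains trailing empty lines; e.g. the
+ // illustrative scalars "|-\n hi\n" and "|\n hi\n" scan to "hi" and
+ // "hi\n" respectively.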
+ if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. 
+ s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. 
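+ // As an illustration of the escape handling above, "\u00E9" reads the
+ // four hex digits into the value 0x00E9 and, falling in the two-byte
+ // range, appends the octets 0xC3 0xA9.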
+ if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) 
+ } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
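+ // Consuming a line break while scanning the scalar leaves the scanner
+ // at the start of a fresh line, where a new simple key may legally
+ // begin.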
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 000000000..4c45e660a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 000000000..a2dde608c --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
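+// Flushing an already-empty buffer is a no-op that still reports success.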
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 000000000..de85aa4cd
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,466 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
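+//
+// Decoding into a map works the same way; as an illustrative snippet:
+//
+// var m map[string]interface{}
+// err := yaml.Unmarshal([]byte("a: 1\nb: [x, y]"), &m)
+// // m["a"] == 1 and m["b"] == []interface{}{"x", "y"}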
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, true)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ strict bool
+ parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+ dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder(dec.strict)
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder(strict)
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be included if that method returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
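+//
+// For example, an illustrative type may opt in as follows:
+//
+// type Point struct{ X, Y int }
+//
+// func (p Point) IsZero() bool { return p.X == 0 && p.Y == 0 }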
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 000000000..e25cee563 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,738 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. 
+const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. +) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). 
+ encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. 
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. 
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. 
+ + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
+type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go new file mode 100644 index 000000000..8110ce3c3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. 
+ output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 89fbc937d..3f2ba9413 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -789,6 +789,12 @@ "revision": "7112fcd50da4ea27e8d4d499b30f04eea143bec2", "revisionTime": "2018-05-31T02:06:30Z" }, + { + "checksumSHA1": "ujo1JDey6cxwnGs4HXVCJNVrhHw=", + "path": "github.com/gophercloud/utils/openstack/clientconfig", + "revision": "afce78e977c56ca5407957bf67e8ecc56aab601d", + "revisionTime": "2018-05-22T20:53:45Z" + }, { "checksumSHA1": "xSmii71kfQASGNG2C8ttmHx9KTE=", "path": "github.com/gorilla/websocket", @@ -1672,6 +1678,12 @@ "checksumSHA1": "U7dGDNwEHORvJFMoNSXErKE7ITg=", "path": "google.golang.org/cloud/internal", "revision": "5a3b06f8b5da3b7c3a93da43163b872c86c509ef" + }, + { + "checksumSHA1": "ZSWoOPUNRr5+3dhkLK3C4cZAQPk=", + "path": "gopkg.in/yaml.v2", + "revision": "5420a8b6744d3b0345ab293f6fcba19c978f1183", + "revisionTime": "2018-03-28T19:50:20Z" } ], "rootPath": "github.com/hashicorp/packer" From 3bdf1f184940ac460f5f56e69ab27b42a5a351d8 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 17 Jan 2017 14:38:49 -0800 Subject: [PATCH 097/138] openstack: Add support for token authorization and cloud.yaml via config options `cloud` and `token` and environment variables OS_CLOUD and OS_TOKEN. 
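For illustration, a minimal `openstack` builder stanza using the new options
might look like the sketch below. The endpoint, token, and tenant values are
placeholders, and other required builder options (such as the source image)
are omitted:

``` json
{
  "builders": [
    {
      "type": "openstack",
      "identity_endpoint": "https://keystone.example.com:5000/v3",
      "token": "<token id>",
      "tenant_name": "mytenant"
    }
  ]
}
```

Equivalently, setting `"cloud": "mycloud"` (or exporting `OS_CLOUD=mycloud`)
should cause the matching entry in a `clouds.yaml` file to supply the
authentication details instead.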
--- builder/openstack/access_config.go | 59 +++++++++++++++---- .../source/docs/builders/openstack.html.md | 49 +++++++++++---- 2 files changed, 85 insertions(+), 23 deletions(-) diff --git a/builder/openstack/access_config.go b/builder/openstack/access_config.go index 5d9c2b39b..a5ab91b60 100644 --- a/builder/openstack/access_config.go +++ b/builder/openstack/access_config.go @@ -2,14 +2,14 @@ package openstack import ( "crypto/tls" - "fmt" - "os" - "crypto/x509" + "fmt" "io/ioutil" + "os" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/utils/openstack/clientconfig" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/packer/template/interpolate" ) @@ -30,6 +30,8 @@ type AccessConfig struct { CACertFile string `mapstructure:"cacert"` ClientCertFile string `mapstructure:"cert"` ClientKeyFile string `mapstructure:"key"` + Token string `mapstructure:"token"` + Cloud string `mapstructure:"cloud"` osClient *gophercloud.ProviderClient } @@ -42,10 +44,6 @@ func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error { return []error{fmt.Errorf("Invalid endpoint type provided")} } - if c.Region == "" { - c.Region = os.Getenv("OS_REGION_NAME") - } - // Legacy RackSpace stuff. We're keeping this around to keep things BC. if c.Password == "" { c.Password = os.Getenv("SDK_PASSWORD") @@ -59,6 +57,15 @@ func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error { if c.Username == "" { c.Username = os.Getenv("SDK_USERNAME") } + // End RackSpace + + if c.Cloud == "" { + c.Cloud = os.Getenv("OS_CLOUD") + } + if c.Region == "" { + c.Region = os.Getenv("OS_REGION_NAME") + } + if c.CACertFile == "" { c.CACertFile = os.Getenv("OS_CACERT") } @@ -69,8 +76,39 @@ func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error { c.ClientKeyFile = os.Getenv("OS_KEY") } - // Get as much as possible from the end - ao, _ := openstack.AuthOptionsFromEnv() + clientOpts := new(clientconfig.ClientOpts) + + // If a cloud entry was given, base AuthOptions on a clouds.yaml file. + if c.Cloud != "" { + clientOpts.Cloud = c.Cloud + + cloud, err := clientconfig.GetCloudFromYAML(clientOpts) + if err != nil { + return []error{err} + } + + if c.Region == "" && cloud.RegionName != "" { + c.Region = cloud.RegionName + } + } else { + authInfo := &clientconfig.AuthInfo{ + AuthURL: c.IdentityEndpoint, + DomainID: c.DomainID, + DomainName: c.DomainName, + Password: c.Password, + ProjectID: c.TenantID, + ProjectName: c.TenantName, + Token: c.Token, + Username: c.Username, + UserID: c.UserID, + } + clientOpts.AuthInfo = authInfo + } + + ao, err := clientconfig.AuthOptions(clientOpts) + if err != nil { + return []error{err} + } // Make sure we reauth as needed ao.AllowReauth = true @@ -87,6 +125,7 @@ func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error { {&c.TenantName, &ao.TenantName}, {&c.DomainID, &ao.DomainID}, {&c.DomainName, &ao.DomainName}, + {&c.Token, &ao.TokenID}, } for _, s := range overrides { if *s.From != "" { @@ -132,7 +171,7 @@ func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error { client.HTTPClient.Transport = transport // Auth - err = openstack.Authenticate(client, ao) + err = openstack.Authenticate(client, *ao) if err != nil { return []error{err} } diff --git a/website/source/docs/builders/openstack.html.md b/website/source/docs/builders/openstack.html.md index cbdea1f2a..c6059da50 100644 --- a/website/source/docs/builders/openstack.html.md +++ b/website/source/docs/builders/openstack.html.md @@ -25,7 +25,7 @@ created. 
This simplifies configuration quite a bit.
 
 The builder does *not* manage images. Once it creates an image, it is up to you
 to use it or delete it.
 
-~> **Note:** To use OpenStack builder with the OpenStack Newton (Oct 2016)
+~> **Note:** To use OpenStack builder with the OpenStack Newton (Oct 2016)
 or earlier, we recommend you use Packer v1.1.2 or earlier version.
 
 ~> **OpenStack Liberty or later requires OpenSSL!** To use the OpenStack
@@ -56,7 +56,7 @@ builder.
 
 - `identity_endpoint` (string) - The URL to the OpenStack Identity service. If
   not specified, Packer will use the environment variables `OS_AUTH_URL`,
-  if set.
+  if set. This is not required if using `clouds.yaml`.
 
 - `source_image` (string) - The ID or full URL to the base image to use. This
   is the image that will be used to launch a new server and provision it.
@@ -69,11 +69,14 @@ builder.
 
 - `username` or `user_id` (string) - The username or id used to connect to the
   OpenStack service. If not specified, Packer will use the environment
-  variable `OS_USERNAME` or `OS_USERID`, if set.
+  variable `OS_USERNAME` or `OS_USERID`, if set. This is not required if
+  using an access token instead of a password or if using `clouds.yaml`.
 
 - `password` (string) - The password used to connect to the OpenStack service.
   If not specified, Packer will use the environment variables `OS_PASSWORD`,
-  if set.
+  if set. This is not required if using an access token instead of a password
+  or if using `clouds.yaml`.
+
 
 ### Optional:
 
@@ -82,14 +85,20 @@ builder.
   cluster will be used. This may be required for some OpenStack clusters.
 
 - `cacert` (string) - Custom CA certificate file path.
-  If omitted the OS\_CACERT environment variable can be used.
+  If omitted the `OS_CACERT` environment variable can be used.
+
+- `cert` (string) - Client certificate file path for SSL client authentication.
+  If omitted the `OS_CERT` environment variable can be used.
+
+- `cloud` (string) - An entry in a `clouds.yaml` file. See the OpenStack
+  os-client-config
+  [documentation](https://docs.openstack.org/os-client-config/latest/user/configuration.html)
+  for more information about `clouds.yaml` files. If omitted, the `OS_CLOUD`
+  environment variable is used.
 
 - `config_drive` (boolean) - Whether or not nova should use ConfigDrive for
   cloud-init metadata.
 
-- `cert` (string) - Client certificate file path for SSL client authentication.
-  If omitted the OS\_CERT environment variable can be used.
-
 - `domain_name` or `domain_id` (string) - The Domain name or ID you are
   authenticating with. OpenStack installations require this if identity v3 is used.
   Packer will use the environment variable `OS_DOMAIN_NAME` or `OS_DOMAIN_ID`, if set.
@@ -105,7 +114,7 @@ builder.
 
 - `image_members` (array of strings) - List of members to add to the image
   after creation. An image member is usually a project (also called the
-  “tenant”) with whom the image is shared.
+  "tenant") with whom the image is shared.
 
 - `image_visibility` (string) - One of "public", "private", "shared", or
   "community".
@@ -114,7 +123,7 @@
   done over an insecure connection. By default this is false.
 
 - `key` (string) - Client private key file path for SSL client authentication.
-  If omitted the OS\_KEY environment variable can be used.
+  If omitted the `OS_KEY` environment variable can be used.
 
 - `metadata` (object of key/value strings) - Glance metadata that will be
   applied to the image.
@@ -181,6 +190,9 @@ builder.
   Packer will use the environment variable `OS_TENANT_NAME` or
   `OS_TENANT_ID`, if set.
Tenant is also called Project in later versions of OpenStack.
 
+- `token` (string) - The token (ID) to use with token-based authorization.
+  Packer will use the environment variable `OS_TOKEN`, if set.
+
 - `use_floating_ip` (boolean) - *Deprecated* use `floating_ip` or
   `floating_ip_pool` instead.
 
@@ -280,11 +292,12 @@ export OS_PROJECT_DOMAIN_NAME="mydomain"
 
 ## Notes on OpenStack Authorization
 
-The simplest way to get all settings for authorization agains OpenStack is to
+The simplest way to get all settings for authorization against OpenStack is to
 go into the OpenStack Dashboard (Horizon) select your *Project* and navigate
 *Project, Access & Security*, select *API Access* and *Download OpenStack RC
-File v3*. Source the file, and select your wanted region by setting
-environment variable `OS_REGION_NAME` or `OS_REGION_ID` and `export OS_TENANT_NAME=$OS_PROJECT_NAME` or `export OS_TENANT_ID=$OS_PROJECT_ID`.
+File v3*. Source the file, and select your wanted region
+by setting environment variable `OS_REGION_NAME` or `OS_REGION_ID` and
+`export OS_TENANT_NAME=$OS_PROJECT_NAME` or `export OS_TENANT_ID=$OS_PROJECT_ID`.
 
 ~> `OS_TENANT_NAME` or `OS_TENANT_ID` must be used even with Identity v3,
 `OS_PROJECT_NAME` and `OS_PROJECT_ID` has no effect in Packer.
 
 To troubleshoot authorization issues test you environment variables with the
 OpenStack cli. It can be installed with
 
     $ pip install --user python-openstackclient
+
+### Authorize Using Tokens
+
+To authorize with an access token, only `identity_endpoint` and `token` are
+needed, and possibly `tenant_name` or `tenant_id`, depending on your token
+type. Alternatively, use the following environment variables:
+
+- `OS_AUTH_URL`
+- `OS_TOKEN`
+- One of `OS_TENANT_NAME` or `OS_TENANT_ID`
From 87a93e84acdfce67163f9ca1ff1794390e1c1ec3 Mon Sep 17 00:00:00 2001
From: Megan Marsh
Date: Fri, 8 Jun 2018 13:14:55 -0700
Subject: [PATCH 098/138] sh -c to execute command to accommodate communicator
 change

---
 builder/amazon/chroot/run_local_commands.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builder/amazon/chroot/run_local_commands.go b/builder/amazon/chroot/run_local_commands.go
index 4d5b0f75c..fc1c01e2b 100644
--- a/builder/amazon/chroot/run_local_commands.go
+++ b/builder/amazon/chroot/run_local_commands.go
@@ -22,7 +22,7 @@ func RunLocalCommands(commands []string, wrappedCommand CommandWrapper, ctx inte
 	ui.Say(fmt.Sprintf("Executing command: %s", command))
 
 	comm := &sl.Communicator{
-		ExecuteCommand: []string{command},
+		ExecuteCommand: []string{"sh", "-c", command},
 	}
 	cmd := &packer.RemoteCmd{Command: command}
 	if err := cmd.StartWithUi(comm, ui); err != nil {
From 6b1187666ca9831c4b22b462da006d220b95f402 Mon Sep 17 00:00:00 2001
From: Megan Marsh
Date: Fri, 8 Jun 2018 14:28:09 -0700
Subject: [PATCH 099/138] We should only have an installation guide in one
 place, to keep it up-to-date more easily.

---
 website/source/docs/install/index.html.md | 57 +------------------
 .../intro/getting-started/install.html.md | 54 ++++++++++++++----
 2 files changed, 45 insertions(+), 66 deletions(-)

diff --git a/website/source/docs/install/index.html.md b/website/source/docs/install/index.html.md
index ffda91a05..635ffb134 100644
--- a/website/source/docs/install/index.html.md
+++ b/website/source/docs/install/index.html.md
@@ -9,58 +9,5 @@ sidebar_current: 'docs-install'
 
 # Install Packer
 
-Installing Packer is simple. There are two approaches to installing Packer:
-
-1. Using a [precompiled binary](#precompiled-binaries)
-
-2. Installing [from source](#compiling-from-source)
-
-Downloading a precompiled binary is easiest, and we provide downloads over TLS
-along with SHA256 sums to verify the binary. We also distribute a PGP signature
-with the SHA256 sums that can be verified.
-
-## Precompiled Binaries
-
-To install the precompiled binary, [download](/downloads.html) the appropriate
-package for your system. Packer is currently packaged as a zip file. We do not
-have any near term plans to provide system packages.
-
-Once the zip is downloaded, unzip it into any directory. The `packer` binary
-inside is all that is necessary to run Packer (or `packer.exe` for Windows). Any
-additional files, if any, aren't required to run Packer.
-
-Copy the binary to anywhere on your system. If you intend to access it from the
-command-line, make sure to place it somewhere on your `PATH` before /usr/sbin.
-
-## Compiling from Source
-
-To compile from source, you will need [Go](https://golang.org) installed and
-configured properly (including a `GOPATH` environment variable set), as well
-as a copy of [`git`](https://www.git-scm.com/) in your `PATH`.
-
-1. Clone the Packer repository from GitHub into your `GOPATH`:
-
-    ``` shell
-    $ mkdir -p $GOPATH/src/github.com/hashicorp && cd $_
-    $ git clone https://github.com/hashicorp/packer.git
-    $ cd packer
-    ```
-
-2. Build Packer for your current system and put the
-   binary in `./bin/` (relative to the git checkout). The `make dev` target is
-   just a shortcut that builds `packer` for only your local build environment (no
-   cross-compiled targets).
-
-    ``` shell
-    $ make dev
-    ```
-
-## Verifying the Installation
-
-To verify Packer is properly installed, run `packer -v` on your system. You
-should see help output. If you are executing it from the command line, make sure
-it is on your PATH or you may get an error about Packer not being found.
-
-``` shell
-$ packer -v
-```
+For detailed instructions on how to install Packer, see [this page](/intro/getting-started/install.html) in our
+getting-started guide.
\ No newline at end of file
diff --git a/website/source/intro/getting-started/install.html.md b/website/source/intro/getting-started/install.html.md
index 884683e9f..e66328e61 100644
--- a/website/source/intro/getting-started/install.html.md
+++ b/website/source/intro/getting-started/install.html.md
@@ -10,24 +10,33 @@ description: |-
   advanced users.
 ---
 
-# Install Packer
+# Install Options
 
-Packer must first be installed on the machine you want to run it on. To make
-installation easier, Packer is distributed as a [binary package](/downloads.html)
-for all supported platforms and architectures. This page will not cover how to
-compile Packer from source, as that is covered in the
-[README](https://github.com/hashicorp/packer/blob/master/README.md) and is only
-recommended for advanced users.
+Packer may be installed in the following ways:
 
-## Installing Packer
+1. Using a [precompiled binary](#precompiled-binaries). We release binaries
+   for all supported platforms and architectures. This method is recommended for
+   most users.
 
-To install packer, first find the [appropriate package](/downloads.html) for
-your system and download it. Packer is packaged as a "zip" file.
+2. Installing [from source](#compiling-from-source). This method is only
+   recommended for advanced users.
+
+3. An unofficial [alternative installation method](#alternative-installation-methods).
 
+## Precompiled Binaries
+
+To install the precompiled binary, [download](/downloads.html) the appropriate
+package for your system. Packer is currently packaged as a zip file. We do not
+have any near term plans to provide system packages.
 
 Next, unzip the downloaded package into a directory where Packer will be
 installed. On Unix systems, `~/packer` or `/usr/local/packer` is generally good,
 depending on whether you want to restrict the install to just your user or
-install it system-wide. On Windows systems, you can put it wherever you'd like.
+install it system-wide. If you intend to access it from the command-line, make
+sure to place it somewhere on your `PATH` before `/usr/sbin`. On Windows
+systems, you can put it wherever you'd like. The `packer` (or `packer.exe` for
+Windows) binary inside is all that is necessary to run Packer. Any additional
+files aren't required to run Packer.
 
 After unzipping the package, the directory should contain a single binary
 program called `packer`. The final step to
@@ -38,6 +47,29 @@ for instructions on setting the PATH on Linux and Mac.
 [This page](https://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows)
 contains instructions for setting the PATH on Windows.
 
+## Compiling from Source
+
+To compile from source, you will need [Go](https://golang.org) installed and
+configured properly (including a `GOPATH` environment variable set), as well
+as a copy of [`git`](https://www.git-scm.com/) in your `PATH`.
+
+1. Clone the Packer repository from GitHub into your `GOPATH`:
+
+    ``` shell
+    $ mkdir -p $GOPATH/src/github.com/hashicorp && cd $_
+    $ git clone https://github.com/hashicorp/packer.git
+    $ cd packer
+    ```
+
+2. Build Packer for your current system and put the
+   binary in `./bin/` (relative to the git checkout). The `make dev` target is
+   just a shortcut that builds `packer` for only your local build environment (no
+   cross-compiled targets).
+
+    ``` shell
+    $ make dev
+    ```
+
 ## Verifying the Installation
 
 After installing Packer, verify the installation worked by opening a new command
From f9b26a266651c59f3fee95a01b6e0f04569648e6 Mon Sep 17 00:00:00 2001
From: Rickard von Essen
Date: Sat, 9 Jun 2018 08:59:00 +0200
Subject: [PATCH 100/138] Updated installation docs to not require GOPATH env
 var

---
 website/source/intro/getting-started/install.html.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/website/source/intro/getting-started/install.html.md b/website/source/intro/getting-started/install.html.md
index e66328e61..ce8a8e5f4 100644
--- a/website/source/intro/getting-started/install.html.md
+++ b/website/source/intro/getting-started/install.html.md
@@ -50,13 +50,13 @@ contains instructions for setting the PATH on Windows.
 ## Compiling from Source
 
 To compile from source, you will need [Go](https://golang.org) installed and
-configured properly (including a `GOPATH` environment variable set), as well
-as a copy of [`git`](https://www.git-scm.com/) in your `PATH`.
+configured properly as well as a copy of [`git`](https://www.git-scm.com/)
+in your `PATH`.
 
 1. Clone the Packer repository from GitHub into your `GOPATH`:
 
     ``` shell
-    $ mkdir -p $GOPATH/src/github.com/hashicorp && cd $_
+    $ mkdir -p $(go enc GOPATH)/src/github.com/hashicorp && cd $_
     $ git clone https://github.com/hashicorp/packer.git
     $ cd packer
     ```
From 9b30c9aed0fc7aaed71b5197ce9422b8b8e3d18f Mon Sep 17 00:00:00 2001
From: Ali Rizvi-Santiago
Date: Mon, 11 Jun 2018 17:53:54 -0500
Subject: [PATCH 101/138] Allow StepAttachIso in the VirtualBox builder to
 resolve symbolic links when processing the IsoPath.

This just closes out a really old issue (#3437) by using
`filepath.EvalSymlinks` to resolve the symbolic link that the user specifies
for the IsoPath.
---
 builder/virtualbox/iso/step_attach_iso.go | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/builder/virtualbox/iso/step_attach_iso.go b/builder/virtualbox/iso/step_attach_iso.go
index 619d61afd..928a76761 100644
--- a/builder/virtualbox/iso/step_attach_iso.go
+++ b/builder/virtualbox/iso/step_attach_iso.go
@@ -34,6 +34,16 @@ func (s *stepAttachISO) Run(_ context.Context, state multistep.StateBag) multist
 		device = "0"
 	}
 
+	// If it's a symlink, resolve it to its target.
+	resolvedIsoPath, err := filepath.EvalSymlinks(isoPath)
+	if err != nil {
+		err := fmt.Errorf("Error resolving symlink for ISO: %s", err)
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+	isoPath = resolvedIsoPath
+
 	// Attach the disk to the controller
 	command := []string{
 		"storageattach", vmName,
From 963932699ec96335d54cf26eedb0bb723863958e Mon Sep 17 00:00:00 2001
From: Ali Rizvi-Santiago
Date: Mon, 11 Jun 2018 18:02:52 -0500
Subject: [PATCH 102/138] Remove a stray tab that resulted from poor usage of
 GitHub's file editor.

That's what I get for not making a proper branch...
---
 builder/virtualbox/iso/step_attach_iso.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builder/virtualbox/iso/step_attach_iso.go b/builder/virtualbox/iso/step_attach_iso.go
index 928a76761..94bdc7a2f 100644
--- a/builder/virtualbox/iso/step_attach_iso.go
+++ b/builder/virtualbox/iso/step_attach_iso.go
@@ -43,7 +43,7 @@ func (s *stepAttachISO) Run(_ context.Context, state multistep.StateBag) multist
 		return multistep.ActionHalt
 	}
 	isoPath = resolvedIsoPath
-	
+
 	// Attach the disk to the controller
 	command := []string{
 		"storageattach", vmName,
From 47a3315fdef09e1ab0b5a4f577c9e1c37cf107f4 Mon Sep 17 00:00:00 2001
From: Ali Rizvi-Santiago
Date: Mon, 11 Jun 2018 18:07:43 -0500
Subject: [PATCH 103/138] Added a missing reference to the "path/filepath"
 module.

Lol. Dammit.
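For context, a minimal standalone sketch of the standard-library call the fix
relies on; the `/isos` paths here are hypothetical:

``` go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// If /isos/current.iso is a symlink to /isos/ubuntu-16.04.iso,
	// EvalSymlinks returns the resolved target path. The path must
	// exist; a dangling link or missing file comes back as an error,
	// which is why the builder halts the step on failure.
	resolved, err := filepath.EvalSymlinks("/isos/current.iso")
	if err != nil {
		fmt.Println("Error resolving symlink for ISO:", err)
		return
	}
	fmt.Println("attaching ISO at:", resolved)
}
```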
--- builder/virtualbox/iso/step_attach_iso.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builder/virtualbox/iso/step_attach_iso.go b/builder/virtualbox/iso/step_attach_iso.go index 94bdc7a2f..624696482 100644 --- a/builder/virtualbox/iso/step_attach_iso.go +++ b/builder/virtualbox/iso/step_attach_iso.go @@ -3,6 +3,7 @@ package iso import ( "context" "fmt" + "path/filepath" vboxcommon "github.com/hashicorp/packer/builder/virtualbox/common" "github.com/hashicorp/packer/helper/multistep" From 5153b22b7eb40f7dcb431d51d2dea61f6035d95e Mon Sep 17 00:00:00 2001 From: DanHam Date: Tue, 12 Jun 2018 13:31:29 +0100 Subject: [PATCH 104/138] Fix typo in command: enc -> env --- website/source/intro/getting-started/install.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/intro/getting-started/install.html.md b/website/source/intro/getting-started/install.html.md index ce8a8e5f4..789694f68 100644 --- a/website/source/intro/getting-started/install.html.md +++ b/website/source/intro/getting-started/install.html.md @@ -56,7 +56,7 @@ in your `PATH`. 1. Clone the Packer repository from GitHub into your `GOPATH`: ``` shell - $ mkdir -p $(go enc GOPATH)/src/github.com/hashicorp && cd $_ + $ mkdir -p $(go env GOPATH)/src/github.com/hashicorp && cd $_ $ git clone https://github.com/hashicorp/packer.git $ cd packer ``` From 6226992757975826291954272a650ff2efde423e Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 12 Jun 2018 16:16:39 -0700 Subject: [PATCH 105/138] fix file provisioner docs --- website/source/docs/provisioners/file.html.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/website/source/docs/provisioners/file.html.md b/website/source/docs/provisioners/file.html.md index d2baa868b..abb7f10b9 100644 --- a/website/source/docs/provisioners/file.html.md +++ b/website/source/docs/provisioners/file.html.md @@ -32,7 +32,9 @@ The file provisioner can upload both single files and complete directories. ## Configuration Reference -The available configuration options are listed below. All elements are required. +The available configuration options are listed below. + +### Required - `source` (string) - The path to a local file or directory to upload to the machine. The path can be absolute or relative. If it is relative, it is @@ -48,6 +50,8 @@ The available configuration options are listed below. All elements are required. "upload". If it is set to "download" then the file "source" in the machine will be downloaded locally to "destination" +### Optional + - `generated` (boolean) - For advanced users only. If true, check the file existence only before uploading, rather than upon pre-build validation. This allows to upload files created on-the-fly. This defaults to false. We From 286a5aa8c727b8be6ceaf5a75d167c0cdd811227 Mon Sep 17 00:00:00 2001 From: EximChua <40189902+EximChua@users.noreply.github.com> Date: Wed, 13 Jun 2018 15:34:06 +0800 Subject: [PATCH 106/138] Update info on format of the credentials file Update info on format of the credentials file. --- website/source/docs/builders/amazon.html.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/website/source/docs/builders/amazon.html.md b/website/source/docs/builders/amazon.html.md index 2072d5312..d54a28cef 100644 --- a/website/source/docs/builders/amazon.html.md +++ b/website/source/docs/builders/amazon.html.md @@ -102,6 +102,14 @@ credentials inline, or in the environment, Packer will check this location. 
You can optionally specify a different location in the configuration by setting
 the environment with the `AWS_SHARED_CREDENTIALS_FILE` variable.
 
+The format of the credentials file is as follows:
+
+```
+[default]
+aws_access_key_id=
+aws_secret_access_key=
+```
+
 You may also configure the profile to use by setting the `profile`
 configuration option, or setting the `AWS_PROFILE` environment variable:
 
From 6068320752eabc490147d98b2b7e7ea93aa57377 Mon Sep 17 00:00:00 2001
From: egazrigh
Date: Wed, 13 Jun 2018 15:52:50 +0200
Subject: [PATCH 107/138] Update example for -var-file

On Windows (v1.2.4) the `-var-file=` syntax cannot be used; it fails with:
invalid value "" for flag -var-file: open : The system cannot find the file
specified. The `-var-file .\file` syntax has to be used instead.
---
 website/source/docs/templates/user-variables.html.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/website/source/docs/templates/user-variables.html.md b/website/source/docs/templates/user-variables.html.md
index bcc22339b..8c3c8dfc9 100644
--- a/website/source/docs/templates/user-variables.html.md
+++ b/website/source/docs/templates/user-variables.html.md
@@ -176,7 +176,10 @@ variable values. Assuming this file is in `variables.json`, we can build our
 template using the following command:
 
 ``` text
+On Linux:
 $ packer build -var-file=variables.json template.json
+On Windows:
+packer build -var-file variables.json template.json
 ```
 
 The `-var-file` flag can be specified multiple times and variables from multiple
From b7ea0b44fc5ad453314c0a460640e0ca6eb31385 Mon Sep 17 00:00:00 2001
From: Giovanni Tirloni
Date: Wed, 13 Jun 2018 17:42:27 -0300
Subject: [PATCH 108/138] HTTPDownloader - Fix invalid error handling

---
 common/download.go | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/common/download.go b/common/download.go
index 74bef6313..9d2fa45f4 100644
--- a/common/download.go
+++ b/common/download.go
@@ -278,20 +278,27 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error {
 	}
 
 	resp, err := httpClient.Do(req)
-	if err == nil && (resp.StatusCode >= 200 && resp.StatusCode < 300) {
-		// If the HEAD request succeeded, then attempt to set the range
-		// query if we can.
-		if resp.Header.Get("Accept-Ranges") == "bytes" {
-			if fi, err := dst.Stat(); err == nil {
-				if _, err = dst.Seek(0, os.SEEK_END); err == nil {
-					req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size()))
+	if err == nil {
 
-					d.current = uint64(fi.Size())
+		if resp.StatusCode >= 200 && resp.StatusCode < 300 {
+			// If the HEAD request succeeded, then attempt to set the range
+			// query if we can.
+ if resp.Header.Get("Accept-Ranges") == "bytes" { + if fi, err := dst.Stat(); err == nil { + if _, err = dst.Seek(0, os.SEEK_END); err == nil { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) + + d.current = uint64(fi.Size()) + } } } } - } else if err != nil || (resp.StatusCode >= 400 && resp.StatusCode < 600) { - return fmt.Errorf("%s", resp.Status) + + if resp.StatusCode >= 400 && resp.StatusCode < 600 { + return fmt.Errorf("Received HTTP error: %s", resp.Status) + } + } else { + return err } // Set the request to GET now, and redo the query to download From 28095cf027d1904f7af6dea2390c86228c2102ed Mon Sep 17 00:00:00 2001 From: Giovanni Tirloni Date: Wed, 13 Jun 2018 20:30:19 -0300 Subject: [PATCH 109/138] Do not return error on initial HEAD request --- common/download.go | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/common/download.go b/common/download.go index 9d2fa45f4..59d882b50 100644 --- a/common/download.go +++ b/common/download.go @@ -278,8 +278,9 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { } resp, err := httpClient.Do(req) - if err == nil { - + if err != nil { + log.Printf("[DEBUG] (download) Error making HTTP HEAD request: %s", err.Error()) + } else { if resp.StatusCode >= 200 && resp.StatusCode < 300 { // If the HEAD request succeeded, then attempt to set the range // query if we can. @@ -292,13 +293,9 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { } } } + } else { + log.Printf("[DEBUG] (download) Unexpected HTTP response during HEAD request: %s", resp.Status) } - - if resp.StatusCode >= 400 && resp.StatusCode < 600 { - return fmt.Errorf("Received HTTP error: %s", resp.Status) - } - } else { - return err } // Set the request to GET now, and redo the query to download @@ -307,8 +304,10 @@ func (d *HTTPDownloader) Download(dst *os.File, src *url.URL) error { resp, err = httpClient.Do(req) if err != nil { return err - } else if err != nil || (resp.StatusCode >= 400 && resp.StatusCode < 600) { - return fmt.Errorf("%s", resp.Status) + } else { + if resp.StatusCode >= 400 && resp.StatusCode < 600 { + return fmt.Errorf("Error making HTTP GET request: %s", resp.Status) + } } d.total = d.current + uint64(resp.ContentLength) From 83a8537db54f7962ead12225df24e3d0759bd24c Mon Sep 17 00:00:00 2001 From: DanHam Date: Thu, 14 Jun 2018 12:24:00 +0100 Subject: [PATCH 110/138] Hyper-V ISO: Fix long lines --- .../docs/builders/hyperv-iso.html.md.erb | 289 ++++++++++-------- 1 file changed, 156 insertions(+), 133 deletions(-) diff --git a/website/source/docs/builders/hyperv-iso.html.md.erb b/website/source/docs/builders/hyperv-iso.html.md.erb index a2a97a789..297a55e41 100644 --- a/website/source/docs/builders/hyperv-iso.html.md.erb +++ b/website/source/docs/builders/hyperv-iso.html.md.erb @@ -13,19 +13,20 @@ sidebar_current: 'docs-builders-hyperv-iso' Type: `hyperv-iso` -The Hyper-V Packer builder is able to create [Hyper-V](https://www.microsoft.com/en-us/server-cloud/solutions/virtualization.aspx) +The Hyper-V Packer builder is able to create +[Hyper-V](https://www.microsoft.com/en-us/server-cloud/solutions/virtualization.aspx) virtual machines and export them, starting from an ISO image. -The builder builds a virtual machine by creating a new virtual machine -from scratch, booting it, installing an OS, provisioning software within -the OS, then shutting it down. 
The result of the Hyper-V builder is a directory +The builder builds a virtual machine by creating a new virtual machine from +scratch, booting it, installing an OS, provisioning software within the OS, +then shutting it down. The result of the Hyper-V builder is a directory containing all the files necessary to run the virtual machine portably. ## Basic Example -Here is a basic example. This example is not functional. It will start the -OS installer but then fail because we don't provide the preseed file for -Ubuntu to self-install. Still, the example serves to show the basic configuration: +Here is a basic example. This example is not functional. It will start the OS +installer but then fail because we don't provide the preseed file for Ubuntu +to self-install. Still, the example serves to show the basic configuration: ``` json { @@ -45,13 +46,13 @@ provisioner might not be saved. ## Configuration Reference -There are many configuration options available for the Hyper-V builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the Hyper-V builder. They +are organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required: @@ -64,85 +65,93 @@ can be configured for this builder. - `iso_checksum_type` (string) - The type of the checksum specified in `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or "sha512" currently. While "none" will skip checksumming, this is not - recommended since ISO files and virtual harddrive files are generally large - and corruption does happen from time to time. + recommended since ISO files and virtual harddrive files are generally + large and corruption does happen from time to time. - `iso_url` (string) - A URL to the ISO containing the installation image or - virtual harddrive vhd or vhdx file to clone. This URL can be either an HTTP - URL or a file URL (or path to a file). If this is an HTTP URL, Packer will - download the file and cache it between runs. + virtual harddrive vhd or vhdx file to clone. This URL can be either an + HTTP URL or a file URL (or path to a file). If this is an HTTP URL, Packer + will download the file and cache it between runs. ### Optional: - `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. The goal of these commands should - be to type just enough to initialize the operating system installer. Special - keys can be typed as well, and are covered in the section below on the boot - command. If this is not specified, it is assumed the installer will start - itself. + when the virtual machine is first booted. The goal of these commands + should be to type just enough to initialize the operating system + installer. Special keys can be typed as well, and are covered in the + section below on the boot command. If this is not specified, it is assumed + the installer will start itself. - `boot_wait` (string) - The time to wait after booting the initial virtual - machine before typing the `boot_command`. The value of this should be - a duration. 
Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't specified, - the default is 10 seconds. + machine before typing the `boot_command`. The value of this should be a + duration. Examples are "5s" and "1m30s" which will cause Packer to wait + five seconds and one minute 30 seconds, respectively. If this isn't + specified, the default is 10 seconds. -- `cpu` (number) - The number of cpus the virtual machine should use. If this isn't specified, - the default is 1 cpu. +- `cpu` (number) - The number of cpus the virtual machine should use. If + this isn't specified, the default is 1 cpu. - `disk_additional_size` (array of integers) - The size(s) of any additional hard disks for the VM in megabytes. If this is not specified then the VM - will only contain a primary hard disk. Additional drives will be attached to the SCSI - interface only. The builder uses expandable, not fixed-size virtual hard disks, - so the actual file representing the disk will not use the full size unless it is full. + will only contain a primary hard disk. Additional drives will be attached + to the SCSI interface only. The builder uses expandable, not fixed-size + virtual hard disks, so the actual file representing the disk will not use + the full size unless it is full. - `disk_size` (number) - The size, in megabytes, of the hard disk to create for the VM. By default, this is 40 GB. -- `differencing_disk` (boolean) - If true enables differencing disks. Only the changes will be written to the new disk. This is especially useful if your - source is a vhd/vhdx. This defaults to false. +- `differencing_disk` (boolean) - If true enables differencing disks. Only + the changes will be written to the new disk. This is especially useful if + your source is a vhd/vhdx. This defaults to false. - `headless` (boolean) - Packer defaults to building Hyper-V virtual - machines by launching a GUI that shows the console of the machine - being built. When this value is set to true, the machine will start without - a console. + machines by launching a GUI that shows the console of the machine being + built. When this value is set to true, the machine will start without a + console. -- `skip_export` (boolean) - If true skips VM export. If you are interested only in the vhd/vhdx files, you can enable this option. This will create - inline disks which improves the build performance. There will not be any copying of source vhds to temp directory. This defaults to false. +- `skip_export` (boolean) - If true skips VM export. If you are interested + only in the vhd/vhdx files, you can enable this option. This will create + inline disks which improves the build performance. There will not be any + copying of source vhds to temp directory. This defaults to false. -- `enable_dynamic_memory` (boolean) - If true enable dynamic memory for virtual machine. - This defaults to false. +- `enable_dynamic_memory` (boolean) - If true enable dynamic memory for + virtual machine. This defaults to false. -- `enable_mac_spoofing` (boolean) - If true enable mac spoofing for virtual machine. - This defaults to false. +- `enable_mac_spoofing` (boolean) - If true enable mac spoofing for virtual + machine. This defaults to false. -- `enable_secure_boot` (boolean) - If true enable secure boot for virtual machine. This defaults to false. +- `enable_secure_boot` (boolean) - If true enable secure boot for virtual + machine. This defaults to false. 
-- `secure_boot_template` (string) - The secure boot template to be configured. Valid values are "MicrosoftWindows" (Windows) or - "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if enable_secure_boot is set to "true". This defaults to "MicrosoftWindows". +- `secure_boot_template` (string) - The secure boot template to be + configured. Valid values are "MicrosoftWindows" (Windows) or + "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if + enable_secure_boot is set to "true". This defaults to "MicrosoftWindows". -- `enable_virtualization_extensions` (boolean) - If true enable virtualization extensions for virtual machine. - This defaults to false. For nested virtualization you need to enable mac spoofing, disable dynamic memory - and have at least 4GB of RAM for virtual machine. +- `enable_virtualization_extensions` (boolean) - If true enable + virtualization extensions for virtual machine. This defaults to false. + For nested virtualization you need to enable mac spoofing, disable dynamic + memory and have at least 4GB of RAM for virtual machine. - `floppy_files` (array of strings) - A list of files to place onto a floppy - disk that is attached when the VM is booted. This is most useful - for unattended Windows installs, which look for an `Autounattend.xml` file - on removable media. By default, no floppy will be attached. All files - listed in this setting get placed into the root directory of the floppy - and the floppy is attached as the first floppy device. Currently, no - support exists for creating sub-directories on the floppy. Wildcard - characters (`*`, `?`, and `[]`) are allowed. Directory names are also allowed, - which will add all the files found in the directory to the floppy. + disk that is attached when the VM is booted. This is most useful for + unattended Windows installs, which look for an `Autounattend.xml` file on + removable media. By default, no floppy will be attached. All files listed + in this setting get placed into the root directory of the floppy and the + floppy is attached as the first floppy device. Currently, no support + exists for creating sub-directories on the floppy. Wildcard characters + (`*`, `?`, and `[]`) are allowed. Directory names are also allowed, which + will add all the files found in the directory to the floppy. - `floppy_dirs` (array of strings) - A list of directories to place onto the floppy disk recursively. This is similar to the `floppy_files` option except that the directory structure is preserved. This is useful for when your floppy disk includes drivers or if you just want to organize it's - contents as a hierarchy. Wildcard characters (\*, ?, and \[\]) are allowed. - The maximum summary size of all files in the listed directories are the - same as in `floppy_files`. + contents as a hierarchy. Wildcard characters (\*, ?, and \[\]) are + allowed. The maximum summary size of all files in the listed directories + are the same as in `floppy_files`. - `generation` (number) - The Hyper-V generation for the virtual machine. By default, this is 1. Generation 2 Hyper-V virtual machines do not support @@ -150,10 +159,11 @@ can be configured for this builder. drives and dvd drives will also be scsi and not ide. - `guest_additions_mode` (string) - How should guest additions be installed. - If value `attach` then attach iso image with by specified by `guest_additions_path`. - Otherwise guest additions is not installed. + If value `attach` then attach iso image with by specified by + `guest_additions_path`. 
Otherwise guest additions is not installed. -- `guest_additions_path` (string) - The path to the iso image for guest additions. +- `guest_additions_path` (string) - The path to the iso image for guest + additions. - `http_directory` (string) - Path to a directory to serve using an HTTP server. The files in this directory will be available over HTTP that will @@ -164,17 +174,19 @@ can be configured for this builder. below. - `http_port_min` and `http_port_max` (number) - These are the minimum and - maximum port to use for the HTTP server started to serve the `http_directory`. - Because Packer often runs in parallel, Packer will choose a randomly available - port in this range to run the HTTP server. If you want to force the HTTP - server to be on one port, make this minimum and maximum port the same. - By default the values are 8000 and 9000, respectively. + maximum port to use for the HTTP server started to serve the + `http_directory`. Because Packer often runs in parallel, Packer will + choose a randomly available port in this range to run the HTTP server. If + you want to force the HTTP server to be on one port, make this minimum and + maximum port the same. By default the values are 8000 and 9000, + respectively. - `iso_urls` (array of strings) - Multiple URLs for the ISO to download. - Packer will try these in order. If anything goes wrong attempting to download - or while downloading a single URL, it will move on to the next. All URLs - must point to the same file (same checksum). By default this is empty - and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. + Packer will try these in order. If anything goes wrong attempting to + download or while downloading a single URL, it will move on to the next. + All URLs must point to the same file (same checksum). By default this is + empty and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be + specified. - `iso_target_extension` (string) - The extension of the iso file after download. This defaults to "iso". @@ -184,89 +196,96 @@ can be configured for this builder. original filename as its name. - `output_directory` (string) - This is the path to the directory where the - resulting virtual machine will be created. This may be relative or absolute. - If relative, the path is relative to the working directory when `packer` - is executed. This directory must not exist or be empty prior to running the builder. - By default this is "output-BUILDNAME" where "BUILDNAME" is the name - of the build. + resulting virtual machine will be created. This may be relative or + absolute. If relative, the path is relative to the working directory when + `packer` is executed. This directory must not exist or be empty prior to + running the builder. By default this is "output-BUILDNAME" where + "BUILDNAME" is the name of the build. -- `ram_size` (number) - The size, in megabytes, of the ram to create - for the VM. By default, this is 1 GB. +- `ram_size` (number) - The size, in megabytes, of the ram to create for the + VM. By default, this is 1 GB. -- `secondary_iso_images` (array of strings) - A list of iso paths to attached to a - VM when it is booted. This is most useful for unattended Windows installs, which - look for an `Autounattend.xml` file on removable media. By default, no - secondary iso will be attached. +- `secondary_iso_images` (array of strings) - A list of iso paths to + attached to a VM when it is booted. 
This is most useful for unattended + Windows installs, which look for an `Autounattend.xml` file on removable + media. By default, no secondary iso will be attached. -- `shutdown_command` (string) - The command to use to gracefully shut down the machine once all - the provisioning is done. By default this is an empty string, which tells Packer to just - forcefully shut down the machine unless a shutdown command takes place inside script so this may - safely be omitted. If one or more scripts require a reboot it is suggested to leave this blank - since reboots may fail and specify the final shutdown command in your last script. +- `shutdown_command` (string) - The command to use to gracefully shut down + the machine once all the provisioning is done. By default this is an empty + string, which tells Packer to just forcefully shut down the machine unless + a shutdown command takes place inside script so this may safely be + omitted. If one or more scripts require a reboot it is suggested to leave + this blank since reboots may fail and specify the final shutdown command + in your last script. - `shutdown_timeout` (string) - The amount of time to wait after executing - the `shutdown_command` for the virtual machine to actually shut down. - If it doesn't shut down in this time, it is an error. By default, the timeout + the `shutdown_command` for the virtual machine to actually shut down. If + it doesn't shut down in this time, it is an error. By default, the timeout is "5m", or five minutes. -- `skip_compaction` (boolean) - If true skip compacting the hard disk for virtual machine when - exporting. This defaults to false. +- `skip_compaction` (boolean) - If true skip compacting the hard disk for + virtual machine when exporting. This defaults to false. -- `switch_name` (string) - The name of the switch to connect the virtual machine to. Be defaulting - this to an empty string, Packer will try to determine the switch to use by looking for - external switch that is up and running. +- `switch_name` (string) - The name of the switch to connect the virtual + machine to. Be defaulting this to an empty string, Packer will try to + determine the switch to use by looking for external switch that is up and + running. -- `switch_vlan_id` (string) - This is the vlan of the virtual switch's network card. - By default none is set. If none is set then a vlan is not set on the switch's network card. - If this value is set it should match the vlan specified in by `vlan_id`. +- `switch_vlan_id` (string) - This is the vlan of the virtual switch's + network card. By default none is set. If none is set then a vlan is not + set on the switch's network card. If this value is set it should match + the vlan specified in by `vlan_id`. -- `use_fixed_vhd_format` (boolean) - If true, creates the boot disk on the virtual machine as - a fixed VHD format disk. The default is false, which creates a dynamic VHDX format disk. This - option requires setting `generation` to 1, `skip_compaction` to true, and `differencing_disk` to false. - Additionally, any value entered for `disk_block_size` will be ignored. The most likely use case for this - option is outputing a disk that is in the format required for upload to Azure. +- `use_fixed_vhd_format` (boolean) - If true, creates the boot disk on the + virtual machine as a fixed VHD format disk. The default is false, which + creates a dynamic VHDX format disk. This option requires setting + `generation` to 1, `skip_compaction` to true, and `differencing_disk` to + false. 
Additionally, any value entered for `disk_block_size` will be + ignored. The most likely use case for this option is outputing a disk that + is in the format required for upload to Azure. - `vhd_temp_path` (string) - A separate path to be used for storing the VM's disk image. The purpose is to enable reading and writing to take place on different physical disks (read from VHD temp path, write to regular temp path while exporting the VM) to eliminate a single-disk bottleneck. -- `vlan_id` (string) - This is the vlan of the virtual machine's network card - for the new virtual machine. By default none is set. If none is set then - vlans are not set on the virtual machine's network card. +- `vlan_id` (string) - This is the vlan of the virtual machine's network + card for the new virtual machine. By default none is set. If none is set + then vlans are not set on the virtual machine's network card. -- `mac_address` (string) - This allows a specific MAC address to be used on the - default virtual network card. The MAC address must be a string with no - delimiters, for example "0000deadbeef". +- `mac_address` (string) - This allows a specific MAC address to be used on + the default virtual network card. The MAC address must be a string with + no delimiters, for example "0000deadbeef". -- `vm_name` (string) - This is the name of the virtual machine for the new virtual - machine, without the file extension. By default this is "packer-BUILDNAME", - where "BUILDNAME" is the name of the build. +- `vm_name` (string) - This is the name of the virtual machine for the new + virtual machine, without the file extension. By default this is + "packer-BUILDNAME", where "BUILDNAME" is the name of the build. -- `temp_path` (string) - This is the temporary path in which Packer will create the virtual - machine. Default value is system `%temp%` +- `temp_path` (string) - This is the temporary path in which Packer will + create the virtual machine. Default value is system `%temp%` -- `disk_block_size` (string) - The block size of the VHD to be created. - Recommended disk block size for Linux hyper-v guests is 1 MiB. This defaults to "32 MiB". +- `disk_block_size` (string) - The block size of the VHD to be created. + Recommended disk block size for Linux hyper-v guests is 1 MiB. This + defaults to "32 MiB". ## Boot Command -The `boot_command` configuration is very important: it specifies the keys -to type when the virtual machine is first booted in order to start the -OS installer. This command is typed after `boot_wait`, which gives the -virtual machine some time to actually load the ISO. +The `boot_command` configuration is very important: it specifies the keys to +type when the virtual machine is first booted in order to start the OS +installer. This command is typed after `boot_wait`, which gives the virtual +machine some time to actually load the ISO. -As documented above, the `boot_command` is an array of strings. The -strings are all typed in sequence. It is an array only to improve readability -within the template. +As documented above, the `boot_command` is an array of strings. The strings +are all typed in sequence. It is an array only to improve readability within +the template. The boot command is "typed" character for character over the virtual keyboard to the machine, simulating a human actually typing the keyboard. <%= partial "partials/builders/boot-command" %> -Example boot command. This is actually a working boot command used to start -an Ubuntu 12.04 installer: +Example boot command. 
This is actually a working boot command used to start an +Ubuntu 12.04 installer: ``` json [ @@ -292,18 +311,22 @@ for the version of Hyper-V that is running. ## Generation 1 vs Generation 2 -Floppy drives are no longer supported by generation 2 machines. This requires you to -take another approach when dealing with preseed or answer files. Two possible options -are using virtual dvd drives or using the built in web server. +Floppy drives are no longer supported by generation 2 machines. This requires +you to take another approach when dealing with preseed or answer files. Two +possible options are using virtual dvd drives or using the built in web +server. -When dealing with Windows you need to enable UEFI drives for generation 2 virtual machines. +When dealing with Windows you need to enable UEFI drives for generation 2 +virtual machines. ## Creating iso from directory -Programs like mkisofs can be used to create an iso from a directory. -There is a [windows version of mkisofs](http://opensourcepack.blogspot.co.uk/p/cdrtools.html). +Programs like mkisofs can be used to create an iso from a directory. There is +a [windows version of +mkisofs](http://opensourcepack.blogspot.co.uk/p/cdrtools.html). -Example powershell script. This is an actually working powershell script used to create a Windows answer iso: +Example powershell script. This is an actually working powershell script used +to create a Windows answer iso: ``` powershell $isoFolder = "answer-iso" @@ -866,8 +889,8 @@ Finish proxy after sysprep --> ## Example For Ubuntu Vivid Generation 2 -If you are running Windows under virtualization, you may need to create -a virtual switch with an `External` connection type. +If you are running Windows under virtualization, you may need to create a +virtual switch with an `External` connection type. ### Packer config: From 647aef98550d519f917aa3e912958ec1f513856b Mon Sep 17 00:00:00 2001 From: DanHam Date: Thu, 14 Jun 2018 16:26:23 +0100 Subject: [PATCH 111/138] Hyper-V ISO: Sort options alphabetically. Minor word changes and fixes --- .../docs/builders/hyperv-iso.html.md.erb | 301 +++++++++--------- 1 file changed, 156 insertions(+), 145 deletions(-) diff --git a/website/source/docs/builders/hyperv-iso.html.md.erb b/website/source/docs/builders/hyperv-iso.html.md.erb index 297a55e41..437076663 100644 --- a/website/source/docs/builders/hyperv-iso.html.md.erb +++ b/website/source/docs/builders/hyperv-iso.html.md.erb @@ -18,9 +18,10 @@ The Hyper-V Packer builder is able to create virtual machines and export them, starting from an ISO image. The builder builds a virtual machine by creating a new virtual machine from -scratch, booting it, installing an OS, provisioning software within the OS, -then shutting it down. The result of the Hyper-V builder is a directory -containing all the files necessary to run the virtual machine portably. +scratch. Typically, the VM is booted, an OS is installed, and software is +provisioned within the OS. Finally the VM is shut down. The result of the +Hyper-V builder is a directory containing all the files necessary to run +the virtual machine portably. ## Basic Example @@ -40,9 +41,12 @@ to self-install. Still, the example serves to show the basic configuration: } ``` -It is important to add a `shutdown_command`. By default Packer halts the -virtual machine and the file system may not be sync'd. Thus, changes made in a -provisioner might not be saved. +By default Packer will perform a hard power off of a virtual machine. 
+However, when a machine is powered off this way, it is possible that
+changes made to the VM's file system may not be fully synced, possibly
+leading to corruption of files or lost changes. As such, it is important to
+add a `shutdown_command`. This tells Packer how to safely shut down and
+power off the VM.
 
 ## Configuration Reference
 
@@ -56,20 +60,22 @@ builder.
 
 ### Required:
 
-- `iso_checksum` (string) - The checksum for the OS ISO file or virtual
-  harddrive file. Because these files are so large, this is required and
-  Packer will verify it prior to booting a virtual machine with the ISO or
-  virtual harddrive attached. The type of the checksum is specified with
-  `iso_checksum_type`, documented below.
+- `iso_checksum` (string) - The checksum for the ISO file or virtual
+  hard drive file. The algorithm to use when computing the checksum is
+  specified with `iso_checksum_type`.
 
-- `iso_checksum_type` (string) - The type of the checksum specified in
-  `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or
-  "sha512" currently. While "none" will skip checksumming, this is not
-  recommended since ISO files and virtual harddrive files are generally
-  large and corruption does happen from time to time.
+- `iso_checksum_type` (string) - The algorithm to be used when computing
+  the checksum of the file specified in `iso_checksum`. Currently, valid
+  values are "none", "md5", "sha1", "sha256", or "sha512". Since the
+  validity of ISO and virtual disk files is typically crucial to a
+  successful build, Packer performs a check of any supplied media by
+  default. While setting "none" will cause Packer to skip this check,
+  corruption of large files such as ISOs and virtual hard drives can
+  occur from time to time. As such, skipping this check is not
+  recommended.
 
 - `iso_url` (string) - A URL to the ISO containing the installation image or
-  virtual harddrive vhd or vhdx file to clone. This URL can be either an
+  virtual hard drive (VHD or VHDX) file to clone. This URL can be either an
   HTTP URL or a file URL (or path to a file). If this is an HTTP URL, Packer
   will download the file and cache it between runs.
 
@@ -83,57 +89,56 @@ builder.
   the installer will start itself.
 
 - `boot_wait` (string) - The time to wait after booting the initial virtual
-  machine before typing the `boot_command`. The value of this should be a
-  duration. Examples are "5s" and "1m30s" which will cause Packer to wait
-  five seconds and one minute 30 seconds, respectively. If this isn't
-  specified, the default is 10 seconds.
+  machine before typing the `boot_command`. The value specified should be
+  a duration. For example, setting a duration of "1m30s" would cause
+  Packer to wait for 1 minute 30 seconds before typing the boot command.
+  The default duration is "10s" (10 seconds).
 
-- `cpu` (number) - The number of cpus the virtual machine should use. If
-  this isn't specified, the default is 1 cpu.
+- `cpu` (number) - The number of CPUs the virtual machine should use. If
+  this isn't specified, the default is 1 CPU.
 
-- `disk_additional_size` (array of integers) - The size(s) of any additional
-  hard disks for the VM in megabytes. If this is not specified then the VM
-  will only contain a primary hard disk. Additional drives will be attached
-  to the SCSI interface only. The builder uses expandable, not fixed-size
-  virtual hard disks, so the actual file representing the disk will not use
-  the full size unless it is full.
+- `differencing_disk` (boolean) - If true enables differencing disks.
Only + the changes will be written to the new disk. This is especially useful if + your source is a VHD/VHDX. This defaults to `false`. + +- `disk_additional_size` (array of integers) - The size or sizes of any + additional hard disks for the VM in megabytes. If this is not specified + then the VM will only contain a primary hard disk. Additional drives + will be attached to the SCSI interface only. The builder uses + expandable rather than fixed-size virtual hard disks, so the actual + file representing the disk will not use the full size unless it is + full. + +- `disk_block_size` (string) - The block size of the VHD to be created. + Recommended disk block size for Linux hyper-v guests is 1 MiB. This + defaults to "32 MiB". - `disk_size` (number) - The size, in megabytes, of the hard disk to create for the VM. By default, this is 40 GB. -- `differencing_disk` (boolean) - If true enables differencing disks. Only - the changes will be written to the new disk. This is especially useful if - your source is a vhd/vhdx. This defaults to false. +- `enable_dynamic_memory` (boolean) - If `true` enable dynamic memory for + the virtual machine. This defaults to `false`. -- `headless` (boolean) - Packer defaults to building Hyper-V virtual - machines by launching a GUI that shows the console of the machine being - built. When this value is set to true, the machine will start without a - console. +- `enable_mac_spoofing` (boolean) - If `true` enable MAC address spoofing + for the virtual machine. This defaults to `false`. -- `skip_export` (boolean) - If true skips VM export. If you are interested - only in the vhd/vhdx files, you can enable this option. This will create - inline disks which improves the build performance. There will not be any - copying of source vhds to temp directory. This defaults to false. +- `enable_secure_boot` (boolean) - If `true` enable secure boot for the + virtual machine. This defaults to `false`. See `secure_boot_template` + below for additional settings. -- `enable_dynamic_memory` (boolean) - If true enable dynamic memory for - virtual machine. This defaults to false. +- `enable_virtualization_extensions` (boolean) - If `true` enable + virtualization extensions for the virtual machine. This defaults to + `false`. For nested virtualization you need to enable MAC spoofing, + disable dynamic memory and have at least 4GB of RAM assigned to the + virtual machine. -- `enable_mac_spoofing` (boolean) - If true enable mac spoofing for virtual - machine. This defaults to false. - -- `enable_secure_boot` (boolean) - If true enable secure boot for virtual - machine. This defaults to false. - -- `secure_boot_template` (string) - The secure boot template to be - configured. Valid values are "MicrosoftWindows" (Windows) or - "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if - enable_secure_boot is set to "true". This defaults to "MicrosoftWindows". - - -- `enable_virtualization_extensions` (boolean) - If true enable - virtualization extensions for virtual machine. This defaults to false. - For nested virtualization you need to enable mac spoofing, disable dynamic - memory and have at least 4GB of RAM for virtual machine. +- `floppy_dirs` (array of strings) - A list of directories to place onto + the floppy disk recursively. This is similar to the `floppy_files` option + except that the directory structure is preserved. This is useful for when + your floppy disk includes drivers or if you just want to organize it's + contents as a hierarchy. 
Wildcard characters (\*, ?, and \[\]) are
+  allowed. The maximum summary size of all files in the listed directories
+  is the same as in `floppy_files`.
 
 - `floppy_files` (array of strings) - A list of files to place onto a floppy
   disk that is attached when the VM is booted. This is most useful for
@@ -145,41 +150,45 @@ builder.
   (`*`, `?`, and `[]`) are allowed. Directory names are also allowed, which
   will add all the files found in the directory to the floppy.
 
-- `floppy_dirs` (array of strings) - A list of directories to place onto
-  the floppy disk recursively. This is similar to the `floppy_files` option
-  except that the directory structure is preserved. This is useful for when
-  your floppy disk includes drivers or if you just want to organize it's
-  contents as a hierarchy. Wildcard characters (\*, ?, and \[\]) are
-  allowed. The maximum summary size of all files in the listed directories
-  are the same as in `floppy_files`.
-
 - `generation` (number) - The Hyper-V generation for the virtual machine. By
   default, this is 1. Generation 2 Hyper-V virtual machines do not support
   floppy drives. In this scenario use `secondary_iso_images` instead. Hard
-  drives and dvd drives will also be scsi and not ide.
+  drives and DVD drives will also be SCSI and not IDE.
 
-- `guest_additions_mode` (string) - How should guest additions be installed.
-  If value `attach` then attach iso image with by specified by
-  `guest_additions_path`. Otherwise guest additions is not installed.
+- `guest_additions_mode` (string) - If set to `attach` then attach and
+  mount the ISO image specified in `guest_additions_path`. If set to
+  `none` then guest additions are not attached and mounted. This is the
+  default.
 
-- `guest_additions_path` (string) - The path to the iso image for guest
+- `guest_additions_path` (string) - The path to the ISO image for guest
   additions.
 
-- `http_directory` (string) - Path to a directory to serve using an HTTP
-  server. The files in this directory will be available over HTTP that will
-  be requestable from the virtual machine. This is useful for hosting
-  kickstart files and so on. By default this is "", which means no HTTP
-  server will be started. The address and port of the HTTP server will be
-  available as variables in `boot_command`. This is covered in more detail
-  below.
+- `headless` (boolean) - Packer defaults to building Hyper-V virtual
+  machines by launching a GUI that shows the console of the machine being
+  built. When this value is set to true, the machine will start without a
+  console.
+
+- `http_directory` (string) - Path to a directory to serve using Packer's
+  inbuilt HTTP server. The files in this directory will be available
+  over HTTP to the virtual machine. This is useful for hosting kickstart
+  files and so on. By default this value is unset and the HTTP server is
+  not started. The address and port of the HTTP server will be available
+  as variables in `boot_command`. This is covered in more detail below.
 
 - `http_port_min` and `http_port_max` (number) - These are the minimum and
   maximum port to use for the HTTP server started to serve the
-  `http_directory`.
Since Packer often runs in parallel, a randomly + available port in this range will be repeatedly chosen until an + available port is found. To force the HTTP server to use a specific + port, set an identical value for `http_port_min` and `http_port_max`. + By default the values are 8000 and 9000, respectively. + +- `iso_target_extension` (string) - The extension of the ISO file after + download. This defaults to "iso". + +- `iso_target_path` (string) - The path where the ISO should be saved after + download. By default the ISO will be saved in the Packer cache + directory with a hash of the original filename as its name. - `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer will try these in order. If anything goes wrong attempting to @@ -188,85 +197,87 @@ builder. empty and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. -- `iso_target_extension` (string) - The extension of the iso file after - download. This defaults to "iso". - -- `iso_target_path` (string) - The path where the iso should be saved after - download. By default will go in the packer cache, with a hash of the - original filename as its name. +- `mac_address` (string) - This allows a specific MAC address to be used on + the default virtual network card. The MAC address must be a string with + no delimiters, for example "0000deadbeef". - `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or - absolute. If relative, the path is relative to the working directory when + absolute. If relative, the path is relative to the working directory when `packer` is executed. This directory must not exist or be empty prior to - running the builder. By default this is "output-BUILDNAME" where + running the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the name of the build. -- `ram_size` (number) - The size, in megabytes, of the ram to create for the +- `ram_size` (number) - The amount, in megabytes, of RAM to assign to the VM. By default, this is 1 GB. -- `secondary_iso_images` (array of strings) - A list of iso paths to - attached to a VM when it is booted. This is most useful for unattended +- `secondary_iso_images` (array of strings) - A list of ISO paths to + attach to a VM when it is booted. This is most useful for unattended Windows installs, which look for an `Autounattend.xml` file on removable - media. By default, no secondary iso will be attached. + media. By default, no secondary ISO will be attached. + +- `secure_boot_template` (string) - The secure boot template to be + configured. Valid values are "MicrosoftWindows" (Windows) or + "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if + `enable_secure_boot` is set to "true". This defaults to "MicrosoftWindows". - `shutdown_command` (string) - The command to use to gracefully shut down - the machine once all the provisioning is done. By default this is an empty - string, which tells Packer to just forcefully shut down the machine unless - a shutdown command takes place inside script so this may safely be - omitted. If one or more scripts require a reboot it is suggested to leave - this blank since reboots may fail and specify the final shutdown command - in your last script. + the machine once all provisioning is complete. By default this is an + empty string, which tells Packer to just forcefully shut down the + machine. 
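For a Linux guest, a typical graceful value resembles the sudo-driven
shutdown used by the Ubuntu examples in these docs:

``` json
{
  "shutdown_command": "echo 'packer' | sudo -S shutdown -P now"
}
```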
This setting can be safely omitted if, for example, a shutdown
+  command to gracefully halt the machine is configured inside a
+  provisioning script. If one or more scripts require a reboot it is
+  suggested to leave this blank (since reboots may fail) and instead
+  specify the final shutdown command in your last script.
 
 - `shutdown_timeout` (string) - The amount of time to wait after executing
-  the `shutdown_command` for the virtual machine to actually shut down. If
-  it doesn't shut down in this time, it is an error. By default, the timeout
-  is "5m", or five minutes.
+  the `shutdown_command` for the virtual machine to actually shut down.
+  If the machine doesn't shut down in this time it is considered an
+  error. By default, the timeout is "5m" (five minutes).
 
-- `skip_compaction` (boolean) - If true skip compacting the hard disk for
-  virtual machine when exporting. This defaults to false.
+- `skip_compaction` (boolean) - If `true` skip compacting the hard disk for
+  the virtual machine when exporting. This defaults to `false`.
+
+- `skip_export` (boolean) - If `true` Packer will skip the export of the
+  VM. If you are interested only in the VHD/VHDX files, you can enable
+  this option. This will create inline disks which improves the build
+  performance. There will not be any copying of source VHDs to the temp
+  directory. This defaults to `false`.
 
 - `switch_name` (string) - The name of the switch to connect the virtual
-  machine to. Be defaulting this to an empty string, Packer will try to
-  determine the switch to use by looking for external switch that is up and
-  running.
+  machine to. By default, leaving this value unset will cause Packer to
+  try to determine the switch to use by looking for an external switch
+  that is up and running.
 
-- `switch_vlan_id` (string) - This is the vlan of the virtual switch's
-  network card. By default none is set. If none is set then a vlan is not
-  set on the switch's network card. If this value is set it should match
-  the vlan specified in by `vlan_id`.
+- `switch_vlan_id` (string) - This is the VLAN of the virtual switch's
+  network card. By default none is set. If none is set then a VLAN is not
+  set on the switch's network card. If this value is set it should match
+  the VLAN specified by `vlan_id`.
+
+- `temp_path` (string) - This is the temporary path in which Packer will
+  create the virtual machine. By default the value is the system `%temp%`.
 
 - `use_fixed_vhd_format` (boolean) - If true, creates the boot disk on the
-  virtual machine as a fixed VHD format disk. The default is false, which
+  virtual machine as a fixed VHD format disk. The default is `false`, which
   creates a dynamic VHDX format disk. This option requires setting
-  `generation` to 1, `skip_compaction` to true, and `differencing_disk` to
-  false. Additionally, any value entered for `disk_block_size` will be
-  ignored. The most likely use case for this option is outputing a disk that
-  is in the format required for upload to Azure.
+  `generation` to `1`, `skip_compaction` to `true`, and
+  `differencing_disk` to `false`. Additionally, any value entered for
+  `disk_block_size` will be ignored. The most likely use case for this
+  option is outputting a disk that is in the format required for upload to
+  Azure.
 
 - `vhd_temp_path` (string) - A separate path to be used for storing the VM's
   disk image.
The purpose is to enable reading and writing to take place on different physical disks (read from VHD temp path, write to regular temp path while exporting the VM) to eliminate a single-disk bottleneck. -- `vlan_id` (string) - This is the vlan of the virtual machine's network +- `vlan_id` (string) - This is the VLAN of the virtual machine's network card for the new virtual machine. By default none is set. If none is set - then vlans are not set on the virtual machine's network card. + then VLANs are not set on the virtual machine's network card. -- `mac_address` (string) - This allows a specific MAC address to be used on - the default virtual network card. The MAC address must be a string with - no delimiters, for example "0000deadbeef". - -- `vm_name` (string) - This is the name of the virtual machine for the new - virtual machine, without the file extension. By default this is - "packer-BUILDNAME", where "BUILDNAME" is the name of the build. - -- `temp_path` (string) - This is the temporary path in which Packer will - create the virtual machine. Default value is system `%temp%` - -- `disk_block_size` (string) - The block size of the VHD to be created. - Recommended disk block size for Linux hyper-v guests is 1 MiB. This - defaults to "32 MiB". +- `vm_name` (string) - This is the name of the new virtual machine, + without the file extension. By default this is "packer-BUILDNAME", + where "BUILDNAME" is the name of the build. ## Boot Command @@ -284,8 +295,8 @@ to the machine, simulating a human actually typing the keyboard. <%= partial "partials/builders/boot-command" %> -Example boot command. This is actually a working boot command used to start an -Ubuntu 12.04 installer: +The example shown below is a working boot command used to start an Ubuntu +12.04 installer: ``` json [ @@ -306,27 +317,27 @@ For more examples of various boot commands, see the sample projects from our ## Integration Services -Packer will automatically attach the integration services iso as a dvd drive +Packer will automatically attach the integration services ISO as a DVD drive for the version of Hyper-V that is running. ## Generation 1 vs Generation 2 Floppy drives are no longer supported by generation 2 machines. This requires you to take another approach when dealing with preseed or answer files. Two -possible options are using virtual dvd drives or using the built in web +possible options are using virtual DVD drives or using Packers built in web server. When dealing with Windows you need to enable UEFI drives for generation 2 virtual machines. -## Creating iso from directory +## Creating an ISO From a Directory -Programs like mkisofs can be used to create an iso from a directory. There is +Programs like mkisofs can be used to create an ISO from a directory. There is a [windows version of -mkisofs](http://opensourcepack.blogspot.co.uk/p/cdrtools.html). +mkisofs](http://opensourcepack.blogspot.co.uk/p/cdrtools.html) available. -Example powershell script. This is an actually working powershell script used -to create a Windows answer iso: +Below is a working PowerShell script that can be used to create a Windows +answer ISO: ``` powershell $isoFolder = "answer-iso" @@ -825,7 +836,7 @@ Finish Setup cache proxy during installation --> sysprep-unattend.xml: -``` text +``` xml @@ -939,7 +950,7 @@ virtual switch with an `External` connection type. 
"generation": 2, "enable_secure_boot": false } -] + ] } ``` From 9f528d6eb7ebd7aa545ff1a73574bf16ab9507fa Mon Sep 17 00:00:00 2001 From: DanHam Date: Thu, 14 Jun 2018 16:36:24 +0100 Subject: [PATCH 112/138] Hyper-V VMCX: Fix long lines --- .../docs/builders/hyperv-vmcx.html.md.erb | 181 +++++++++--------- 1 file changed, 95 insertions(+), 86 deletions(-) diff --git a/website/source/docs/builders/hyperv-vmcx.html.md.erb b/website/source/docs/builders/hyperv-vmcx.html.md.erb index 2bb00b0b8..2230d41b1 100644 --- a/website/source/docs/builders/hyperv-vmcx.html.md.erb +++ b/website/source/docs/builders/hyperv-vmcx.html.md.erb @@ -12,20 +12,21 @@ page_title: "Hyper-V Builder (from an vmcx)" Type: `hyperv-vmcx` -The Hyper-V Packer builder is able to use exported virtual machines or clone existing +The Hyper-V Packer builder is able to use exported virtual machines or clone +existing [Hyper-V](https://www.microsoft.com/en-us/server-cloud/solutions/virtualization.aspx) virtual machines. -The builder imports a virtual machine or clones an existing virtual machine boots it, -and provisioning software within the OS, then shutting it down. The result of the -Hyper-V builder is a directory containing all the files necessary to run the virtual -machine portably. +The builder imports a virtual machine or clones an existing virtual machine +boots it, and provisioning software within the OS, then shutting it down. The +result of the Hyper-V builder is a directory containing all the files +necessary to run the virtual machine portably. ## Basic Example -Here is a basic example. This example is not functional. It will start the -OS installer but then fail because we don't provide the preseed file for -Ubuntu to self-install. Still, the example serves to show the basic configuration: +Here is a basic example. This example is not functional. It will start the OS +installer but then fail because we don't provide the preseed file for Ubuntu +to self-install. Still, the example serves to show the basic configuration: Import from folder: @@ -57,13 +58,13 @@ provisioner might not be saved. ## Configuration Reference -There are many configuration options available for the Hyper-V builder. -They are organized below into two categories: required and optional. Within -each category, the available options are alphabetized and described. +There are many configuration options available for the Hyper-V builder. They +are organized below into two categories: required and optional. Within each +category, the available options are alphabetized and described. In addition to the options listed here, a -[communicator](/docs/templates/communicator.html) -can be configured for this builder. +[communicator](/docs/templates/communicator.html) can be configured for this +builder. ### Required for virtual machine import: @@ -83,11 +84,11 @@ can be configured for this builder. machine is cloned. - `boot_command` (array of strings) - This is an array of commands to type - when the virtual machine is first booted. The goal of these commands should - be to type just enough to initialize the operating system installer. - Special keys can be typed as well, and are covered in the section below on - the boot command. If this is not specified, it is assumed the installer - will start itself. + when the virtual machine is first booted. The goal of these commands + should be to type just enough to initialize the operating system + installer. Special keys can be typed as well, and are covered in the + section below on the boot command. 
If this is not specified, it is assumed + the installer will start itself. - `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be @@ -98,39 +99,42 @@ can be configured for this builder. - `cpu` (number) - The number of cpus the virtual machine should use. If this isn't specified, the default is 1 cpu. -- `enable_dynamic_memory` (boolean) - If true enable dynamic memory for virtual - machine. This defaults to false. +- `enable_dynamic_memory` (boolean) - If true enable dynamic memory for + virtual machine. This defaults to false. - `enable_mac_spoofing` (boolean) - If true enable mac spoofing for virtual machine. This defaults to false. -- `enable_secure_boot` (boolean) - If true enable secure boot for virtual machine. This defaults to false. +- `enable_secure_boot` (boolean) - If true enable secure boot for virtual + machine. This defaults to false. -- `secure_boot_template` (string) - The secure boot template to be configured. Valid values are "MicrosoftWindows" (Windows) or - "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if enable_secure_boot is set to "true". This defaults to "MicrosoftWindows". +- `secure_boot_template` (string) - The secure boot template to be + configured. Valid values are "MicrosoftWindows" (Windows) or + "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if + enable_secure_boot is set to "true". This defaults to "MicrosoftWindows". -- `enable_virtualization_extensions` (boolean) - If true enable virtualization - extensions for virtual machine. This defaults to false. For nested - virtualization you need to enable mac spoofing, disable dynamic memory and - have at least 4GB of RAM for virtual machine. +- `enable_virtualization_extensions` (boolean) - If true enable + virtualization extensions for virtual machine. This defaults to false. For + nested virtualization you need to enable mac spoofing, disable dynamic + memory and have at least 4GB of RAM for virtual machine. - `floppy_files` (array of strings) - A list of files to place onto a floppy disk that is attached when the VM is booted. This is most useful for unattended Windows installs, which look for an `Autounattend.xml` file on removable media. By default, no floppy will be attached. All files listed in this setting get placed into the root directory of the floppy and the - floppy is attached as the first floppy device. Currently, no support exists - for creating sub-directories on the floppy. Wildcard characters (*, ?, and - []) are allowed. Directory names are also allowed, which will add all the - files found in the directory to the floppy. + floppy is attached as the first floppy device. Currently, no support + exists for creating sub-directories on the floppy. Wildcard characters (*, + ?, and []) are allowed. Directory names are also allowed, which will add + all the files found in the directory to the floppy. - `floppy_dirs` (array of strings) - A list of directories to place onto the floppy disk recursively. This is similar to the `floppy_files` option except that the directory structure is preserved. This is useful for when your floppy disk includes drivers or if you just want to organize it's - contents as a hierarchy. Wildcard characters (\*, ?, and \[\]) are allowed. - The maximum summary size of all files in the listed directories are the - same as in `floppy_files`. + contents as a hierarchy. Wildcard characters (\*, ?, and \[\]) are + allowed. 
The maximum summary size of all files in the listed directories + are the same as in `floppy_files`. - `guest_additions_mode` (string) - How should guest additions be installed. If value `attach` then attach iso image with by specified by @@ -140,9 +144,9 @@ can be configured for this builder. additions. - `headless` (boolean) - Packer defaults to building Hyper-V virtual - machines by launching a GUI that shows the console of the machine - being built. When this value is set to true, the machine will start without - a console. + machines by launching a GUI that shows the console of the machine being + built. When this value is set to true, the machine will start without a + console. - `http_directory` (string) - Path to a directory to serve using an HTTP server. The files in this directory will be available over HTTP that will @@ -154,10 +158,11 @@ can be configured for this builder. - `http_port_min` and `http_port_max` (number) - These are the minimum and maximum port to use for the HTTP server started to serve the - `http_directory`. Because Packer often runs in parallel, Packer will choose - a randomly available port in this range to run the HTTP server. If you want - to force the HTTP server to be on one port, make this minimum and maximum - port the same. By default the values are 8000 and 9000, respectively. + `http_directory`. Because Packer often runs in parallel, Packer will + choose a randomly available port in this range to run the HTTP server. If + you want to force the HTTP server to be on one port, make this minimum and + maximum port the same. By default the values are 8000 and 9000, + respectively. - `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files are so large, this is required and Packer will verify it prior to @@ -171,16 +176,16 @@ can be configured for this builder. from time to time. - `iso_url` (string) - A URL to the ISO or VHD containing the installation - image. This URL can be either an HTTP URL or a file URL (or path to - a file). If this is an HTTP URL, Packer will download iso and cache it + image. This URL can be either an HTTP URL or a file URL (or path to a + file). If this is an HTTP URL, Packer will download iso and cache it between runs. - `iso_urls` (array of strings) - Multiple URLs for the ISO or VHD to - download. Packer will try these in order. If anything goes wrong attempting - to download or while downloading a single URL, it will move on to the next. - All URLs must point to the same file (same checksum). By default this is - empty and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be - specified. + download. Packer will try these in order. If anything goes wrong + attempting to download or while downloading a single URL, it will move on + to the next. All URLs must point to the same file (same checksum). By + default this is empty and `iso_url` is used. Only one of `iso_url` or + `iso_urls` can be specified. - `iso_target_extension` (string) - The extension of the iso file after download. This defaults to "iso". @@ -199,23 +204,23 @@ can be configured for this builder. - `ram_size` (number) - The size, in megabytes, of the ram to create for the VM. By default, this is 1 GB. -* `secondary_iso_images` (array of strings) - A list of iso paths to attached - to a VM when it is booted. This is most useful for unattended Windows - installs, which look for an `Autounattend.xml` file on removable media. By - default, no secondary iso will be attached. 
+* `secondary_iso_images` (array of strings) - A list of iso paths to + attached to a VM when it is booted. This is most useful for unattended + Windows installs, which look for an `Autounattend.xml` file on removable + media. By default, no secondary iso will be attached. - `shutdown_command` (string) - The command to use to gracefully shut down the machine once all the provisioning is done. By default this is an empty string, which tells Packer to just forcefully shut down the machine unless - a shutdown command takes place inside script so this may safely be omitted. - If one or more scripts require a reboot it is suggested to leave this blank - since reboots may fail and specify the final shutdown command in your last - script. + a shutdown command takes place inside script so this may safely be + omitted. If one or more scripts require a reboot it is suggested to leave + this blank since reboots may fail and specify the final shutdown command + in your last script. - `shutdown_timeout` (string) - The amount of time to wait after executing - the `shutdown_command` for the virtual machine to actually shut down. If it - doesn't shut down in this time, it is an error. By default, the timeout is - "5m", or five minutes. + the `shutdown_command` for the virtual machine to actually shut down. If + it doesn't shut down in this time, it is an error. By default, the timeout + is "5m", or five minutes. - `skip_compaction` (boolean) - If true skip compacting the hard disk for virtual machine when exporting. This defaults to false. @@ -226,17 +231,17 @@ can be configured for this builder. running. - `switch_vlan_id` (string) - This is the vlan of the virtual switch's - network card. By default none is set. If none is set then a vlan is not set - on the switch's network card. If this value is set it should match the vlan - specified in by `vlan_id`. + network card. By default none is set. If none is set then a vlan is not + set on the switch's network card. If this value is set it should match the + vlan specified in by `vlan_id`. -- `vlan_id` (string) - This is the vlan of the virtual machine's network card - for the new virtual machine. By default none is set. If none is set then - vlans are not set on the virtual machine's network card. +- `vlan_id` (string) - This is the vlan of the virtual machine's network + card for the new virtual machine. By default none is set. If none is set + then vlans are not set on the virtual machine's network card. -- `mac_address` (string) - This allows a specific MAC address to be used on the - default virtual network card. The MAC address must be a string with no - delimiters, for example "0000deadbeef". +- `mac_address` (string) - This allows a specific MAC address to be used on + the default virtual network card. The MAC address must be a string with + no delimiters, for example "0000deadbeef". - `vm_name` (string) - This is the name of the virtual machine for the new virtual machine, without the file extension. By default this is @@ -244,22 +249,22 @@ can be configured for this builder. ## Boot Command -The `boot_command` configuration is very important: it specifies the keys -to type when the virtual machine is first booted in order to start the -OS installer. This command is typed after `boot_wait`, which gives the -virtual machine some time to actually load the ISO. +The `boot_command` configuration is very important: it specifies the keys to +type when the virtual machine is first booted in order to start the OS +installer. 
This command is typed after `boot_wait`, which gives the virtual +machine some time to actually load the ISO. -As documented above, the `boot_command` is an array of strings. The -strings are all typed in sequence. It is an array only to improve readability -within the template. +As documented above, the `boot_command` is an array of strings. The strings +are all typed in sequence. It is an array only to improve readability within +the template. The boot command is "typed" character for character over the virtual keyboard to the machine, simulating a human actually typing the keyboard. <%= partial "partials/builders/boot-command" %> -Example boot command. This is actually a working boot command used to start -an Ubuntu 12.04 installer: +Example boot command. This is actually a working boot command used to start an +Ubuntu 12.04 installer: ```text [ @@ -282,18 +287,22 @@ for the version of Hyper-V that is running. ## Generation 1 vs Generation 2 -Floppy drives are no longer supported by generation 2 machines. This requires you to -take another approach when dealing with preseed or answer files. Two possible options -are using virtual dvd drives or using the built in web server. +Floppy drives are no longer supported by generation 2 machines. This requires +you to take another approach when dealing with preseed or answer files. Two +possible options are using virtual dvd drives or using the built in web +server. -When dealing with Windows you need to enable UEFI drives for generation 2 virtual machines. +When dealing with Windows you need to enable UEFI drives for generation 2 +virtual machines. ## Creating iso from directory -Programs like mkisofs can be used to create an iso from a directory. -There is a [windows version of mkisofs](http://opensourcepack.blogspot.co.uk/p/cdrtools.html). +Programs like mkisofs can be used to create an iso from a directory. There is +a [windows version of +mkisofs](http://opensourcepack.blogspot.co.uk/p/cdrtools.html). -Example powershell script. This is an actually working powershell script used to create a Windows answer iso: +Example powershell script. This is an actually working powershell script used +to create a Windows answer iso: ```text $isoFolder = "answer-iso" @@ -855,8 +864,8 @@ Finish proxy after sysprep --> ## Example For Ubuntu Vivid Generation 2 -If you are running Windows under virtualization, you may need to create -a virtual switch with an `External` connection type. +If you are running Windows under virtualization, you may need to create a +virtual switch with an `External` connection type. ### Packer config: From 866ee26771614979cbebbb6d2ddf56e3b2334705 Mon Sep 17 00:00:00 2001 From: DanHam Date: Thu, 14 Jun 2018 16:50:38 +0100 Subject: [PATCH 113/138] Hyper-V VMCX: Fix missing option - skip_export. Sort options alphabetically. --- .../docs/builders/hyperv-vmcx.html.md.erb | 73 ++++++++++--------- 1 file changed, 39 insertions(+), 34 deletions(-) diff --git a/website/source/docs/builders/hyperv-vmcx.html.md.erb b/website/source/docs/builders/hyperv-vmcx.html.md.erb index 2230d41b1..beef04a9f 100644 --- a/website/source/docs/builders/hyperv-vmcx.html.md.erb +++ b/website/source/docs/builders/hyperv-vmcx.html.md.erb @@ -78,11 +78,6 @@ builder. ### Optional: -- `clone_from_snapshot_name` (string) - The name of the snapshot - -- `clone_all_snapshots` (boolean) - Should all snapshots be cloned when the - machine is cloned. - - `boot_command` (array of strings) - This is an array of commands to type when the virtual machine is first booted. 
The goal of these commands should be to type just enough to initialize the operating system @@ -96,6 +91,10 @@ builder. five seconds and one minute 30 seconds, respectively. If this isn't specified, the default is 10 seconds. +- `clone_all_snapshots` (boolean) - Should all snapshots be cloned when the + machine is cloned. + +- `clone_from_snapshot_name` (string) - The name of the snapshot - `cpu` (number) - The number of cpus the virtual machine should use. If this isn't specified, the default is 1 cpu. @@ -108,16 +107,19 @@ builder. - `enable_secure_boot` (boolean) - If true enable secure boot for virtual machine. This defaults to false. -- `secure_boot_template` (string) - The secure boot template to be - configured. Valid values are "MicrosoftWindows" (Windows) or - "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if - enable_secure_boot is set to "true". This defaults to "MicrosoftWindows". - - `enable_virtualization_extensions` (boolean) - If true enable virtualization extensions for virtual machine. This defaults to false. For nested virtualization you need to enable mac spoofing, disable dynamic memory and have at least 4GB of RAM for virtual machine. +- `floppy_dirs` (array of strings) - A list of directories to place onto the + floppy disk recursively. This is similar to the `floppy_files` option + except that the directory structure is preserved. This is useful for when + your floppy disk includes drivers or if you just want to organize it's + contents as a hierarchy. Wildcard characters (\*, ?, and \[\]) are + allowed. The maximum summary size of all files in the listed directories + are the same as in `floppy_files`. + - `floppy_files` (array of strings) - A list of files to place onto a floppy disk that is attached when the VM is booted. This is most useful for unattended Windows installs, which look for an `Autounattend.xml` file on @@ -128,14 +130,6 @@ builder. ?, and []) are allowed. Directory names are also allowed, which will add all the files found in the directory to the floppy. -- `floppy_dirs` (array of strings) - A list of directories to place onto the - floppy disk recursively. This is similar to the `floppy_files` option - except that the directory structure is preserved. This is useful for when - your floppy disk includes drivers or if you just want to organize it's - contents as a hierarchy. Wildcard characters (\*, ?, and \[\]) are - allowed. The maximum summary size of all files in the listed directories - are the same as in `floppy_files`. - - `guest_additions_mode` (string) - How should guest additions be installed. If value `attach` then attach iso image with by specified by `guest_additions_path`. Otherwise guest additions is not installed. @@ -164,17 +158,24 @@ builder. maximum port the same. By default the values are 8000 and 9000, respectively. -- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO - files are so large, this is required and Packer will verify it prior to - booting a virtual machine with the ISO attached. The type of the checksum - is specified with `iso_checksum_type`, documented below. - - `iso_checksum_type` (string) - The type of the checksum specified in `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or "sha512" currently. While "none" will skip checksumming, this is not recommended since ISO files are generally large and corruption does happen from time to time. +- `iso_checksum` (string) - The checksum for the OS ISO file. 
Because ISO + files are so large, this is required and Packer will verify it prior to + booting a virtual machine with the ISO attached. The type of the checksum + is specified with `iso_checksum_type`, documented below. + +- `iso_target_extension` (string) - The extension of the iso file after + download. This defaults to "iso". + +- `iso_target_path` (string) - The path where the iso should be saved after + download. By default will go in the packer cache, with a hash of the + original filename as its name. + - `iso_url` (string) - A URL to the ISO or VHD containing the installation image. This URL can be either an HTTP URL or a file URL (or path to a file). If this is an HTTP URL, Packer will download iso and cache it @@ -187,12 +188,9 @@ builder. default this is empty and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified. -- `iso_target_extension` (string) - The extension of the iso file after - download. This defaults to "iso". - -- `iso_target_path` (string) - The path where the iso should be saved after - download. By default will go in the packer cache, with a hash of the - original filename as its name. +- `mac_address` (string) - This allows a specific MAC address to be used on + the default virtual network card. The MAC address must be a string with + no delimiters, for example "0000deadbeef". - `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or @@ -204,11 +202,16 @@ builder. - `ram_size` (number) - The size, in megabytes, of the ram to create for the VM. By default, this is 1 GB. -* `secondary_iso_images` (array of strings) - A list of iso paths to +- `secondary_iso_images` (array of strings) - A list of iso paths to attached to a VM when it is booted. This is most useful for unattended Windows installs, which look for an `Autounattend.xml` file on removable media. By default, no secondary iso will be attached. +- `secure_boot_template` (string) - The secure boot template to be + configured. Valid values are "MicrosoftWindows" (Windows) or + "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if + enable_secure_boot is set to "true". This defaults to "MicrosoftWindows". + - `shutdown_command` (string) - The command to use to gracefully shut down the machine once all the provisioning is done. By default this is an empty string, which tells Packer to just forcefully shut down the machine unless @@ -225,6 +228,12 @@ builder. - `skip_compaction` (boolean) - If true skip compacting the hard disk for virtual machine when exporting. This defaults to false. +- `skip_export` (boolean) - If `true` Packer will skip the export of the + VM. If you are interested only in the VHD/VHDX files, you can enable + this option. This will create inline disks which improves the build + performance. There will not be any copying of source VHDs to the temp + directory. This defaults to false. + - `switch_name` (string) - The name of the switch to connect the virtual machine to. Be defaulting this to an empty string, Packer will try to determine the switch to use by looking for external switch that is up and @@ -239,10 +248,6 @@ builder. card for the new virtual machine. By default none is set. If none is set then vlans are not set on the virtual machine's network card. -- `mac_address` (string) - This allows a specific MAC address to be used on - the default virtual network card. The MAC address must be a string with - no delimiters, for example "0000deadbeef". 
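As a rough sketch of how the MAC address format and the VLAN-matching rule
for `vlan_id` and `switch_vlan_id` fit together (the VLAN number here is
purely illustrative):

``` json
{
  "mac_address": "0000deadbeef",
  "vlan_id": "100",
  "switch_vlan_id": "100"
}
```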
- - `vm_name` (string) - This is the name of the virtual machine for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build. From 644f509f1d09d115aa355e70ade6c506ffc24404 Mon Sep 17 00:00:00 2001 From: DanHam Date: Thu, 14 Jun 2018 18:32:33 +0100 Subject: [PATCH 114/138] Hyper-V VMCX: Minor word changes and fixes --- .../docs/builders/hyperv-vmcx.html.md.erb | 386 +++++++++--------- 1 file changed, 201 insertions(+), 185 deletions(-) diff --git a/website/source/docs/builders/hyperv-vmcx.html.md.erb b/website/source/docs/builders/hyperv-vmcx.html.md.erb index beef04a9f..e8ffe9071 100644 --- a/website/source/docs/builders/hyperv-vmcx.html.md.erb +++ b/website/source/docs/builders/hyperv-vmcx.html.md.erb @@ -5,7 +5,7 @@ description: |- The Hyper-V Packer builder is able to clone an existing Hyper-V virtual machine and export them. layout: "docs" sidebar_current: 'docs-builders-hyperv-vmcx' -page_title: "Hyper-V Builder (from an vmcx)" +page_title: "Hyper-V Builder (from a vmcx)" --- # Hyper-V Builder (from a vmcx) @@ -17,8 +17,8 @@ existing [Hyper-V](https://www.microsoft.com/en-us/server-cloud/solutions/virtualization.aspx) virtual machines. -The builder imports a virtual machine or clones an existing virtual machine -boots it, and provisioning software within the OS, then shutting it down. The +Typically, the builder imports or clones an existing virtual machine, +boots it, provisions software within the OS, and then shuts it down. The result of the Hyper-V builder is a directory containing all the files necessary to run the virtual machine portably. @@ -30,7 +30,7 @@ to self-install. Still, the example serves to show the basic configuration: Import from folder: -```json +``` json { "type": "hyperv-vmcx", "clone_from_vmxc_path": "c:\\virtual machines\\ubuntu-12.04.5-server-amd64", @@ -42,7 +42,7 @@ Import from folder: Clone from existing virtual machine: -```json +``` json { "clone_from_vm_name": "ubuntu-12.04.5-server-amd64", "shutdown_command": "echo 'packer' | sudo -S shutdown -P now", @@ -52,13 +52,16 @@ Clone from existing virtual machine: } ``` -It is important to add a `shutdown_command`. By default Packer halts the -virtual machine and the file system may not be sync'd. Thus, changes made in a -provisioner might not be saved. +By default Packer will perform a hard power off of a virtual machine. +However, when a machine is powered off this way, it is possible that +changes made to the VMs file system may not be fully synced, possibly +leading to corruption of files or lost changes. As such, it is important to +add a `shutdown_command`. This tells Packer how to safely shutdown and +power off the VM. ## Configuration Reference -There are many configuration options available for the Hyper-V builder. They +There are many configuration options available for the Hyper-V builder. They are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described. @@ -81,43 +84,47 @@ builder. - `boot_command` (array of strings) - This is an array of commands to type when the virtual machine is first booted. The goal of these commands should be to type just enough to initialize the operating system - installer. Special keys can be typed as well, and are covered in the + installer. Special keys can be typed as well, and are covered in the section below on the boot command. If this is not specified, it is assumed the installer will start itself. 
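As a rough sketch of the shape such a command usually takes, the
hypothetical fragment below points a Debian/Ubuntu installer at a preseed
file served from the `http_directory` described below; Packer exposes the
HTTP server's address and port to `boot_command` as the `{{ .HTTPIP }}`
and `{{ .HTTPPort }}` template variables:

``` json
{
  "boot_command": [
    "<esc><wait>",
    "install preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg<enter>"
  ]
}
```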
- `boot_wait` (string) - The time to wait after booting the initial virtual - machine before typing the `boot_command`. The value of this should be - a duration. Examples are "5s" and "1m30s" which will cause Packer to wait - five seconds and one minute 30 seconds, respectively. If this isn't - specified, the default is 10 seconds. + machine before typing the `boot_command`. The value specified should be + a duration. For example, setting a duration of "1m30s" would cause + Packer to wait for 1 minute 30 seconds before typing the boot command. + The default duration is "10s" (10 seconds). -- `clone_all_snapshots` (boolean) - Should all snapshots be cloned when the - machine is cloned. +- `clone_all_snapshots` (boolean) - If set to `true` all snapshots will be + cloned when the machine is cloned. -- `clone_from_snapshot_name` (string) - The name of the snapshot -- `cpu` (number) - The number of cpus the virtual machine should use. If - this isn't specified, the default is 1 cpu. +- `clone_from_snapshot_name` (string) - The name of the snapshot to clone + from. -- `enable_dynamic_memory` (boolean) - If true enable dynamic memory for - virtual machine. This defaults to false. +- `cpu` (number) - The number of CPUs the virtual machine should use. If + this isn't specified, the default is 1 CPU. -- `enable_mac_spoofing` (boolean) - If true enable mac spoofing for virtual - machine. This defaults to false. +- `enable_dynamic_memory` (boolean) - If `true` enable dynamic memory for + the virtual machine. This defaults to `false`. -- `enable_secure_boot` (boolean) - If true enable secure boot for virtual - machine. This defaults to false. +- `enable_mac_spoofing` (boolean) - If `true` enable MAC address spoofing + for the virtual machine. This defaults to `false`. -- `enable_virtualization_extensions` (boolean) - If true enable - virtualization extensions for virtual machine. This defaults to false. For - nested virtualization you need to enable mac spoofing, disable dynamic - memory and have at least 4GB of RAM for virtual machine. +- `enable_secure_boot` (boolean) - If `true` enable secure boot for the + virtual machine. This defaults to `false`. See `secure_boot_template` + below for additional settings. -- `floppy_dirs` (array of strings) - A list of directories to place onto the - floppy disk recursively. This is similar to the `floppy_files` option +- `enable_virtualization_extensions` (boolean) - If `true` enable + virtualization extensions for the virtual machine. This defaults to + `false`. For nested virtualization you need to enable MAC spoofing, + disable dynamic memory and have at least 4GB of RAM assigned to the + virtual machine. + +- `floppy_dirs` (array of strings) - A list of directories to place onto + the floppy disk recursively. This is similar to the `floppy_files` option except that the directory structure is preserved. This is useful for when your floppy disk includes drivers or if you just want to organize it's contents as a hierarchy. Wildcard characters (\*, ?, and \[\]) are - allowed. The maximum summary size of all files in the listed directories + allowed. The maximum summary size of all files in the listed directories are the same as in `floppy_files`. - `floppy_files` (array of strings) - A list of files to place onto a floppy @@ -126,15 +133,16 @@ builder. removable media. By default, no floppy will be attached. All files listed in this setting get placed into the root directory of the floppy and the floppy is attached as the first floppy device. 
Currently, no support
-  exists for creating sub-directories on the floppy. Wildcard characters (*,
-  ?, and []) are allowed. Directory names are also allowed, which will add
-  all the files found in the directory to the floppy.
+  exists for creating sub-directories on the floppy. Wildcard characters
+  (`*`, `?`, and `[]`) are allowed. Directory names are also allowed, which
+  will add all the files found in the directory to the floppy.
 
-- `guest_additions_mode` (string) - How should guest additions be installed.
-  If value `attach` then attach iso image with by specified by
-  `guest_additions_path`. Otherwise guest additions is not installed.
+- `guest_additions_mode` (string) - If set to `attach` then attach and
+  mount the ISO image specified in `guest_additions_path`. If set to
+  `none` then guest additions are not attached and mounted; this is the
+  default.
 
-- `guest_additions_path` (string) - The path to the iso image for guest
+- `guest_additions_path` (string) - The path to the ISO image for guest
   additions.
 
 - `headless` (boolean) - Packer defaults to building Hyper-V virtual
@@ -142,39 +150,41 @@ builder.
   built. When this value is set to true, the machine will start without a
   console.
 
-- `http_directory` (string) - Path to a directory to serve using an HTTP
-  server. The files in this directory will be available over HTTP that will
-  be requestable from the virtual machine. This is useful for hosting
-  kickstart files and so on. By default this is "", which means no HTTP
-  server will be started. The address and port of the HTTP server will be
-  available as variables in `boot_command`. This is covered in more detail
-  below.
+- `http_directory` (string) - Path to a directory to serve using Packer's
+  inbuilt HTTP server. The files in this directory will be available
+  over HTTP to the virtual machine. This is useful for hosting kickstart
+  files and so on. By default this value is unset and the HTTP server is
+  not started. The address and port of the HTTP server will be available
+  as variables in `boot_command`. This is covered in more detail below.
 
 - `http_port_min` and `http_port_max` (number) - These are the minimum and
   maximum port to use for the HTTP server started to serve the
-  `http_directory`. Because Packer often runs in parallel, Packer will
-  choose a randomly available port in this range to run the HTTP server. If
-  you want to force the HTTP server to be on one port, make this minimum and
-  maximum port the same. By default the values are 8000 and 9000,
-  respectively.
+  `http_directory`. Since Packer often runs in parallel, a randomly
+  available port in this range will be repeatedly chosen until an
+  available port is found. To force the HTTP server to use a specific
+  port, set an identical value for `http_port_min` and `http_port_max`.
+  By default the values are 8000 and 9000, respectively.
 
-- `iso_checksum_type` (string) - The type of the checksum specified in
-  `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or
-  "sha512" currently. While "none" will skip checksumming, this is not
-  recommended since ISO files are generally large and corruption does happen
-  from time to time.
+- `iso_checksum_type` (string) - The algorithm to be used when computing
+  the checksum of the file specified in `iso_checksum`. Currently, valid
+  values are "none", "md5", "sha1", "sha256", or "sha512". Since the
+  validity of ISO and virtual disk files is typically crucial to a
+  successful build, Packer performs a check of any supplied media by
+  default. 
While setting "none" will cause Packer to skip this check, + corruption of large files such as ISOs and virtual hard drives can + occur from time to time. As such, skipping this check is not + recommended. -- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO - files are so large, this is required and Packer will verify it prior to - booting a virtual machine with the ISO attached. The type of the checksum - is specified with `iso_checksum_type`, documented below. +- `iso_checksum` (string) - The checksum for the ISO file or virtual + hard drive file. The algorithm to use when computing the checksum is + specified with `iso_checksum_type`. -- `iso_target_extension` (string) - The extension of the iso file after +- `iso_target_extension` (string) - The extension of the ISO file after download. This defaults to "iso". -- `iso_target_path` (string) - The path where the iso should be saved after - download. By default will go in the packer cache, with a hash of the - original filename as its name. +- `iso_target_path` (string) - The path where the ISO should be saved after + download. By default the ISO will be saved in the Packer cache + directory with a hash of the original filename as its name. - `iso_url` (string) - A URL to the ISO or VHD containing the installation image. This URL can be either an HTTP URL or a file URL (or path to a @@ -189,7 +199,7 @@ builder. `iso_urls` can be specified. - `mac_address` (string) - This allows a specific MAC address to be used on - the default virtual network card. The MAC address must be a string with + the default virtual network card. The MAC address must be a string with no delimiters, for example "0000deadbeef". - `output_directory` (string) - This is the path to the directory where the @@ -199,58 +209,59 @@ builder. running the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the name of the build. -- `ram_size` (number) - The size, in megabytes, of the ram to create for the +- `ram_size` (number) - The amount, in megabytes, of RAM to assign to the VM. By default, this is 1 GB. -- `secondary_iso_images` (array of strings) - A list of iso paths to - attached to a VM when it is booted. This is most useful for unattended +- `secondary_iso_images` (array of strings) - A list of ISO paths to + attach to a VM when it is booted. This is most useful for unattended Windows installs, which look for an `Autounattend.xml` file on removable - media. By default, no secondary iso will be attached. + media. By default, no secondary ISO will be attached. - `secure_boot_template` (string) - The secure boot template to be configured. Valid values are "MicrosoftWindows" (Windows) or "MicrosoftUEFICertificateAuthority" (Linux). This only takes effect if - enable_secure_boot is set to "true". This defaults to "MicrosoftWindows". + `enable_secure_boot` is set to "true". This defaults to "MicrosoftWindows". - `shutdown_command` (string) - The command to use to gracefully shut down - the machine once all the provisioning is done. By default this is an empty - string, which tells Packer to just forcefully shut down the machine unless - a shutdown command takes place inside script so this may safely be - omitted. If one or more scripts require a reboot it is suggested to leave - this blank since reboots may fail and specify the final shutdown command - in your last script. + the machine once all provisioning is complete. By default this is an + empty string, which tells Packer to just forcefully shut down the + machine. 
This setting can be safely omitted if, for example, a shutdown
+  command to gracefully halt the machine is configured inside a
+  provisioning script. If one or more scripts require a reboot it is
+  suggested to leave this blank (since reboots may fail) and instead
+  specify the final shutdown command in your last script.
 
 - `shutdown_timeout` (string) - The amount of time to wait after executing
-  the `shutdown_command` for the virtual machine to actually shut down. If
-  it doesn't shut down in this time, it is an error. By default, the timeout
-  is "5m", or five minutes.
+  the `shutdown_command` for the virtual machine to actually shut down.
+  If the machine doesn't shut down in this time it is considered an
+  error. By default, the timeout is "5m" (five minutes).
 
-- `skip_compaction` (boolean) - If true skip compacting the hard disk for
-  virtual machine when exporting. This defaults to false.
+- `skip_compaction` (boolean) - If `true` skip compacting the hard disk for
+  the virtual machine when exporting. This defaults to `false`.
 
 - `skip_export` (boolean) - If `true` Packer will skip the export of the VM.
   If you are interested only in the VHD/VHDX files, you can enable this
   option. This will create inline disks which improves the build
   performance. There will not be any copying of source VHDs to the temp
-  directory. This defaults to false.
+  directory. This defaults to `false`.
 
 - `switch_name` (string) - The name of the switch to connect the virtual
-  machine to. Be defaulting this to an empty string, Packer will try to
-  determine the switch to use by looking for external switch that is up and
-  running.
+  machine to. By default, leaving this value unset will cause Packer to
+  try to determine the switch to use by looking for an external switch
+  that is up and running.
 
-- `switch_vlan_id` (string) - This is the vlan of the virtual switch's
-  network card. By default none is set. If none is set then a vlan is not
-  set on the switch's network card. If this value is set it should match the
-  vlan specified in by `vlan_id`.
+- `switch_vlan_id` (string) - This is the VLAN of the virtual switch's
+  network card. By default none is set. If none is set then a VLAN is not
+  set on the switch's network card. If this value is set it should match
+  the VLAN specified by `vlan_id`.
 
-- `vlan_id` (string) - This is the vlan of the virtual machine's network
+- `vlan_id` (string) - This is the VLAN of the virtual machine's network
   card for the new virtual machine. By default none is set. If none is set
-  then vlans are not set on the virtual machine's network card.
+  then VLANs are not set on the virtual machine's network card.
 
-- `vm_name` (string) - This is the name of the virtual machine for the new
-  virtual machine, without the file extension. By default this is
-  "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
+- `vm_name` (string) - This is the name of the new virtual machine,
+  without the file extension. By default this is "packer-BUILDNAME",
+  where "BUILDNAME" is the name of the build.
 
 ## Boot Command
 
@@ -268,10 +279,10 @@ to the machine, simulating a human actually typing the keyboard.
 
 <%= partial "partials/builders/boot-command" %>
 
-Example boot command. 
This is actually a working boot command used to start an -Ubuntu 12.04 installer: +The example shown below is a working boot command used to start an Ubuntu +12.04 installer: -```text +``` json [ "", "/install/vmlinuz noapic ", @@ -285,31 +296,34 @@ Ubuntu 12.04 installer: ] ``` +For more examples of various boot commands, see the sample projects from our +[community templates page](/community-tools.html#templates). + ## Integration Services -Packer will automatically attach the integration services iso as a dvd drive +Packer will automatically attach the integration services ISO as a DVD drive for the version of Hyper-V that is running. ## Generation 1 vs Generation 2 Floppy drives are no longer supported by generation 2 machines. This requires you to take another approach when dealing with preseed or answer files. Two -possible options are using virtual dvd drives or using the built in web +possible options are using virtual DVD drives or using Packers built in web server. When dealing with Windows you need to enable UEFI drives for generation 2 virtual machines. -## Creating iso from directory +## Creating an ISO From a Directory -Programs like mkisofs can be used to create an iso from a directory. There is +Programs like mkisofs can be used to create an ISO from a directory. There is a [windows version of -mkisofs](http://opensourcepack.blogspot.co.uk/p/cdrtools.html). +mkisofs](http://opensourcepack.blogspot.co.uk/p/cdrtools.html) available. -Example powershell script. This is an actually working powershell script used -to create a Windows answer iso: +Below is a working PowerShell script that can be used to create a Windows +answer ISO: -```text +``` powershell $isoFolder = "answer-iso" if (test-path $isoFolder){ remove-item $isoFolder -Force -Recurse @@ -343,54 +357,56 @@ if (test-path $isoFolder){ } ``` - ## Example For Windows Server 2012 R2 Generation 2 Packer config: -```javascript +``` json { "builders": [ - { - "vm_name":"windows2012r2", - "type": "hyperv-iso", - "disk_size": 61440, - "floppy_files": [], - "secondary_iso_images": [ - "./windows/windows-2012R2-serverdatacenter-amd64/answer.iso" - ], - "http_directory": "./windows/common/http/", - "boot_wait": "0s", - "boot_command": [ - "aaa" - ], - "iso_url": "http://download.microsoft.com/download/6/2/A/62A76ABB-9990-4EFC-A4FE-C7D698DAEB96/9600.16384.WINBLUE_RTM.130821-1623_X64FRE_SERVER_EVAL_EN-US-IRM_SSS_X64FREE_EN-US_DV5.ISO", - "iso_checksum_type": "md5", - "iso_checksum": "458ff91f8abc21b75cb544744bf92e6a", - "communicator":"winrm", - "winrm_username": "vagrant", - "winrm_password": "vagrant", - "winrm_timeout" : "4h", - "shutdown_command": "f:\\run-sysprep.cmd", - "ram_size": 4096, - "cpu": 4, - "generation": 2, - "switch_name":"LAN", - "enable_secure_boot":true - }], - "provisioners": [{ - "type": "powershell", - "elevated_user":"vagrant", - "elevated_password":"vagrant", - "scripts": [ - "./windows/common/install-7zip.ps1", - "./windows/common/install-chef.ps1", - "./windows/common/compile-dotnet-assemblies.ps1", - "./windows/common/cleanup.ps1", - "./windows/common/ultradefrag.ps1", - "./windows/common/sdelete.ps1" - ] - }], + { + "vm_name":"windows2012r2", + "type": "hyperv-iso", + "disk_size": 61440, + "floppy_files": [], + "secondary_iso_images": [ + "./windows/windows-2012R2-serverdatacenter-amd64/answer.iso" + ], + "http_directory": "./windows/common/http/", + "boot_wait": "0s", + "boot_command": [ + "aaa" + ], + "iso_url": 
"http://download.microsoft.com/download/6/2/A/62A76ABB-9990-4EFC-A4FE-C7D698DAEB96/9600.16384.WINBLUE_RTM.130821-1623_X64FRE_SERVER_EVAL_EN-US-IRM_SSS_X64FREE_EN-US_DV5.ISO", + "iso_checksum_type": "md5", + "iso_checksum": "458ff91f8abc21b75cb544744bf92e6a", + "communicator":"winrm", + "winrm_username": "vagrant", + "winrm_password": "vagrant", + "winrm_timeout" : "4h", + "shutdown_command": "f:\\run-sysprep.cmd", + "ram_size": 4096, + "cpu": 4, + "generation": 2, + "switch_name":"LAN", + "enable_secure_boot":true + } + ], + "provisioners": [ + { + "type": "powershell", + "elevated_user":"vagrant", + "elevated_password":"vagrant", + "scripts": [ + "./windows/common/install-7zip.ps1", + "./windows/common/install-chef.ps1", + "./windows/common/compile-dotnet-assemblies.ps1", + "./windows/common/cleanup.ps1", + "./windows/common/ultradefrag.ps1", + "./windows/common/sdelete.ps1" + ] + } + ], "post-processors": [ { "type": "vagrant", @@ -403,7 +419,7 @@ Packer config: autounattend.xml: -```xml +``` xml @@ -800,12 +816,11 @@ Finish Setup cache proxy during installation --> - ``` sysprep-unattend.xml: -```text +``` xml @@ -874,7 +889,7 @@ virtual switch with an `External` connection type. ### Packer config: -```javascript +``` json { "variables": { "vm_name": "ubuntu-xenial", @@ -886,45 +901,46 @@ virtual switch with an `External` connection type. "iso_checksum": "DE5EE8665048F009577763EFBF4A6F0558833E59" }, "builders": [ - { - "vm_name":"{{user `vm_name`}}", - "type": "hyperv-iso", - "disk_size": "{{user `disk_size`}}", - "guest_additions_mode": "disable", - "iso_url": "{{user `iso_url`}}", - "iso_checksum_type": "{{user `iso_checksum_type`}}", - "iso_checksum": "{{user `iso_checksum`}}", - "communicator":"ssh", - "ssh_username": "packer", - "ssh_password": "packer", - "ssh_timeout" : "4h", - "http_directory": "./", - "boot_wait": "5s", - "boot_command": [ - "", - "set gfxpayload=1024x768", - "linux /install/vmlinuz ", - "preseed/url=http://{{.HTTPIP}}:{{.HTTPPort}}/hyperv-taliesins.cfg ", - "debian-installer=en_US auto locale=en_US kbd-chooser/method=us ", - "hostname={{.Name}} ", - "fb=false debconf/frontend=noninteractive ", - "keyboard-configuration/modelcode=SKIP keyboard-configuration/layout=USA ", - "keyboard-configuration/variant=USA console-setup/ask_detect=false ", - "initrd /install/initrd.gz", - "boot" - ], - "shutdown_command": "echo 'packer' | sudo -S -E shutdown -P now", - "ram_size": "{{user `ram_size`}}", - "cpu": "{{user `cpu`}}", - "generation": 2, - "enable_secure_boot": false - }] + { + "vm_name":"{{user `vm_name`}}", + "type": "hyperv-iso", + "disk_size": "{{user `disk_size`}}", + "guest_additions_mode": "disable", + "iso_url": "{{user `iso_url`}}", + "iso_checksum_type": "{{user `iso_checksum_type`}}", + "iso_checksum": "{{user `iso_checksum`}}", + "communicator":"ssh", + "ssh_username": "packer", + "ssh_password": "packer", + "ssh_timeout" : "4h", + "http_directory": "./", + "boot_wait": "5s", + "boot_command": [ + "", + "set gfxpayload=1024x768", + "linux /install/vmlinuz ", + "preseed/url=http://{{.HTTPIP}}:{{.HTTPPort}}/hyperv-taliesins.cfg ", + "debian-installer=en_US auto locale=en_US kbd-chooser/method=us ", + "hostname={{.Name}} ", + "fb=false debconf/frontend=noninteractive ", + "keyboard-configuration/modelcode=SKIP keyboard-configuration/layout=USA ", + "keyboard-configuration/variant=USA console-setup/ask_detect=false ", + "initrd /install/initrd.gz", + "boot" + ], + "shutdown_command": "echo 'packer' | sudo -S -E shutdown -P now", + "ram_size": "{{user 
`ram_size`}}", + "cpu": "{{user `cpu`}}", + "generation": 2, + "enable_secure_boot": false + } + ] } ``` ### preseed.cfg: -```text +``` text ## Options to set on the command line d-i debian-installer/locale string en_US.utf8 d-i console-setup/ask_detect boolean false From e0bcba491358347aafc6ba2d422ea3c485d19eec Mon Sep 17 00:00:00 2001 From: DanHam Date: Fri, 15 Jun 2018 00:09:50 +0100 Subject: [PATCH 115/138] Make the upload of env vars retryable in case of restarts --- provisioner/powershell/provisioner.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index 6eb9c5572..8f749ef7c 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -426,12 +426,20 @@ func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { } func (p *Provisioner) uploadEnvVars(flattenedEnvVars string) (err error) { - // Upload all env vars to a powershell script on the target build file system + // Upload all env vars to a powershell script on the target build file + // system. Do this in the context of a single retryable function so + // that we gracefully handle any errors created by transient conditions + // such as a system restart envVarReader := strings.NewReader(flattenedEnvVars) log.Printf("Uploading env vars to %s", p.config.RemoteEnvVarPath) - err = p.communicator.Upload(p.config.RemoteEnvVarPath, envVarReader, nil) + err = p.retryable(func() error { + if err := p.communicator.Upload(p.config.RemoteEnvVarPath, envVarReader, nil); err != nil { + return fmt.Errorf("Error uploading ps script containing env vars: %s", err) + } + return err + }) if err != nil { - return fmt.Errorf("Error uploading ps script containing env vars: %s", err) + return err } return } From 485d565e32a55698b7ab493931d4b198c9fb5bd5 Mon Sep 17 00:00:00 2001 From: DanHam Date: Fri, 15 Jun 2018 00:49:15 +0100 Subject: [PATCH 116/138] Ensure comments are easily readable in a standard terminal --- provisioner/powershell/provisioner.go | 130 ++++++++++++++------------ 1 file changed, 68 insertions(+), 62 deletions(-) diff --git a/provisioner/powershell/provisioner.go b/provisioner/powershell/provisioner.go index 8f749ef7c..f3600a4c3 100644 --- a/provisioner/powershell/provisioner.go +++ b/provisioner/powershell/provisioner.go @@ -1,5 +1,5 @@ -// This package implements a provisioner for Packer that executes -// powershell scripts within the remote machine. +// This package implements a provisioner for Packer that executes powershell +// scripts within the remote machine. package powershell import ( @@ -39,8 +39,8 @@ type Config struct { // converted from Windows to Unix-style. Binary bool - // An inline script to execute. Multiple strings are all executed - // in the context of a single shell. + // An inline script to execute. Multiple strings are all executed in the + // context of a single shell. Inline []string // The local path of the powershell script to upload and execute. @@ -49,32 +49,33 @@ type Config struct { // An array of multiple scripts to run. Scripts []string - // An array of environment variables that will be injected before - // your command(s) are executed. + // An array of environment variables that will be injected before your + // command(s) are executed. Vars []string `mapstructure:"environment_vars"` // The remote path where the local powershell script will be uploaded to. 
- // This should be set to a writable file that is in a pre-existing directory. + // This should be set to a writable file that is in a pre-existing + // directory. RemotePath string `mapstructure:"remote_path"` // The remote path where the file containing the environment variables - // will be uploaded to. This should be set to a writable file that is - // in a pre-existing directory. + // will be uploaded to. This should be set to a writable file that is in a + // pre-existing directory. RemoteEnvVarPath string `mapstructure:"remote_env_var_path"` // The command used to execute the script. The '{{ .Path }}' variable - // should be used to specify where the script goes, {{ .Vars }} - // can be used to inject the environment_vars into the environment. + // should be used to specify where the script goes, {{ .Vars }} can be + // used to inject the environment_vars into the environment. ExecuteCommand string `mapstructure:"execute_command"` - // The command used to execute the elevated script. The '{{ .Path }}' variable - // should be used to specify where the script goes, {{ .Vars }} + // The command used to execute the elevated script. The '{{ .Path }}' + // variable should be used to specify where the script goes, {{ .Vars }} // can be used to inject the environment_vars into the environment. ElevatedExecuteCommand string `mapstructure:"elevated_execute_command"` - // The timeout for retrying to start the process. Until this timeout - // is reached, if the provisioner can't start a process, it retries. - // This can be set high to allow for reboots. + // The timeout for retrying to start the process. Until this timeout is + // reached, if the provisioner can't start a process, it retries. This + // can be set high to allow for reboots. StartRetryTimeout time.Duration `mapstructure:"start_retry_timeout"` // This is used in the template generation to format environment variables @@ -85,15 +86,16 @@ type Config struct { // inside the `ElevatedExecuteCommand` template. ElevatedEnvVarFormat string `mapstructure:"elevated_env_var_format"` - // Instructs the communicator to run the remote script as a - // Windows scheduled task, effectively elevating the remote - // user by impersonating a logged-in user + // Instructs the communicator to run the remote script as a Windows + // scheduled task, effectively elevating the remote user by impersonating + // a logged-in user ElevatedUser string `mapstructure:"elevated_user"` ElevatedPassword string `mapstructure:"elevated_password"` - // Valid Exit Codes - 0 is not always the only valid error code! - // See http://www.symantec.com/connect/articles/windows-system-error-codes-exit-codes-description for examples - // such as 3010 - "The requested operation is successful. Changes will not be effective until the system is rebooted." + // Valid Exit Codes - 0 is not always the only valid error code! See + // http://www.symantec.com/connect/articles/windows-system-error-codes-exit-codes-description + // for examples such as 3010 - "The requested operation is successful. + // Changes will not be effective until the system is rebooted." 
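+	// When this is left unset, only exit code 0 is treated as valid (a
+	// default applied during Prepare).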
ValidExitCodes []int `mapstructure:"valid_exit_codes"` ctx interpolate.Context @@ -115,7 +117,8 @@ type EnvVarsTemplate struct { } func (p *Provisioner) Prepare(raws ...interface{}) error { - //Create passthrough for winrm password so we can fill it in once we know it + // Create passthrough for winrm password so we can fill it in once we know + // it p.config.ctx.Data = &EnvVarsTemplate{ WinRMPassword: `{{.WinRMPassword}}`, } @@ -232,9 +235,8 @@ func (p *Provisioner) Prepare(raws ...interface{}) error { return nil } -// Takes the inline scripts, concatenates them -// into a temporary file and returns a string containing the location -// of said file. +// Takes the inline scripts, concatenates them into a temporary file and +// returns a string containing the location of said file. func extractScript(p *Provisioner) (string, error) { temp, err := ioutil.TempFile(os.TempDir(), "packer-powershell-provisioner") if err != nil { @@ -288,11 +290,10 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { return fmt.Errorf("Error processing command: %s", err) } - // Upload the file and run the command. Do this in the context of - // a single retryable function so that we don't end up with - // the case that the upload succeeded, a restart is initiated, - // and then the command is executed but the file doesn't exist - // any longer. + // Upload the file and run the command. Do this in the context of a + // single retryable function so that we don't end up with the case + // that the upload succeeded, a restart is initiated, and then the + // command is executed but the file doesn't exist any longer. var cmd *packer.RemoteCmd err = p.retryable(func() error { if _, err := f.Seek(0, 0); err != nil { @@ -330,13 +331,13 @@ func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error { } func (p *Provisioner) Cancel() { - // Just hard quit. It isn't a big deal if what we're doing keeps - // running on the other side. + // Just hard quit. It isn't a big deal if what we're doing keeps running + // on the other side. os.Exit(0) } -// retryable will retry the given function over and over until a -// non-error is returned. +// retryable will retry the given function over and over until a non-error is +// returned. func (p *Provisioner) retryable(f func() error) error { startTimeout := time.After(p.config.StartRetryTimeout) for { @@ -349,9 +350,8 @@ func (p *Provisioner) retryable(f func() error) error { err = fmt.Errorf("Retryable error: %s", err) log.Print(err.Error()) - // Check if we timed out, otherwise we retry. It is safe to - // retry since the only error case above is if the command - // failed to START. + // Check if we timed out, otherwise we retry. It is safe to retry + // since the only error case above is if the command failed to START. 
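+		// Block until either the overall start timeout expires (in which
+		// case we give up and return the error) or a short delay passes
+		// before the next retry attempt.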
select { case <-startTimeout: return err @@ -361,12 +361,15 @@ func (p *Provisioner) retryable(f func() error) error { } } -// Environment variables required within the remote environment are uploaded within a PS script and -// then enabled by 'dot sourcing' the script immediately prior to execution of the main command +// Environment variables required within the remote environment are uploaded +// within a PS script and then enabled by 'dot sourcing' the script +// immediately prior to execution of the main command func (p *Provisioner) prepareEnvVars(elevated bool) (err error) { - // Collate all required env vars into a plain string with required formatting applied + // Collate all required env vars into a plain string with required + // formatting applied flattenedEnvVars := p.createFlattenedEnvVars(elevated) - // Create a powershell script on the target build fs containing the flattened env vars + // Create a powershell script on the target build fs containing the + // flattened env vars err = p.uploadEnvVars(flattenedEnvVars) if err != nil { return err @@ -427,9 +430,9 @@ func (p *Provisioner) createFlattenedEnvVars(elevated bool) (flattened string) { func (p *Provisioner) uploadEnvVars(flattenedEnvVars string) (err error) { // Upload all env vars to a powershell script on the target build file - // system. Do this in the context of a single retryable function so - // that we gracefully handle any errors created by transient conditions - // such as a system restart + // system. Do this in the context of a single retryable function so that + // we gracefully handle any errors created by transient conditions such as + // a system restart envVarReader := strings.NewReader(flattenedEnvVars) log.Printf("Uploading env vars to %s", p.config.RemoteEnvVarPath) err = p.retryable(func() error { @@ -454,7 +457,8 @@ func (p *Provisioner) createCommandText() (command string, err error) { } func (p *Provisioner) createCommandTextNonPrivileged() (command string, err error) { - // Prepare everything needed to enable the required env vars within the remote environment + // Prepare everything needed to enable the required env vars within the + // remote environment err = p.prepareEnvVars(false) if err != nil { return "", err @@ -481,7 +485,8 @@ func getWinRMPassword(buildName string) string { } func (p *Provisioner) createCommandTextPrivileged() (command string, err error) { - // Prepare everything needed to enable the required env vars within the remote environment + // Prepare everything needed to enable the required env vars within the + // remote environment err = p.prepareEnvVars(true) if err != nil { return "", err @@ -497,8 +502,9 @@ func (p *Provisioner) createCommandTextPrivileged() (command string, err error) return "", fmt.Errorf("Error processing command: %s", err) } - // OK so we need an elevated shell runner to wrap our command, this is going to have its own path - // generate the script and update the command runner in the process + // OK so we need an elevated shell runner to wrap our command, this is + // going to have its own path generate the script and update the command + // runner in the process path, err := p.generateElevatedRunner(command) if err != nil { return "", fmt.Errorf("Error generating elevated runner: %s", err) @@ -515,23 +521,23 @@ func (p *Provisioner) generateElevatedRunner(command string) (uploadedPath strin var buffer bytes.Buffer - // Output from the elevated command cannot be returned directly to - // the Packer console. 
In order to be able to view output from elevated - // commands and scripts an indirect approach is used by which the - // commands output is first redirected to file. The output file is then - // 'watched' by Packer while the elevated command is running and any - // content appearing in the file is written out to the console. - // Below the portion of command required to redirect output from the - // command to file is built and appended to the existing command string + // Output from the elevated command cannot be returned directly to the + // Packer console. In order to be able to view output from elevated + // commands and scripts an indirect approach is used by which the commands + // output is first redirected to file. The output file is then 'watched' + // by Packer while the elevated command is running and any content + // appearing in the file is written out to the console. Below the portion + // of command required to redirect output from the command to file is + // built and appended to the existing command string taskName := fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) - // Only use %ENVVAR% format for environment variables when setting - // the log file path; Do NOT use $env:ENVVAR format as it won't be - // expanded correctly in the elevatedTemplate + // Only use %ENVVAR% format for environment variables when setting the log + // file path; Do NOT use $env:ENVVAR format as it won't be expanded + // correctly in the elevatedTemplate logFile := `%SYSTEMROOT%/Temp/` + taskName + ".out" command += fmt.Sprintf(" > %s 2>&1", logFile) - // elevatedTemplate wraps the command in a single quoted XML text - // string so we need to escape characters considered 'special' in XML. + // elevatedTemplate wraps the command in a single quoted XML text string + // so we need to escape characters considered 'special' in XML. 
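+	// encoding/xml's EscapeText writes the escaped form of the command to
+	// the buffer, converting characters such as '&', '<', '>' and quotes
+	// into their XML entity equivalents.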
err = xml.EscapeText(&buffer, []byte(command)) if err != nil { return "", fmt.Errorf("Error escaping characters special to XML in command %s: %s", command, err) From 4e2b1756f92b2ce7a473175873ca47b3661273b2 Mon Sep 17 00:00:00 2001 From: Pavel Vasilevich Date: Fri, 15 Jun 2018 11:32:53 +0700 Subject: [PATCH 117/138] Fix misprints in steps names --- builder/alicloud/ecs/builder.go | 10 +++++----- builder/alicloud/ecs/step_attach_keypair.go | 6 +++--- builder/alicloud/ecs/step_config_eip.go | 6 +++--- builder/alicloud/ecs/step_config_key_pair.go | 6 +++--- builder/alicloud/ecs/step_region_copy_image.go | 6 +++--- builder/alicloud/ecs/step_share_image.go | 6 +++--- 6 files changed, 20 insertions(+), 20 deletions(-) diff --git a/builder/alicloud/ecs/builder.go b/builder/alicloud/ecs/builder.go index e0d7c0d7c..892910c3d 100644 --- a/builder/alicloud/ecs/builder.go +++ b/builder/alicloud/ecs/builder.go @@ -94,7 +94,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe &stepCheckAlicloudSourceImage{ SourceECSImageId: b.config.AlicloudSourceImage, }, - &StepConfigAlicloudKeyPair{ + &stepConfigAlicloudKeyPair{ Debug: b.config.PackerDebug, KeyPairName: b.config.SSHKeyPairName, PrivateKeyFile: b.config.Comm.SSHPrivateKey, @@ -136,7 +136,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe ZoneId: b.config.ZoneId, }) if b.chooseNetworkType() == VpcNet { - steps = append(steps, &setpConfigAlicloudEIP{ + steps = append(steps, &stepConfigAlicloudEIP{ AssociatePublicIpAddress: b.config.AssociatePublicIpAddress, RegionId: b.config.AlicloudRegion, InternetChargeType: b.config.InternetChargeType, @@ -147,7 +147,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe }) } steps = append(steps, - &stepAttachKeyPar{}, + &stepAttachKeyPair{}, &stepRunAlicloudInstance{}, &stepMountAlicloudDisk{}, &communicator.StepConnect{ @@ -170,12 +170,12 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe AlicloudImageName: b.config.AlicloudImageName, }, &stepCreateAlicloudImage{}, - &setpRegionCopyAlicloudImage{ + &stepRegionCopyAlicloudImage{ AlicloudImageDestinationRegions: b.config.AlicloudImageDestinationRegions, AlicloudImageDestinationNames: b.config.AlicloudImageDestinationNames, RegionId: b.config.AlicloudRegion, }, - &setpShareAlicloudImage{ + &stepShareAlicloudImage{ AlicloudImageShareAccounts: b.config.AlicloudImageShareAccounts, AlicloudImageUNShareAccounts: b.config.AlicloudImageUNShareAccounts, RegionId: b.config.AlicloudRegion, diff --git a/builder/alicloud/ecs/step_attach_keypair.go b/builder/alicloud/ecs/step_attach_keypair.go index 8fa2eedfb..9a565d144 100644 --- a/builder/alicloud/ecs/step_attach_keypair.go +++ b/builder/alicloud/ecs/step_attach_keypair.go @@ -12,10 +12,10 @@ import ( "github.com/hashicorp/packer/packer" ) -type stepAttachKeyPar struct { +type stepAttachKeyPair struct { } -func (s *stepAttachKeyPar) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { +func (s *stepAttachKeyPair) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { keyPairName := state.Get("keyPair").(string) if keyPairName == "" { return multistep.ActionContinue @@ -50,7 +50,7 @@ func (s *stepAttachKeyPar) Run(_ context.Context, state multistep.StateBag) mult return multistep.ActionContinue } -func (s *stepAttachKeyPar) Cleanup(state multistep.StateBag) { +func (s *stepAttachKeyPair) Cleanup(state multistep.StateBag) { keyPairName := 
state.Get("keyPair").(string) if keyPairName == "" { return diff --git a/builder/alicloud/ecs/step_config_eip.go b/builder/alicloud/ecs/step_config_eip.go index c9446bc47..397d8fd0a 100644 --- a/builder/alicloud/ecs/step_config_eip.go +++ b/builder/alicloud/ecs/step_config_eip.go @@ -10,14 +10,14 @@ import ( "github.com/hashicorp/packer/packer" ) -type setpConfigAlicloudEIP struct { +type stepConfigAlicloudEIP struct { AssociatePublicIpAddress bool RegionId string InternetChargeType string allocatedId string } -func (s *setpConfigAlicloudEIP) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { +func (s *stepConfigAlicloudEIP) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { client := state.Get("client").(*ecs.Client) ui := state.Get("ui").(packer.Ui) instance := state.Get("instance").(*ecs.InstanceAttributesType) @@ -55,7 +55,7 @@ func (s *setpConfigAlicloudEIP) Run(_ context.Context, state multistep.StateBag) return multistep.ActionContinue } -func (s *setpConfigAlicloudEIP) Cleanup(state multistep.StateBag) { +func (s *stepConfigAlicloudEIP) Cleanup(state multistep.StateBag) { if len(s.allocatedId) == 0 { return } diff --git a/builder/alicloud/ecs/step_config_key_pair.go b/builder/alicloud/ecs/step_config_key_pair.go index e49ae65fb..5a1d4c91e 100644 --- a/builder/alicloud/ecs/step_config_key_pair.go +++ b/builder/alicloud/ecs/step_config_key_pair.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/packer/packer" ) -type StepConfigAlicloudKeyPair struct { +type stepConfigAlicloudKeyPair struct { Debug bool SSHAgentAuth bool DebugKeyPath string @@ -25,7 +25,7 @@ type StepConfigAlicloudKeyPair struct { keyName string } -func (s *StepConfigAlicloudKeyPair) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { +func (s *stepConfigAlicloudKeyPair) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { ui := state.Get("ui").(packer.Ui) if s.PrivateKeyFile != "" { @@ -108,7 +108,7 @@ func (s *StepConfigAlicloudKeyPair) Run(_ context.Context, state multistep.State return multistep.ActionContinue } -func (s *StepConfigAlicloudKeyPair) Cleanup(state multistep.StateBag) { +func (s *stepConfigAlicloudKeyPair) Cleanup(state multistep.StateBag) { // If no key name is set, then we never created it, so just return // If we used an SSH private key file, do not go about deleting // keypairs diff --git a/builder/alicloud/ecs/step_region_copy_image.go b/builder/alicloud/ecs/step_region_copy_image.go index dddf08488..c0de5eeb9 100644 --- a/builder/alicloud/ecs/step_region_copy_image.go +++ b/builder/alicloud/ecs/step_region_copy_image.go @@ -10,13 +10,13 @@ import ( "github.com/hashicorp/packer/packer" ) -type setpRegionCopyAlicloudImage struct { +type stepRegionCopyAlicloudImage struct { AlicloudImageDestinationRegions []string AlicloudImageDestinationNames []string RegionId string } -func (s *setpRegionCopyAlicloudImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { +func (s *stepRegionCopyAlicloudImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { if len(s.AlicloudImageDestinationRegions) == 0 { return multistep.ActionContinue } @@ -52,7 +52,7 @@ func (s *setpRegionCopyAlicloudImage) Run(_ context.Context, state multistep.Sta return multistep.ActionContinue } -func (s *setpRegionCopyAlicloudImage) Cleanup(state multistep.StateBag) { +func (s *stepRegionCopyAlicloudImage) Cleanup(state multistep.StateBag) { _, cancelled := state.GetOk(multistep.StateCancelled) _, halted := 
state.GetOk(multistep.StateHalted) if cancelled || halted { diff --git a/builder/alicloud/ecs/step_share_image.go b/builder/alicloud/ecs/step_share_image.go index f954eb421..6e9ae6ed0 100644 --- a/builder/alicloud/ecs/step_share_image.go +++ b/builder/alicloud/ecs/step_share_image.go @@ -10,13 +10,13 @@ import ( "github.com/hashicorp/packer/packer" ) -type setpShareAlicloudImage struct { +type stepShareAlicloudImage struct { AlicloudImageShareAccounts []string AlicloudImageUNShareAccounts []string RegionId string } -func (s *setpShareAlicloudImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { +func (s *stepShareAlicloudImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { client := state.Get("client").(*ecs.Client) ui := state.Get("ui").(packer.Ui) alicloudImages := state.Get("alicloudimages").(map[string]string) @@ -37,7 +37,7 @@ func (s *setpShareAlicloudImage) Run(_ context.Context, state multistep.StateBag return multistep.ActionContinue } -func (s *setpShareAlicloudImage) Cleanup(state multistep.StateBag) { +func (s *stepShareAlicloudImage) Cleanup(state multistep.StateBag) { _, cancelled := state.GetOk(multistep.StateCancelled) _, halted := state.GetOk(multistep.StateHalted) if cancelled || halted { From 58f73aabbf34f1362b70c223da02a241a0b49500 Mon Sep 17 00:00:00 2001 From: EC2 Default User Date: Thu, 14 Jun 2018 16:16:01 +0000 Subject: [PATCH 118/138] Add ap-northeast-3 region --- builder/amazon/common/regions.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builder/amazon/common/regions.go b/builder/amazon/common/regions.go index 66b0c490e..ad71a027f 100644 --- a/builder/amazon/common/regions.go +++ b/builder/amazon/common/regions.go @@ -5,6 +5,7 @@ func listEC2Regions() []string { return []string{ "ap-northeast-1", "ap-northeast-2", + "ap-northeast-3", "ap-south-1", "ap-southeast-1", "ap-southeast-2", From 6c6716fe162a31726bcec46274b83df115db0b2a Mon Sep 17 00:00:00 2001 From: DanHam Date: Sun, 17 Jun 2018 03:05:30 +0100 Subject: [PATCH 119/138] Remove `go get` from Quick Start as this isn't required to use Packer --- README.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/README.md b/README.md index b031d542e..a3ec402a9 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ from a single source configuration. Packer is lightweight, runs on every major operating system, and is highly performant, creating machine images for multiple platforms in parallel. Packer comes out of the box with support for many platforms, the full list of which can -be found at https://www.packer.io/docs/builders/index.html. +be found at https://www.packer.io/docs/builders/index.html. Support for other platforms can be added via plugins. @@ -32,10 +32,6 @@ The images that Packer creates can easily be turned into [Vagrant](http://www.vagrantup.com) boxes. 
## Quick Start -Download and install packages and dependencies -``` -go get github.com/hashicorp/packer -``` **Note:** There is a great [introduction and getting started guide](https://www.packer.io/intro) From efa90219744aad48deaeab88a39d46f8386ea707 Mon Sep 17 00:00:00 2001 From: Jordan Borean Date: Mon, 18 Jun 2018 14:45:33 +1000 Subject: [PATCH 120/138] Check if hyper-v addresses is an array before slicing it --- common/powershell/hyperv/hyperv.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/common/powershell/hyperv/hyperv.go b/common/powershell/hyperv/hyperv.go index 5b0108dbe..392984e3f 100644 --- a/common/powershell/hyperv/hyperv.go +++ b/common/powershell/hyperv/hyperv.go @@ -1009,7 +1009,11 @@ param([string]$mac, [int]$addressIndex) try { $vm = Hyper-V\Get-VM | ?{$_.NetworkAdapters.MacAddress -eq $mac} if ($vm.NetworkAdapters.IpAddresses) { - $ip = $vm.NetworkAdapters.IpAddresses[$addressIndex] + $ipAddresses = $vm.NetworkAdapters.IPAddresses + if ($ipAddresses -isnot [array]) { + $ipAddresses = @($ipAddresses) + } + $ip = $ipAddresses[$addressIndex] } else { $vm_info = Get-CimInstance -ClassName Msvm_ComputerSystem -Namespace root\virtualization\v2 -Filter "ElementName='$($vm.Name)'" $ip_details = (Get-CimAssociatedInstance -InputObject $vm_info -ResultClassName Msvm_KvpExchangeComponent).GuestIntrinsicExchangeItems | %{ [xml]$_ } | ?{ $_.SelectSingleNode("/INSTANCE/PROPERTY[@NAME='Name']/VALUE[child::text()='NetworkAddressIPv4']") } From ec8747a04242d60349e74a8b98784342ad856ae4 Mon Sep 17 00:00:00 2001 From: Alexander Georgievskiy Date: Fri, 22 Jun 2018 00:45:20 +0300 Subject: [PATCH 121/138] They finally added https on download.virtualbox.org Because downloading SHA256SUMS via http is a fun joke --- builder/virtualbox/common/step_download_guest_additions.go | 4 ++-- .../guides/packer-on-cicd/build-virtualbox-image.html.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/builder/virtualbox/common/step_download_guest_additions.go b/builder/virtualbox/common/step_download_guest_additions.go index 706d5f466..da413e994 100644 --- a/builder/virtualbox/common/step_download_guest_additions.go +++ b/builder/virtualbox/common/step_download_guest_additions.go @@ -94,7 +94,7 @@ func (s *StepDownloadGuestAdditions) Run(ctx context.Context, state multistep.St } else { ui.Error(err.Error()) url = fmt.Sprintf( - "http://download.virtualbox.org/virtualbox/%s/%s", + "https://download.virtualbox.org/virtualbox/%s/%s", version, additionsName) } @@ -150,7 +150,7 @@ func (s *StepDownloadGuestAdditions) downloadAdditionsSHA256(ctx context.Context // First things first, we get the list of checksums for the files available // for this version. 
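+	// The SHA256SUMS file published in each release directory on
+	// download.virtualbox.org lists a checksum for every artifact in that
+	// version, including the guest additions ISO.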
checksumsUrl := fmt.Sprintf( - "http://download.virtualbox.org/virtualbox/%s/SHA256SUMS", + "https://download.virtualbox.org/virtualbox/%s/SHA256SUMS", additionsVersion) checksumsFile, err := ioutil.TempFile("", "packer") diff --git a/website/source/guides/packer-on-cicd/build-virtualbox-image.html.md b/website/source/guides/packer-on-cicd/build-virtualbox-image.html.md index 33582a910..479ab8614 100644 --- a/website/source/guides/packer-on-cicd/build-virtualbox-image.html.md +++ b/website/source/guides/packer-on-cicd/build-virtualbox-image.html.md @@ -68,7 +68,7 @@ apt-get install -y zip linux-headers-generic linux-headers-4.13.0-16-generic bui **Install VirtualBox** ``` -curl -OL "http://download.virtualbox.org/virtualbox/5.2.2/virtualbox-5.2_5.2.2-119230~Ubuntu~xenial_amd64.deb" +curl -OL "https://download.virtualbox.org/virtualbox/5.2.2/virtualbox-5.2_5.2.2-119230~Ubuntu~xenial_amd64.deb" dpkg -i virtualbox-5.2_5.2.2-119230~Ubuntu~xenial_amd64.deb ``` From f79381c3d59679f2adc28133bf71484146d899c3 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Fri, 22 Jun 2018 11:04:19 -0700 Subject: [PATCH 122/138] update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5c37764f7..49cf377a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,8 @@ * provisoner/shell-local: New options have been added to create feature parity with the shell-local post-processor. This feature now works on Windows hosts. [GH-5956] +* builder/virtualbox: Use HTTPS to download guest editions, now that it's + available. [GH-6406] ## 1.2.3 (April 25, 2018) From 4a7953f93a6c135f5c990c0a0deed62a18c336d0 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 22 Jun 2018 13:49:39 -0700 Subject: [PATCH 123/138] found a config validation bug where packer crashes instead of throwing a validation error if a windows-style path is provided to a provisioner on linux --- common/shell-local/config.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/common/shell-local/config.go b/common/shell-local/config.go index 82a41a192..ba2508120 100644 --- a/common/shell-local/config.go +++ b/common/shell-local/config.go @@ -215,6 +215,13 @@ func ConvertToLinuxPath(winAbsPath string) (string, error) { // get absolute path of script, and morph it into the bash path winAbsPath = strings.Replace(winAbsPath, "\\", "/", -1) splitPath := strings.SplitN(winAbsPath, ":/", 2) - winBashPath := fmt.Sprintf("/mnt/%s/%s", strings.ToLower(splitPath[0]), splitPath[1]) - return winBashPath, nil + if len(splitPath) == 2 { + winBashPath := fmt.Sprintf("/mnt/%s/%s", strings.ToLower(splitPath[0]), splitPath[1]) + return winBashPath, nil + } else { + err := fmt.Errorf("There was an error splitting your absolute path; expected "+ + "to find a drive following the format ':/' but did not: absolute "+ + "path: %s", winAbsPath) + return "", err + } } From 896ceee9021bc2259232dfa828ee91a65c39234f Mon Sep 17 00:00:00 2001 From: Fotios Lindiakos <30440247+thefotios-enigma@users.noreply.github.com> Date: Fri, 22 Jun 2018 18:55:50 -0400 Subject: [PATCH 124/138] Update list of required IAM permissions The `ec2:DescribeSpotPriceHistory` is required when the `spot_price` parameter is set to `auto`. 
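As a rough illustration, the spot-related actions could be collected into a
single IAM policy statement along the following lines. Note that the `Sid`
value and the wildcard `Resource` here are illustrative assumptions, not
part of this change:

``` json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "PackerSpotInstanceAccess",
      "Effect": "Allow",
      "Action": [
        "ec2:RequestSpotInstances",
        "ec2:CancelSpotInstanceRequests",
        "ec2:DescribeSpotInstanceRequests",
        "ec2:DescribeSpotPriceHistory"
      ],
      "Resource": "*"
    }
  ]
}
```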
--- website/source/docs/builders/amazon.html.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/website/source/docs/builders/amazon.html.md b/website/source/docs/builders/amazon.html.md index d54a28cef..3b1a177a9 100644 --- a/website/source/docs/builders/amazon.html.md +++ b/website/source/docs/builders/amazon.html.md @@ -184,7 +184,13 @@ Note that if you'd like to create a spot instance, you must also add: ec2:RequestSpotInstances, ec2:CancelSpotInstanceRequests, ec2:DescribeSpotInstanceRequests -``` +``` + +If you have the `spot_price` parameter set to `auto`, you must also add: + +``` json +ec2:DescribeSpotPriceHistory +``` ## Troubleshooting From 591bfe3dfa1527e90d6e29aa214e67a0e282d905 Mon Sep 17 00:00:00 2001 From: Bob Brumfield Date: Fri, 22 Jun 2018 18:09:27 -0700 Subject: [PATCH 125/138] Continue searching for leases even if one of the files cannot be read. --- builder/vmware/common/driver.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/builder/vmware/common/driver.go b/builder/vmware/common/driver.go index 8ceb393ff..553d307ba 100644 --- a/builder/vmware/common/driver.go +++ b/builder/vmware/common/driver.go @@ -358,7 +358,8 @@ func (d *VmwareDriver) GuestIP(state multistep.StateBag) (string, error) { // open up the lease and read its contents fh, err := os.Open(dhcpLeasesPath) if err != nil { - return "", err + log.Printf("Unable to open lease path, skipping: %s", dhcpLeasesPath) + continue } defer fh.Close() From f511c706c9af478b91ee0df8ad105a76776144c7 Mon Sep 17 00:00:00 2001 From: willmao Date: Sat, 23 Jun 2018 16:34:45 +0800 Subject: [PATCH 126/138] fix alicloud builder eip allocating issue --- builder/alicloud/ecs/builder.go | 1 + builder/alicloud/ecs/step_config_eip.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/builder/alicloud/ecs/builder.go b/builder/alicloud/ecs/builder.go index 892910c3d..99d759dc2 100644 --- a/builder/alicloud/ecs/builder.go +++ b/builder/alicloud/ecs/builder.go @@ -140,6 +140,7 @@ func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packe AssociatePublicIpAddress: b.config.AssociatePublicIpAddress, RegionId: b.config.AlicloudRegion, InternetChargeType: b.config.InternetChargeType, + InternetMaxBandwidthOut: b.config.InternetMaxBandwidthOut, }) } else { steps = append(steps, &stepConfigAlicloudPublicIP{ diff --git a/builder/alicloud/ecs/step_config_eip.go b/builder/alicloud/ecs/step_config_eip.go index 397d8fd0a..f747068f2 100644 --- a/builder/alicloud/ecs/step_config_eip.go +++ b/builder/alicloud/ecs/step_config_eip.go @@ -14,6 +14,7 @@ type stepConfigAlicloudEIP struct { AssociatePublicIpAddress bool RegionId string InternetChargeType string + InternetMaxBandwidthOut int allocatedId string } @@ -24,6 +25,7 @@ func (s *stepConfigAlicloudEIP) Run(_ context.Context, state multistep.StateBag) ui.Say("Allocating eip") ipaddress, allocateId, err := client.AllocateEipAddress(&ecs.AllocateEipAddressArgs{ RegionId: common.Region(s.RegionId), InternetChargeType: common.InternetChargeType(s.InternetChargeType), + Bandwidth: s.InternetMaxBandwidthOut, }) if err != nil { state.Put("error", err) From b45d8c49c5b707b48dae77a6c72e1b803879a955 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolae=20Vl=C4=83descu?= Date: Sat, 23 Jun 2018 16:45:15 +0300 Subject: [PATCH 127/138] Fix typo --- website/source/docs/provisioners/shell-local.html.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/provisioners/shell-local.html.md 
b/website/source/docs/provisioners/shell-local.html.md index 6a04272ad..d64e19fac 100644 --- a/website/source/docs/provisioners/shell-local.html.md +++ b/website/source/docs/provisioners/shell-local.html.md @@ -1,7 +1,7 @@ --- description: | shell-local will run a shell script of your choosing on the machine where Packer - is being run - in other words, it shell-local will run the shell script on your + is being run - in other words, shell-local will run the shell script on your build server, or your desktop, etc., rather than the remote/guest machine being provisioned by Packer. layout: docs @@ -14,7 +14,7 @@ sidebar_current: 'docs-provisioners-shell-local' Type: `shell-local` shell-local will run a shell script of your choosing on the machine where Packer -is being run - in other words, it shell-local will run the shell script on your +is being run - in other words, shell-local will run the shell script on your build server, or your desktop, etc., rather than the remote/guest machine being provisioned by Packer. From 7bab499b73a33bbcd358a2e3577b8e9aad7d97b8 Mon Sep 17 00:00:00 2001 From: willmao Date: Sun, 24 Jun 2018 12:23:35 +0800 Subject: [PATCH 128/138] fix vpc clean up issue --- builder/alicloud/ecs/step_config_vpc.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/builder/alicloud/ecs/step_config_vpc.go b/builder/alicloud/ecs/step_config_vpc.go index 36d52838f..8ce5663ed 100644 --- a/builder/alicloud/ecs/step_config_vpc.go +++ b/builder/alicloud/ecs/step_config_vpc.go @@ -85,7 +85,8 @@ func (s *stepConfigAlicloudVPC) Cleanup(state multistep.StateBag) { e, _ := err.(*common.Error) if (e.Code == "DependencyViolation.Instance" || e.Code == "DependencyViolation.RouteEntry" || e.Code == "DependencyViolation.VSwitch" || - e.Code == "DependencyViolation.SecurityGroup") && time.Now().Before(timeoutPoint) { + e.Code == "DependencyViolation.SecurityGroup" || + e.Code == "Forbbiden") && time.Now().Before(timeoutPoint) { time.Sleep(1 * time.Second) continue } From a69e2ac78e5efb5aab6a7d27ee072d2597cc7408 Mon Sep 17 00:00:00 2001 From: Harvey Lowndes Date: Fri, 22 Jun 2018 12:38:25 +0100 Subject: [PATCH 129/138] Support instance display name configuration --- builder/oracle/oci/config.go | 4 ++++ builder/oracle/oci/driver_oci.go | 11 +++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/builder/oracle/oci/config.go b/builder/oracle/oci/config.go index 75fea183b..c82246762 100644 --- a/builder/oracle/oci/config.go +++ b/builder/oracle/oci/config.go @@ -44,6 +44,10 @@ type Config struct { BaseImageID string `mapstructure:"base_image_ocid"` Shape string `mapstructure:"shape"` ImageName string `mapstructure:"image_name"` + + // Instance + InstanceName string `mapstructure:"instance_name"` + // UserData and UserDataFile file are both optional and mutually exclusive. 
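+	// Note: per the upstream OCI LaunchInstance API documentation, the
+	// user_data metadata value is expected to be base64-encoded.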
UserData string `mapstructure:"user_data"` UserDataFile string `mapstructure:"user_data_file"` diff --git a/builder/oracle/oci/driver_oci.go b/builder/oracle/oci/driver_oci.go index ffb3732c1..43b2d5c88 100644 --- a/builder/oracle/oci/driver_oci.go +++ b/builder/oracle/oci/driver_oci.go @@ -45,14 +45,21 @@ func (d *driverOCI) CreateInstance(publicKey string) (string, error) { metadata["user_data"] = d.cfg.UserData } - instance, err := d.computeClient.LaunchInstance(context.TODO(), core.LaunchInstanceRequest{LaunchInstanceDetails: core.LaunchInstanceDetails{ + instanceDetails := core.LaunchInstanceDetails{ AvailabilityDomain: &d.cfg.AvailabilityDomain, CompartmentId: &d.cfg.CompartmentID, ImageId: &d.cfg.BaseImageID, Shape: &d.cfg.Shape, SubnetId: &d.cfg.SubnetID, Metadata: metadata, - }}) + } + + // When empty, the default display name is used. + if d.cfg.InstanceName != "" { + instanceDetails.DisplayName = &d.cfg.InstanceName + } + + instance, err := d.computeClient.LaunchInstance(context.TODO(), core.LaunchInstanceRequest{LaunchInstanceDetails: instanceDetails}) if err != nil { return "", err From 475e79a2510196e967cf739549537b3e53ea6ea6 Mon Sep 17 00:00:00 2001 From: Simon Hulme Date: Mon, 25 Jun 2018 14:13:43 +0100 Subject: [PATCH 130/138] Fixed SecureBootTemplate not being passed through to PS cmdlet Added check for SecureBootTemplate parameter for Server 2012 and below Corrected enableSecureBootString usage --- common/powershell/hyperv/hyperv.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/common/powershell/hyperv/hyperv.go b/common/powershell/hyperv/hyperv.go index 392984e3f..5b6501abe 100644 --- a/common/powershell/hyperv/hyperv.go +++ b/common/powershell/hyperv/hyperv.go @@ -518,8 +518,13 @@ Hyper-V\Set-VMNetworkAdapter -VMName $vmName -MacAddressSpoofing $enableMacSpoof func SetVirtualMachineSecureBoot(vmName string, enableSecureBoot bool, templateName string) error { var script = ` -param([string]$vmName, $enableSecureBoot) -Hyper-V\Set-VMFirmware -VMName $vmName -EnableSecureBoot $enableSecureBoot +param([string]$vmName, [string]$enableSecureBootString, [string]$templateName) +$cmdletParameterExists = Get-Help SetVMFirmware -Parameter SecureBootTemplate -ErrorAction SilentlyContinue +if ($cmdletParameterExists) { + Hyper-V\Set-VMFirmware -VMName $vmName -EnableSecureBoot $enableSecureBootString -SecureBootTemplate $templateName +} else { + Hyper-V\Set-VMFirmware -VMName $vmName -EnableSecureBoot $enableSecureBootString +} ` var ps powershell.PowerShellCmd From 04ff0761e6ffa749da817bcb1c819eaa864f6e49 Mon Sep 17 00:00:00 2001 From: Bob Brumfield Date: Mon, 25 Jun 2018 08:28:32 -0700 Subject: [PATCH 131/138] Notify that we are skipping file, but retain error message --- builder/vmware/common/driver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/vmware/common/driver.go b/builder/vmware/common/driver.go index 553d307ba..1a945d19b 100644 --- a/builder/vmware/common/driver.go +++ b/builder/vmware/common/driver.go @@ -358,7 +358,7 @@ func (d *VmwareDriver) GuestIP(state multistep.StateBag) (string, error) { // open up the lease and read its contents fh, err := os.Open(dhcpLeasesPath) if err != nil { - log.Printf("Unable to open lease path, skipping: %s", dhcpLeasesPath) + log.Printf("Error while reading DHCP lease path file %s: %s", dhcpLeasesPath, err.Error()) continue } defer fh.Close() From 297f6b85ec59ef3f64b206d28923e7d57f47adc7 Mon Sep 17 00:00:00 2001 From: DanHam Date: Mon, 25 Jun 2018 23:51:27 +0100 Subject: [PATCH 
132/138] Use Get-Command over Get-Help to check for SecureBootTemplate parameter --- common/powershell/hyperv/hyperv.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/common/powershell/hyperv/hyperv.go b/common/powershell/hyperv/hyperv.go index 5b6501abe..343e02776 100644 --- a/common/powershell/hyperv/hyperv.go +++ b/common/powershell/hyperv/hyperv.go @@ -267,7 +267,7 @@ if ($harddrivePath){ func DisableAutomaticCheckpoints(vmName string) error { var script = ` param([string]$vmName) -if ((Get-Command Hyper-V\Set-Vm).Parameters["AutomaticCheckpointsEnabled"]) { +if ((Get-Command Hyper-V\Set-Vm).Parameters["AutomaticCheckpointsEnabled"]) { Hyper-V\Set-Vm -Name $vmName -AutomaticCheckpointsEnabled $false } ` var ps powershell.PowerShellCmd @@ -279,7 +279,7 @@ func ExportVmxcVirtualMachine(exportPath string, vmName string, snapshotName str var script = ` param([string]$exportPath, [string]$vmName, [string]$snapshotName, [string]$allSnapshotsString) -$WorkingPath = Join-Path $exportPath $vmName +$WorkingPath = Join-Path $exportPath $vmName if (Test-Path $WorkingPath) { throw "Export path working directory: $WorkingPath already exists!" @@ -297,7 +297,7 @@ if ($snapshotName) { } else { $snapshot = $null } - + if (!$snapshot) { #No snapshot clone Hyper-V\Export-VM -Name $vmName -Path $exportPath -ErrorAction Stop @@ -328,7 +328,7 @@ param([string]$exportPath, [string]$cloneFromVmxcPath) if (!(Test-Path $cloneFromVmxcPath)){ throw "Clone from vmxc directory: $cloneFromVmxcPath does not exist!" } - + if (!(Test-Path $exportPath)){ New-Item -ItemType Directory -Force -Path $exportPath } @@ -390,12 +390,12 @@ if ($vhdPath){ $existingFirstHarddrive | Hyper-V\Set-VMHardDiskDrive -Path $vhdPath } else { Hyper-V\Add-VMHardDiskDrive -VM $compatibilityReport.VM -Path $vhdPath - } + } } Hyper-V\Set-VMMemory -VM $compatibilityReport.VM -StartupBytes $memoryStartupBytes $networkAdaptor = $compatibilityReport.VM.NetworkAdapters | Select -First 1 Hyper-V\Disconnect-VMNetworkAdapter -VMNetworkAdapter $networkAdaptor -Hyper-V\Connect-VMNetworkAdapter -VMNetworkAdapter $networkAdaptor -SwitchName $switchName +Hyper-V\Connect-VMNetworkAdapter -VMNetworkAdapter $networkAdaptor -SwitchName $switchName $vm = Hyper-V\Import-VM -CompatibilityReport $compatibilityReport if ($vm) { @@ -519,8 +519,9 @@ Hyper-V\Set-VMNetworkAdapter -VMName $vmName -MacAddressSpoofing $enableMacSpoof func SetVirtualMachineSecureBoot(vmName string, enableSecureBoot bool, templateName string) error { var script = ` param([string]$vmName, [string]$enableSecureBootString, [string]$templateName) -$cmdletParameterExists = Get-Help SetVMFirmware -Parameter SecureBootTemplate -ErrorAction SilentlyContinue -if ($cmdletParameterExists) { +$cmdlet = Get-Command Hyper-V\Set-VMFirmware +# The SecureBootTemplate parameter is only available in later versions +if ($cmdlet.Parameters.SecureBootTemplate) { Hyper-V\Set-VMFirmware -VMName $vmName -EnableSecureBoot $enableSecureBootString -SecureBootTemplate $templateName } else { Hyper-V\Set-VMFirmware -VMName $vmName -EnableSecureBoot $enableSecureBootString From b600be009d7024513d6d0a409e5985d67b1f1f5c Mon Sep 17 00:00:00 2001 From: Owain Lewis Date: Tue, 26 Jun 2018 10:05:56 +0100 Subject: [PATCH 133/138] Pass context into OCI client --- builder/oracle/oci/artifact.go | 4 ++- builder/oracle/oci/driver.go | 20 ++++++----- builder/oracle/oci/driver_mock.go | 20 ++++++----- builder/oracle/oci/driver_oci.go | 33 ++++++++++--------- builder/oracle/oci/step_create_instance.go 
| 10 +++--- .../oci/step_get_default_credentials.go | 4 +-- builder/oracle/oci/step_image.go | 6 ++-- builder/oracle/oci/step_instance_info.go | 4 +-- 8 files changed, 56 insertions(+), 45 deletions(-) diff --git a/builder/oracle/oci/artifact.go b/builder/oracle/oci/artifact.go index a454ff7c7..703cd062d 100644 --- a/builder/oracle/oci/artifact.go +++ b/builder/oracle/oci/artifact.go @@ -1,6 +1,7 @@ package oci import ( + "context" "fmt" "github.com/oracle/oci-go-sdk/core" @@ -41,11 +42,12 @@ func (a *Artifact) String() string { ) } +// State ... func (a *Artifact) State(name string) interface{} { return nil } // Destroy deletes the custom image associated with the artifact. func (a *Artifact) Destroy() error { - return a.driver.DeleteImage(*a.Image.Id) + return a.driver.DeleteImage(context.TODO(), *a.Image.Id) } diff --git a/builder/oracle/oci/driver.go b/builder/oracle/oci/driver.go index 704b2f2a9..4e6013bbd 100644 --- a/builder/oracle/oci/driver.go +++ b/builder/oracle/oci/driver.go @@ -1,14 +1,18 @@ package oci -import "github.com/oracle/oci-go-sdk/core" +import ( + "context" + + "github.com/oracle/oci-go-sdk/core" +) // Driver interfaces between the builder steps and the OCI SDK. type Driver interface { - CreateInstance(publicKey string) (string, error) - CreateImage(id string) (core.Image, error) - DeleteImage(id string) error - GetInstanceIP(id string) (string, error) - TerminateInstance(id string) error - WaitForImageCreation(id string) error - WaitForInstanceState(id string, waitStates []string, terminalState string) error + CreateInstance(ctx context.Context, publicKey string) (string, error) + CreateImage(ctx context.Context, id string) (core.Image, error) + DeleteImage(ctx context.Context, id string) error + GetInstanceIP(ctx context.Context, id string) (string, error) + TerminateInstance(ctx context.Context, id string) error + WaitForImageCreation(ctx context.Context, id string) error + WaitForInstanceState(ctx context.Context, id string, waitStates []string, terminalState string) error } diff --git a/builder/oracle/oci/driver_mock.go b/builder/oracle/oci/driver_mock.go index 1f2e7ec2b..df478998c 100644 --- a/builder/oracle/oci/driver_mock.go +++ b/builder/oracle/oci/driver_mock.go @@ -1,6 +1,10 @@ package oci -import "github.com/oracle/oci-go-sdk/core" +import ( + "context" + + "github.com/oracle/oci-go-sdk/core" +) // driverMock implements the Driver interface and communicates with Oracle // OCI. @@ -27,7 +31,7 @@ type driverMock struct { } // CreateInstance creates a new compute instance. -func (d *driverMock) CreateInstance(publicKey string) (string, error) { +func (d *driverMock) CreateInstance(ctx context.Context, publicKey string) (string, error) { if d.CreateInstanceErr != nil { return "", d.CreateInstanceErr } @@ -38,7 +42,7 @@ func (d *driverMock) CreateInstance(publicKey string) (string, error) { } // CreateImage creates a new custom image. -func (d *driverMock) CreateImage(id string) (core.Image, error) { +func (d *driverMock) CreateImage(ctx context.Context, id string) (core.Image, error) { if d.CreateImageErr != nil { return core.Image{}, d.CreateImageErr } @@ -47,7 +51,7 @@ func (d *driverMock) CreateImage(id string) (core.Image, error) { } // DeleteImage mocks deleting a custom image. 
-func (d *driverMock) DeleteImage(id string) error { +func (d *driverMock) DeleteImage(ctx context.Context, id string) error { if d.DeleteImageErr != nil { return d.DeleteImageErr } @@ -58,7 +62,7 @@ func (d *driverMock) DeleteImage(id string) error { } // GetInstanceIP returns the public or private IP corresponding to the given instance id. -func (d *driverMock) GetInstanceIP(id string) (string, error) { +func (d *driverMock) GetInstanceIP(ctx context.Context, id string) (string, error) { if d.GetInstanceIPErr != nil { return "", d.GetInstanceIPErr } @@ -69,7 +73,7 @@ func (d *driverMock) GetInstanceIP(id string) (string, error) { } // TerminateInstance terminates a compute instance. -func (d *driverMock) TerminateInstance(id string) error { +func (d *driverMock) TerminateInstance(ctx context.Context, id string) error { if d.TerminateInstanceErr != nil { return d.TerminateInstanceErr } @@ -81,12 +85,12 @@ func (d *driverMock) TerminateInstance(id string) error { // WaitForImageCreation waits for a provisioning custom image to reach the // "AVAILABLE" state. -func (d *driverMock) WaitForImageCreation(id string) error { +func (d *driverMock) WaitForImageCreation(ctx context.Context, id string) error { return d.WaitForImageCreationErr } // WaitForInstanceState waits for an instance to reach the a given terminal // state. -func (d *driverMock) WaitForInstanceState(id string, waitStates []string, terminalState string) error { +func (d *driverMock) WaitForInstanceState(ctx context.Context, id string, waitStates []string, terminalState string) error { return d.WaitForInstanceStateErr } diff --git a/builder/oracle/oci/driver_oci.go b/builder/oracle/oci/driver_oci.go index 43b2d5c88..4a752c0a2 100644 --- a/builder/oracle/oci/driver_oci.go +++ b/builder/oracle/oci/driver_oci.go @@ -15,6 +15,7 @@ type driverOCI struct { computeClient core.ComputeClient vcnClient core.VirtualNetworkClient cfg *Config + context context.Context } // NewDriverOCI Creates a new driverOCI with a connected compute client and a connected vcn client. @@ -37,7 +38,7 @@ func NewDriverOCI(cfg *Config) (Driver, error) { } // CreateInstance creates a new compute instance. -func (d *driverOCI) CreateInstance(publicKey string) (string, error) { +func (d *driverOCI) CreateInstance(ctx context.Context, publicKey string) (string, error) { metadata := map[string]string{ "ssh_authorized_keys": publicKey, } @@ -69,8 +70,8 @@ func (d *driverOCI) CreateInstance(publicKey string) (string, error) { } // CreateImage creates a new custom image. -func (d *driverOCI) CreateImage(id string) (core.Image, error) { - res, err := d.computeClient.CreateImage(context.TODO(), core.CreateImageRequest{CreateImageDetails: core.CreateImageDetails{ +func (d *driverOCI) CreateImage(ctx context.Context, id string) (core.Image, error) { + res, err := d.computeClient.CreateImage(ctx, core.CreateImageRequest{CreateImageDetails: core.CreateImageDetails{ CompartmentId: &d.cfg.CompartmentID, InstanceId: &id, DisplayName: &d.cfg.ImageName, @@ -84,14 +85,14 @@ func (d *driverOCI) CreateImage(id string) (core.Image, error) { } // DeleteImage deletes a custom image. -func (d *driverOCI) DeleteImage(id string) error { - _, err := d.computeClient.DeleteImage(context.TODO(), core.DeleteImageRequest{ImageId: &id}) +func (d *driverOCI) DeleteImage(ctx context.Context, id string) error { + _, err := d.computeClient.DeleteImage(ctx, core.DeleteImageRequest{ImageId: &id}) return err } // GetInstanceIP returns the public or private IP corresponding to the given instance id. 
-func (d *driverOCI) GetInstanceIP(id string) (string, error) { - vnics, err := d.computeClient.ListVnicAttachments(context.TODO(), core.ListVnicAttachmentsRequest{ +func (d *driverOCI) GetInstanceIP(ctx context.Context, id string) (string, error) { + vnics, err := d.computeClient.ListVnicAttachments(ctx, core.ListVnicAttachmentsRequest{ InstanceId: &id, CompartmentId: &d.cfg.CompartmentID, }) @@ -103,7 +104,7 @@ func (d *driverOCI) GetInstanceIP(id string) (string, error) { return "", errors.New("instance has zero VNICs") } - vnic, err := d.vcnClient.GetVnic(context.TODO(), core.GetVnicRequest{VnicId: vnics.Items[0].VnicId}) + vnic, err := d.vcnClient.GetVnic(ctx, core.GetVnicRequest{VnicId: vnics.Items[0].VnicId}) if err != nil { return "", fmt.Errorf("Error getting VNIC details: %s", err) } @@ -119,8 +120,8 @@ func (d *driverOCI) GetInstanceIP(id string) (string, error) { return *vnic.PublicIp, nil } -func (d *driverOCI) GetInstanceInitialCredentials(id string) (string, string, error) { - credentials, err := d.computeClient.GetWindowsInstanceInitialCredentials(context.TODO(), core.GetWindowsInstanceInitialCredentialsRequest{ +func (d *driverOCI) GetInstanceInitialCredentials(ctx context.Context, id string) (string, string, error) { + credentials, err := d.computeClient.GetWindowsInstanceInitialCredentials(ctx, core.GetWindowsInstanceInitialCredentialsRequest{ InstanceId: &id, }) if err != nil { @@ -131,8 +132,8 @@ func (d *driverOCI) GetInstanceInitialCredentials(id string) (string, string, er } // TerminateInstance terminates a compute instance. -func (d *driverOCI) TerminateInstance(id string) error { - _, err := d.computeClient.TerminateInstance(context.TODO(), core.TerminateInstanceRequest{ +func (d *driverOCI) TerminateInstance(ctx context.Context, id string) error { + _, err := d.computeClient.TerminateInstance(ctx, core.TerminateInstanceRequest{ InstanceId: &id, }) return err @@ -140,10 +141,10 @@ func (d *driverOCI) TerminateInstance(id string) error { // WaitForImageCreation waits for a provisioning custom image to reach the // "AVAILABLE" state. -func (d *driverOCI) WaitForImageCreation(id string) error { +func (d *driverOCI) WaitForImageCreation(ctx context.Context, id string) error { return waitForResourceToReachState( func(string) (string, error) { - image, err := d.computeClient.GetImage(context.TODO(), core.GetImageRequest{ImageId: &id}) + image, err := d.computeClient.GetImage(ctx, core.GetImageRequest{ImageId: &id}) if err != nil { return "", err } @@ -159,10 +160,10 @@ func (d *driverOCI) WaitForImageCreation(id string) error { // WaitForInstanceState waits for an instance to reach the a given terminal // state. 
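The signature change running through this patch is the point of it: every `Driver` method now takes a `context.Context`, so callers can bound or cancel these long polling waits instead of blocking indefinitely. A minimal sketch of the calling pattern this enables (the `waitForRunning` helper and the 10-minute budget are illustrative only, not code from this patch; it assumes the `Driver` interface shown earlier in the series):

``` go
package oci

import (
	"context"
	"time"
)

// waitForRunning bounds the instance-state wait: once the timeout fires,
// ctx is cancelled and the OCI API calls issued inside
// WaitForInstanceState fail with ctx's error rather than hanging.
func waitForRunning(d Driver, instanceID string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	return d.WaitForInstanceState(ctx, instanceID,
		[]string{"STARTING", "PROVISIONING"}, "RUNNING")
}
```

The step implementations later in this patch follow the same pattern, passing the step's own `ctx` during the build and `context.TODO()` during cleanup.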
-func (d *driverOCI) WaitForInstanceState(id string, waitStates []string, terminalState string) error { +func (d *driverOCI) WaitForInstanceState(ctx context.Context, id string, waitStates []string, terminalState string) error { return waitForResourceToReachState( func(string) (string, error) { - instance, err := d.computeClient.GetInstance(context.TODO(), core.GetInstanceRequest{InstanceId: &id}) + instance, err := d.computeClient.GetInstance(ctx, core.GetInstanceRequest{InstanceId: &id}) if err != nil { return "", err } diff --git a/builder/oracle/oci/step_create_instance.go b/builder/oracle/oci/step_create_instance.go index 375b92b1b..7ca52bf32 100644 --- a/builder/oracle/oci/step_create_instance.go +++ b/builder/oracle/oci/step_create_instance.go @@ -10,7 +10,7 @@ import ( type stepCreateInstance struct{} -func (s *stepCreateInstance) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { +func (s *stepCreateInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { var ( driver = state.Get("driver").(Driver) ui = state.Get("ui").(packer.Ui) @@ -19,7 +19,7 @@ func (s *stepCreateInstance) Run(_ context.Context, state multistep.StateBag) mu ui.Say("Creating instance...") - instanceID, err := driver.CreateInstance(publicKey) + instanceID, err := driver.CreateInstance(ctx, publicKey) if err != nil { err = fmt.Errorf("Problem creating instance: %s", err) ui.Error(err.Error()) @@ -33,7 +33,7 @@ func (s *stepCreateInstance) Run(_ context.Context, state multistep.StateBag) mu ui.Say("Waiting for instance to enter 'RUNNING' state...") - if err = driver.WaitForInstanceState(instanceID, []string{"STARTING", "PROVISIONING"}, "RUNNING"); err != nil { + if err = driver.WaitForInstanceState(ctx, instanceID, []string{"STARTING", "PROVISIONING"}, "RUNNING"); err != nil { err = fmt.Errorf("Error waiting for instance to start: %s", err) ui.Error(err.Error()) state.Put("error", err) @@ -57,14 +57,14 @@ func (s *stepCreateInstance) Cleanup(state multistep.StateBag) { ui.Say(fmt.Sprintf("Terminating instance (%s)...", id)) - if err := driver.TerminateInstance(id); err != nil { + if err := driver.TerminateInstance(context.TODO(), id); err != nil { err = fmt.Errorf("Error terminating instance. Please terminate manually: %s", err) ui.Error(err.Error()) state.Put("error", err) return } - err := driver.WaitForInstanceState(id, []string{"TERMINATING"}, "TERMINATED") + err := driver.WaitForInstanceState(context.TODO(), id, []string{"TERMINATING"}, "TERMINATED") if err != nil { err = fmt.Errorf("Error terminating instance. 
Please terminate manually: %s", err) ui.Error(err.Error()) diff --git a/builder/oracle/oci/step_get_default_credentials.go b/builder/oracle/oci/step_get_default_credentials.go index 7503b853c..b7f6f71e9 100644 --- a/builder/oracle/oci/step_get_default_credentials.go +++ b/builder/oracle/oci/step_get_default_credentials.go @@ -17,7 +17,7 @@ type stepGetDefaultCredentials struct { BuildName string } -func (s *stepGetDefaultCredentials) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { +func (s *stepGetDefaultCredentials) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { var ( driver = state.Get("driver").(*driverOCI) ui = state.Get("ui").(packer.Ui) @@ -36,7 +36,7 @@ func (s *stepGetDefaultCredentials) Run(_ context.Context, state multistep.State return multistep.ActionContinue } - username, password, err := driver.GetInstanceInitialCredentials(id) + username, password, err := driver.GetInstanceInitialCredentials(ctx, id) if err != nil { err = fmt.Errorf("Error getting instance's credentials: %s", err) ui.Error(err.Error()) diff --git a/builder/oracle/oci/step_image.go b/builder/oracle/oci/step_image.go index 9589594c8..b3d37da0a 100644 --- a/builder/oracle/oci/step_image.go +++ b/builder/oracle/oci/step_image.go @@ -10,7 +10,7 @@ import ( type stepImage struct{} -func (s *stepImage) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { +func (s *stepImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { var ( driver = state.Get("driver").(Driver) ui = state.Get("ui").(packer.Ui) @@ -19,7 +19,7 @@ func (s *stepImage) Run(_ context.Context, state multistep.StateBag) multistep.S ui.Say("Creating image from instance...") - image, err := driver.CreateImage(instanceID) + image, err := driver.CreateImage(ctx, instanceID) if err != nil { err = fmt.Errorf("Error creating image from instance: %s", err) ui.Error(err.Error()) @@ -27,7 +27,7 @@ func (s *stepImage) Run(_ context.Context, state multistep.StateBag) multistep.S return multistep.ActionHalt } - err = driver.WaitForImageCreation(*image.Id) + err = driver.WaitForImageCreation(ctx, *image.Id) if err != nil { err = fmt.Errorf("Error waiting for image creation to finish: %s", err) ui.Error(err.Error()) diff --git a/builder/oracle/oci/step_instance_info.go b/builder/oracle/oci/step_instance_info.go index 891a261cd..63b9120c1 100644 --- a/builder/oracle/oci/step_instance_info.go +++ b/builder/oracle/oci/step_instance_info.go @@ -10,14 +10,14 @@ import ( type stepInstanceInfo struct{} -func (s *stepInstanceInfo) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { +func (s *stepInstanceInfo) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { var ( driver = state.Get("driver").(Driver) ui = state.Get("ui").(packer.Ui) id = state.Get("instance_id").(string) ) - ip, err := driver.GetInstanceIP(id) + ip, err := driver.GetInstanceIP(ctx, id) if err != nil { err = fmt.Errorf("Error getting instance's IP: %s", err) ui.Error(err.Error()) From b2e456c340871982a513510b106eee8978bdfff5 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 26 Jun 2018 11:35:04 -0700 Subject: [PATCH 134/138] docs needed clarifying on what Packer would do. Document this feature in ebs-volume. 
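In other words, when `disable_stop_instance` is true the builder skips its own stop call and only waits for the guest to reach the stopped state. A rough sketch of that wait using aws-sdk-go (the `waitForStop` helper is invented for illustration and is not the builder's actual code):

``` go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// waitForStop mirrors the documented behavior: Packer never calls
// StopInstances itself here, so if no provisioner shuts the guest down,
// the wait eventually gives up and the build fails with a timeout.
func waitForStop(instanceID string) error {
	svc := ec2.New(session.Must(session.NewSession()))
	return svc.WaitUntilInstanceStopped(&ec2.DescribeInstancesInput{
		InstanceIds: []*string{aws.String(instanceID)},
	})
}
```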
--- .../source/docs/builders/amazon-ebs.html.md | 11 ++++++++-- .../docs/builders/amazon-ebssurrogate.html.md | 11 ++++++++-- .../docs/builders/amazon-ebsvolume.html.md | 21 +++++++++++++++++++ 3 files changed, 39 insertions(+), 4 deletions(-) diff --git a/website/source/docs/builders/amazon-ebs.html.md b/website/source/docs/builders/amazon-ebs.html.md index 2901bd249..f8d75ebd0 100644 --- a/website/source/docs/builders/amazon-ebs.html.md +++ b/website/source/docs/builders/amazon-ebs.html.md @@ -150,8 +150,15 @@ builder. after all provisioners have run. For Windows instances, it is sometimes desirable to [run Sysprep](http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ami-create-standard.html) which will stop the instance for you. If this is set to true, Packer *will not* - stop the instance and will wait for you to stop it manually. You can do this - with a [windows-shell provisioner](https://www.packer.io/docs/provisioners/windows-shell.html). + stop the instance but will assume that you will send the stop signal + yourself through your final provisioner. You can do this with a + [windows-shell provisioner](https://www.packer.io/docs/provisioners/windows-shell.html). + + Note that Packer will still wait for the instance to be stopped, and failing + to send the stop signal yourself, when you have set this flag to `true`, + will cause a timeout. + + Example of a valid shutdown command: ``` json { diff --git a/website/source/docs/builders/amazon-ebssurrogate.html.md b/website/source/docs/builders/amazon-ebssurrogate.html.md index 720521536..eb21c1525 100644 --- a/website/source/docs/builders/amazon-ebssurrogate.html.md +++ b/website/source/docs/builders/amazon-ebssurrogate.html.md @@ -143,8 +143,15 @@ builder. after all provisioners have run. For Windows instances, it is sometimes desirable to [run Sysprep](http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ami-create-standard.html) which will stop the instance for you. If this is set to true, Packer *will not* - stop the instance and will wait for you to stop it manually. You can do this - with a [windows-shell provisioner](https://www.packer.io/docs/provisioners/windows-shell.html). + stop the instance but will assume that you will send the stop signal + yourself through your final provisioner. You can do this with a + [windows-shell provisioner](https://www.packer.io/docs/provisioners/windows-shell.html). + + Note that Packer will still wait for the instance to be stopped, and failing + to send the stop signal yourself, when you have set this flag to `true`, + will cause a timeout. + + Example of a valid shutdown command: ``` json { diff --git a/website/source/docs/builders/amazon-ebsvolume.html.md b/website/source/docs/builders/amazon-ebsvolume.html.md index 1bab5eb62..acfa33e84 100644 --- a/website/source/docs/builders/amazon-ebsvolume.html.md +++ b/website/source/docs/builders/amazon-ebsvolume.html.md @@ -111,6 +111,27 @@ builder. provider whose API is compatible with aws EC2. Specify another endpoint like this `https://ec2.custom.endpoint.com`. +- `disable_stop_instance` (boolean) - Packer normally stops the build instance + after all provisioners have run. For Windows instances, it is sometimes + desirable to [run Sysprep](http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ami-create-standard.html) + which will stop the instance for you. If this is set to true, Packer *will not* + stop the instance but will assume that you will send the stop signal + yourself through your final provisioner. 
You can do this with a + [windows-shell provisioner](https://www.packer.io/docs/provisioners/windows-shell.html). + + Note that Packer will still wait for the instance to be stopped, and failing + to send the stop signal yourself, when you have set this flag to `true`, + will cause a timeout. + + Example of a valid shutdown command: + + ``` json + { + "type": "windows-shell", + "inline": ["\"c:\\Program Files\\Amazon\\Ec2ConfigService\\ec2config.exe\" -sysprep"] + } + ``` + - `ebs_optimized` (boolean) - Mark instance as [EBS Optimized](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html). Default `false`. From ce40e9cc860d574ebb7ead6f7bfda4af9e95feb6 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 26 Jun 2018 16:18:20 -0700 Subject: [PATCH 135/138] only test on .10.x --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 2bac499b3..02f936309 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,7 +8,7 @@ language: go go: - 1.8.x - 1.9.x - - 1.x + - 1.10.x install: From e21981e58190641cf62fae5cbcb395a9bd710a22 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 26 Jun 2018 15:17:59 -0700 Subject: [PATCH 136/138] fix vendor commit for go-oracle-terraform --- .../go-oracle-terraform/compute/image_list.go | 4 ++-- .../compute/machine_images.go | 4 ++-- .../compute/orchestration.go | 23 ++++++++++++++++--- .../compute/security_lists.go | 8 +++---- .../go-oracle-terraform/compute/snapshots.go | 8 +++---- .../go-oracle-terraform/compute/ssh_keys.go | 4 ++-- .../compute/storage_volume_attachments.go | 1 + vendor/vendor.json | 20 ++++++---------- 8 files changed, 42 insertions(+), 30 deletions(-) diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list.go index 0d4ca06ba..1d908987c 100644 --- a/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list.go +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/image_list.go @@ -89,7 +89,7 @@ func (c *ImageListClient) CreateImageList(createInput *CreateImageListInput) (*I // DeleteKeyInput describes the image list to delete type DeleteImageListInput struct { // The name of the Image List - Name string `json:name` + Name string `json:"name"` } // DeleteImageList deletes the Image List with the given name. @@ -101,7 +101,7 @@ func (c *ImageListClient) DeleteImageList(deleteInput *DeleteImageListInput) err // GetImageListInput describes the image list to get type GetImageListInput struct { // The name of the Image List - Name string `json:name` + Name string `json:"name"` } // GetImageList retrieves the Image List with the given name. diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/machine_images.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/machine_images.go index bf7736a63..f2c4b4a15 100644 --- a/vendor/github.com/hashicorp/go-oracle-terraform/compute/machine_images.go +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/machine_images.go @@ -109,7 +109,7 @@ func (c *MachineImagesClient) CreateMachineImage(createInput *CreateMachineImage // DeleteMachineImageInput describes the MachineImage to delete type DeleteMachineImageInput struct { // The name of the MachineImage - Name string `json:name` + Name string `json:"name"` } // DeleteMachineImage deletes the MachineImage with the given name. 
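The recurring `json:name` to `json:"name"` fixes in this vendor bump are not cosmetic: Go only recognizes struct tags written in the conventional `key:"value"` form, so the unquoted variant is silently ignored and `encoding/json` falls back to the exported field name. A self-contained sketch of the difference (the `badInput`/`goodInput` types are invented for illustration):

``` go
package main

import (
	"encoding/json"
	"fmt"
)

type badInput struct {
	// Malformed tag: encoding/json cannot parse it, so the field
	// marshals under its Go name, "Name".
	Name string `json:name`
}

type goodInput struct {
	// Well-formed tag: the field marshals as "name", which is the
	// lowercase key the Oracle Compute API payloads expect.
	Name string `json:"name"`
}

func main() {
	b, _ := json.Marshal(badInput{Name: "vol1"})
	g, _ := json.Marshal(goodInput{Name: "vol1"})
	fmt.Println(string(b)) // {"Name":"vol1"}
	fmt.Println(string(g)) // {"name":"vol1"}
}
```

`go vet` flags such malformed tags for the same reason; the corrected tags restore the field names the API requires.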
@@ -122,7 +122,7 @@ type GetMachineImageInput struct { // account of the associated Object Storage Classic instance Account string `json:"account"` // The name of the Machine Image - Name string `json:name` + Name string `json:"name"` } // GetMachineImage retrieves the MachineImage with the given name. diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/orchestration.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/orchestration.go index 684e0d45f..93d9e2342 100644 --- a/vendor/github.com/hashicorp/go-oracle-terraform/compute/orchestration.go +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/orchestration.go @@ -60,6 +60,12 @@ const ( OrchestrationTypeInstance OrchestrationType = "Instance" ) +type OrchestrationRelationshipType string + +const ( + OrchestrationRelationshipTypeDepends OrchestrationRelationshipType = "depends" +) + // OrchestrationInfo describes an existing Orchestration. type Orchestration struct { // The default Oracle Compute Cloud Service account, such as /Compute-acme/default. @@ -162,7 +168,7 @@ type Object struct { // Note that when recovering from a failure, the orchestration doesn't consider object relationships. // Orchestrations v2 use object references to recover interdependent objects to a healthy state. SeeObject // References and Relationships in Using Oracle Compute Cloud Service (IaaS). - Relationship []Object `json:"relationships,omitempty"` + Relationships []Relationship `json:"relationships,omitempty"` // The template attribute defines the properties or characteristics of the Oracle Compute Cloud Service object // that you want to create, as specified by the type attribute. // The fields in the template section vary depending on the specified type. See Orchestration v2 Attributes @@ -193,6 +199,16 @@ type Health struct { Error string `json:"error,omitempty"` } +type Relationship struct { + // The type of Relationship + // The only type is depends + // Required + Type OrchestrationRelationshipType `json:"type"` + // What objects the relationship depends on + // Required + Targets []string `json:"targets"` +} + // CreateOrchestration creates a new Orchestration with the given name, key and enabled flag. func (c *OrchestrationsClient) CreateOrchestration(input *CreateOrchestrationInput) (*Orchestration, error) { var createdOrchestration Orchestration @@ -222,6 +238,7 @@ func (c *OrchestrationsClient) CreateOrchestration(input *CreateOrchestrationInp instanceInput.Storage = qualifiedStorageAttachments instanceInput.Networking = instanceClient.qualifyNetworking(instanceInput.Networking) + } } @@ -258,7 +275,7 @@ func (c *OrchestrationsClient) CreateOrchestration(input *CreateOrchestrationInp // GetOrchestrationInput describes the Orchestration to get type GetOrchestrationInput struct { // The three-part name of the Orchestration (/Compute-identity_domain/user/object). - Name string `json:name` + Name string `json:"name"` } // GetOrchestration retrieves the Orchestration with the given name. @@ -341,7 +358,7 @@ func (c *OrchestrationsClient) UpdateOrchestration(input *UpdateOrchestrationInp type DeleteOrchestrationInput struct { // The three-part name of the Orchestration (/Compute-identity_domain/user/object). 
// Required - Name string `json:name` + Name string `json:"name"` // Timeout for delete request Timeout time.Duration `json:"-"` } diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_lists.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_lists.go index 3c6d87364..dec906e8c 100644 --- a/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_lists.go +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/security_lists.go @@ -30,7 +30,7 @@ type SecurityListInfo struct { // Shows the default account for your identity domain. Account string `json:"account"` // A description of the security list. - Description string `json:description` + Description string `json:"description"` // The three-part name of the security list (/Compute-identity_domain/user/object). Name string `json:"name"` // The policy for outbound traffic from the security list. @@ -73,7 +73,7 @@ func (c *SecurityListsClient) CreateSecurityList(createInput *CreateSecurityList type GetSecurityListInput struct { // The three-part name of the Security List (/Compute-identity_domain/user/object). // Required - Name string `json:name` + Name string `json:"name"` } // GetSecurityList retrieves the security list with the given name. @@ -90,7 +90,7 @@ func (c *SecurityListsClient) GetSecurityList(getInput *GetSecurityListInput) (* type UpdateSecurityListInput struct { // A description of the security list. // Optional - Description string `json:description` + Description string `json:"description"` // The three-part name of the Security List (/Compute-identity_domain/user/object). // Required Name string `json:"name"` @@ -117,7 +117,7 @@ func (c *SecurityListsClient) UpdateSecurityList(updateInput *UpdateSecurityList type DeleteSecurityListInput struct { // The three-part name of the Security List (/Compute-identity_domain/user/object). // Required - Name string `json:name` + Name string `json:"name"` } // DeleteSecurityList deletes the security list with the given name. diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/snapshots.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/snapshots.go index 370fd0c97..98a24ac9d 100644 --- a/vendor/github.com/hashicorp/go-oracle-terraform/compute/snapshots.go +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/snapshots.go @@ -120,7 +120,7 @@ func (c *SnapshotsClient) CreateSnapshot(input *CreateSnapshotInput) (*Snapshot, type GetSnapshotInput struct { // The name of the Snapshot // Required - Name string `json:name` + Name string `json:"name"` } // GetSnapshot retrieves the Snapshot with the given name. @@ -176,9 +176,9 @@ func (c *SnapshotsClient) DeleteSnapshot(machineImagesClient *MachineImagesClien return nil } -// DeleteSnapshot deletes the Snapshot with the given name. -// A machine image gets created with the associated snapshot is not deleted -// by this method. +// DeleteSnapshotResourceOnly deletes the Snapshot with the given name. +// The machine image that gets created with the associated snapshot is not +// deleted by this method. 
func (c *SnapshotsClient) DeleteSnapshotResourceOnly(input *DeleteSnapshotInput) error { // Wait for snapshot complete in case delay is active and the corresponding // instance needs to be deleted first diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ssh_keys.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ssh_keys.go index 7e8be20ed..821fd2216 100644 --- a/vendor/github.com/hashicorp/go-oracle-terraform/compute/ssh_keys.go +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/ssh_keys.go @@ -70,7 +70,7 @@ func (c *SSHKeysClient) CreateSSHKey(createInput *CreateSSHKeyInput) (*SSHKey, e // GetSSHKeyInput describes the ssh key to get type GetSSHKeyInput struct { // The three-part name of the SSH Key (/Compute-identity_domain/user/object). - Name string `json:name` + Name string `json:"name"` } // GetSSHKey retrieves the SSH key with the given name. @@ -110,7 +110,7 @@ func (c *SSHKeysClient) UpdateSSHKey(updateInput *UpdateSSHKeyInput) (*SSHKey, e // DeleteKeyInput describes the ssh key to delete type DeleteSSHKeyInput struct { // The three-part name of the SSH Key (/Compute-identity_domain/user/object). - Name string `json:name` + Name string `json:"name"` } // DeleteSSHKey deletes the SSH key with the given name. diff --git a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_attachments.go b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_attachments.go index 51e18af47..7fe9a13ef 100644 --- a/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_attachments.go +++ b/vendor/github.com/hashicorp/go-oracle-terraform/compute/storage_volume_attachments.go @@ -81,6 +81,7 @@ type CreateStorageAttachmentInput struct { // CreateStorageAttachment creates a storage attachment attaching the given volume to the given instance at the given index. 
func (c *StorageAttachmentsClient) CreateStorageAttachment(input *CreateStorageAttachmentInput) (*StorageAttachmentInfo, error) { input.InstanceName = c.getQualifiedName(input.InstanceName) + input.StorageVolumeName = c.getQualifiedName(input.StorageVolumeName) var attachmentInfo *StorageAttachmentInfo if err := c.createResource(&input, &attachmentInfo); err != nil { diff --git a/vendor/vendor.json b/vendor/vendor.json index 3f2ba9413..7f7d5ae68 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -836,29 +836,23 @@ "path": "github.com/hashicorp/go-multierror", "revision": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" }, - { - "checksumSHA1": "Nf2Gdn9M1KlUS3sovKfymO1VJF4=", - "path": "github.com/hashicorp/go-oracle-terraform", - "revision": "5a9a298c54339d2296d2f1135eae55a3a8f5e8c2", - "revisionTime": "2018-01-11T20:31:13Z" - }, { "checksumSHA1": "hjQfXn32Tvuu6IJACOTsMzm+AbA=", "path": "github.com/hashicorp/go-oracle-terraform/client", - "revision": "5a9a298c54339d2296d2f1135eae55a3a8f5e8c2", - "revisionTime": "2018-01-11T20:31:13Z" + "revision": "62e2241f9c4154d5603a3678adc912991a47a468", + "revisionTime": "2018-01-31T23:42:02Z" }, { - "checksumSHA1": "wce86V0j11J6xRSvJEanprjK7so=", + "checksumSHA1": "yoA7SyeQNJ8XxwC7IcXdJ2kOTqg=", "path": "github.com/hashicorp/go-oracle-terraform/compute", - "revision": "5a9a298c54339d2296d2f1135eae55a3a8f5e8c2", - "revisionTime": "2018-01-11T20:31:13Z" + "revision": "62e2241f9c4154d5603a3678adc912991a47a468", + "revisionTime": "2018-01-31T23:42:02Z" }, { "checksumSHA1": "NuObCk0/ybL3w++EnltgrB1GQRc=", "path": "github.com/hashicorp/go-oracle-terraform/opc", - "revision": "5a9a298c54339d2296d2f1135eae55a3a8f5e8c2", - "revisionTime": "2018-01-11T20:31:13Z" + "revision": "62e2241f9c4154d5603a3678adc912991a47a468", + "revisionTime": "2018-01-31T23:42:02Z" }, { "checksumSHA1": "ErJHGU6AVPZM9yoY/xV11TwSjQs=", From 3622a669dcfa304ae16ff3bd5f9657d117e36585 Mon Sep 17 00:00:00 2001 From: Sean Malloy Date: Thu, 29 Mar 2018 22:50:58 -0500 Subject: [PATCH 137/138] Add new post processor googlecompute-import --- command/plugin.go | 2 + post-processor/compress/notes.txt | 3 + post-processor/compress/post-processor.go | 3 + post-processor/compress/tar_fix.go | 9 + post-processor/compress/tar_fix_go110.go | 11 + .../googlecompute-import/artifact.go | 37 + .../googlecompute-import/artifact_test.go | 15 + .../googlecompute-import/post-processor.go | 235 + .../google.golang.org/api/gensupport/go18.go | 17 + .../google.golang.org/api/gensupport/media.go | 166 +- .../api/gensupport/not_go18.go | 14 + .../google.golang.org/api/gensupport/send.go | 16 + .../api/storage/v1/storage-api.json | 3784 ++++++ .../api/storage/v1/storage-gen.go | 11171 ++++++++++++++++ vendor/vendor.json | 12 +- .../googlecompute-import.html.md | 145 + 16 files changed, 15620 insertions(+), 20 deletions(-) create mode 100644 post-processor/compress/notes.txt create mode 100644 post-processor/compress/tar_fix.go create mode 100644 post-processor/compress/tar_fix_go110.go create mode 100644 post-processor/googlecompute-import/artifact.go create mode 100644 post-processor/googlecompute-import/artifact_test.go create mode 100644 post-processor/googlecompute-import/post-processor.go create mode 100644 vendor/google.golang.org/api/gensupport/go18.go create mode 100644 vendor/google.golang.org/api/gensupport/not_go18.go create mode 100644 vendor/google.golang.org/api/storage/v1/storage-api.json create mode 100644 vendor/google.golang.org/api/storage/v1/storage-gen.go create mode 100644 
website/source/docs/post-processors/googlecompute-import.html.md diff --git a/command/plugin.go b/command/plugin.go index 60651332d..34062a11e 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -56,6 +56,7 @@ import ( dockersavepostprocessor "github.com/hashicorp/packer/post-processor/docker-save" dockertagpostprocessor "github.com/hashicorp/packer/post-processor/docker-tag" googlecomputeexportpostprocessor "github.com/hashicorp/packer/post-processor/googlecompute-export" + googlecomputeimportpostprocessor "github.com/hashicorp/packer/post-processor/googlecompute-import" manifestpostprocessor "github.com/hashicorp/packer/post-processor/manifest" shelllocalpostprocessor "github.com/hashicorp/packer/post-processor/shell-local" vagrantpostprocessor "github.com/hashicorp/packer/post-processor/vagrant" @@ -146,6 +147,7 @@ var PostProcessors = map[string]packer.PostProcessor{ "docker-save": new(dockersavepostprocessor.PostProcessor), "docker-tag": new(dockertagpostprocessor.PostProcessor), "googlecompute-export": new(googlecomputeexportpostprocessor.PostProcessor), + "googlecompute-import": new(googlecomputeimportpostprocessor.PostProcessor), "manifest": new(manifestpostprocessor.PostProcessor), "shell-local": new(shelllocalpostprocessor.PostProcessor), "vagrant": new(vagrantpostprocessor.PostProcessor), diff --git a/post-processor/compress/notes.txt b/post-processor/compress/notes.txt new file mode 100644 index 000000000..51f9f7f0b --- /dev/null +++ b/post-processor/compress/notes.txt @@ -0,0 +1,3 @@ +* 1.9.6 => GNU tar format +* 1.10.3 w/ patch => GNU tar format +* 1.10.3 w/o patch => Posix tar format diff --git a/post-processor/compress/post-processor.go b/post-processor/compress/post-processor.go index d1bf89f1a..3e089f867 100644 --- a/post-processor/compress/post-processor.go +++ b/post-processor/compress/post-processor.go @@ -303,6 +303,9 @@ func createTarArchive(files []string, output io.WriteCloser) error { return fmt.Errorf("Failed to create tar header for %s: %s", path, err) } + // workaround for archive format on go >=1.10 + setHeaderFormat(header) + if err := archive.WriteHeader(header); err != nil { return fmt.Errorf("Failed to write tar header for %s: %s", path, err) } diff --git a/post-processor/compress/tar_fix.go b/post-processor/compress/tar_fix.go new file mode 100644 index 000000000..af60a2fff --- /dev/null +++ b/post-processor/compress/tar_fix.go @@ -0,0 +1,9 @@ +// +build !go1.10 + +package compress + +import "archive/tar" + +func setHeaderFormat(header *tar.Header) { + // no-op +} diff --git a/post-processor/compress/tar_fix_go110.go b/post-processor/compress/tar_fix_go110.go new file mode 100644 index 000000000..016b6b656 --- /dev/null +++ b/post-processor/compress/tar_fix_go110.go @@ -0,0 +1,11 @@ +// +build go1.10 + +package compress + +import "archive/tar" + +func setHeaderFormat(header *tar.Header) { + // We have to set the Format explicitly for the googlecompute-import + // post-processor. Google Cloud only allows importing GNU tar format. 
+ header.Format = tar.FormatGNU +} diff --git a/post-processor/googlecompute-import/artifact.go b/post-processor/googlecompute-import/artifact.go new file mode 100644 index 000000000..1a43d7c50 --- /dev/null +++ b/post-processor/googlecompute-import/artifact.go @@ -0,0 +1,37 @@ +package googlecomputeimport + +import ( + "fmt" +) + +const BuilderId = "packer.post-processor.googlecompute-import" + +type Artifact struct { + paths []string +} + +func (*Artifact) BuilderId() string { + return BuilderId +} + +func (*Artifact) Id() string { + return "" +} + +func (a *Artifact) Files() []string { + pathsCopy := make([]string, len(a.paths)) + copy(pathsCopy, a.paths) + return pathsCopy +} + +func (a *Artifact) String() string { + return fmt.Sprintf("Exported artifacts in: %s", a.paths) +} + +func (*Artifact) State(name string) interface{} { + return nil +} + +func (a *Artifact) Destroy() error { + return nil +} diff --git a/post-processor/googlecompute-import/artifact_test.go b/post-processor/googlecompute-import/artifact_test.go new file mode 100644 index 000000000..9d53c0b07 --- /dev/null +++ b/post-processor/googlecompute-import/artifact_test.go @@ -0,0 +1,15 @@ +package googlecomputeimport + +import ( + "testing" + + "github.com/hashicorp/packer/packer" +) + +func TestArtifact_ImplementsArtifact(t *testing.T) { + var raw interface{} + raw = &Artifact{} + if _, ok := raw.(packer.Artifact); !ok { + t.Fatalf("Artifact should be a Artifact") + } +} diff --git a/post-processor/googlecompute-import/post-processor.go b/post-processor/googlecompute-import/post-processor.go new file mode 100644 index 000000000..a043a8ade --- /dev/null +++ b/post-processor/googlecompute-import/post-processor.go @@ -0,0 +1,235 @@ +package googlecomputeimport + +import ( + "fmt" + "net/http" + "os" + "strings" + "time" + + "google.golang.org/api/compute/v1" + "google.golang.org/api/storage/v1" + + "github.com/hashicorp/packer/builder/googlecompute" + "github.com/hashicorp/packer/common" + "github.com/hashicorp/packer/helper/config" + "github.com/hashicorp/packer/helper/multistep" + "github.com/hashicorp/packer/packer" + "github.com/hashicorp/packer/post-processor/compress" + "github.com/hashicorp/packer/template/interpolate" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" +) + +type Config struct { + common.PackerConfig `mapstructure:",squash"` + + Bucket string `mapstructure:"bucket"` + GCSObjectName string `mapstructure:"gcs_object_name"` + ImageDescription string `mapstructure:"image_description"` + ImageFamily string `mapstructure:"image_family"` + ImageLabels map[string]string `mapstructure:"image_labels"` + ImageName string `mapstructure:"image_name"` + ProjectId string `mapstructure:"project_id"` + AccountFile string `mapstructure:"account_file"` + KeepOriginalImage bool `mapstructure:"keep_input_artifact"` + + ctx interpolate.Context +} + +type PostProcessor struct { + config Config + runner multistep.Runner +} + +func (p *PostProcessor) Configure(raws ...interface{}) error { + err := config.Decode(&p.config, &config.DecodeOpts{ + Interpolate: true, + InterpolateContext: &p.config.ctx, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "gcs_object_name", + }, + }, + }, raws...) 
+ if err != nil { + return err + } + + // Set defaults + if p.config.GCSObjectName == "" { + p.config.GCSObjectName = "packer-import-{{timestamp}}.tar.gz" + } + + errs := new(packer.MultiError) + + // Check and render gcs_object_name + if err = interpolate.Validate(p.config.GCSObjectName, &p.config.ctx); err != nil { + errs = packer.MultiErrorAppend( + errs, fmt.Errorf("Error parsing gcs_object_name template: %s", err)) + } + + templates := map[string]*string{ + "bucket": &p.config.Bucket, + "image_name": &p.config.ImageName, + "project_id": &p.config.ProjectId, + "account_file": &p.config.AccountFile, + } + for key, ptr := range templates { + if *ptr == "" { + errs = packer.MultiErrorAppend( + errs, fmt.Errorf("%s must be set", key)) + } + } + + if len(errs.Errors) > 0 { + return errs + } + + return nil +} + +func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) { + var err error + + if artifact.BuilderId() != compress.BuilderId { + err = fmt.Errorf( + "incompatible artifact type: %s\nCan only import from Compress post-processor artifacts", + artifact.BuilderId()) + return nil, false, err + } + + p.config.GCSObjectName, err = interpolate.Render(p.config.GCSObjectName, &p.config.ctx) + if err != nil { + return nil, false, fmt.Errorf("Error rendering gcs_object_name template: %s", err) + } + + rawImageGcsPath, err := UploadToBucket(p.config.AccountFile, ui, artifact, p.config.Bucket, p.config.GCSObjectName) + if err != nil { + return nil, p.config.KeepOriginalImage, err + } + + gceImageArtifact, err := CreateGceImage(p.config.AccountFile, ui, p.config.ProjectId, rawImageGcsPath, p.config.ImageName, p.config.ImageDescription, p.config.ImageFamily, p.config.ImageLabels) + if err != nil { + return nil, p.config.KeepOriginalImage, err + } + + return gceImageArtifact, p.config.KeepOriginalImage, nil +} + +func UploadToBucket(accountFile string, ui packer.Ui, artifact packer.Artifact, bucket string, gcsObjectName string) (string, error) { + var client *http.Client + var account googlecompute.AccountFile + + err := googlecompute.ProcessAccountFile(&account, accountFile) + if err != nil { + return "", err + } + + var DriverScopes = []string{"https://www.googleapis.com/auth/devstorage.full_control"} + conf := jwt.Config{ + Email: account.ClientEmail, + PrivateKey: []byte(account.PrivateKey), + Scopes: DriverScopes, + TokenURL: "https://accounts.google.com/o/oauth2/token", + } + + client = conf.Client(oauth2.NoContext) + service, err := storage.New(client) + if err != nil { + return "", err + } + + ui.Say("Looking for tar.gz file in list of artifacts...") + source := "" + for _, path := range artifact.Files() { + ui.Say(fmt.Sprintf("Found artifact %v...", path)) + if strings.HasSuffix(path, ".tar.gz") { + source = path + break + } + } + + if source == "" { + return "", fmt.Errorf("No tar.gz file found in list of artifacts") + } + + artifactFile, err := os.Open(source) + if err != nil { + err := fmt.Errorf("error opening %v", source) + return "", err + } + + ui.Say(fmt.Sprintf("Uploading file %v to GCS bucket %v/%v...", source, bucket, gcsObjectName)) + storageObject, err := service.Objects.Insert(bucket, &storage.Object{Name: gcsObjectName}).Media(artifactFile).Do() + if err != nil { + ui.Say(fmt.Sprintf("Failed to upload: %v", storageObject)) + return "", err + } + + return "https://storage.googleapis.com/" + bucket + "/" + gcsObjectName, nil +} + +func CreateGceImage(accountFile string, ui packer.Ui, project string, rawImageURL string, imageName
string, imageDescription string, imageFamily string, imageLabels map[string]string) (packer.Artifact, error) { + var client *http.Client + var account googlecompute.AccountFile + + err := googlecompute.ProcessAccountFile(&account, accountFile) + if err != nil { + return nil, err + } + + var DriverScopes = []string{"https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/devstorage.full_control"} + conf := jwt.Config{ + Email: account.ClientEmail, + PrivateKey: []byte(account.PrivateKey), + Scopes: DriverScopes, + TokenURL: "https://accounts.google.com/o/oauth2/token", + } + + client = conf.Client(oauth2.NoContext) + + service, err := compute.New(client) + if err != nil { + return nil, err + } + + gceImage := &compute.Image{ + Name: imageName, + Description: imageDescription, + Family: imageFamily, + Labels: imageLabels, + RawDisk: &compute.ImageRawDisk{Source: rawImageURL}, + SourceType: "RAW", + } + + ui.Say(fmt.Sprintf("Creating GCE image %v...", imageName)) + op, err := service.Images.Insert(project, gceImage).Do() + if err != nil { + ui.Say("Error creating GCE image") + return nil, err + } + + ui.Say("Waiting for GCE image creation operation to complete...") + for op.Status != "DONE" { + op, err = service.GlobalOperations.Get(project, op.Name).Do() + if err != nil { + return nil, err + } + + time.Sleep(5 * time.Second) + } + + // fail if image creation operation has an error + if op.Error != nil { + var imageError string + for _, error := range op.Error.Errors { + imageError += error.Message + } + err = fmt.Errorf("failed to create GCE image %s: %s", imageName, imageError) + return nil, err + } + + return &Artifact{paths: []string{op.TargetLink}}, nil +} diff --git a/vendor/google.golang.org/api/gensupport/go18.go b/vendor/google.golang.org/api/gensupport/go18.go new file mode 100644 index 000000000..c76cb8f20 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/go18.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package gensupport + +import ( + "io" + "net/http" +) + +// SetGetBody sets the GetBody field of req to f. +func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) { + req.GetBody = f +} diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go index c6410e89a..5a2674104 100644 --- a/vendor/google.golang.org/api/gensupport/media.go +++ b/vendor/google.golang.org/api/gensupport/media.go @@ -5,12 +5,14 @@ package gensupport import ( + "bytes" "fmt" "io" "io/ioutil" "mime/multipart" "net/http" "net/textproto" + "strings" "google.golang.org/api/googleapi" ) @@ -174,26 +176,156 @@ func typeHeader(contentType string) textproto.MIMEHeader { // PrepareUpload determines whether the data in the supplied reader should be // uploaded in a single request, or in sequential chunks. // chunkSize is the size of the chunk that media should be split into. -// If chunkSize is non-zero and the contents of media do not fit in a single -// chunk (or there is an error reading media), then media will be returned as a -// MediaBuffer. Otherwise, media will be returned as a Reader. +// +// If chunkSize is zero, media is returned as the first value, and the other +// two return values are nil, true. +// +// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the +// contents of media fit in a single chunk. 
// // After PrepareUpload has been called, media should no longer be used: the // media content should be accessed via one of the return values. -func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, *MediaBuffer) { +func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) { if chunkSize == 0 { // do not chunk - return media, nil + return media, nil, true + } + mb = NewMediaBuffer(media, chunkSize) + _, _, _, err := mb.Chunk() + // If err is io.EOF, we can upload this in a single request. Otherwise, err is + // either nil or a non-EOF error. If it is the latter, then the next call to + // mb.Chunk will return the same error. Returning a MediaBuffer ensures that this + // error will be handled at some point. + return nil, mb, err == io.EOF +} + +// MediaInfo holds information for media uploads. It is intended for use by generated +// code only. +type MediaInfo struct { + // At most one of Media and MediaBuffer will be set. + media io.Reader + buffer *MediaBuffer + singleChunk bool + mType string + size int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater googleapi.ProgressUpdater +} + +// NewInfoFromMedia should be invoked from the Media method of a call. It returns a +// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer +// if needed. +func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { + mi := &MediaInfo{} + opts := googleapi.ProcessMediaOptions(options) + if !opts.ForceEmptyContentType { + r, mi.mType = DetermineContentType(r, opts.ContentType) + } + mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) + return mi +} + +// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a +// call. It returns a MediaInfo using the given reader, size and media type. +func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { + rdr := ReaderAtToReader(r, size) + rdr, mType := DetermineContentType(rdr, mediaType) + return &MediaInfo{ + size: size, + mType: mType, + buffer: NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize), + media: nil, + singleChunk: false, + } +} + +func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) { + if mi != nil { + mi.progressUpdater = pu + } +} + +// UploadType determines the type of upload: a single request, or a resumable +// series of requests. +func (mi *MediaInfo) UploadType() string { + if mi.singleChunk { + return "multipart" + } + return "resumable" +} + +// UploadRequest sets up an HTTP request for media upload. It adds headers +// as necessary, and returns a replacement for the body and a function for http.Request.GetBody. +func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) { + cleanup = func() {} + if mi == nil { + return body, nil, cleanup + } + var media io.Reader + if mi.media != nil { + // This only happens when the caller has turned off chunking. In that + // case, we write all of media in a single non-retryable request. + media = mi.media + } else if mi.singleChunk { + // The data fits in a single chunk, which has now been read into the MediaBuffer. + // We obtain that chunk so we can write it in a single request. The request can + // be retried because the data is stored in the MediaBuffer. 
+ media, _, _, _ = mi.buffer.Chunk() + } + if media != nil { + fb := readerFunc(body) + fm := readerFunc(media) + combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) + if fb != nil && fm != nil { + getBody = func() (io.ReadCloser, error) { + rb := ioutil.NopCloser(fb()) + rm := ioutil.NopCloser(fm()) + r, _ := CombineBodyMedia(rb, "application/json", rm, mi.mType) + return r, nil + } + } + cleanup = func() { combined.Close() } + reqHeaders.Set("Content-Type", ctype) + body = combined + } + if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + reqHeaders.Set("X-Upload-Content-Type", mi.mType) + } + return body, getBody, cleanup +} + +// readerFunc returns a function that always returns an io.Reader that has the same +// contents as r, provided that can be done without consuming r. Otherwise, it +// returns nil. +// See http.NewRequest (in net/http/request.go). +func readerFunc(r io.Reader) func() io.Reader { + switch r := r.(type) { + case *bytes.Buffer: + buf := r.Bytes() + return func() io.Reader { return bytes.NewReader(buf) } + case *bytes.Reader: + snapshot := *r + return func() io.Reader { r := snapshot; return &r } + case *strings.Reader: + snapshot := *r + return func() io.Reader { r := snapshot; return &r } + default: + return nil + } +} + +// ResumableUpload returns an appropriately configured ResumableUpload value if the +// upload is resumable, or nil otherwise. +func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { + if mi == nil || mi.singleChunk { + return nil + } + return &ResumableUpload{ + URI: locURI, + Media: mi.buffer, + MediaType: mi.mType, + Callback: func(curr int64) { + if mi.progressUpdater != nil { + mi.progressUpdater(curr, mi.size) + } + }, } - - mb := NewMediaBuffer(media, chunkSize) - rdr, _, _, err := mb.Chunk() - - if err == io.EOF { // we can upload this in a single request - return rdr, nil - } - // err might be a non-EOF error. If it is, the next call to mb.Chunk will - // return the same error. Returning a MediaBuffer ensures that this error - // will be handled at some point. - - return nil, mb } diff --git a/vendor/google.golang.org/api/gensupport/not_go18.go b/vendor/google.golang.org/api/gensupport/not_go18.go new file mode 100644 index 000000000..2536501ce --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/not_go18.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package gensupport + +import ( + "io" + "net/http" +) + +func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {} diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go index 3d22f638f..0f75aa867 100644 --- a/vendor/google.golang.org/api/gensupport/send.go +++ b/vendor/google.golang.org/api/gensupport/send.go @@ -5,6 +5,8 @@ package gensupport import ( + "encoding/json" + "errors" "net/http" "golang.org/x/net/context" @@ -32,6 +34,11 @@ func RegisterHook(h Hook) { // If ctx is non-nil, it calls all hooks, then sends the request with // ctxhttp.Do, then calls any functions returned by the hooks in reverse order. func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Disallow Accept-Encoding because it interferes with the automatic gzip handling + // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. 
+ if _, ok := req.Header["Accept-Encoding"]; ok { + return nil, errors.New("google api: custom Accept-Encoding headers not allowed") + } if ctx == nil { return client.Do(req) } @@ -53,3 +60,12 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (* } return resp, err } + +// DecodeResponse decodes the body of res into target. If there is no body, +// target is unchanged. +func DecodeResponse(target interface{}, res *http.Response) error { + if res.StatusCode == http.StatusNoContent { + return nil + } + return json.NewDecoder(res.Body).Decode(target) +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json new file mode 100644 index 000000000..2d23f028b --- /dev/null +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -0,0 +1,3784 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/devstorage.full_control": { + "description": "Manage your data and permissions in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_only": { + "description": "View your data in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_write": { + "description": "Manage your data in Google Cloud Storage" + } + } + } + }, + "basePath": "/storage/v1/", + "baseUrl": "https://www.googleapis.com/storage/v1/", + "batchPath": "batch/storage/v1", + "description": "Stores and retrieves potentially large, immutable data objects.", + "discoveryVersion": "v1", + "documentationLink": "https://developers.google.com/storage/docs/json_api/", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/nD7tyZqYOGFELa4QRBOZJ8raFKA\"", + "icons": { + "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", + "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" + }, + "id": "storage:v1", + "kind": "discovery#restDescription", + "labels": [ + "labs" + ], + "name": "storage", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "alt": { + "default": "json", + "description": "Data format for the response.", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. 
Overrides userIp if both are provided.", + "location": "query", + "type": "string" + }, + "userIp": { + "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "bucketAccessControls": { + "methods": { + "delete": { + "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + "httpMethod": "DELETE", + "id": "storage.bucketAccessControls.delete", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl/{entity}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "description": "Returns the ACL entry for the specified entity on the specified bucket.", + "httpMethod": "GET", + "id": "storage.bucketAccessControls.get", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl/{entity}", + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "description": "Creates a new ACL entry on the specified bucket.", + "httpMethod": "POST", + "id": "storage.bucketAccessControls.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl", + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "description": "Retrieves ACL entries on the specified bucket.", + "httpMethod": "GET", + "id": "storage.bucketAccessControls.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl", + "response": { + "$ref": "BucketAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "description": "Patches an ACL entry on the specified bucket.", + "httpMethod": "PATCH", + "id": "storage.bucketAccessControls.patch", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl/{entity}", + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "description": "Updates an ACL entry on the specified bucket.", + "httpMethod": "PUT", + "id": "storage.bucketAccessControls.update", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl/{entity}", + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "buckets": { + "methods": { + "delete": { + "description": "Permanently deletes an empty bucket.", + "httpMethod": "DELETE", + "id": "storage.buckets.delete", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "If set, only deletes the bucket if its metageneration matches this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "If set, only deletes the bucket if its metageneration does not match this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Returns metadata for the specified bucket.", + "httpMethod": "GET", + "id": "storage.buckets.get", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}", + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "getIamPolicy": { + "description": "Returns an IAM policy for the specified bucket.", + "httpMethod": "GET", + "id": "storage.buckets.getIamPolicy", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/iam", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates a new bucket.", + "httpMethod": "POST", + "id": "storage.buckets.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." 
+ ], + "location": "query", + "type": "string" + }, + "predefinedDefaultObjectAcl": { + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "project": { + "description": "A valid API project identifier.", + "location": "query", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "b", + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Retrieves a list of buckets for a given project.", + "httpMethod": "GET", + "id": "storage.buckets.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "maxResults": { + "default": "1000", + "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "Filter results to buckets whose names begin with this prefix.", + "location": "query", + "type": "string" + }, + "project": { + "description": "A valid API project identifier.", + "location": "query", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." 
+ ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "b", + "response": { + "$ref": "Buckets" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "lockRetentionPolicy": { + "description": "Locks retention policy on a bucket.", + "httpMethod": "POST", + "id": "storage.buckets.lockRetentionPolicy", + "parameterOrder": [ + "bucket", + "ifMetagenerationMatch" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/lockRetentionPolicy", + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "patch": { + "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. This method supports patch semantics.", + "httpMethod": "PATCH", + "id": "storage.buckets.patch", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." 
+ ], + "location": "query", + "type": "string" + }, + "predefinedDefaultObjectAcl": { + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}", + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "setIamPolicy": { + "description": "Updates an IAM policy for the specified bucket.", + "httpMethod": "PUT", + "id": "storage.buckets.setIamPolicy", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/iam", + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "testIamPermissions": { + "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", + "httpMethod": "GET", + "id": "storage.buckets.testIamPermissions", + "parameterOrder": [ + "bucket", + "permissions" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "permissions": { + "description": "Permissions to test.", + "location": "query", + "repeated": true, + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/iam/testPermissions", + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "description": "Updates a bucket. 
Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + "httpMethod": "PUT", + "id": "storage.buckets.update", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." + ], + "location": "query", + "type": "string" + }, + "predefinedDefaultObjectAcl": { + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}", + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "channels": { + "methods": { + "stop": { + "description": "Stop watching resources through this channel", + "httpMethod": "POST", + "id": "storage.channels.stop", + "path": "channels/stop", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "defaultObjectAccessControls": { + "methods": { + "delete": { + "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + "httpMethod": "DELETE", + "id": "storage.defaultObjectAccessControls.delete", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + "httpMethod": "GET", + "id": "storage.defaultObjectAccessControls.get", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "description": "Creates a new default object ACL entry on the specified bucket.", + "httpMethod": "POST", + "id": "storage.defaultObjectAccessControls.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "description": "Retrieves default object ACL entries on the specified bucket.", + "httpMethod": "GET", + "id": "storage.defaultObjectAccessControls.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl", + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "description": "Patches a default object ACL entry on the specified bucket.", + "httpMethod": "PATCH", + "id": "storage.defaultObjectAccessControls.patch", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "description": "Updates a default object ACL entry on the specified bucket.", + "httpMethod": "PUT", + "id": "storage.defaultObjectAccessControls.update", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "notifications": { + "methods": { + "delete": { + "description": "Permanently deletes a notification subscription.", + "httpMethod": "DELETE", + "id": "storage.notifications.delete", + "parameterOrder": [ + "bucket", + "notification" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the notification.", + "location": "path", + "required": true, + "type": "string" + }, + "notification": { + "description": "ID of the notification to delete.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/notificationConfigs/{notification}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "View a notification configuration.", + "httpMethod": "GET", + "id": "storage.notifications.get", + "parameterOrder": [ + "bucket", + "notification" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the notification.", + "location": "path", + "required": true, + "type": "string" + }, + "notification": { + "description": "Notification ID", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/notificationConfigs/{notification}", + "response": { + "$ref": "Notification" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates a notification subscription for a given bucket.", + "httpMethod": "POST", + "id": "storage.notifications.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the notification.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/notificationConfigs", + "request": { + "$ref": "Notification" + }, + "response": { + "$ref": "Notification" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Retrieves a list of notification subscriptions for a given bucket.", + "httpMethod": "GET", + "id": "storage.notifications.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a Google Cloud Storage bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/notificationConfigs", + "response": { + "$ref": "Notifications" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "objectAccessControls": { + "methods": { + "delete": { + "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + "httpMethod": "DELETE", + "id": "storage.objectAccessControls.delete", + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl/{entity}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "description": "Returns the ACL entry for the specified entity on the specified object.", + "httpMethod": "GET", + "id": "storage.objectAccessControls.get", + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl/{entity}", + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "description": "Creates a new ACL entry on the specified object.", + "httpMethod": "POST", + "id": "storage.objectAccessControls.insert", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "description": "Retrieves ACL entries on the specified object.", + "httpMethod": "GET", + "id": "storage.objectAccessControls.list", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl", + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "description": "Patches an ACL entry on the specified object.", + "httpMethod": "PATCH", + "id": "storage.objectAccessControls.patch", + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "description": "Updates an ACL entry on the specified object.", + "httpMethod": "PUT", + "id": "storage.objectAccessControls.update", + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "objects": { + "methods": { + "compose": { + "description": "Concatenates a list of existing objects into a new object in the same bucket.", + "httpMethod": "POST", + "id": "storage.objects.compose", + "parameterOrder": [ + "destinationBucket", + "destinationObject" + ], + "parameters": { + "destinationBucket": { + "description": "Name of the bucket in which to store the new object.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationObject": { + "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationPredefinedAcl": { + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "kmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{destinationBucket}/o/{destinationObject}/compose", + "request": { + "$ref": "ComposeRequest" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "copy": { + "description": "Copies a source object to a destination object. 
Optionally overrides metadata.", + "httpMethod": "POST", + "id": "storage.objects.copy", + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "parameters": { + "destinationBucket": { + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationObject": { + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationPredefinedAcl": { + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "sourceBucket": { + "description": "Name of the bucket in which to find the source object.", + "location": "path", + "required": true, + "type": "string" + }, + "sourceGeneration": { + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "sourceObject": { + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "delete": { + "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + "httpMethod": "DELETE", + "id": "storage.objects.delete", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Retrieves an object or its metadata.", + "httpMethod": "GET", + "id": "storage.objects.get", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}", + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "getIamPolicy": { + "description": "Returns an IAM policy for the specified object.", + "httpMethod": "GET", + "id": "storage.objects.getIamPolicy", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/iam", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Stores a new object and metadata.", + "httpMethod": "POST", + "id": "storage.objects.insert", + "mediaUpload": { + "accept": [ + "*/*" + ], + "protocols": { + "resumable": { + "multipart": true, + "path": "/resumable/upload/storage/v1/b/{bucket}/o" + }, + "simple": { + "multipart": true, + "path": "/upload/storage/v1/b/{bucket}/o" + } + } + }, + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "location": "path", + "required": true, + "type": "string" + }, + "contentEncoding": { + "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "kmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any. Limited availability; usable only by enabled projects.", + "location": "query", + "type": "string" + }, + "name": { + "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "query", + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaUpload": true + }, + "list": { + "description": "Retrieves a list of objects matching the criteria.", + "httpMethod": "GET", + "id": "storage.objects.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to look for objects.", + "location": "path", + "required": true, + "type": "string" + }, + "delimiter": { + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "1000", + "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + }, + "versions": { + "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + "location": "query", + "type": "boolean" + } + }, + "path": "b/{bucket}/o", + "response": { + "$ref": "Objects" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + }, + "patch": { + "description": "Patches an object's metadata.", + "httpMethod": "PATCH", + "id": "storage.objects.patch", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. 
Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request, for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "rewrite": { + "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + "httpMethod": "POST", + "id": "storage.objects.rewrite", + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "parameters": { + "destinationBucket": { + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationKmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query", + "type": "string" + }, + "destinationObject": { + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationPredefinedAcl": { + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "maxBytesRewrittenPerCall": { + "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + "format": "int64", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "rewriteToken": { + "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + "location": "query", + "type": "string" + }, + "sourceBucket": { + "description": "Name of the bucket in which to find the source object.", + "location": "path", + "required": true, + "type": "string" + }, + "sourceGeneration": { + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "sourceObject": { + "description": "Name of the source object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "RewriteResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "setIamPolicy": { + "description": "Updates an IAM policy for the specified object.", + "httpMethod": "PUT", + "id": "storage.objects.setIamPolicy", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/iam", + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "testIamPermissions": { + "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", + "httpMethod": "GET", + "id": "storage.objects.testIamPermissions", + "parameterOrder": [ + "bucket", + "object", + "permissions" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "permissions": { + "description": "Permissions to test.", + "location": "query", + "repeated": true, + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/iam/testPermissions", + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "description": "Updates an object's metadata.", + "httpMethod": "PUT", + "id": "storage.objects.update", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "watchAll": { + "description": "Watch for changes on all objects in a bucket.", + "httpMethod": "POST", + "id": "storage.objects.watchAll", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to look for objects.", + "location": "path", + "required": true, + "type": "string" + }, + "delimiter": { + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "1000", + "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + }, + "versions": { + "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", + "location": "query", + "type": "boolean" + } + }, + "path": "b/{bucket}/o/watch", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + } + } + }, + "projects": { + "resources": { + "serviceAccount": { + "methods": { + "get": { + "description": "Get the email address of this project's Google Cloud Storage service account.", + "httpMethod": "GET", + "id": "storage.projects.serviceAccount.get", + "parameterOrder": [ + "projectId" + ], + "parameters": { + "projectId": { + "description": "Project ID", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/serviceAccount", + "response": { + "$ref": "ServiceAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + } + } + } + }, + "revision": "20180305", + "rootUrl": "https://www.googleapis.com/", + "schemas": { + "Bucket": { + "description": "A bucket.", + "id": "Bucket", + "properties": { + "acl": { + "annotations": { + "required": [ + "storage.buckets.update" + ] + }, + "description": "Access controls on the bucket.", + "items": { + "$ref": "BucketAccessControl" + }, + "type": "array" + }, + "billing": { + "description": "The bucket's billing configuration.", + "properties": { + "requesterPays": { + "description": "When set to true, Requester Pays is enabled for this bucket.", + "type": "boolean" + } + }, + "type": "object" + }, + "cors": { + "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.", + "items": { + "properties": { + "maxAgeSeconds": { + "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.", + "format": "int32", + "type": "integer" + }, + "method": { + "description": "The list of HTTP methods on which to include CORS response headers (GET, OPTIONS, POST, etc.). Note: \"*\" is permitted in the list of methods, and means \"any method\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "origin": { + "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "responseHeader": { + "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + }, + "defaultEventBasedHold": { + "description": "Defines the default value for Event-Based hold on newly created objects in this bucket. Event-Based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release.
After being released, such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here bucket-level retention is 3 years and the event is loan being paid in full. In this example these objects will be held intact for any number of years until the event has occurred (hold is released) and then 3 more years after that. Objects under Event-Based hold cannot be deleted, overwritten or archived until the hold is removed.", + "type": "boolean" + }, + "defaultObjectAcl": { + "description": "Default access controls to apply to new objects when no ACL is provided.", + "items": { + "$ref": "ObjectAccessControl" + }, + "type": "array" + }, + "encryption": { + "description": "Encryption configuration used by default for newly inserted objects, when no encryption config is specified.", + "properties": { + "defaultKmsKeyName": { + "description": "A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified. Limited availability; usable only by enabled projects.", + "type": "string" + } + }, + "type": "object" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the bucket.", + "type": "string" + }, + "id": { + "description": "The ID of the bucket. For buckets, the id and name properties are the same.", + "type": "string" + }, + "kind": { + "default": "storage#bucket", + "description": "The kind of item this is. For buckets, this is always storage#bucket.", + "type": "string" + }, + "labels": { + "additionalProperties": { + "description": "An individual label entry.", + "type": "string" + }, + "description": "User-provided labels, in key/value pairs.", + "type": "object" + }, + "lifecycle": { + "description": "The bucket's lifecycle configuration. See lifecycle management for more information.", + "properties": { + "rule": { + "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", + "items": { + "properties": { + "action": { + "description": "The action to take.", + "properties": { + "storageClass": { + "description": "Target storage class. Required iff the type of the action is SetStorageClass.", + "type": "string" + }, + "type": { + "description": "Type of the action. Currently, only Delete and SetStorageClass are supported.", + "type": "string" + } + }, + "type": "object" + }, + "condition": { + "description": "The condition(s) under which the action will be taken.", + "properties": { + "age": { + "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.", + "format": "int32", + "type": "integer" + }, + "createdBefore": { + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.", + "format": "date", + "type": "string" + }, + "isLive": { + "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects.", + "type": "boolean" + }, + "matchesStorageClass": { + "description": "Objects having any of the storage classes specified by this condition will be matched. 
Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.", + "items": { + "type": "string" + }, + "type": "array" + }, + "numNewerVersions": { + "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "location": { + "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.", + "type": "string" + }, + "logging": { + "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.", + "properties": { + "logBucket": { + "description": "The destination bucket where the current bucket's logs should be placed.", + "type": "string" + }, + "logObjectPrefix": { + "description": "A prefix for log object names.", + "type": "string" + } + }, + "type": "object" + }, + "metageneration": { + "description": "The metadata generation of this bucket.", + "format": "int64", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "storage.buckets.insert" + ] + }, + "description": "The name of the bucket.", + "type": "string" + }, + "owner": { + "description": "The owner of the bucket. This is always the project team's owner group.", + "properties": { + "entity": { + "description": "The entity, in the form project-owner-projectId.", + "type": "string" + }, + "entityId": { + "description": "The ID for the entity.", + "type": "string" + } + }, + "type": "object" + }, + "projectNumber": { + "description": "The project number of the project the bucket belongs to.", + "format": "uint64", + "type": "string" + }, + "retentionPolicy": { + "description": "Defines the retention policy for a bucket. The Retention policy enforces a minimum retention time for all objects contained in the bucket, based on their creation time. Any attempt to overwrite or delete objects younger than the retention period will result in a PERMISSION_DENIED error. An unlocked retention policy can be modified or removed from the bucket via the UpdateBucketMetadata RPC. A locked retention policy cannot be removed or shortened in duration for the lifetime of the bucket. Attempting to remove or decrease period of a locked retention policy will result in a PERMISSION_DENIED error.", + "properties": { + "effectiveTime": { + "description": "The time from which policy was enforced and effective. RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "isLocked": { + "description": "Once locked, an object retention policy cannot be modified.", + "type": "boolean" + }, + "retentionPeriod": { + "description": "Specifies the duration that objects need to be retained. Retention duration must be greater than zero and less than 100 years. Note that enforcement of retention periods less than a day is not guaranteed. Such periods should only be used for testing purposes.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "selfLink": { + "description": "The URI of this bucket.", + "type": "string" + }, + "storageClass": { + "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. 
This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", + "type": "string" + }, + "timeCreated": { + "description": "The creation time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "updated": { + "description": "The modification time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "versioning": { + "description": "The bucket's versioning configuration.", + "properties": { + "enabled": { + "description": "While set to true, versioning is fully enabled for this bucket.", + "type": "boolean" + } + }, + "type": "object" + }, + "website": { + "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.", + "properties": { + "mainPageSuffix": { + "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages.", + "type": "string" + }, + "notFoundPage": { + "description": "If the requested object path is missing, and any mainPageSuffix object is missing, if applicable, the service will return the named object from this bucket as the content for a 404 Not Found result.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "BucketAccessControl": { + "description": "An access-control entry.", + "id": "BucketAccessControl", + "properties": { + "bucket": { + "description": "The name of the bucket.", + "type": "string" + }, + "domain": { + "description": "The domain associated with the entity, if any.", + "type": "string" + }, + "email": { + "description": "The email address associated with the entity, if any.", + "type": "string" + }, + "entity": { + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + }, + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", + "type": "string" + }, + "entityId": { + "description": "The ID for the entity, if any.", + "type": "string" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the access-control entry.", + "type": "string" + }, + "id": { + "description": "The ID of the access-control entry.", + "type": "string" + }, + "kind": { + "default": "storage#bucketAccessControl", + "description": "The kind of item this is. 
For bucket access control entries, this is always storage#bucketAccessControl.", + "type": "string" + }, + "projectTeam": { + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "description": "The project number.", + "type": "string" + }, + "team": { + "description": "The team.", + "type": "string" + } + }, + "type": "object" + }, + "role": { + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + }, + "description": "The access permission for the entity.", + "type": "string" + }, + "selfLink": { + "description": "The link to this access-control entry.", + "type": "string" + } + }, + "type": "object" + }, + "BucketAccessControls": { + "description": "An access-control list.", + "id": "BucketAccessControls", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "BucketAccessControl" + }, + "type": "array" + }, + "kind": { + "default": "storage#bucketAccessControls", + "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.", + "type": "string" + } + }, + "type": "object" + }, + "Buckets": { + "description": "A list of buckets.", + "id": "Buckets", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "Bucket" + }, + "type": "array" + }, + "kind": { + "default": "storage#buckets", + "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, + "Channel": { + "description": "A notification channel used to watch for resource changes.", + "id": "Channel", + "properties": { + "address": { + "description": "The address where notifications are delivered for this channel.", + "type": "string" + }, + "expiration": { + "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", + "format": "int64", + "type": "string" + }, + "id": { + "description": "A UUID or similar unique string that identifies this channel.", + "type": "string" + }, + "kind": { + "default": "api#channel", + "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".", + "type": "string" + }, + "params": { + "additionalProperties": { + "description": "Declares a new parameter by name.", + "type": "string" + }, + "description": "Additional parameters controlling delivery channel behavior. Optional.", + "type": "object" + }, + "payload": { + "description": "A Boolean value to indicate whether payload is wanted. Optional.", + "type": "boolean" + }, + "resourceId": { + "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions.", + "type": "string" + }, + "resourceUri": { + "description": "A version-specific identifier for the watched resource.", + "type": "string" + }, + "token": { + "description": "An arbitrary string delivered to the target address with each notification delivered over this channel.
Optional.", + "type": "string" + }, + "type": { + "description": "The type of delivery mechanism used for this channel.", + "type": "string" + } + }, + "type": "object" + }, + "ComposeRequest": { + "description": "A Compose request.", + "id": "ComposeRequest", + "properties": { + "destination": { + "$ref": "Object", + "description": "Properties of the resulting object." + }, + "kind": { + "default": "storage#composeRequest", + "description": "The kind of item this is.", + "type": "string" + }, + "sourceObjects": { + "annotations": { + "required": [ + "storage.objects.compose" + ] + }, + "description": "The list of source objects that will be concatenated into a single object.", + "items": { + "properties": { + "generation": { + "description": "The generation of this object to use as the source.", + "format": "int64", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "storage.objects.compose" + ] + }, + "description": "The source object's name. The source object's bucket is implicitly the destination bucket.", + "type": "string" + }, + "objectPreconditions": { + "description": "Conditions that must be met for this operation to execute.", + "properties": { + "ifGenerationMatch": { + "description": "Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "Notification": { + "description": "A subscription to receive Google PubSub notifications.", + "id": "Notification", + "properties": { + "custom_attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "An optional list of additional attributes to attach to each Cloud PubSub message published for this notification subscription.", + "type": "object" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for this subscription notification.", + "type": "string" + }, + "event_types": { + "description": "If present, only send notifications about listed event types. If empty, sent notifications for all event types.", + "items": { + "type": "string" + }, + "type": "array" + }, + "id": { + "description": "The ID of the notification.", + "type": "string" + }, + "kind": { + "default": "storage#notification", + "description": "The kind of item this is. For notifications, this is always storage#notification.", + "type": "string" + }, + "object_name_prefix": { + "description": "If present, only apply this notification configuration to object names that begin with this prefix.", + "type": "string" + }, + "payload_format": { + "annotations": { + "required": [ + "storage.notifications.insert" + ] + }, + "default": "JSON_API_V1", + "description": "The desired content of the Payload.", + "type": "string" + }, + "selfLink": { + "description": "The canonical URL of this notification.", + "type": "string" + }, + "topic": { + "annotations": { + "required": [ + "storage.notifications.insert" + ] + }, + "description": "The Cloud PubSub topic to which this subscription publishes. 
Formatted as: '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'", + "type": "string" + } + }, + "type": "object" + }, + "Notifications": { + "description": "A list of notification subscriptions.", + "id": "Notifications", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "Notification" + }, + "type": "array" + }, + "kind": { + "default": "storage#notifications", + "description": "The kind of item this is. For lists of notifications, this is always storage#notifications.", + "type": "string" + } + }, + "type": "object" + }, + "Object": { + "description": "An object.", + "id": "Object", + "properties": { + "acl": { + "annotations": { + "required": [ + "storage.objects.update" + ] + }, + "description": "Access controls on the object.", + "items": { + "$ref": "ObjectAccessControl" + }, + "type": "array" + }, + "bucket": { + "description": "The name of the bucket containing this object.", + "type": "string" + }, + "cacheControl": { + "description": "Cache-Control directive for the object data. If omitted, and the object is accessible to all anonymous users, the default will be public, max-age=3600.", + "type": "string" + }, + "componentCount": { + "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.", + "format": "int32", + "type": "integer" + }, + "contentDisposition": { + "description": "Content-Disposition of the object data.", + "type": "string" + }, + "contentEncoding": { + "description": "Content-Encoding of the object data.", + "type": "string" + }, + "contentLanguage": { + "description": "Content-Language of the object data.", + "type": "string" + }, + "contentType": { + "description": "Content-Type of the object data. If an object is stored without a Content-Type, it is served as application/octet-stream.", + "type": "string" + }, + "crc32c": { + "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices.", + "type": "string" + }, + "customerEncryption": { + "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.", + "properties": { + "encryptionAlgorithm": { + "description": "The encryption algorithm.", + "type": "string" + }, + "keySha256": { + "description": "SHA256 hash value of the encryption key.", + "type": "string" + } + }, + "type": "object" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the object.", + "type": "string" + }, + "eventBasedHold": { + "description": "Defines the Event-Based hold for an object. Event-Based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release. After being released, such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here bucket-level retention is 3 years and the event is loan being paid in full. In this example these objects will be held intact for any number of years until the event has occurred (hold is released) and then 3 more years after that.", + "type": "boolean" + }, + "generation": { + "description": "The content generation of this object. 
Used for object versioning.", + "format": "int64", + "type": "string" + }, + "id": { + "description": "The ID of the object, including the bucket name, object name, and generation number.", + "type": "string" + }, + "kind": { + "default": "storage#object", + "description": "The kind of item this is. For objects, this is always storage#object.", + "type": "string" + }, + "kmsKeyName": { + "description": "Cloud KMS Key used to encrypt this object, if the object is encrypted by such a key. Limited availability; usable only by enabled projects.", + "type": "string" + }, + "md5Hash": { + "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices.", + "type": "string" + }, + "mediaLink": { + "description": "Media download link.", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "description": "An individual metadata entry.", + "type": "string" + }, + "description": "User-provided metadata, in key/value pairs.", + "type": "object" + }, + "metageneration": { + "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.", + "format": "int64", + "type": "string" + }, + "name": { + "description": "The name of the object. Required if not specified by URL parameter.", + "type": "string" + }, + "owner": { + "description": "The owner of the object. This will always be the uploader of the object.", + "properties": { + "entity": { + "description": "The entity, in the form user-userId.", + "type": "string" + }, + "entityId": { + "description": "The ID for the entity.", + "type": "string" + } + }, + "type": "object" + }, + "retentionExpirationTime": { + "description": "Specifies the earliest time that the object's retention period expires. This value is server-determined and is in RFC 3339 format. Note 1: This field is not provided for objects with an active Event-Based hold, since retention expiration is unknown until the hold is removed. Note 2: This value can be provided even when TemporaryHold is set (so that the user can reason about policy without having to first unset the TemporaryHold).", + "format": "date-time", + "type": "string" + }, + "selfLink": { + "description": "The link to this object.", + "type": "string" + }, + "size": { + "description": "Content-Length of the data in bytes.", + "format": "uint64", + "type": "string" + }, + "storageClass": { + "description": "Storage class of the object.", + "type": "string" + }, + "temporaryHold": { + "description": "Defines the temporary hold for an object. This flag is used to enforce a temporary hold on an object. While it is set to true, the object is protected against deletion and overwrites. A common use case of this flag is regulatory investigations where objects need to be retained while the investigation is ongoing.", + "type": "boolean" + }, + "timeCreated": { + "description": "The creation time of the object in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "timeDeleted": { + "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", + "format": "date-time", + "type": "string" + }, + "timeStorageClassUpdated": { + "description": "The time at which the object's storage class was last changed. 
When the object is initially created, it will be set to timeCreated.", + "format": "date-time", + "type": "string" + }, + "updated": { + "description": "The modification time of the object metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "ObjectAccessControl": { + "description": "An access-control entry.", + "id": "ObjectAccessControl", + "properties": { + "bucket": { + "description": "The name of the bucket.", + "type": "string" + }, + "domain": { + "description": "The domain associated with the entity, if any.", + "type": "string" + }, + "email": { + "description": "The email address associated with the entity, if any.", + "type": "string" + }, + "entity": { + "annotations": { + "required": [ + "storage.defaultObjectAccessControls.insert", + "storage.objectAccessControls.insert" + ] + }, + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", + "type": "string" + }, + "entityId": { + "description": "The ID for the entity, if any.", + "type": "string" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the access-control entry.", + "type": "string" + }, + "generation": { + "description": "The content generation of the object, if applied to an object.", + "format": "int64", + "type": "string" + }, + "id": { + "description": "The ID of the access-control entry.", + "type": "string" + }, + "kind": { + "default": "storage#objectAccessControl", + "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.", + "type": "string" + }, + "object": { + "description": "The name of the object, if applied to an object.", + "type": "string" + }, + "projectTeam": { + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "description": "The project number.", + "type": "string" + }, + "team": { + "description": "The team.", + "type": "string" + } + }, + "type": "object" + }, + "role": { + "annotations": { + "required": [ + "storage.defaultObjectAccessControls.insert", + "storage.objectAccessControls.insert" + ] + }, + "description": "The access permission for the entity.", + "type": "string" + }, + "selfLink": { + "description": "The link to this access-control entry.", + "type": "string" + } + }, + "type": "object" + }, + "ObjectAccessControls": { + "description": "An access-control list.", + "id": "ObjectAccessControls", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "ObjectAccessControl" + }, + "type": "array" + }, + "kind": { + "default": "storage#objectAccessControls", + "description": "The kind of item this is. 
For lists of object access control entries, this is always storage#objectAccessControls.", + "type": "string" + } + }, + "type": "object" + }, + "Objects": { + "description": "A list of objects.", + "id": "Objects", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "Object" + }, + "type": "array" + }, + "kind": { + "default": "storage#objects", + "description": "The kind of item this is. For lists of objects, this is always storage#objects.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + }, + "prefixes": { + "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "Policy": { + "description": "A bucket/object IAM policy.", + "id": "Policy", + "properties": { + "bindings": { + "annotations": { + "required": [ + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy" + ] + }, + "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.", + "items": { + "properties": { + "condition": { + "type": "any" + }, + "members": { + "annotations": { + "required": [ + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy" + ] + }, + "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project", + "items": { + "type": "string" + }, + "type": "array" + }, + "role": { + "annotations": { + "required": [ + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy" + ] + }, + "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. 
The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. \n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the policy.", + "format": "byte", + "type": "string" + }, + "kind": { + "default": "storage#policy", + "description": "The kind of item this is. For policies, this is always storage#policy. This field is ignored on input.", + "type": "string" + }, + "resourceId": { + "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", + "type": "string" + } + }, + "type": "object" + }, + "RewriteResponse": { + "description": "A rewrite response.", + "id": "RewriteResponse", + "properties": { + "done": { + "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response.", + "type": "boolean" + }, + "kind": { + "default": "storage#rewriteResponse", + "description": "The kind of item this is.", + "type": "string" + }, + "objectSize": { + "description": "The total size of the object being copied in bytes. This property is always present in the response.", + "format": "int64", + "type": "string" + }, + "resource": { + "$ref": "Object", + "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes." + }, + "rewriteToken": { + "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy.", + "type": "string" + }, + "totalBytesRewritten": { + "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "ServiceAccount": { + "description": "The Cloud Storage service account for a project.", + "id": "ServiceAccount", + "properties": { + "email_address": { + "description": "The email address of the service account.", + "type": "string" + }, + "kind": { + "default": "storage#serviceAccount", + "description": "The kind of item this is. 
For service account entries, this is always storage#serviceAccount.", + "type": "string" + } + }, + "type": "object" + }, + "TestIamPermissionsResponse": { + "description": "A storage.(buckets|objects).testIamPermissions response.", + "id": "TestIamPermissionsResponse", + "properties": { + "kind": { + "default": "storage#testIamPermissionsResponse", + "description": "The kind of item this is.", + "type": "string" + }, + "permissions": { + "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets or objects. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "servicePath": "storage/v1/", + "title": "Cloud Storage JSON API", + "version": "v1" +} \ No newline at end of file diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go new file mode 100644 index 000000000..36846eb54 --- /dev/null +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -0,0 +1,11171 @@ +// Package storage provides access to the Cloud Storage JSON API. +// +// See https://developers.google.com/storage/docs/json_api/ +// +// Usage example: +// +// import "google.golang.org/api/storage/v1" +// ... +// storageService, err := storage.New(oauthHttpClient) +package storage // import "google.golang.org/api/storage/v1" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "storage:v1" +const apiName = "storage" +const apiVersion = "v1" +const basePath = "https://www.googleapis.com/storage/v1/" + +// OAuth2 scopes used by this API. 
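+// An illustrative sketch (not part of the generated file): assuming the
+// golang.org/x/oauth2/google package is available, an authenticated
+// client carrying one of the scopes below can be built and passed to
+// New; "my-project" is a placeholder project ID.
+//
+//   ctx := context.Background()
+//   httpClient, err := google.DefaultClient(ctx, storage.DevstorageReadOnlyScope)
+//   if err != nil {
+//       // handle credential lookup errors
+//   }
+//   storageService, err := storage.New(httpClient)
+//   if err != nil {
+//       // handle construction errors
+//   }
+//   buckets, err := storageService.Buckets.List("my-project").Do()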
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" + + // Manage your data and permissions in Google Cloud Storage + DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" + + // View your data in Google Cloud Storage + DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" + + // Manage your data in Google Cloud Storage + DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.BucketAccessControls = NewBucketAccessControlsService(s) + s.Buckets = NewBucketsService(s) + s.Channels = NewChannelsService(s) + s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) + s.Notifications = NewNotificationsService(s) + s.ObjectAccessControls = NewObjectAccessControlsService(s) + s.Objects = NewObjectsService(s) + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + BucketAccessControls *BucketAccessControlsService + + Buckets *BucketsService + + Channels *ChannelsService + + DefaultObjectAccessControls *DefaultObjectAccessControlsService + + Notifications *NotificationsService + + ObjectAccessControls *ObjectAccessControlsService + + Objects *ObjectsService + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { + rs := &BucketAccessControlsService{s: s} + return rs +} + +type BucketAccessControlsService struct { + s *Service +} + +func NewBucketsService(s *Service) *BucketsService { + rs := &BucketsService{s: s} + return rs +} + +type BucketsService struct { + s *Service +} + +func NewChannelsService(s *Service) *ChannelsService { + rs := &ChannelsService{s: s} + return rs +} + +type ChannelsService struct { + s *Service +} + +func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService { + rs := &DefaultObjectAccessControlsService{s: s} + return rs +} + +type DefaultObjectAccessControlsService struct { + s *Service +} + +func NewNotificationsService(s *Service) *NotificationsService { + rs := &NotificationsService{s: s} + return rs +} + +type NotificationsService struct { + s *Service +} + +func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService { + rs := &ObjectAccessControlsService{s: s} + return rs +} + +type ObjectAccessControlsService struct { + s *Service +} + +func NewObjectsService(s *Service) *ObjectsService { + rs := &ObjectsService{s: s} + return rs +} + +type ObjectsService struct { + s *Service +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.ServiceAccount = NewProjectsServiceAccountService(s) + return rs +} + +type ProjectsService struct { + s *Service + + ServiceAccount *ProjectsServiceAccountService +} + +func NewProjectsServiceAccountService(s *Service) *ProjectsServiceAccountService { + rs := 
&ProjectsServiceAccountService{s: s} + return rs +} + +type ProjectsServiceAccountService struct { + s *Service +} + +// Bucket: A bucket. +type Bucket struct { + // Acl: Access controls on the bucket. + Acl []*BucketAccessControl `json:"acl,omitempty"` + + // Billing: The bucket's billing configuration. + Billing *BucketBilling `json:"billing,omitempty"` + + // Cors: The bucket's Cross-Origin Resource Sharing (CORS) + // configuration. + Cors []*BucketCors `json:"cors,omitempty"` + + // DefaultEventBasedHold: Defines the default value for Event-Based hold + // on newly created objects in this bucket. Event-Based hold is a way to + // retain objects indefinitely until an event occurs, signified by the + // hold's release. After being released, such objects will be subject to + // bucket-level retention (if any). One sample use case of this flag is + // for banks to hold loan documents for at least 3 years after loan is + // paid in full. Here bucket-level retention is 3 years and the event is + // loan being paid in full. In this example these objects will be held + // intact for any number of years until the event has occurred (hold is + // released) and then 3 more years after that. Objects under Event-Based + // hold cannot be deleted, overwritten or archived until the hold is + // removed. + DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"` + + // DefaultObjectAcl: Default access controls to apply to new objects + // when no ACL is provided. + DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"` + + // Encryption: Encryption configuration used by default for newly + // inserted objects, when no encryption config is specified. + Encryption *BucketEncryption `json:"encryption,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the bucket. + Etag string `json:"etag,omitempty"` + + // Id: The ID of the bucket. For buckets, the id and name properties are + // the same. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For buckets, this is always + // storage#bucket. + Kind string `json:"kind,omitempty"` + + // Labels: User-provided labels, in key/value pairs. + Labels map[string]string `json:"labels,omitempty"` + + // Lifecycle: The bucket's lifecycle configuration. See lifecycle + // management for more information. + Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` + + // Location: The location of the bucket. Object data for objects in the + // bucket resides in physical storage within this region. Defaults to + // US. See the developer's guide for the authoritative list. + Location string `json:"location,omitempty"` + + // Logging: The bucket's logging configuration, which defines the + // destination bucket and optional name prefix for the current bucket's + // logs. + Logging *BucketLogging `json:"logging,omitempty"` + + // Metageneration: The metadata generation of this bucket. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of the bucket. + Name string `json:"name,omitempty"` + + // Owner: The owner of the bucket. This is always the project team's + // owner group. + Owner *BucketOwner `json:"owner,omitempty"` + + // ProjectNumber: The project number of the project the bucket belongs + // to. + ProjectNumber uint64 `json:"projectNumber,omitempty,string"` + + // RetentionPolicy: Defines the retention policy for a bucket. The + // Retention policy enforces a minimum retention time for all objects + // contained in the bucket, based on their creation time. 
Any attempt to + // overwrite or delete objects younger than the retention period will + // result in a PERMISSION_DENIED error. An unlocked retention policy can + // be modified or removed from the bucket via the UpdateBucketMetadata + // RPC. A locked retention policy cannot be removed or shortened in + // duration for the lifetime of the bucket. Attempting to remove or + // decrease period of a locked retention policy will result in a + // PERMISSION_DENIED error. + RetentionPolicy *BucketRetentionPolicy `json:"retentionPolicy,omitempty"` + + // SelfLink: The URI of this bucket. + SelfLink string `json:"selfLink,omitempty"` + + // StorageClass: The bucket's default storage class, used whenever no + // storageClass is specified for a newly-created object. This defines + // how objects in the bucket are stored and determines the SLA and the + // cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, + // NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value + // is not specified when the bucket is created, it will default to + // STANDARD. For more information, see storage classes. + StorageClass string `json:"storageClass,omitempty"` + + // TimeCreated: The creation time of the bucket in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // Updated: The modification time of the bucket in RFC 3339 format. + Updated string `json:"updated,omitempty"` + + // Versioning: The bucket's versioning configuration. + Versioning *BucketVersioning `json:"versioning,omitempty"` + + // Website: The bucket's website configuration, controlling how the + // service behaves when accessing bucket contents as a web site. See the + // Static Website Examples for more information. + Website *BucketWebsite `json:"website,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Acl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Acl") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Bucket) MarshalJSON() ([]byte, error) { + type NoMethod Bucket + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketBilling: The bucket's billing configuration. +type BucketBilling struct { + // RequesterPays: When set to true, Requester Pays is enabled for this + // bucket. + RequesterPays bool `json:"requesterPays,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RequesterPays") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RequesterPays") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketBilling) MarshalJSON() ([]byte, error) { + type NoMethod BucketBilling + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BucketCors struct { + // MaxAgeSeconds: The value, in seconds, to return in the + // Access-Control-Max-Age header used in preflight responses. + MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` + + // Method: The list of HTTP methods on which to include CORS response + // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list + // of methods, and means "any method". + Method []string `json:"method,omitempty"` + + // Origin: The list of Origins eligible to receive CORS response + // headers. Note: "*" is permitted in the list of origins, and means + // "any Origin". + Origin []string `json:"origin,omitempty"` + + // ResponseHeader: The list of HTTP headers other than the simple + // response headers to give permission for the user-agent to share + // across domains. + ResponseHeader []string `json:"responseHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxAgeSeconds") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketCors) MarshalJSON() ([]byte, error) { + type NoMethod BucketCors + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketEncryption: Encryption configuration used by default for newly +// inserted objects, when no encryption config is specified. +type BucketEncryption struct { + // DefaultKmsKeyName: A Cloud KMS key that will be used to encrypt + // objects inserted into this bucket, if no encryption method is + // specified. Limited availability; usable only by enabled projects. + DefaultKmsKeyName string `json:"defaultKmsKeyName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DefaultKmsKeyName") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DefaultKmsKeyName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BucketEncryption) MarshalJSON() ([]byte, error) { + type NoMethod BucketEncryption + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle +// management for more information. +type BucketLifecycle struct { + // Rule: A lifecycle management rule, which is made of an action to take + // and the condition(s) under which the action will be taken. + Rule []*BucketLifecycleRule `json:"rule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Rule") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Rule") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { + type NoMethod BucketLifecycle + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BucketLifecycleRule struct { + // Action: The action to take. + Action *BucketLifecycleRuleAction `json:"action,omitempty"` + + // Condition: The condition(s) under which the action will be taken. + Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
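+ // For example (an illustrative sketch, not generated output): marshaling
+ //   rule := &BucketLifecycleRule{NullFields: []string{"Condition"}}
+ // emits "condition": null, which a Patch request can use to clear the
+ // rule's condition.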
+ NullFields []string `json:"-"` +} + +func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { + type NoMethod BucketLifecycleRule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketLifecycleRuleAction: The action to take. +type BucketLifecycleRuleAction struct { + // StorageClass: Target storage class. Required iff the type of the + // action is SetStorageClass. + StorageClass string `json:"storageClass,omitempty"` + + // Type: Type of the action. Currently, only Delete and SetStorageClass + // are supported. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "StorageClass") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "StorageClass") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { + type NoMethod BucketLifecycleRuleAction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketLifecycleRuleCondition: The condition(s) under which the action +// will be taken. +type BucketLifecycleRuleCondition struct { + // Age: Age of an object (in days). This condition is satisfied when an + // object reaches the specified age. + Age int64 `json:"age,omitempty"` + + // CreatedBefore: A date in RFC 3339 format with only the date part (for + // instance, "2013-01-15"). This condition is satisfied when an object + // is created before midnight of the specified date in UTC. + CreatedBefore string `json:"createdBefore,omitempty"` + + // IsLive: Relevant only for versioned objects. If the value is true, + // this condition matches live objects; if the value is false, it + // matches archived objects. + IsLive *bool `json:"isLive,omitempty"` + + // MatchesStorageClass: Objects having any of the storage classes + // specified by this condition will be matched. Values include + // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and + // DURABLE_REDUCED_AVAILABILITY. + MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` + + // NumNewerVersions: Relevant only for versioned objects. If the value + // is N, this condition is satisfied when there are at least N versions + // (including the live version) newer than this version of the object. + NumNewerVersions int64 `json:"numNewerVersions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Age") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
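+ // For example (an illustrative sketch, not generated output): a condition
+ // built as
+ //   cond := &BucketLifecycleRuleCondition{ForceSendFields: []string{"Age"}}
+ // still serializes "age": 0, even though a zero Age would otherwise be
+ // dropped by its omitempty tag.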
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Age") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { + type NoMethod BucketLifecycleRuleCondition + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketLogging: The bucket's logging configuration, which defines the +// destination bucket and optional name prefix for the current bucket's +// logs. +type BucketLogging struct { + // LogBucket: The destination bucket where the current bucket's logs + // should be placed. + LogBucket string `json:"logBucket,omitempty"` + + // LogObjectPrefix: A prefix for log object names. + LogObjectPrefix string `json:"logObjectPrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LogBucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LogBucket") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLogging) MarshalJSON() ([]byte, error) { + type NoMethod BucketLogging + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketOwner: The owner of the bucket. This is always the project +// team's owner group. +type BucketOwner struct { + // Entity: The entity, in the form project-owner-projectId. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Entity") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Entity") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *BucketOwner) MarshalJSON() ([]byte, error) { + type NoMethod BucketOwner + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketRetentionPolicy: Defines the retention policy for a bucket. The +// Retention policy enforces a minimum retention time for all objects +// contained in the bucket, based on their creation time. Any attempt to +// overwrite or delete objects younger than the retention period will +// result in a PERMISSION_DENIED error. An unlocked retention policy can +// be modified or removed from the bucket via the UpdateBucketMetadata +// RPC. A locked retention policy cannot be removed or shortened in +// duration for the lifetime of the bucket. Attempting to remove or +// decrease period of a locked retention policy will result in a +// PERMISSION_DENIED error. +type BucketRetentionPolicy struct { + // EffectiveTime: The time from which policy was enforced and effective. + // RFC 3339 format. + EffectiveTime string `json:"effectiveTime,omitempty"` + + // IsLocked: Once locked, an object retention policy cannot be modified. + IsLocked bool `json:"isLocked,omitempty"` + + // RetentionPeriod: Specifies the duration that objects need to be + // retained. Retention duration must be greater than zero and less than + // 100 years. Note that enforcement of retention periods less than a day + // is not guaranteed. Such periods should only be used for testing + // purposes. + RetentionPeriod int64 `json:"retentionPeriod,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "EffectiveTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EffectiveTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BucketRetentionPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketVersioning: The bucket's versioning configuration. +type BucketVersioning struct { + // Enabled: While set to true, versioning is fully enabled for this + // bucket. + Enabled bool `json:"enabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketVersioning) MarshalJSON() ([]byte, error) { + type NoMethod BucketVersioning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketWebsite: The bucket's website configuration, controlling how +// the service behaves when accessing bucket contents as a web site. See +// the Static Website Examples for more information. +type BucketWebsite struct { + // MainPageSuffix: If the requested object path is missing, the service + // will ensure the path has a trailing '/', append this suffix, and + // attempt to retrieve the resulting object. This allows the creation of + // index.html objects to represent directory pages. + MainPageSuffix string `json:"mainPageSuffix,omitempty"` + + // NotFoundPage: If the requested object path is missing, and any + // mainPageSuffix object is missing, if applicable, the service will + // return the named object from this bucket as the content for a 404 Not + // Found result. + NotFoundPage string `json:"notFoundPage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MainPageSuffix") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BucketWebsite) MarshalJSON() ([]byte, error) { + type NoMethod BucketWebsite + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketAccessControl: An access-control entry. +type BucketAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + + // Email: The email address associated with the entity, if any. + Email string `json:"email,omitempty"` + + // Entity: The entity holding the permission, in one of the following + // forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the access-control entry. 
+ Etag string `json:"etag,omitempty"` + + // Id: The ID of the access-control entry. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For bucket access control entries, + // this is always storage#bucketAccessControl. + Kind string `json:"kind,omitempty"` + + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` + + // Role: The access permission for the entity. + Role string `json:"role,omitempty"` + + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { + type NoMethod BucketAccessControl + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketAccessControlProjectTeam: The project team associated with the +// entity, if any. +type BucketAccessControlProjectTeam struct { + // ProjectNumber: The project number. + ProjectNumber string `json:"projectNumber,omitempty"` + + // Team: The team. + Team string `json:"team,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProjectNumber") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProjectNumber") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { + type NoMethod BucketAccessControlProjectTeam + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketAccessControls: An access-control list. +type BucketAccessControls struct { + // Items: The list of items. + Items []*BucketAccessControl `json:"items,omitempty"` + + // Kind: The kind of item this is. 
For lists of bucket access control + // entries, this is always storage#bucketAccessControls. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { + type NoMethod BucketAccessControls + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Buckets: A list of buckets. +type Buckets struct { + // Items: The list of items. + Items []*Bucket `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of buckets, this is always + // storage#buckets. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Buckets) MarshalJSON() ([]byte, error) { + type NoMethod Buckets + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Channel: A notification channel used to watch for resource changes. +type Channel struct { + // Address: The address where notifications are delivered for this + // channel. + Address string `json:"address,omitempty"` + + // Expiration: Date and time of notification channel expiration, + // expressed as a Unix timestamp, in milliseconds. Optional. 
+ Expiration int64 `json:"expiration,omitempty,string"` + + // Id: A UUID or similar unique string that identifies this channel. + Id string `json:"id,omitempty"` + + // Kind: Identifies this as a notification channel used to watch for + // changes to a resource. Value: the fixed string "api#channel". + Kind string `json:"kind,omitempty"` + + // Params: Additional parameters controlling delivery channel behavior. + // Optional. + Params map[string]string `json:"params,omitempty"` + + // Payload: A Boolean value to indicate whether payload is wanted. + // Optional. + Payload bool `json:"payload,omitempty"` + + // ResourceId: An opaque ID that identifies the resource being watched + // on this channel. Stable across different API versions. + ResourceId string `json:"resourceId,omitempty"` + + // ResourceUri: A version-specific identifier for the watched resource. + ResourceUri string `json:"resourceUri,omitempty"` + + // Token: An arbitrary string delivered to the target address with each + // notification delivered over this channel. Optional. + Token string `json:"token,omitempty"` + + // Type: The type of delivery mechanism used for this channel. + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Channel) MarshalJSON() ([]byte, error) { + type NoMethod Channel + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ComposeRequest: A Compose request. +type ComposeRequest struct { + // Destination: Properties of the resulting object. + Destination *Object `json:"destination,omitempty"` + + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // SourceObjects: The list of source objects that will be concatenated + // into a single object. + SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Destination") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Destination") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ComposeRequest) MarshalJSON() ([]byte, error) { + type NoMethod ComposeRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ComposeRequestSourceObjects struct { + // Generation: The generation of this object to use as the source. + Generation int64 `json:"generation,omitempty,string"` + + // Name: The source object's name. The source object's bucket is + // implicitly the destination bucket. + Name string `json:"name,omitempty"` + + // ObjectPreconditions: Conditions that must be met for this operation + // to execute. + ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Generation") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Generation") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { + type NoMethod ComposeRequestSourceObjects + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must +// be met for this operation to execute. +type ComposeRequestSourceObjectsObjectPreconditions struct { + // IfGenerationMatch: Only perform the composition if the generation of + // the source object that would be used matches this value. If this + // value and a generation are both specified, they must be the same + // value or the call will fail. + IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "IfGenerationMatch") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IfGenerationMatch") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { + type NoMethod ComposeRequestSourceObjectsObjectPreconditions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Notification: A subscription to receive Google PubSub notifications. +type Notification struct { + // CustomAttributes: An optional list of additional attributes to attach + // to each Cloud PubSub message published for this notification + // subscription. + CustomAttributes map[string]string `json:"custom_attributes,omitempty"` + + // Etag: HTTP 1.1 Entity tag for this subscription notification. + Etag string `json:"etag,omitempty"` + + // EventTypes: If present, only send notifications about listed event + // types. If empty, send notifications for all event types. + EventTypes []string `json:"event_types,omitempty"` + + // Id: The ID of the notification. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For notifications, this is always + // storage#notification. + Kind string `json:"kind,omitempty"` + + // ObjectNamePrefix: If present, only apply this notification + // configuration to object names that begin with this prefix. + ObjectNamePrefix string `json:"object_name_prefix,omitempty"` + + // PayloadFormat: The desired content of the Payload. + PayloadFormat string `json:"payload_format,omitempty"` + + // SelfLink: The canonical URL of this notification. + SelfLink string `json:"selfLink,omitempty"` + + // Topic: The Cloud PubSub topic to which this subscription publishes. + // Formatted as: + // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topi + // c}' + Topic string `json:"topic,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CustomAttributes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomAttributes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Notification) MarshalJSON() ([]byte, error) { + type NoMethod Notification + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Notifications: A list of notification subscriptions. +type Notifications struct { + // Items: The list of items. + Items []*Notification `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of notifications, this is + // always storage#notifications. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Notifications) MarshalJSON() ([]byte, error) { + type NoMethod Notifications + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Object: An object. +type Object struct { + // Acl: Access controls on the object. + Acl []*ObjectAccessControl `json:"acl,omitempty"` + + // Bucket: The name of the bucket containing this object. + Bucket string `json:"bucket,omitempty"` + + // CacheControl: Cache-Control directive for the object data. If + // omitted, and the object is accessible to all anonymous users, the + // default will be public, max-age=3600. + CacheControl string `json:"cacheControl,omitempty"` + + // ComponentCount: Number of underlying components that make up this + // object. Components are accumulated by compose operations. + ComponentCount int64 `json:"componentCount,omitempty"` + + // ContentDisposition: Content-Disposition of the object data. + ContentDisposition string `json:"contentDisposition,omitempty"` + + // ContentEncoding: Content-Encoding of the object data. + ContentEncoding string `json:"contentEncoding,omitempty"` + + // ContentLanguage: Content-Language of the object data. + ContentLanguage string `json:"contentLanguage,omitempty"` + + // ContentType: Content-Type of the object data. If an object is stored + // without a Content-Type, it is served as application/octet-stream. + ContentType string `json:"contentType,omitempty"` + + // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; + // encoded using base64 in big-endian byte order. For more information + // about using the CRC32c checksum, see Hashes and ETags: Best + // Practices. + Crc32c string `json:"crc32c,omitempty"` + + // CustomerEncryption: Metadata of customer-supplied encryption key, if + // the object is encrypted by such a key. + CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the object. + Etag string `json:"etag,omitempty"` + + // EventBasedHold: Defines the Event-Based hold for an object. + // Event-Based hold is a way to retain objects indefinitely until an + // event occurs, signified by the hold's release. After being released, + // such objects will be subject to bucket-level retention (if any). One + // sample use case of this flag is for banks to hold loan documents for + // at least 3 years after loan is paid in full. Here bucket-level + // retention is 3 years and the event is loan being paid in full. In + // this example these objects will be held intact for any number of + // years until the event has occurred (hold is released) and then 3 more + // years after that. 
+ EventBasedHold bool `json:"eventBasedHold,omitempty"` + + // Generation: The content generation of this object. Used for object + // versioning. + Generation int64 `json:"generation,omitempty,string"` + + // Id: The ID of the object, including the bucket name, object name, and + // generation number. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For objects, this is always + // storage#object. + Kind string `json:"kind,omitempty"` + + // KmsKeyName: Cloud KMS Key used to encrypt this object, if the object + // is encrypted by such a key. Limited availability; usable only by + // enabled projects. + KmsKeyName string `json:"kmsKeyName,omitempty"` + + // Md5Hash: MD5 hash of the data; encoded using base64. For more + // information about using the MD5 hash, see Hashes and ETags: Best + // Practices. + Md5Hash string `json:"md5Hash,omitempty"` + + // MediaLink: Media download link. + MediaLink string `json:"mediaLink,omitempty"` + + // Metadata: User-provided metadata, in key/value pairs. + Metadata map[string]string `json:"metadata,omitempty"` + + // Metageneration: The version of the metadata for this object at this + // generation. Used for preconditions and for detecting changes in + // metadata. A metageneration number is only meaningful in the context + // of a particular generation of a particular object. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of the object. Required if not specified by URL + // parameter. + Name string `json:"name,omitempty"` + + // Owner: The owner of the object. This will always be the uploader of + // the object. + Owner *ObjectOwner `json:"owner,omitempty"` + + // RetentionExpirationTime: Specifies the earliest time that the + // object's retention period expires. This value is server-determined + // and is in RFC 3339 format. Note 1: This field is not provided for + // objects with an active Event-Based hold, since retention expiration + // is unknown until the hold is removed. Note 2: This value can be + // provided even when TemporaryHold is set (so that the user can reason + // about policy without having to first unset the TemporaryHold). + RetentionExpirationTime string `json:"retentionExpirationTime,omitempty"` + + // SelfLink: The link to this object. + SelfLink string `json:"selfLink,omitempty"` + + // Size: Content-Length of the data in bytes. + Size uint64 `json:"size,omitempty,string"` + + // StorageClass: Storage class of the object. + StorageClass string `json:"storageClass,omitempty"` + + // TemporaryHold: Defines the temporary hold for an object. This flag is + // used to enforce a temporary hold on an object. While it is set to + // true, the object is protected against deletion and overwrites. A + // common use case of this flag is regulatory investigations where + // objects need to be retained while the investigation is ongoing. + TemporaryHold bool `json:"temporaryHold,omitempty"` + + // TimeCreated: The creation time of the object in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // TimeDeleted: The deletion time of the object in RFC 3339 format. Will + // be returned if and only if this version of the object has been + // deleted. + TimeDeleted string `json:"timeDeleted,omitempty"` + + // TimeStorageClassUpdated: The time at which the object's storage class + // was last changed. When the object is initially created, it will be + // set to timeCreated. 
+ TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"` + + // Updated: The modification time of the object metadata in RFC 3339 + // format. + Updated string `json:"updated,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Acl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Acl") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Object) MarshalJSON() ([]byte, error) { + type NoMethod Object + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectCustomerEncryption: Metadata of customer-supplied encryption +// key, if the object is encrypted by such a key. +type ObjectCustomerEncryption struct { + // EncryptionAlgorithm: The encryption algorithm. + EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` + + // KeySha256: SHA256 hash value of the encryption key. + KeySha256 string `json:"keySha256,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EncryptionAlgorithm") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { + type NoMethod ObjectCustomerEncryption + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectOwner: The owner of the object. This will always be the +// uploader of the object. +type ObjectOwner struct { + // Entity: The entity, in the form user-userId. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Entity") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Entity") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ObjectOwner) MarshalJSON() ([]byte, error) { + type NoMethod ObjectOwner + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectAccessControl: An access-control entry. +type ObjectAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + + // Email: The email address associated with the entity, if any. + Email string `json:"email,omitempty"` + + // Entity: The entity holding the permission, in one of the following + // forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the access-control entry. + Etag string `json:"etag,omitempty"` + + // Generation: The content generation of the object, if applied to an + // object. + Generation int64 `json:"generation,omitempty,string"` + + // Id: The ID of the access-control entry. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For object access control entries, + // this is always storage#objectAccessControl. + Kind string `json:"kind,omitempty"` + + // Object: The name of the object, if applied to an object. + Object string `json:"object,omitempty"` + + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` + + // Role: The access permission for the entity. + Role string `json:"role,omitempty"` + + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Bucket") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { + type NoMethod ObjectAccessControl + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectAccessControlProjectTeam: The project team associated with the +// entity, if any. +type ObjectAccessControlProjectTeam struct { + // ProjectNumber: The project number. + ProjectNumber string `json:"projectNumber,omitempty"` + + // Team: The team. + Team string `json:"team,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProjectNumber") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProjectNumber") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { + type NoMethod ObjectAccessControlProjectTeam + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectAccessControls: An access-control list. +type ObjectAccessControls struct { + // Items: The list of items. + Items []*ObjectAccessControl `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of object access control + // entries, this is always storage#objectAccessControls. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { + type NoMethod ObjectAccessControls + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Objects: A list of objects. +type Objects struct { + // Items: The list of items. + Items []*Object `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of objects, this is always + // storage#objects. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Prefixes: The list of prefixes of objects matching-but-not-listed up + // to and including the requested delimiter. + Prefixes []string `json:"prefixes,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Objects) MarshalJSON() ([]byte, error) { + type NoMethod Objects + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Policy: A bucket/object IAM policy. +type Policy struct { + // Bindings: An association between a role, which comes with a set of + // permissions, and members who may assume that role. + Bindings []*PolicyBindings `json:"bindings,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the policy. + Etag string `json:"etag,omitempty"` + + // Kind: The kind of item this is. For policies, this is always + // storage#policy. This field is ignored on input. + Kind string `json:"kind,omitempty"` + + // ResourceId: The ID of the resource to which this policy belongs. Will + // be of the form projects/_/buckets/bucket for buckets, and + // projects/_/buckets/bucket/objects/object for objects. A specific + // generation may be specified by appending #generationNumber to the end + // of the object name, e.g. + // projects/_/buckets/my-bucket/objects/data.txt#17. The current + // generation can be denoted with #0. This field is ignored on input. + ResourceId string `json:"resourceId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bindings") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bindings") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Policy) MarshalJSON() ([]byte, error) { + type NoMethod Policy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PolicyBindings struct { + Condition interface{} `json:"condition,omitempty"` + + // Members: A collection of identifiers for members who may assume the + // provided role. Recognized identifiers are as follows: + // - allUsers — A special identifier that represents anyone on the + // internet; with or without a Google account. + // - allAuthenticatedUsers — A special identifier that represents + // anyone who is authenticated with a Google account or a service + // account. + // - user:emailid — An email address that represents a specific + // account. For example, user:alice@gmail.com or user:joe@example.com. + // + // - serviceAccount:emailid — An email address that represents a + // service account. For example, + // serviceAccount:my-other-app@appspot.gserviceaccount.com . + // - group:emailid — An email address that represents a Google group. + // For example, group:admins@example.com. + // - domain:domain — A Google Apps domain name that represents all the + // users of that domain. For example, domain:google.com or + // domain:example.com. + // - projectOwner:projectid — Owners of the given project. For + // example, projectOwner:my-example-project + // - projectEditor:projectid — Editors of the given project. For + // example, projectEditor:my-example-project + // - projectViewer:projectid — Viewers of the given project. For + // example, projectViewer:my-example-project + Members []string `json:"members,omitempty"` + + // Role: The role to which members belong. Two types of roles are + // supported: new IAM roles, which grant permissions that do not map + // directly to those provided by ACLs, and legacy IAM roles, which do + // map directly to ACL permissions. All roles are of the format + // roles/storage.specificRole. + // The new IAM roles are: + // - roles/storage.admin — Full control of Google Cloud Storage + // resources. + // - roles/storage.objectViewer — Read-Only access to Google Cloud + // Storage objects. + // - roles/storage.objectCreator — Access to create objects in Google + // Cloud Storage. + // - roles/storage.objectAdmin — Full control of Google Cloud Storage + // objects. The legacy IAM roles are: + // - roles/storage.legacyObjectReader — Read-only access to objects + // without listing. Equivalent to an ACL entry on an object with the + // READER role. + // - roles/storage.legacyObjectOwner — Read/write access to existing + // objects without listing. Equivalent to an ACL entry on an object with + // the OWNER role. + // - roles/storage.legacyBucketReader — Read access to buckets with + // object listing. Equivalent to an ACL entry on a bucket with the + // READER role. 
+ // - roles/storage.legacyBucketWriter — Read access to buckets with + // object listing/creation/deletion. Equivalent to an ACL entry on a + // bucket with the WRITER role. + // - roles/storage.legacyBucketOwner — Read and write access to + // existing buckets with object listing/creation/deletion. Equivalent to + // an ACL entry on a bucket with the OWNER role. + Role string `json:"role,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Condition") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Condition") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PolicyBindings) MarshalJSON() ([]byte, error) { + type NoMethod PolicyBindings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RewriteResponse: A rewrite response. +type RewriteResponse struct { + // Done: true if the copy is finished; otherwise, false if the copy is + // in progress. This property is always present in the response. + Done bool `json:"done,omitempty"` + + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // ObjectSize: The total size of the object being copied in bytes. This + // property is always present in the response. + ObjectSize int64 `json:"objectSize,omitempty,string"` + + // Resource: A resource containing the metadata for the copied-to + // object. This property is present in the response only when copying + // completes. + Resource *Object `json:"resource,omitempty"` + + // RewriteToken: A token to use in subsequent requests to continue + // copying data. This token is present in the response only when there + // is more data to copy. + RewriteToken string `json:"rewriteToken,omitempty"` + + // TotalBytesRewritten: The total bytes written so far, which can be + // used to provide a waiting user with a progress indicator. This + // property is always present in the response. + TotalBytesRewritten int64 `json:"totalBytesRewritten,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Done") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *RewriteResponse) MarshalJSON() ([]byte, error) {
+	type NoMethod RewriteResponse
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// ServiceAccount: A Cloud Storage service account for a project, used
+// when acting on the project's behalf (for example, to publish Cloud
+// PubSub notifications).
+type ServiceAccount struct {
+	// EmailAddress: The email address of the service account.
+	EmailAddress string `json:"email_address,omitempty"`
+
+	// Kind: The kind of item this is. For service accounts, this is always
+	// storage#serviceAccount.
+	Kind string `json:"kind,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "EmailAddress") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "EmailAddress") to include
+	// in API requests with the JSON null value. By default, fields with
+	// empty values are omitted from API requests. However, any field with
+	// an empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *ServiceAccount) MarshalJSON() ([]byte, error) {
+	type NoMethod ServiceAccount
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// TestIamPermissionsResponse: A
+// storage.(buckets|objects).testIamPermissions response.
+type TestIamPermissionsResponse struct {
+	// Kind: The kind of item this is.
+	Kind string `json:"kind,omitempty"`
+
+	// Permissions: The permissions held by the caller. Permissions are
+	// always of the format storage.resource.capability, where resource is
+	// one of buckets or objects. The supported permissions are as follows:
+	//
+	// - storage.buckets.delete — Delete bucket.
+	// - storage.buckets.get — Read bucket metadata.
+	// - storage.buckets.getIamPolicy — Read bucket IAM policy.
+	// - storage.buckets.create — Create bucket.
+	// - storage.buckets.list — List buckets.
+	// - storage.buckets.setIamPolicy — Update bucket IAM policy.
+	// - storage.buckets.update — Update bucket metadata.
+	// - storage.objects.delete — Delete object.
+	// - storage.objects.get — Read object data and metadata.
+	// - storage.objects.getIamPolicy — Read object IAM policy.
+	// - storage.objects.create — Create object.
+	// - storage.objects.list — List objects.
+	// - storage.objects.setIamPolicy — Update object IAM policy.
+	// - storage.objects.update — Update object metadata.
+	Permissions []string `json:"permissions,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "Kind") to
+	// unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { + type NoMethod TestIamPermissionsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "storage.bucketAccessControls.delete": + +type BucketAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified bucket. +func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { + c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.delete" call. 
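+//
+// A minimal usage sketch (illustrative only; svc is an authenticated
+// *Service and the bucket/entity values are placeholders):
+//
+//	if err := svc.BucketAccessControls.Delete("my-bucket", "allUsers").Do(); err != nil {
+//		log.Fatal(err)
+//	}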
+func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if err != nil {
+		return err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return err
+	}
+	return nil
+	// {
+	//   "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
+	//   "httpMethod": "DELETE",
+	//   "id": "storage.bucketAccessControls.delete",
+	//   "parameterOrder": [
+	//     "bucket",
+	//     "entity"
+	//   ],
+	//   "parameters": {
+	//     "bucket": {
+	//       "description": "Name of a bucket.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "entity": {
+	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "userProject": {
+	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "b/{bucket}/acl/{entity}",
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/devstorage.full_control"
+	//   ]
+	// }
+
+}
+
+// method id "storage.bucketAccessControls.get":
+
+type BucketAccessControlsGetCall struct {
+	s            *Service
+	bucket       string
+	entity       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// Get: Returns the ACL entry for the specified entity on the specified
+// bucket.
+func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall {
+	c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	c.entity = entity
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
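+//
+// For example (illustrative; the header name is a placeholder, not one
+// the API requires):
+//
+//	call := svc.BucketAccessControls.Get("my-bucket", "allUsers")
+//	call.Header().Set("X-Example-Trace", "debug")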
+func (c *BucketAccessControlsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.get" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.bucketAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.insert": + +type BucketAccessControlsInsertCall struct { + s *Service + bucket string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new ACL entry on the specified bucket. 
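+//
+// A hedged usage sketch (bucket and entity are placeholders; READER is
+// one of the roles the API accepts):
+//
+//	acl, err := svc.BucketAccessControls.Insert("my-bucket", &BucketAccessControl{
+//		Entity: "user-liz@example.com",
+//		Role:   "READER",
+//	}).Do()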
+func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { + c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.insert" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
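+	// doRequest serializes c.bucketaccesscontrol as the JSON request body;
+	// a 304 response is converted below into a *googleapi.Error so callers
+	// can detect it with googleapi.IsNotModified.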
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &BucketAccessControl{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Creates a new ACL entry on the specified bucket.",
+	//   "httpMethod": "POST",
+	//   "id": "storage.bucketAccessControls.insert",
+	//   "parameterOrder": [
+	//     "bucket"
+	//   ],
+	//   "parameters": {
+	//     "bucket": {
+	//       "description": "Name of a bucket.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "userProject": {
+	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "b/{bucket}/acl",
+	//   "request": {
+	//     "$ref": "BucketAccessControl"
+	//   },
+	//   "response": {
+	//     "$ref": "BucketAccessControl"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/devstorage.full_control"
+	//   ]
+	// }
+
+}
+
+// method id "storage.bucketAccessControls.list":
+
+type BucketAccessControlsListCall struct {
+	s            *Service
+	bucket       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// List: Retrieves ACL entries on the specified bucket.
+func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall {
+	c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketAccessControlsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.list" call. +// Exactly one of *BucketAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.bucketAccessControls.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl", + // "response": { + // "$ref": "BucketAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.patch": + +type BucketAccessControlsPatchCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an ACL entry on the specified bucket. 
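+//
+// Illustrative sketch (placeholders throughout): raising an existing
+// entry's role to OWNER:
+//
+//	acl, err := svc.BucketAccessControls.Patch("my-bucket", "user-liz@example.com", &BucketAccessControl{
+//		Role: "OWNER",
+//	}).Do()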
+func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { + c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.patch" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches an ACL entry on the specified bucket.", + // "httpMethod": "PATCH", + // "id": "storage.bucketAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.update": + +type BucketAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an ACL entry on the specified bucket. +func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { + c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BucketAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.update" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.bucketAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.buckets.delete": + +type BucketsDeleteCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes an empty bucket. +func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { + c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If set, only deletes the bucket if its +// metageneration matches this value. +func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the bucket if its +// metageneration does not match this value. +func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.delete" call. +func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes an empty bucket.", + // "httpMethod": "DELETE", + // "id": "storage.buckets.delete", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If set, only deletes the bucket if its metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If set, only deletes the bucket if its metageneration does not match this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.get": + +type BucketsGetCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns metadata for the specified bucket. +func (r *BucketsService) Get(bucket string) *BucketsGetCall { + c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
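+// A sketch of trimming the returned Bucket to a few fields (field names
+// are illustrative of the Bucket schema):
+//
+//	bkt, err := svc.Buckets.Get("example-bucket").Fields("name", "location").Do()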
+func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.get" call.
+// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Bucket.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
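+	// When IfNoneMatch was set and the bucket is unchanged, the server
+	// replies 304 Not Modified; the branch below converts that into a
+	// *googleapi.Error so callers can detect it, e.g. (etag value
+	// illustrative):
+	//
+	//	_, err := svc.Buckets.Get("example-bucket").IfNoneMatch(etag).Do()
+	//	if googleapi.IsNotModified(err) {
+	//		// the cached copy is still current
+	//	}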
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns metadata for the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.buckets.get", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.getIamPolicy": + +type BucketsGetIamPolicyCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Returns an IAM policy for the specified bucket. +func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { + c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsGetIamPolicyCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.getIamPolicy" call.
+// Exactly one of *Policy or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Policy.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Policy{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Returns an IAM policy for the specified bucket.",
+	//   "httpMethod": "GET",
+	//   "id": "storage.buckets.getIamPolicy",
+	//   "parameterOrder": [
+	//     "bucket"
+	//   ],
+	//   "parameters": {
+	//     "bucket": {
+	//       "description": "Name of a bucket.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "userProject": {
+	//       "description": "The project to be billed for this request.
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/iam", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.insert": + +type BucketsInsertCall struct { + s *Service + bucket *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new bucket. +func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { + c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + c.bucket = bucket + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the bucket resource +// specifies acl or defaultObjectAcl properties, when it defaults to +// full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.insert" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new bucket.", + // "httpMethod": "POST", + // "id": "storage.buckets.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.list": + +type BucketsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of buckets for a given project. +func (r *BucketsService) List(projectid string) *BucketsListCall { + c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of buckets to return in a single response. The service will use this +// parameter or 1,000 items, whichever is smaller. +func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// buckets whose names begin with this prefix. +func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. 
+func (c *BucketsListCall) Projection(projection string) *BucketsListCall {
+	c.urlParams_.Set("projection", projection)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request.
+func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.list" call.
+// Exactly one of *Buckets or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Buckets.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
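+	// A single Do call returns at most one page of buckets; callers either
+	// loop on NextPageToken or use the Pages helper defined below, e.g.
+	// (project ID illustrative):
+	//
+	//	err := svc.Buckets.List("example-project").Pages(ctx, func(b *Buckets) error {
+	//		for _, bkt := range b.Items {
+	//			fmt.Println(bkt.Name)
+	//		}
+	//		return nil
+	//	})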
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Buckets{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of buckets for a given project.", + // "httpMethod": "GET", + // "id": "storage.buckets.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to buckets whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b", + // "response": { + // "$ref": "Buckets" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.buckets.lockRetentionPolicy": + +type BucketsLockRetentionPolicyCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// LockRetentionPolicy: Locks retention policy on a bucket. 
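+// Locking a retention policy is permanent and must be guarded by the
+// bucket's current metageneration. A hypothetical call (bucket name and
+// metageneration are illustrative):
+//
+//	bkt, err := svc.Buckets.LockRetentionPolicy("example-bucket", 7).Do()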
+func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { + c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLockRetentionPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsLockRetentionPolicyCall) Context(ctx context.Context) *BucketsLockRetentionPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsLockRetentionPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.lockRetentionPolicy" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Locks retention policy on a bucket.", + // "httpMethod": "POST", + // "id": "storage.buckets.lockRetentionPolicy", + // "parameterOrder": [ + // "bucket", + // "ifMetagenerationMatch" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/lockRetentionPolicy", + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.patch": + +type BucketsPatchCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a bucket. Changes to the bucket will be readable +// immediately after writing, but configuration changes may take time to +// propagate. This method supports patch semantics. +func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { + c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. 
+// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.patch" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.buckets.patch", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.buckets.setIamPolicy": + +type BucketsSetIamPolicyCall struct { + s *Service + bucket string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Updates an IAM policy for the specified bucket. +func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall { + c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.policy = policy + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
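+// For example, to attach a custom header before calling Do (header name
+// illustrative):
+//
+//	c.Header().Set("X-Example-Debug", "1")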
+func (c *BucketsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an IAM policy for the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.buckets.setIamPolicy", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/iam", + // "request": { + // "$ref": "Policy" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.testIamPermissions": + +type BucketsTestIamPermissionsCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Tests a set of permissions on the given bucket to +// see which, if any, are held by the caller. 
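+// A sketch probing two permissions (names follow the storage.* IAM
+// permission scheme):
+//
+//	resp, err := svc.Buckets.TestIamPermissions("example-bucket",
+//		[]string{"storage.buckets.get", "storage.objects.list"}).Do()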
+func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall {
+	c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	c.urlParams_.SetMulti("permissions", append([]string{}, permissions...))
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsTestIamPermissionsCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.testIamPermissions" call.
+// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
+// was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
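+	// The decoded TestIamPermissionsResponse echoes back only the subset
+	// of requested permissions the caller actually holds; an empty list
+	// means none were granted.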
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", + // "httpMethod": "GET", + // "id": "storage.buckets.testIamPermissions", + // "parameterOrder": [ + // "bucket", + // "permissions" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissions": { + // "description": "Permissions to test.", + // "location": "query", + // "repeated": true, + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/iam/testPermissions", + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.update": + +type BucketsUpdateCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a bucket. Changes to the bucket will be readable +// immediately after writing, but configuration changes may take time to +// propagate. +func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { + c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. 
+// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BucketsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.update" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a bucket. 
Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + // "httpMethod": "PUT", + // "id": "storage.buckets.update", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.channels.stop": + +type ChannelsStopCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Stop: Stop watching resources through this channel +func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { + c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.channel = channel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChannelsStopCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.channels.stop" call. +func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Stop watching resources through this channel", + // "httpMethod": "POST", + // "id": "storage.channels.stop", + // "path": "channels/stop", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.delete": + +type DefaultObjectAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the default object ACL entry for the +// specified entity on the specified bucket. +func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { + c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.delete" call. +func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
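+ // Illustrative sketch (editor's addition): removing a default object
+ // ACL entry, e.g. revoking public read on newly created objects.
+ // Assumes svc is an authenticated *Service and ctx a context.Context;
+ // the bucket name and entity are hypothetical:
+ //
+ //   err := svc.DefaultObjectAccessControls.Delete("my-bucket", "allUsers").
+ //       Context(ctx).Do()
+ //   // A nil error means the entry is gone; Delete returns no body.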
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
+ // "httpMethod": "DELETE",
+ // "id": "storage.defaultObjectAccessControls.delete",
+ // "parameterOrder": [
+ // "bucket",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "userProject": {
+ // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.defaultObjectAccessControls.get":
+
+type DefaultObjectAccessControlsGetCall struct {
+ s *Service
+ bucket string
+ entity string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Get: Returns the default object ACL entry for the specified entity on
+// the specified bucket.
+func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall {
+ c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.entity = entity
+ return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall {
+ c.urlParams_.Set("userProject", userProject)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.get" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.defaultObjectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.insert": + +type DefaultObjectAccessControlsInsertCall struct { + s *Service + bucket string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new default object ACL entry on the specified +// bucket. +func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { + c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new default object ACL entry on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.defaultObjectAccessControls.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.list": + +type DefaultObjectAccessControlsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves default object ACL entries on the specified bucket. +func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { + c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If present, only return default ACL listing +// if the bucket's current metageneration matches this value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If present, only return default ACL +// listing if the bucket's current metageneration does not match the +// given value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. 
See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DefaultObjectAccessControlsListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.defaultObjectAccessControls.list" call.
+// Exactly one of *ObjectAccessControls or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControls.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
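+ // Illustrative sketch (editor's addition): listing default object ACLs
+ // with an ETag guard, which is what the IfNoneMatch option above is
+ // for. Assumes svc is an authenticated *Service, ctx a context.Context,
+ // and cachedEtag a value saved from an earlier response (all
+ // hypothetical):
+ //
+ //   acls, err := svc.DefaultObjectAccessControls.List("my-bucket").
+ //       IfNoneMatch(cachedEtag).Context(ctx).Do()
+ //   if googleapi.IsNotModified(err) {
+ //       // nothing changed since cachedEtag; reuse the cached listing
+ //   } else if err == nil {
+ //       for _, acl := range acls.Items {
+ //           fmt.Println(acl.Entity, acl.Role)
+ //       }
+ //   }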
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves default object ACL entries on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.defaultObjectAccessControls.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.patch": + +type DefaultObjectAccessControlsPatchCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches a default object ACL entry on the specified bucket. +func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { + c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.patch" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a default object ACL entry on the specified bucket.", + // "httpMethod": "PATCH", + // "id": "storage.defaultObjectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.update": + +type DefaultObjectAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a default object ACL entry on the specified bucket. +func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { + c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a default object ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.defaultObjectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.notifications.delete": + +type NotificationsDeleteCall struct { + s *Service + bucket string + notification string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes a notification subscription. +func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall { + c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NotificationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "notification": c.notification, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.delete" call. +func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes a notification subscription.", + // "httpMethod": "DELETE", + // "id": "storage.notifications.delete", + // "parameterOrder": [ + // "bucket", + // "notification" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the notification.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "notification": { + // "description": "ID of the notification to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs/{notification}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.notifications.get": + +type NotificationsGetCall struct { + s *Service + bucket string + notification string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: View a notification configuration. +func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall { + c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *NotificationsGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "notification": c.notification,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.notifications.get" call.
+// Exactly one of *Notification or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *Notification.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
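+ // Illustrative sketch (editor's addition): fetching a single
+ // notification config by ID. Assumes svc is an authenticated *Service
+ // and ctx a context.Context; the bucket and notification ID are
+ // hypothetical:
+ //
+ //   n, err := svc.Notifications.Get("my-bucket", "notification-id").
+ //       Context(ctx).Do()
+ //   if err == nil {
+ //       fmt.Println(n.Topic, n.PayloadFormat)
+ //   }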
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Notification{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "View a notification configuration.", + // "httpMethod": "GET", + // "id": "storage.notifications.get", + // "parameterOrder": [ + // "bucket", + // "notification" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the notification.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "notification": { + // "description": "Notification ID", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs/{notification}", + // "response": { + // "$ref": "Notification" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.notifications.insert": + +type NotificationsInsertCall struct { + s *Service + bucket string + notification *Notification + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a notification subscription for a given bucket. +func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall { + c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *NotificationsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.insert" call. +// Exactly one of *Notification or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Notification.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Notification{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a notification subscription for a given bucket.", + // "httpMethod": "POST", + // "id": "storage.notifications.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the notification.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs", + // "request": { + // "$ref": "Notification" + // }, + // "response": { + // "$ref": "Notification" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.notifications.list": + +type NotificationsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of notification subscriptions for a given +// bucket. 
+func (r *NotificationsService) List(bucket string) *NotificationsListCall {
+ c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall {
+ c.urlParams_.Set("userProject", userProject)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *NotificationsListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.notifications.list" call.
+// Exactly one of *Notifications or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *Notifications.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
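+ // Illustrative sketch (editor's addition): enumerating every
+ // notification config on a bucket. Assumes svc is an authenticated
+ // *Service and ctx a context.Context (hypothetical names):
+ //
+ //   ns, err := svc.Notifications.List("my-bucket").Context(ctx).Do()
+ //   if err == nil {
+ //       for _, n := range ns.Items {
+ //           fmt.Printf("%s -> %s\n", n.Id, n.Topic)
+ //       }
+ //   }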
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Notifications{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of notification subscriptions for a given bucket.", + // "httpMethod": "GET", + // "id": "storage.notifications.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a Google Cloud Storage bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs", + // "response": { + // "$ref": "Notifications" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objectAccessControls.delete": + +type ObjectAccessControlsDeleteCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified object. +func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { + c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.delete" call. +func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + // "httpMethod": "DELETE", + // "id": "storage.objectAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.get": + +type ObjectAccessControlsGetCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the ACL entry for the specified entity on the specified +// object. 
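+//
+// A usage sketch (illustrative only; svc and ctx are assumed to be an
+// authenticated *Service and a context.Context):
+//
+//	acl, err := svc.ObjectAccessControls.Get("my-bucket", "my-object", "allUsers").Context(ctx).Do()
+//	if err != nil {
+//		// handle the API error
+//	}
+//	fmt.Println(acl.Entity, acl.Role)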
+func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall {
+	c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	c.object = object
+	c.entity = entity
+	return c
+}
+
+// Generation sets the optional parameter "generation": If present,
+// selects a specific revision of this object (as opposed to the latest
+// version, the default).
+func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall {
+	c.urlParams_.Set("generation", fmt.Sprint(generation))
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectAccessControlsGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+		"object": c.object,
+		"entity": c.entity,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.objectAccessControls.get" call.
+// Exactly one of *ObjectAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.insert": + +type ObjectAccessControlsInsertCall struct { + s *Service + bucket string + object string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new ACL entry on the specified object. +func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { + c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
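+//
+// For example (an illustrative sketch; the entity and project ID are
+// placeholders, svc and ctx are assumed to exist):
+//
+//	acl := &ObjectAccessControl{Entity: "user-jane@example.com", Role: "READER"}
+//	got, err := svc.ObjectAccessControls.Insert("my-bucket", "my-object", acl).
+//		UserProject("my-billing-project").Context(ctx).Do()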
+func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ACL entry on the specified object.", + // "httpMethod": "POST", + // "id": "storage.objectAccessControls.insert", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.list": + +type ObjectAccessControlsListCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves ACL entries on the specified object. +func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { + c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
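+//
+// For example (an illustrative sketch; the field mask is a placeholder,
+// svc and ctx are assumed to exist):
+//
+//	acls, err := svc.ObjectAccessControls.List("my-bucket", "my-object").
+//		Fields("items(entity,role)").Context(ctx).Do()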
+func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectAccessControlsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+		"object": c.object,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.objectAccessControls.list" call.
+// Exactly one of *ObjectAccessControls or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControls.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.list", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.patch": + +type ObjectAccessControlsPatchCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an ACL entry on the specified object. +func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { + c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
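+//
+// A sketch of a patch (illustrative only; with PATCH semantics only the
+// fields set on the request body change, and the names are placeholders):
+//
+//	patch := &ObjectAccessControl{Role: "OWNER"}
+//	got, err := svc.ObjectAccessControls.Patch("my-bucket", "my-object",
+//		"user-jane@example.com", patch).Context(ctx).Do()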
+func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.patch" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches an ACL entry on the specified object.", + // "httpMethod": "PATCH", + // "id": "storage.objectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.update": + +type ObjectAccessControlsUpdateCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an ACL entry on the specified object. +func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { + c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
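+//
+// For example (an illustrative sketch; the header name is a placeholder,
+// and acl, svc, and ctx are assumed to exist):
+//
+//	call := svc.ObjectAccessControls.Update("my-bucket", "my-object", "allUsers", acl)
+//	call.Header().Set("X-Goog-Custom", "some-value")
+//	got, err := call.Context(ctx).Do()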
+func (c *ObjectAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.compose": + +type ObjectsComposeCall struct { + s *Service + destinationBucket string + destinationObject string + composerequest *ComposeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Compose: Concatenates a list of existing objects into a new object in +// the same bucket. +func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { + c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.composerequest = composerequest + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of +// the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. +func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
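+//
+// A compose sketch (illustrative only; source and destination names and the
+// project ID are placeholders, svc and ctx are assumed to exist):
+//
+//	req := &ComposeRequest{SourceObjects: []*ComposeRequestSourceObjects{
+//		{Name: "part-1"},
+//		{Name: "part-2"},
+//	}}
+//	obj, err := svc.Objects.Compose("my-bucket", "combined", req).
+//		UserProject("my-billing-project").Context(ctx).Do()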
+func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsComposeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.compose" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Concatenates a list of existing objects into a new object in the same bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.compose", + // "parameterOrder": [ + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "kmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{destinationBucket}/o/{destinationObject}/compose", + // "request": { + // "$ref": "ComposeRequest" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.copy": + +type ObjectsCopyCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Copy: Copies a source object to a destination object. Optionally +// overrides metadata. +func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { + c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. 
+// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the destination object's +// current generation matches the given value. Setting to 0 makes the +// operation succeed only if there are no live versions of the object. +func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the destination object's current generation does not match the given +// value. If no live object exists, the precondition fails. Setting to 0 +// makes the operation succeed only if there is a live version of the +// object. +func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's current generation matches the given value. +func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's current generation does not match the +// given value. 
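+//
+// A sketch of a conditional copy (illustrative only; srcGen would be a
+// generation captured from an earlier call, svc and ctx are assumed):
+//
+//	obj, err := svc.Objects.Copy("src-bucket", "src.txt", "dst-bucket",
+//		"dst.txt", &Object{}).IfSourceGenerationMatch(srcGen).
+//		Context(ctx).Do()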
+func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ObjectsCopyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.copy" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Copies a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.copy", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.delete": + +type ObjectsDeleteCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an object and its metadata. Deletions are permanent +// if versioning is not enabled for the bucket, or if the generation +// parameter is used. +func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { + c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// permanently deletes a specific revision of this object (as opposed to +// the latest version, the default). +func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. 
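+//
+// For illustration only (the service value, names, and the
+// metageneration are assumed), a guarded delete could be built as:
+//
+//	err := svc.Objects.Delete("my-bucket", "notes.txt").IfMetagenerationMatch(3).Do()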
+func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.delete" call. +func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + // "httpMethod": "DELETE", + // "id": "storage.objects.delete", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.get": + +type ObjectsGetCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves an object or its metadata. +func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { + c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. 
Setting to 0 makes the operation
+// succeed only if there are no live versions of the object.
+func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall {
+	c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
+	return c
+}
+
+// IfGenerationNotMatch sets the optional parameter
+// "ifGenerationNotMatch": Makes the operation conditional on whether
+// the object's current generation does not match the given value. If no
+// live object exists, the precondition fails. Setting to 0 makes the
+// operation succeed only if there is a live version of the object.
+func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall {
+	c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
+	return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the operation conditional on whether
+// the object's current metageneration matches the given value.
+func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall {
+	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+	return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the operation conditional on
+// whether the object's current metageneration does not match the given
+// value.
+func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall {
+	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+	return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit the owner, acl property.
+func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
+	c.urlParams_.Set("projection", projection)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do and Download
+// methods. Any pending HTTP request will be aborted if the provided
+// context is canceled.
+func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
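+//
+// For example (the header name is purely illustrative):
+//
+//	call.Header().Set("X-Example-Debug", "1")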
+func (c *ObjectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.get" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an object or its metadata.", + // "httpMethod": "GET", + // "id": "storage.objects.get", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.getIamPolicy": + +type ObjectsGetIamPolicyCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Returns an IAM policy for the specified object. +func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { + c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. 
See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsGetIamPolicyCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+		"object": c.object,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.objects.getIamPolicy" call.
+// Exactly one of *Policy or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Policy.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
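+	// A 304 Not Modified response (possible when IfNoneMatch is set) is
+	// surfaced below as a *googleapi.Error so callers can detect it with
+	// googleapi.IsNotModified.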
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns an IAM policy for the specified object.", + // "httpMethod": "GET", + // "id": "storage.objects.getIamPolicy", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/iam", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.insert": + +type ObjectsInsertCall struct { + s *Service + bucket string + object *Object + urlParams_ gensupport.URLParams + mediaInfo_ *gensupport.MediaInfo + ctx_ context.Context + header_ http.Header +} + +// Insert: Stores a new object and metadata. +func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { + c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// ContentEncoding sets the optional parameter "contentEncoding": If +// set, sets the contentEncoding property of the final object to this +// value. Setting this parameter is equivalent to setting the +// contentEncoding metadata property. This can be useful when uploading +// an object with uploadType=media to indicate the encoding of the +// content being uploaded. +func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { + c.urlParams_.Set("contentEncoding", contentEncoding) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. 
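+//
+// For example, setting it to 0 turns the insert into a
+// create-only-if-absent operation (svc and obj are assumed):
+//
+//	call := svc.Objects.Insert("my-bucket", obj).IfGenerationMatch(0)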
+func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of +// the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. Limited availability; usable +// only by enabled projects. +func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + +// Name sets the optional parameter "name": Name of the object. Required +// when the object metadata is not otherwise provided. Overrides the +// object metadata's name value, if any. For information about how to +// URL encode object names to be path safe, see Encoding URI Path Parts. +func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { + c.urlParams_.Set("name", name) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. 
+// "noAcl" - Omit the owner, acl property. +func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Media specifies the media to upload in one or more chunks. The chunk +// size may be controlled by supplying a MediaOption generated by +// googleapi.ChunkSize. The chunk size defaults to +// googleapi.DefaultUploadChunkSize.The Content-Type header used in the +// upload request will be determined by sniffing the contents of r, +// unless a MediaOption generated by googleapi.ContentType is +// supplied. +// At most one of Media and ResumableMedia may be set. +func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { + if ct := c.object.ContentType; ct != "" { + options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...) + } + c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options) + return c +} + +// ResumableMedia specifies the media to upload in chunks and can be +// canceled with ctx. +// +// Deprecated: use Media instead. +// +// At most one of Media and ResumableMedia may be set. mediaType +// identifies the MIME media type of the upload, such as "image/png". If +// mediaType is "", it will be auto-detected. The provided ctx will +// supersede any context previously provided to the Context method. +func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { + c.ctx_ = ctx + c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) + return c +} + +// ProgressUpdater provides a callback function that will be called +// after every chunk. It should be a low-latency function in order to +// not slow down the upload operation. This should only be called when +// using ResumableMedia (as opposed to Media). +func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { + c.mediaInfo_.SetProgressUpdater(pu) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +// This context will supersede any context previously provided to the +// ResumableMedia method. +func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ObjectsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + if c.mediaInfo_ != nil { + urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) + c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) + } + if body == nil { + body = new(bytes.Buffer) + reqHeaders.Set("Content-Type", "application/json") + } + body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) + defer cleanup() + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + gensupport.SetGetBody(req, getBody) + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.insert" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) + if rx != nil { + rx.Client = c.s.client + rx.UserAgent = c.s.userAgent() + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() + } + res, err = rx.Upload(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stores a new object and metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.insert", + // "mediaUpload": { + // "accept": [ + // "*/*" + // ], + // "protocols": { + // "resumable": { + // "multipart": true, + // "path": "/resumable/upload/storage/v1/b/{bucket}/o" + // }, + // "simple": { + // "multipart": true, + // "path": "/upload/storage/v1/b/{bucket}/o" + // } + // } + // }, + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to store the new object. 
Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "contentEncoding": { + // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "kmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any. Limited availability; usable only by enabled projects.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. 
Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaUpload": true + // } + +} + +// method id "storage.objects.list": + +type ObjectsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of objects matching the criteria. +func (r *ObjectsService) List(bucket string) *ObjectsListCall { + c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return in a single page of responses. As +// duplicate prefixes are omitted, fewer total results may be returned +// than requested. The service will use this parameter or 1,000 items, +// whichever is smaller. +func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. +func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
+func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Versions sets the optional parameter "versions": If true, lists all
+// versions of an object as distinct results. The default is false. For
+// more information, see Object Versioning.
+func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall {
+	c.urlParams_.Set("versions", fmt.Sprint(versions))
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.objects.list" call.
+// Exactly one of *Objects or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Objects.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
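+	// Do fetches a single page of results; use the Pages method defined
+	// below to iterate over the complete listing.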
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Objects{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of objects matching the criteria.", + // "httpMethod": "GET", + // "id": "storage.objects.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "b/{bucket}/o", + // "response": { + // "$ref": "Objects" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsSubscription": true + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
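+//
+// A sketch of paging through a bucket, assuming svc is the *Service for
+// this package (other names are illustrative):
+//
+//	err := svc.Objects.List("my-bucket").Pages(ctx, func(objs *Objects) error {
+//		for _, o := range objs.Items {
+//			fmt.Println(o.Name)
+//		}
+//		return nil
+//	})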
+func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.objects.patch": + +type ObjectsPatchCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an object's metadata. +func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { + c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. 
+// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request, for Requester Pays buckets. +func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.patch" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches an object's metadata.", + // "httpMethod": "PATCH", + // "id": "storage.objects.patch", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. 
Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.rewrite": + +type ObjectsRewriteCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Rewrite: Rewrites a source object to a destination object. Optionally +// overrides metadata. +func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { + c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationKmsKeyName sets the optional parameter +// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the +// form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. +func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. 
+func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's current generation matches the given value. +func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's current generation does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// MaxBytesRewrittenPerCall sets the optional parameter +// "maxBytesRewrittenPerCall": The maximum number of bytes that will be +// rewritten per rewrite request. Most callers shouldn't need to specify +// this parameter - it is primarily in place to support testing. 
If +// specified the value must be an integral multiple of 1 MiB (1048576). +// Also, this only applies to requests where the source and destination +// span locations and/or storage classes. Finally, this value must not +// change across rewrite calls else you'll get an error that the +// rewriteToken is invalid. +func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { + c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { + c.urlParams_.Set("projection", projection) + return c +} + +// RewriteToken sets the optional parameter "rewriteToken": Include this +// field (from the previous rewrite response) on each rewrite request +// after the first one, until the rewrite response 'done' flag is true. +// Calls that provide a rewriteToken can omit all other request fields, +// but if included those fields must match the values provided in the +// first rewrite request. +func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { + c.urlParams_.Set("rewriteToken", rewriteToken) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ObjectsRewriteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.rewrite" call. +// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RewriteResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RewriteResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.rewrite", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationKmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "maxBytesRewrittenPerCall": { + // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). 
Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "rewriteToken": { + // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "RewriteResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.setIamPolicy": + +type ObjectsSetIamPolicyCall struct { + s *Service + bucket string + object string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Updates an IAM policy for the specified object. +func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { + c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.policy = policy + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
+func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an IAM policy for the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objects.setIamPolicy", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/iam", + // "request": { + // "$ref": "Policy" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.testIamPermissions": + +type ObjectsTestIamPermissionsCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Tests a set of permissions on the given object to +// see which, if any, are held by the caller. +func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { + c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. 
See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsTestIamPermissionsCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+		"object": c.object,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.objects.testIamPermissions" call.
+// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
+// was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", + // "httpMethod": "GET", + // "id": "storage.objects.testIamPermissions", + // "parameterOrder": [ + // "bucket", + // "object", + // "permissions" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissions": { + // "description": "Permissions to test.", + // "location": "query", + // "repeated": true, + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/iam/testPermissions", + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.update": + +type ObjectsUpdateCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an object's metadata. +func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { + c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. 
+func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.update" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an object's metadata.", + // "httpMethod": "PUT", + // "id": "storage.objects.update", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.watchAll": + +type ObjectsWatchAllCall struct { + s *Service + bucket string + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// WatchAll: Watch for changes on all objects in a bucket. +func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { + c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.channel = channel + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. 
+func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return in a single page of responses. As +// duplicate prefixes are omitted, fewer total results may be returned +// than requested. The service will use this parameter or 1,000 items, +// whichever is smaller. +func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. +func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsWatchAllCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.watchAll" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Channel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Watch for changes on all objects in a bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.watchAll", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.",
+	//       "location": "query",
+	//       "type": "boolean"
+	//     }
+	//   },
+	//   "path": "b/{bucket}/o/watch",
+	//   "request": {
+	//     "$ref": "Channel",
+	//     "parameterName": "resource"
+	//   },
+	//   "response": {
+	//     "$ref": "Channel"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
+	//     "https://www.googleapis.com/auth/devstorage.full_control",
+	//     "https://www.googleapis.com/auth/devstorage.read_only",
+	//     "https://www.googleapis.com/auth/devstorage.read_write"
+	//   ],
+	//   "supportsSubscription": true
+	// }
+
+}
+
+// method id "storage.projects.serviceAccount.get":
+
+type ProjectsServiceAccountGetCall struct {
+	s            *Service
+	projectId    string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// Get: Get the email address of this project's Google Cloud Storage
+// service account.
+func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAccountGetCall {
+	c := &ProjectsServiceAccountGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.projectId = projectId
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request.
+func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsServiceAccountGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsServiceAccountGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsServiceAccountGetCall) Context(ctx context.Context) *ProjectsServiceAccountGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ProjectsServiceAccountGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount")
+	urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.serviceAccount.get" call. +// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ServiceAccount.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ServiceAccount{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Get the email address of this project's Google Cloud Storage service account.", + // "httpMethod": "GET", + // "id": "storage.projects.serviceAccount.get", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "projectId": { + // "description": "Project ID", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/serviceAccount", + // "response": { + // "$ref": "ServiceAccount" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 7f7d5ae68..0ae028187 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1568,10 +1568,10 @@ "revisionTime": "2017-07-07T17:19:04Z" }, { - "checksumSHA1": "gvrxuXnqGhfzY0O3MFbS8XhMH/k=", + "checksumSHA1": "EooPqEpEyY/7NCRwHDMWhhlkQNw=", "path": "google.golang.org/api/gensupport", - "revision": "e665075b5ff79143ba49c58fab02df9dc122afd5", - "revisionTime": "2017-07-09T10:32:00Z" + "revision": "9c79deebf7496e355d7e95d82d4af1fe4e769b2f", + "revisionTime": "2018-04-16T00:04:00Z" }, { "checksumSHA1": "yQREK/OWrz9PLljbr127+xFk6J0=", @@ -1583,6 +1583,12 @@ "path": "google.golang.org/api/googleapi/internal/uritemplates", "revision": "ff0a1ff302946b997eb1832381419d1f95143483" }, + { + "checksumSHA1": "Zpu9YB1omKr0VhFb8iycN1u+42Y=", + "path": "google.golang.org/api/storage/v1", + "revision": "9c79deebf7496e355d7e95d82d4af1fe4e769b2f", + "revisionTime": "2018-04-16T00:04:00Z" + }, { "checksumSHA1": "SSYsrizGeHQRKn/S7j5CQu86egU=", "path": "google.golang.org/appengine", diff --git 
a/website/source/docs/post-processors/googlecompute-import.html.md b/website/source/docs/post-processors/googlecompute-import.html.md
new file mode 100644
index 000000000..8f6c4f825
--- /dev/null
+++ b/website/source/docs/post-processors/googlecompute-import.html.md
@@ -0,0 +1,145 @@
+---
+description: |
+    The Google Compute Image Import post-processor takes a compressed raw disk
+    image and imports it to a GCE image available to Google Compute Engine.
+
+layout: docs
+page_title: 'Google Compute Image Import - Post-Processors'
+sidebar_current: 'docs-post-processors-googlecompute-import'
+---
+
+# Google Compute Image Import Post-Processor
+
+Type: `googlecompute-import`
+
+The Google Compute Image Import post-processor takes a compressed raw disk
+image and imports it to a GCE image available to Google Compute Engine.
+
+~> This post-processor is for advanced users. Please ensure you read the [GCE import documentation](https://cloud.google.com/compute/docs/images/import-existing-image) before using this post-processor.
+
+## How Does it Work?
+
+The import process operates by uploading a temporary copy of the compressed raw disk image
+to a GCS bucket, and calling an import task in GCP on the raw disk file. Once completed, a
+GCE image is created containing the converted virtual machine. The temporary raw disk image
+copy in GCS can be discarded after the import is complete.
+
+Google Cloud has very specific requirements for images being imported. Please see the
+[GCE import documentation](https://cloud.google.com/compute/docs/images/import-existing-image)
+for details.
+
+## Configuration
+
+### Required
+
+- `account_file` (string) - The JSON file containing your account credentials.
+
+- `bucket` (string) - The name of the GCS bucket where the raw disk image
+    will be uploaded.
+
+- `image_name` (string) - The unique name of the resulting image.
+
+- `project_id` (string) - The project ID where the GCS bucket exists and
+    where the GCE image is stored.
+
+### Optional
+
+- `gcs_object_name` (string) - The name of the GCS object in `bucket` where the raw disk image will be copied for import. Defaults to "packer-import-{{timestamp}}.tar.gz".
+
+- `image_description` (string) - The description of the resulting image.
+
+- `image_family` (string) - The name of the image family to which the resulting image belongs.
+
+- `image_labels` (object of key/value strings) - Key/value pair labels to apply to the created image.
+
+- `keep_input_artifact` (boolean) - If true, do not delete the compressed raw disk image. Defaults to false.
+
+## Basic Example
+
+Here is a basic example. This assumes that the builder has produced a compressed
+raw disk image artifact for us to work with, and that the GCS bucket has been created.
+
+``` json
+{
+    "type": "googlecompute-import",
+    "account_file": "account.json",
+    "project_id": "my-project",
+    "bucket": "my-bucket",
+    "image_name": "my-gce-image"
+}
+```
+
+## QEMU Builder Example
+
+Here is a complete example for building a Fedora 28 server GCE image. For this example,
+Packer was run from a CentOS 7 server with KVM installed. The CentOS 7 server was running
+in GCE with the nested hypervisor feature enabled.
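+Nested virtualization is what lets the CentOS 7 build server, itself a GCE VM,
+expose KVM to QEMU. Before running the build it is worth confirming that KVM is
+actually available inside that VM. The commands below are ordinary Linux checks,
+shown as an illustrative sanity test rather than part of the Packer template:
+
+```
+# Non-zero output means hardware virtualization is visible to the guest:
+$ grep -cE 'vmx|svm' /proc/cpuinfo
+# The KVM device must exist for "accelerator": "kvm" to work:
+$ ls -l /dev/kvm
+```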
+ +``` +$ packer build -var serial=$(tty) build.json +``` + +``` json +{ + "variables": { + "serial": "" + }, + "builders": [ + { + "type": "qemu", + "accelerator": "kvm", + "communicator": "none", + "boot_command": [" console=ttyS0,115200n8 inst.text inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/fedora-28-ks.cfg rd.live.check=0"], + "disk_size": "15000", + "format": "raw", + "iso_checksum_type": "sha256", + "iso_checksum": "ea1efdc692356b3346326f82e2f468903e8da59324fdee8b10eac4fea83f23fe", + "iso_url": "https://download-ib01.fedoraproject.org/pub/fedora/linux/releases/28/Server/x86_64/iso/Fedora-Server-netinst-x86_64-28-1.1.iso", + "headless": "true", + "http_directory": "http", + "http_port_max": "10089", + "http_port_min": "10082", + "output_directory": "output", + "shutdown_timeout": "30m", + "vm_name": "disk.raw", + "qemu_binary": "/usr/libexec/qemu-kvm", + "qemuargs": [ + [ + "-m", "1024" + ], + [ + "-cpu", "host" + ], + [ + "-chardev", "tty,id=pts,path={{user `serial`}}" + ], + [ + "-device", "isa-serial,chardev=pts" + ], + [ + "-device", "virtio-net,netdev=user.0" + ] + ] + } + ], + "post-processors": [ + [ + { + "type": "compress", + "output": "output/disk.raw.tar.gz" + }, + { + "type": "googlecompute-import", + "project_id": "my-project", + "account_file": "account.json", + "bucket": "my-bucket", + "image_name": "fedora28-server-{{timestamp}}", + "image_description": "Fedora 28 Server", + "image_family": "fedora28-server" + } + ] + ] +} +``` From 8715bfbf70c1ad8a4ca21360e9bfef35dc9a363f Mon Sep 17 00:00:00 2001 From: Adam Robinson Date: Tue, 26 Jun 2018 14:06:37 -0400 Subject: [PATCH 138/138] set all tar timestamp fields to the zero date --- post-processor/compress/tar_fix_go110.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/post-processor/compress/tar_fix_go110.go b/post-processor/compress/tar_fix_go110.go index 016b6b656..be8456321 100644 --- a/post-processor/compress/tar_fix_go110.go +++ b/post-processor/compress/tar_fix_go110.go @@ -2,10 +2,16 @@ package compress -import "archive/tar" +import ( + "archive/tar" + "time" +) func setHeaderFormat(header *tar.Header) { // We have to set the Format explicitly for the googlecompute-import // post-processor. Google Cloud only allows importing GNU tar format. header.Format = tar.FormatGNU + header.AccessTime = time.Time{} + header.ModTime = time.Time{} + header.ChangeTime = time.Time{} }
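
The zeroed timestamps complement the forced GNU format: Go 1.10's `archive/tar`
can only represent a non-zero `AccessTime` or `ChangeTime` by emitting PAX
records or GNU-specific extension fields, and (as the comment in
`setHeaderFormat` notes) Google Cloud only accepts GNU-format tars for import.
Below is a minimal, self-contained sketch of writing an archive the same way
the patched post-processor does; the file name `disk.raw` follows the docs
above, while the placeholder contents and output path are illustrative only,
not taken from the patch.

``` go
package main

import (
	"archive/tar"
	"compress/gzip"
	"log"
	"os"
	"time"
)

func main() {
	// Placeholder bytes standing in for a real raw disk image.
	data := []byte("raw disk contents")

	out, err := os.Create("disk.raw.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	gz := gzip.NewWriter(out)
	defer gz.Close()
	tw := tar.NewWriter(gz)
	defer tw.Close()

	hdr := &tar.Header{Name: "disk.raw", Mode: 0644, Size: int64(len(data))}
	// Mirror setHeaderFormat: force the GNU format and zero every
	// timestamp field so the writer never falls back to PAX records
	// or GNU time extensions that the GCE importer may reject.
	hdr.Format = tar.FormatGNU
	hdr.AccessTime = time.Time{}
	hdr.ModTime = time.Time{}
	hdr.ChangeTime = time.Time{}

	if err := tw.WriteHeader(hdr); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(data); err != nil {
		log.Fatal(err)
	}
}
```

The deferred `Close` calls run in reverse order (tar, then gzip, then the
file), which is what lets the gzip stream be finalized correctly before the
output file is closed.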