updated vendor for aws
This commit is contained in:
parent
4d79f3973c
commit
5753f3dc43
25
Gopkg.lock
generated
25
Gopkg.lock
generated
@ -7,6 +7,12 @@
|
|||||||
revision = "bbdbe644099b7fdc8327d5cc69c030945188b2e9"
|
revision = "bbdbe644099b7fdc8327d5cc69c030945188b2e9"
|
||||||
version = "v1.13.0"
|
version = "v1.13.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/aws/aws-sdk-go"
|
||||||
|
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/xml/xmlutil","service/sqs","service/sts"]
|
||||||
|
revision = "da415b5fa0ff3f91d4707348a8ea1be53f700c22"
|
||||||
|
version = "v1.12.6"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
name = "github.com/davecgh/go-spew"
|
name = "github.com/davecgh/go-spew"
|
||||||
packages = ["spew"]
|
packages = ["spew"]
|
||||||
@ -43,6 +49,12 @@
|
|||||||
revision = "433969511232c397de61b1442f9fd49ec06ae9ba"
|
revision = "433969511232c397de61b1442f9fd49ec06ae9ba"
|
||||||
version = "v1.1.0"
|
version = "v1.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/go-ini/ini"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "20b96f641a5ea98f2f8619ff4f3e061cff4833bd"
|
||||||
|
version = "v1.28.2"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
branch = "master"
|
branch = "master"
|
||||||
name = "github.com/golang/protobuf"
|
name = "github.com/golang/protobuf"
|
||||||
@ -55,6 +67,11 @@
|
|||||||
packages = ["."]
|
packages = ["."]
|
||||||
revision = "553a641470496b2327abcac10b36396bd98e45c9"
|
revision = "553a641470496b2327abcac10b36396bd98e45c9"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/jmespath/go-jmespath"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "0b12d6b5"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
branch = "master"
|
branch = "master"
|
||||||
name = "github.com/peterh/liner"
|
name = "github.com/peterh/liner"
|
||||||
@ -163,6 +180,12 @@
|
|||||||
packages = ["."]
|
packages = ["."]
|
||||||
revision = "1feaf062ef04a231c9126f99a68eaa579fd0e390"
|
revision = "1feaf062ef04a231c9126f99a68eaa579fd0e390"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/yuin/gopher-lua"
|
||||||
|
packages = [".","ast","parse","pm"]
|
||||||
|
revision = "eb1c7299435cc746b72514f37f74a5154dfe460f"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
branch = "master"
|
branch = "master"
|
||||||
name = "golang.org/x/crypto"
|
name = "golang.org/x/crypto"
|
||||||
@ -202,6 +225,6 @@
|
|||||||
[solve-meta]
|
[solve-meta]
|
||||||
analyzer-name = "dep"
|
analyzer-name = "dep"
|
||||||
analyzer-version = 1
|
analyzer-version = 1
|
||||||
inputs-digest = "03a37ac805762eaebe7dbf8d7b1a079894ff6b0f67f4d2f38d3b9348dd5af40f"
|
inputs-digest = "4766bb1bebb736256ed12e864e0810ebf21e26e9fee09fa1884cda8072982155"
|
||||||
solver-name = "gps-cdcl"
|
solver-name = "gps-cdcl"
|
||||||
solver-version = 1
|
solver-version = 1
|
||||||
|
14
vendor/github.com/aws/aws-sdk-go/.github/ISSUE_TEMPLATE.md
generated
vendored
Normal file
14
vendor/github.com/aws/aws-sdk-go/.github/ISSUE_TEMPLATE.md
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
Please fill out the sections below to help us address your issue.
|
||||||
|
|
||||||
|
### Version of AWS SDK for Go?
|
||||||
|
|
||||||
|
|
||||||
|
### Version of Go (`go version`)?
|
||||||
|
|
||||||
|
|
||||||
|
### What issue did you see?
|
||||||
|
|
||||||
|
### Steps to reproduce
|
||||||
|
|
||||||
|
If you have have an runnable example, please include it.
|
||||||
|
|
3
vendor/github.com/aws/aws-sdk-go/.github/PULL_REQUEST_TEMPLATE.md
generated
vendored
Normal file
3
vendor/github.com/aws/aws-sdk-go/.github/PULL_REQUEST_TEMPLATE.md
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
For changes to files under the `/model/` folder, and manual edits to autogenerated code (e.g. `/service/s3/api.go`) please create an Issue instead of a PR for those type of changes.
|
||||||
|
|
||||||
|
If there is an existing bug or feature this PR is answers please reference it here.
|
11
vendor/github.com/aws/aws-sdk-go/.gitignore
generated
vendored
Normal file
11
vendor/github.com/aws/aws-sdk-go/.gitignore
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
dist
|
||||||
|
/doc
|
||||||
|
/doc-staging
|
||||||
|
.yardoc
|
||||||
|
Gemfile.lock
|
||||||
|
awstesting/integration/smoke/**/importmarker__.go
|
||||||
|
awstesting/integration/smoke/_test/
|
||||||
|
/vendor/bin/
|
||||||
|
/vendor/pkg/
|
||||||
|
/vendor/src/
|
||||||
|
/private/model/cli/gen-api/gen-api
|
14
vendor/github.com/aws/aws-sdk-go/.godoc_config
generated
vendored
Normal file
14
vendor/github.com/aws/aws-sdk-go/.godoc_config
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"PkgHandler": {
|
||||||
|
"Pattern": "/sdk-for-go/api/",
|
||||||
|
"StripPrefix": "/sdk-for-go/api",
|
||||||
|
"Include": ["/src/github.com/aws/aws-sdk-go/aws", "/src/github.com/aws/aws-sdk-go/service"],
|
||||||
|
"Exclude": ["/src/cmd", "/src/github.com/aws/aws-sdk-go/awstesting", "/src/github.com/aws/aws-sdk-go/awsmigrate"],
|
||||||
|
"IgnoredSuffixes": ["iface"]
|
||||||
|
},
|
||||||
|
"Github": {
|
||||||
|
"Tag": "master",
|
||||||
|
"Repo": "/aws/aws-sdk-go",
|
||||||
|
"UseGithub": true
|
||||||
|
}
|
||||||
|
}
|
25
vendor/github.com/aws/aws-sdk-go/.travis.yml
generated
vendored
Normal file
25
vendor/github.com/aws/aws-sdk-go/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
language: go
|
||||||
|
|
||||||
|
sudo: required
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.5
|
||||||
|
- 1.6
|
||||||
|
- 1.7
|
||||||
|
- 1.8
|
||||||
|
- 1.9
|
||||||
|
- tip
|
||||||
|
|
||||||
|
# Use Go 1.5's vendoring experiment for 1.5 tests.
|
||||||
|
env:
|
||||||
|
- GO15VENDOREXPERIMENT=1
|
||||||
|
|
||||||
|
install:
|
||||||
|
- make get-deps
|
||||||
|
|
||||||
|
script:
|
||||||
|
- make unit-with-race-cover
|
||||||
|
|
||||||
|
matrix:
|
||||||
|
allow_failures:
|
||||||
|
- go: tip
|
133
vendor/github.com/aws/aws-sdk-go/CHANGELOG.md
generated
vendored
133
vendor/github.com/aws/aws-sdk-go/CHANGELOG.md
generated
vendored
@ -1,3 +1,136 @@
|
|||||||
|
Release v1.12.6 (2017-10-05)
|
||||||
|
===
|
||||||
|
|
||||||
|
### Service Client Updates
|
||||||
|
* `service/redshift`: Updates service API and documentation
|
||||||
|
* DescribeEventSubscriptions API supports tag keys and tag values as request parameters.
|
||||||
|
|
||||||
|
Release v1.12.5 (2017-10-04)
|
||||||
|
===
|
||||||
|
|
||||||
|
### Service Client Updates
|
||||||
|
* `service/kinesisanalytics`: Updates service API and documentation
|
||||||
|
* Kinesis Analytics now supports schema discovery on objects in S3. Additionally, Kinesis Analytics now supports input data preprocessing through Lambda.
|
||||||
|
* `service/route53domains`: Updates service API and documentation
|
||||||
|
* Added a new API that checks whether a domain name can be transferred to Amazon Route 53.
|
||||||
|
|
||||||
|
### SDK Bugs
|
||||||
|
* `service/s3/s3crypto`: Correct PutObjectRequest documentation ([#1568](https://github.com/aws/aws-sdk-go/pull/1568))
|
||||||
|
* s3Crypto's PutObjectRequest docstring example was using an incorrect value. Corrected the type used in the example.
|
||||||
|
Release v1.12.4 (2017-10-03)
|
||||||
|
===
|
||||||
|
|
||||||
|
### Service Client Updates
|
||||||
|
* `service/ec2`: Updates service API, documentation, and waiters
|
||||||
|
* This release includes service updates to AWS VPN.
|
||||||
|
* `service/ssm`: Updates service API and documentation
|
||||||
|
* EC2 Systems Manager support for tagging SSM Documents. Also support for tag-based permissions to restrict access to SSM Documents based on these tags.
|
||||||
|
|
||||||
|
Release v1.12.3 (2017-10-02)
|
||||||
|
===
|
||||||
|
|
||||||
|
### Service Client Updates
|
||||||
|
* `service/cloudhsm`: Updates service documentation and paginators
|
||||||
|
* Documentation updates for CloudHSM
|
||||||
|
|
||||||
|
Release v1.12.2 (2017-09-29)
|
||||||
|
===
|
||||||
|
|
||||||
|
### Service Client Updates
|
||||||
|
* `service/appstream`: Updates service API and documentation
|
||||||
|
* Includes APIs for managing and accessing image builders, and deleting images.
|
||||||
|
* `service/codebuild`: Updates service API and documentation
|
||||||
|
* Adding support for Building GitHub Pull Requests in AWS CodeBuild
|
||||||
|
* `service/mturk-requester`: Updates service API and documentation
|
||||||
|
* `service/organizations`: Updates service API and documentation
|
||||||
|
* This release flags the HandshakeParty structure's Type and Id fields as 'required'. They effectively were required in the past, as you received an error if you did not include them. This is now reflected at the API definition level.
|
||||||
|
* `service/route53`: Updates service API and documentation
|
||||||
|
* This change allows customers to reset elements of health check.
|
||||||
|
|
||||||
|
### SDK Bugs
|
||||||
|
* `private/protocol/query`: Fix query protocol handling of nested byte slices ([#1557](https://github.com/aws/aws-sdk-go/issues/1557))
|
||||||
|
* Fixes the query protocol to correctly marshal nested []byte values of API operations.
|
||||||
|
* `service/s3`: Fix PutObject and UploadPart API to include ContentMD5 field ([#1559](https://github.com/aws/aws-sdk-go/pull/1559))
|
||||||
|
* Fixes the SDK's S3 PutObject and UploadPart API code generation to correctly render the ContentMD5 field into the associated input types for these two API operations.
|
||||||
|
* Fixes [#1553](https://github.com/aws/aws-sdk-go/pull/1553)
|
||||||
|
Release v1.12.1 (2017-09-27)
|
||||||
|
===
|
||||||
|
|
||||||
|
### Service Client Updates
|
||||||
|
* `aws/endpoints`: Updated Regions and Endpoints metadata.
|
||||||
|
* `service/pinpoint`: Updates service API and documentation
|
||||||
|
* Added two new push notification channels: Amazon Device Messaging (ADM) and, for push notification support in China, Baidu Cloud Push. Added support for APNs auth via .p8 key file. Added operation for direct message deliveries to user IDs, enabling you to message an individual user on multiple endpoints.
|
||||||
|
|
||||||
|
Release v1.12.0 (2017-09-26)
|
||||||
|
===
|
||||||
|
|
||||||
|
### SDK Bugs
|
||||||
|
* `API Marshaler`: Revert REST JSON and XML protocol marshaler improvements
|
||||||
|
* Bug [#1550](https://github.com/aws/aws-sdk-go/issues/1550) identified a missed condition in the Amazon Route 53 RESTXML protocol marshaling causing requests to that service to fail. Reverting the marshaler improvements until the bug can be fixed.
|
||||||
|
|
||||||
|
Release v1.11.0 (2017-09-26)
|
||||||
|
===
|
||||||
|
|
||||||
|
### Service Client Updates
|
||||||
|
* `service/cloudformation`: Updates service API and documentation
|
||||||
|
* You can now prevent a stack from being accidentally deleted by enabling termination protection on the stack. If you attempt to delete a stack with termination protection enabled, the deletion fails and the stack, including its status, remains unchanged. You can enable termination protection on a stack when you create it. Termination protection on stacks is disabled by default. After creation, you can set termination protection on a stack whose status is CREATE_COMPLETE, UPDATE_COMPLETE, or UPDATE_ROLLBACK_COMPLETE.
|
||||||
|
|
||||||
|
### SDK Features
|
||||||
|
* Add dep Go dependency management metadata files (#1544)
|
||||||
|
* Adds the Go `dep` dependency management metadata files to the SDK.
|
||||||
|
* Fixes [#1451](https://github.com/aws/aws-sdk-go/issues/1451)
|
||||||
|
* Fixes [#634](https://github.com/aws/aws-sdk-go/issues/634)
|
||||||
|
* `service/dynamodb/expression`: Add expression building utility for DynamoDB ([#1527](https://github.com/aws/aws-sdk-go/pull/1527))
|
||||||
|
* Adds a new package, expression, to the SDK providing builder utilities to create DynamoDB expressions safely taking advantage of type safety.
|
||||||
|
* `API Marshaler`: Add generated marshalers for RESTXML protocol ([#1409](https://github.com/aws/aws-sdk-go/pull/1409))
|
||||||
|
* Updates the RESTXML protocol marshaler to use generated code instead of reflection for REST XML based services.
|
||||||
|
* `API Marshaler`: Add generated marshalers for RESTJSON protocol ([#1547](https://github.com/aws/aws-sdk-go/pull/1547))
|
||||||
|
* Updates the RESTJSON protocol marshaler to use generated code instead of reflection for REST JSON based services.
|
||||||
|
|
||||||
|
### SDK Enhancements
|
||||||
|
* `private/protocol`: Update format of REST JSON and XMl benchmarks ([#1546](https://github.com/aws/aws-sdk-go/pull/1546))
|
||||||
|
* Updates the format of the REST JSON and XML benchmarks to be readable. RESTJSON benchmarks were updated to more accurately bench building of the protocol.
|
||||||
|
|
||||||
|
Release v1.10.51 (2017-09-22)
|
||||||
|
===
|
||||||
|
|
||||||
|
### Service Client Updates
|
||||||
|
* `service/config`: Updates service API and documentation
|
||||||
|
* `service/ecs`: Updates service API and documentation
|
||||||
|
* Amazon ECS users can now add and drop Linux capabilities to their containers through the use of docker's cap-add and cap-drop features. Customers can specify the capabilities they wish to add or drop for each container in their task definition.
|
||||||
|
* `aws/endpoints`: Updated Regions and Endpoints metadata.
|
||||||
|
* `service/rds`: Updates service documentation
|
||||||
|
* Documentation updates for rds
|
||||||
|
|
||||||
|
Release v1.10.50 (2017-09-21)
|
||||||
|
===
|
||||||
|
|
||||||
|
### Service Client Updates
|
||||||
|
* `service/budgets`: Updates service API
|
||||||
|
* Including "DuplicateRecordException" in UpdateNotification and UpdateSubscriber.
|
||||||
|
* `service/ec2`: Updates service API and documentation
|
||||||
|
* Add EC2 APIs to copy Amazon FPGA Images (AFIs) within the same region and across multiple regions, delete AFIs, and modify AFI attributes. AFI attributes include name, description and granting/denying other AWS accounts to load the AFI.
|
||||||
|
* `service/logs`: Updates service API and documentation
|
||||||
|
* Adds support for associating LogGroups with KMS Keys.
|
||||||
|
|
||||||
|
### SDK Bugs
|
||||||
|
* Fix greengrass service model being duplicated with different casing. ([#1541](https://github.com/aws/aws-sdk-go/pull/1541))
|
||||||
|
* Fixes [#1540](https://github.com/aws/aws-sdk-go/issues/1540)
|
||||||
|
* Fixes [#1539](https://github.com/aws/aws-sdk-go/issues/1539)
|
||||||
|
Release v1.10.49 (2017-09-20)
|
||||||
|
===
|
||||||
|
|
||||||
|
### Service Client Updates
|
||||||
|
* `service/Greengrass`: Adds new service
|
||||||
|
* `service/appstream`: Updates service API and documentation
|
||||||
|
* API updates for supporting On-Demand fleets.
|
||||||
|
* `service/codepipeline`: Updates service API and documentation
|
||||||
|
* This change includes a PipelineMetadata object that is part of the output from the GetPipeline API that includes the Pipeline ARN, created, and updated timestamp.
|
||||||
|
* `aws/endpoints`: Updated Regions and Endpoints metadata.
|
||||||
|
* `service/rds`: Updates service API and documentation
|
||||||
|
* Introduces the --option-group-name parameter to the ModifyDBSnapshot CLI command. You can specify this parameter when you upgrade an Oracle DB snapshot. The same option group considerations apply when upgrading a DB snapshot as when upgrading a DB instance. For more information, see http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Oracle.html#USER_UpgradeDBInstance.Oracle.OGPG.OG
|
||||||
|
* `service/runtime.lex`: Updates service API and documentation
|
||||||
|
|
||||||
Release v1.10.48 (2017-09-19)
|
Release v1.10.48 (2017-09-19)
|
||||||
===
|
===
|
||||||
|
|
||||||
|
20
vendor/github.com/aws/aws-sdk-go/Gopkg.lock
generated
vendored
Normal file
20
vendor/github.com/aws/aws-sdk-go/Gopkg.lock
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||||
|
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/go-ini/ini"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "300e940a926eb277d3901b20bdfcc54928ad3642"
|
||||||
|
version = "v1.25.4"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/jmespath/go-jmespath"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "0b12d6b5"
|
||||||
|
|
||||||
|
[solve-meta]
|
||||||
|
analyzer-name = "dep"
|
||||||
|
analyzer-version = 1
|
||||||
|
inputs-digest = "51a86a867df617990082dec6b868e4efe2fdb2ed0e02a3daa93cd30f962b5085"
|
||||||
|
solver-name = "gps-cdcl"
|
||||||
|
solver-version = 1
|
48
vendor/github.com/aws/aws-sdk-go/Gopkg.toml
generated
vendored
Normal file
48
vendor/github.com/aws/aws-sdk-go/Gopkg.toml
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
|
||||||
|
# Gopkg.toml example
|
||||||
|
#
|
||||||
|
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||||
|
# for detailed Gopkg.toml documentation.
|
||||||
|
#
|
||||||
|
# required = ["github.com/user/thing/cmd/thing"]
|
||||||
|
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||||
|
#
|
||||||
|
# [[constraint]]
|
||||||
|
# name = "github.com/user/project"
|
||||||
|
# version = "1.0.0"
|
||||||
|
#
|
||||||
|
# [[constraint]]
|
||||||
|
# name = "github.com/user/project2"
|
||||||
|
# branch = "dev"
|
||||||
|
# source = "github.com/myfork/project2"
|
||||||
|
#
|
||||||
|
# [[override]]
|
||||||
|
# name = "github.com/x/y"
|
||||||
|
# version = "2.4.0"
|
||||||
|
|
||||||
|
ignored = [
|
||||||
|
# Testing/Example/Codegen dependencies
|
||||||
|
"github.com/stretchr/testify",
|
||||||
|
"github.com/stretchr/testify/assert",
|
||||||
|
"github.com/stretchr/testify/require",
|
||||||
|
"github.com/go-sql-driver/mysql",
|
||||||
|
"github.com/gucumber/gucumber",
|
||||||
|
"github.com/pkg/errors",
|
||||||
|
"golang.org/x/net",
|
||||||
|
"golang.org/x/net/html",
|
||||||
|
"golang.org/x/net/http2",
|
||||||
|
"golang.org/x/text",
|
||||||
|
"golang.org/x/text/html",
|
||||||
|
"golang.org/x/tools",
|
||||||
|
"golang.org/x/tools/go/loader",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/go-ini/ini"
|
||||||
|
version = "1.25.4"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/jmespath/go-jmespath"
|
||||||
|
revision = "0b12d6b5"
|
||||||
|
#version = "0.2.2"
|
347
vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go
generated
vendored
347
vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go
generated
vendored
@ -1,10 +1,9 @@
|
|||||||
package aws
|
package aws
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var testCasesStringSlice = [][]string{
|
var testCasesStringSlice = [][]string{
|
||||||
@ -18,14 +17,22 @@ func TestStringSlice(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := StringSlice(in)
|
out := StringSlice(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
if e, a := in[i], *(out[i]); e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := StringValueSlice(out)
|
out2 := StringValueSlice(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
|
if e, a := in, out2; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -39,22 +46,34 @@ func TestStringValueSlice(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := StringValueSlice(in)
|
out := StringValueSlice(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
if in[i] == nil {
|
if in[i] == nil {
|
||||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
if out[i] != "" {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
if e, a := *(in[i]), out[i]; e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := StringSlice(out)
|
out2 := StringSlice(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out2 {
|
for i := range out2 {
|
||||||
if in[i] == nil {
|
if in[i] == nil {
|
||||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
if *(out2[i]) != "" {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
if e, a := *in[i], *out2[i]; e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -70,14 +89,22 @@ func TestStringMap(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := StringMap(in)
|
out := StringMap(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
if e, a := in[i], *(out[i]); e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := StringValueMap(out)
|
out2 := StringValueMap(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
|
if e, a := in, out2; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -91,14 +118,22 @@ func TestBoolSlice(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := BoolSlice(in)
|
out := BoolSlice(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
if e, a := in[i], *(out[i]); e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := BoolValueSlice(out)
|
out2 := BoolValueSlice(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
|
if e, a := in, out2; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -110,22 +145,34 @@ func TestBoolValueSlice(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := BoolValueSlice(in)
|
out := BoolValueSlice(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
if in[i] == nil {
|
if in[i] == nil {
|
||||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
if out[i] {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
if e, a := *(in[i]), out[i]; e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := BoolSlice(out)
|
out2 := BoolSlice(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out2 {
|
for i := range out2 {
|
||||||
if in[i] == nil {
|
if in[i] == nil {
|
||||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
if *(out2[i]) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
if e, a := in[i], out2[i]; e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -141,14 +188,22 @@ func TestBoolMap(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := BoolMap(in)
|
out := BoolMap(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
if e, a := in[i], *(out[i]); e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := BoolValueMap(out)
|
out2 := BoolValueMap(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
|
if e, a := in, out2; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -162,14 +217,22 @@ func TestIntSlice(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := IntSlice(in)
|
out := IntSlice(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
if e, a := in[i], *(out[i]); e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := IntValueSlice(out)
|
out2 := IntValueSlice(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
|
if e, a := in, out2; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -181,22 +244,34 @@ func TestIntValueSlice(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := IntValueSlice(in)
|
out := IntValueSlice(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
if in[i] == nil {
|
if in[i] == nil {
|
||||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
if out[i] != 0 {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
if e, a := *(in[i]), out[i]; e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := IntSlice(out)
|
out2 := IntSlice(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out2 {
|
for i := range out2 {
|
||||||
if in[i] == nil {
|
if in[i] == nil {
|
||||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
if *(out2[i]) != 0 {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
if e, a := in[i], out2[i]; e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -212,14 +287,22 @@ func TestIntMap(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := IntMap(in)
|
out := IntMap(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
if e, a := in[i], *(out[i]); e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := IntValueMap(out)
|
out2 := IntValueMap(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
|
if e, a := in, out2; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -233,14 +316,22 @@ func TestInt64Slice(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := Int64Slice(in)
|
out := Int64Slice(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
if e, a := in[i], *(out[i]); e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := Int64ValueSlice(out)
|
out2 := Int64ValueSlice(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
|
if e, a := in, out2; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -252,22 +343,34 @@ func TestInt64ValueSlice(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := Int64ValueSlice(in)
|
out := Int64ValueSlice(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
if in[i] == nil {
|
if in[i] == nil {
|
||||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
if out[i] != 0 {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
if e, a := *(in[i]), out[i]; e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := Int64Slice(out)
|
out2 := Int64Slice(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out2 {
|
for i := range out2 {
|
||||||
if in[i] == nil {
|
if in[i] == nil {
|
||||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
if *(out2[i]) != 0 {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
if e, a := in[i], out2[i]; e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -283,14 +386,22 @@ func TestInt64Map(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := Int64Map(in)
|
out := Int64Map(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
if e, a := in[i], *(out[i]); e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := Int64ValueMap(out)
|
out2 := Int64ValueMap(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
|
if e, a := in, out2; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -304,14 +415,22 @@ func TestFloat64Slice(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := Float64Slice(in)
|
out := Float64Slice(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
if e, a := in[i], *(out[i]); e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := Float64ValueSlice(out)
|
out2 := Float64ValueSlice(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
|
if e, a := in, out2; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -323,22 +442,34 @@ func TestFloat64ValueSlice(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := Float64ValueSlice(in)
|
out := Float64ValueSlice(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
if in[i] == nil {
|
if in[i] == nil {
|
||||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
if out[i] != 0 {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
if e, a := *(in[i]), out[i]; e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := Float64Slice(out)
|
out2 := Float64Slice(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out2 {
|
for i := range out2 {
|
||||||
if in[i] == nil {
|
if in[i] == nil {
|
||||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
if *(out2[i]) != 0 {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
if e, a := in[i], out2[i]; e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -354,14 +485,22 @@ func TestFloat64Map(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := Float64Map(in)
|
out := Float64Map(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
if e, a := in[i], *(out[i]); e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := Float64ValueMap(out)
|
out2 := Float64ValueMap(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
|
if e, a := in, out2; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -375,14 +514,22 @@ func TestTimeSlice(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := TimeSlice(in)
|
out := TimeSlice(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
if e, a := in[i], *(out[i]); e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := TimeValueSlice(out)
|
out2 := TimeValueSlice(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
|
if e, a := in, out2; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -394,22 +541,34 @@ func TestTimeValueSlice(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := TimeValueSlice(in)
|
out := TimeValueSlice(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
if in[i] == nil {
|
if in[i] == nil {
|
||||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
if !out[i].IsZero() {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
if e, a := *(in[i]), out[i]; e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := TimeSlice(out)
|
out2 := TimeSlice(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out2 {
|
for i := range out2 {
|
||||||
if in[i] == nil {
|
if in[i] == nil {
|
||||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
if !(*(out2[i])).IsZero() {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
if e, a := in[i], out2[i]; e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -425,14 +584,22 @@ func TestTimeMap(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out := TimeMap(in)
|
out := TimeMap(in)
|
||||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out), len(in); e != a {
|
||||||
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
for i := range out {
|
for i := range out {
|
||||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
if e, a := in[i], *(out[i]); e != a {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out2 := TimeValueMap(out)
|
out2 := TimeValueMap(out)
|
||||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
if e, a := len(out2), len(in); e != a {
|
||||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
t.Errorf("Unexpected len at idx %d", idx)
|
||||||
|
}
|
||||||
|
if e, a := in, out2; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("Unexpected value at idx %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -458,13 +625,17 @@ var testCasesTimeValue = []TimeValueTestCase{
|
|||||||
func TestSecondsTimeValue(t *testing.T) {
|
func TestSecondsTimeValue(t *testing.T) {
|
||||||
for idx, testCase := range testCasesTimeValue {
|
for idx, testCase := range testCasesTimeValue {
|
||||||
out := SecondsTimeValue(&testCase.in)
|
out := SecondsTimeValue(&testCase.in)
|
||||||
assert.Equal(t, testCase.outSecs, out, "Unexpected value for time value at %d", idx)
|
if e, a := testCase.outSecs, out; e != a {
|
||||||
|
t.Errorf("Unexpected value for time value at %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMillisecondsTimeValue(t *testing.T) {
|
func TestMillisecondsTimeValue(t *testing.T) {
|
||||||
for idx, testCase := range testCasesTimeValue {
|
for idx, testCase := range testCasesTimeValue {
|
||||||
out := MillisecondsTimeValue(&testCase.in)
|
out := MillisecondsTimeValue(&testCase.in)
|
||||||
assert.Equal(t, testCase.outMillis, out, "Unexpected value for time value at %d", idx)
|
if e, a := testCase.outMillis, out; e != a {
|
||||||
|
t.Errorf("Unexpected value for time value at %d", idx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
91
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go
generated
vendored
91
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go
generated
vendored
@ -7,11 +7,10 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"os"
|
"os"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
"github.com/aws/aws-sdk-go/aws/corehandlers"
|
"github.com/aws/aws-sdk-go/aws/corehandlers"
|
||||||
@ -32,7 +31,9 @@ func TestValidateEndpointHandler(t *testing.T) {
|
|||||||
req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
|
req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
|
||||||
err := req.Build()
|
err := req.Build()
|
||||||
|
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestValidateEndpointHandlerErrorRegion(t *testing.T) {
|
func TestValidateEndpointHandlerErrorRegion(t *testing.T) {
|
||||||
@ -45,8 +46,12 @@ func TestValidateEndpointHandlerErrorRegion(t *testing.T) {
|
|||||||
req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
|
req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
|
||||||
err := req.Build()
|
err := req.Build()
|
||||||
|
|
||||||
assert.Error(t, err)
|
if err == nil {
|
||||||
assert.Equal(t, aws.ErrMissingRegion, err)
|
t.Errorf("expect error, got none")
|
||||||
|
}
|
||||||
|
if e, a := aws.ErrMissingRegion, err; e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type mockCredsProvider struct {
|
type mockCredsProvider struct {
|
||||||
@ -82,18 +87,30 @@ func TestAfterRetryRefreshCreds(t *testing.T) {
|
|||||||
})
|
})
|
||||||
svc.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
|
svc.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
|
||||||
|
|
||||||
assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired")
|
if !svc.Config.Credentials.IsExpired() {
|
||||||
assert.False(t, credProvider.retrieveCalled)
|
t.Errorf("Expect to start out expired")
|
||||||
|
}
|
||||||
|
if credProvider.retrieveCalled {
|
||||||
|
t.Errorf("expect not called")
|
||||||
|
}
|
||||||
|
|
||||||
req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
|
req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil)
|
||||||
req.Send()
|
req.Send()
|
||||||
|
|
||||||
assert.True(t, svc.Config.Credentials.IsExpired())
|
if !svc.Config.Credentials.IsExpired() {
|
||||||
assert.False(t, credProvider.retrieveCalled)
|
t.Errorf("Expect to start out expired")
|
||||||
|
}
|
||||||
|
if credProvider.retrieveCalled {
|
||||||
|
t.Errorf("expect not called")
|
||||||
|
}
|
||||||
|
|
||||||
_, err := svc.Config.Credentials.Get()
|
_, err := svc.Config.Credentials.Get()
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
assert.True(t, credProvider.retrieveCalled)
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
if !credProvider.retrieveCalled {
|
||||||
|
t.Errorf("expect not called")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAfterRetryWithContextCanceled(t *testing.T) {
|
func TestAfterRetryWithContextCanceled(t *testing.T) {
|
||||||
@ -202,8 +219,12 @@ func TestSendHandlerError(t *testing.T) {
|
|||||||
|
|
||||||
r.Send()
|
r.Send()
|
||||||
|
|
||||||
assert.Error(t, r.Error)
|
if r.Error == nil {
|
||||||
assert.NotNil(t, r.HTTPResponse)
|
t.Errorf("expect error, got none")
|
||||||
|
}
|
||||||
|
if r.HTTPResponse == nil {
|
||||||
|
t.Errorf("expect response, got none")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSendWithoutFollowRedirects(t *testing.T) {
|
func TestSendWithoutFollowRedirects(t *testing.T) {
|
||||||
@ -273,31 +294,47 @@ func TestValidateReqSigHandler(t *testing.T) {
|
|||||||
|
|
||||||
corehandlers.ValidateReqSigHandler.Fn(c.Req)
|
corehandlers.ValidateReqSigHandler.Fn(c.Req)
|
||||||
|
|
||||||
assert.NoError(t, c.Req.Error, "%d, expect no error", i)
|
if c.Req.Error != nil {
|
||||||
assert.Equal(t, c.Resign, resigned, "%d, expected resigning to match", i)
|
t.Errorf("expect no error, got %v", c.Req.Error)
|
||||||
|
}
|
||||||
|
if e, a := c.Resign, resigned; e != a {
|
||||||
|
t.Errorf("%d, expect %v to be %v", i, e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func setupContentLengthTestServer(t *testing.T, hasContentLength bool, contentLength int64) *httptest.Server {
|
func setupContentLengthTestServer(t *testing.T, hasContentLength bool, contentLength int64) *httptest.Server {
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
_, ok := r.Header["Content-Length"]
|
_, ok := r.Header["Content-Length"]
|
||||||
assert.Equal(t, hasContentLength, ok, "expect content length to be set, %t", hasContentLength)
|
if e, a := hasContentLength, ok; e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
if hasContentLength {
|
if hasContentLength {
|
||||||
assert.Equal(t, contentLength, r.ContentLength)
|
if e, a := contentLength, r.ContentLength; e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
b, err := ioutil.ReadAll(r.Body)
|
b, err := ioutil.ReadAll(r.Body)
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
r.Body.Close()
|
r.Body.Close()
|
||||||
|
|
||||||
authHeader := r.Header.Get("Authorization")
|
authHeader := r.Header.Get("Authorization")
|
||||||
if hasContentLength {
|
if hasContentLength {
|
||||||
assert.Contains(t, authHeader, "content-length")
|
if e, a := "content-length", authHeader; !strings.Contains(a, e) {
|
||||||
|
t.Errorf("expect %v to be in %v", e, a)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.NotContains(t, authHeader, "content-length")
|
if e, a := "content-length", authHeader; strings.Contains(a, e) {
|
||||||
|
t.Errorf("expect %v to not be in %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.Equal(t, contentLength, int64(len(b)))
|
if e, a := contentLength, int64(len(b)); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
}))
|
}))
|
||||||
|
|
||||||
return server
|
return server
|
||||||
@ -316,7 +353,9 @@ func TestBuildContentLength_ZeroBody(t *testing.T) {
|
|||||||
Key: aws.String("keyname"),
|
Key: aws.String("keyname"),
|
||||||
})
|
})
|
||||||
|
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBuildContentLength_NegativeBody(t *testing.T) {
|
func TestBuildContentLength_NegativeBody(t *testing.T) {
|
||||||
@ -334,7 +373,9 @@ func TestBuildContentLength_NegativeBody(t *testing.T) {
|
|||||||
|
|
||||||
req.HTTPRequest.Header.Set("Content-Length", "-1")
|
req.HTTPRequest.Header.Set("Content-Length", "-1")
|
||||||
|
|
||||||
assert.NoError(t, req.Send())
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBuildContentLength_WithBody(t *testing.T) {
|
func TestBuildContentLength_WithBody(t *testing.T) {
|
||||||
@ -351,5 +392,7 @@ func TestBuildContentLength_WithBody(t *testing.T) {
|
|||||||
Body: bytes.NewReader(make([]byte, 1024)),
|
Body: bytes.NewReader(make([]byte, 1024)),
|
||||||
})
|
})
|
||||||
|
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
72
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go
generated
vendored
72
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go
generated
vendored
@ -3,8 +3,7 @@ package corehandlers_test
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
"reflect"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
@ -14,7 +13,6 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/aws/request"
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
"github.com/aws/aws-sdk-go/awstesting/unit"
|
"github.com/aws/aws-sdk-go/awstesting/unit"
|
||||||
"github.com/aws/aws-sdk-go/service/kinesis"
|
"github.com/aws/aws-sdk-go/service/kinesis"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var testSvc = func() *client.Client {
|
var testSvc = func() *client.Client {
|
||||||
@ -113,7 +111,9 @@ func TestNoErrors(t *testing.T) {
|
|||||||
|
|
||||||
req := testSvc.NewRequest(&request.Operation{}, input, nil)
|
req := testSvc.NewRequest(&request.Operation{}, input, nil)
|
||||||
corehandlers.ValidateParametersHandler.Fn(req)
|
corehandlers.ValidateParametersHandler.Fn(req)
|
||||||
require.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Fatalf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMissingRequiredParameters(t *testing.T) {
|
func TestMissingRequiredParameters(t *testing.T) {
|
||||||
@ -121,17 +121,33 @@ func TestMissingRequiredParameters(t *testing.T) {
|
|||||||
req := testSvc.NewRequest(&request.Operation{}, input, nil)
|
req := testSvc.NewRequest(&request.Operation{}, input, nil)
|
||||||
corehandlers.ValidateParametersHandler.Fn(req)
|
corehandlers.ValidateParametersHandler.Fn(req)
|
||||||
|
|
||||||
require.Error(t, req.Error)
|
if req.Error == nil {
|
||||||
assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
|
t.Fatalf("expect error")
|
||||||
assert.Equal(t, "3 validation error(s) found.", req.Error.(awserr.Error).Message())
|
}
|
||||||
|
if e, a := "InvalidParameter", req.Error.(awserr.Error).Code(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "3 validation error(s) found.", req.Error.(awserr.Error).Message(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
errs := req.Error.(awserr.BatchedErrors).OrigErrs()
|
errs := req.Error.(awserr.BatchedErrors).OrigErrs()
|
||||||
assert.Len(t, errs, 3)
|
if e, a := 3, len(errs); e != a {
|
||||||
assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredList.", errs[0].Error())
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredMap.", errs[1].Error())
|
}
|
||||||
assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredBool.", errs[2].Error())
|
if e, a := "ParamRequiredError: missing required field, StructShape.RequiredList.", errs[0].Error(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "ParamRequiredError: missing required field, StructShape.RequiredMap.", errs[1].Error(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "ParamRequiredError: missing required field, StructShape.RequiredBool.", errs[2].Error(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
assert.Equal(t, "InvalidParameter: 3 validation error(s) found.\n- missing required field, StructShape.RequiredList.\n- missing required field, StructShape.RequiredMap.\n- missing required field, StructShape.RequiredBool.\n", req.Error.Error())
|
if e, a := "InvalidParameter: 3 validation error(s) found.\n- missing required field, StructShape.RequiredList.\n- missing required field, StructShape.RequiredMap.\n- missing required field, StructShape.RequiredBool.\n", req.Error.Error(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNestedMissingRequiredParameters(t *testing.T) {
|
func TestNestedMissingRequiredParameters(t *testing.T) {
|
||||||
@ -148,15 +164,29 @@ func TestNestedMissingRequiredParameters(t *testing.T) {
|
|||||||
req := testSvc.NewRequest(&request.Operation{}, input, nil)
|
req := testSvc.NewRequest(&request.Operation{}, input, nil)
|
||||||
corehandlers.ValidateParametersHandler.Fn(req)
|
corehandlers.ValidateParametersHandler.Fn(req)
|
||||||
|
|
||||||
require.Error(t, req.Error)
|
if req.Error == nil {
|
||||||
assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
|
t.Fatalf("expect error")
|
||||||
assert.Equal(t, "3 validation error(s) found.", req.Error.(awserr.Error).Message())
|
}
|
||||||
|
if e, a := "InvalidParameter", req.Error.(awserr.Error).Code(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "3 validation error(s) found.", req.Error.(awserr.Error).Message(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
errs := req.Error.(awserr.BatchedErrors).OrigErrs()
|
errs := req.Error.(awserr.BatchedErrors).OrigErrs()
|
||||||
assert.Len(t, errs, 3)
|
if e, a := 3, len(errs); e != a {
|
||||||
assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredList[0].Name.", errs[0].Error())
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredMap[key2].Name.", errs[1].Error())
|
}
|
||||||
assert.Equal(t, "ParamRequiredError: missing required field, StructShape.OptionalStruct.Name.", errs[2].Error())
|
if e, a := "ParamRequiredError: missing required field, StructShape.RequiredList[0].Name.", errs[0].Error(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "ParamRequiredError: missing required field, StructShape.RequiredMap[key2].Name.", errs[1].Error(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "ParamRequiredError: missing required field, StructShape.OptionalStruct.Name.", errs[2].Error(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type testInput struct {
|
type testInput struct {
|
||||||
@ -226,7 +256,9 @@ func TestValidateFieldMinParameter(t *testing.T) {
|
|||||||
req := testSvc.NewRequest(&request.Operation{}, &c.in, nil)
|
req := testSvc.NewRequest(&request.Operation{}, &c.in, nil)
|
||||||
corehandlers.ValidateParametersHandler.Fn(req)
|
corehandlers.ValidateParametersHandler.Fn(req)
|
||||||
|
|
||||||
assert.Equal(t, c.err, req.Error, "%d case failed", i)
|
if e, a := c.err, req.Error; !reflect.DeepEqual(e,a) {
|
||||||
|
t.Errorf("%d, expect %v, got %v", i, e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
117
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go
generated
vendored
117
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go
generated
vendored
@ -11,8 +11,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||||
@ -71,8 +69,12 @@ func TestEndpoint(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
req := c.NewRequest(op, nil, nil)
|
req := c.NewRequest(op, nil, nil)
|
||||||
assert.Equal(t, "http://169.254.169.254/latest", req.ClientInfo.Endpoint)
|
if e, a := "http://169.254.169.254/latest", req.ClientInfo.Endpoint; e != a {
|
||||||
assert.Equal(t, "http://169.254.169.254/latest/meta-data/testpath", req.HTTPRequest.URL.String())
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "http://169.254.169.254/latest/meta-data/testpath", req.HTTPRequest.URL.String(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetMetadata(t *testing.T) {
|
func TestGetMetadata(t *testing.T) {
|
||||||
@ -85,8 +87,12 @@ func TestGetMetadata(t *testing.T) {
|
|||||||
|
|
||||||
resp, err := c.GetMetadata("some/path")
|
resp, err := c.GetMetadata("some/path")
|
||||||
|
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
assert.Equal(t, "success", resp)
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
if e, a := "success", resp; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetUserData(t *testing.T) {
|
func TestGetUserData(t *testing.T) {
|
||||||
@ -99,8 +105,12 @@ func TestGetUserData(t *testing.T) {
|
|||||||
|
|
||||||
resp, err := c.GetUserData()
|
resp, err := c.GetUserData()
|
||||||
|
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
assert.Equal(t, "success", resp)
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
if e, a := "success", resp; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetUserData_Error(t *testing.T) {
|
func TestGetUserData_Error(t *testing.T) {
|
||||||
@ -126,12 +136,17 @@ func TestGetUserData_Error(t *testing.T) {
|
|||||||
c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
|
c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
|
||||||
|
|
||||||
resp, err := c.GetUserData()
|
resp, err := c.GetUserData()
|
||||||
assert.Error(t, err)
|
if err == nil {
|
||||||
assert.Empty(t, resp)
|
t.Errorf("expect error")
|
||||||
|
}
|
||||||
|
if len(resp) != 0 {
|
||||||
|
t.Errorf("expect empty, got %v", resp)
|
||||||
|
}
|
||||||
|
|
||||||
aerr, ok := err.(awserr.Error)
|
aerr := err.(awserr.Error)
|
||||||
assert.True(t, ok)
|
if e, a := "NotFoundError", aerr.Code(); e != a {
|
||||||
assert.Equal(t, "NotFoundError", aerr.Code())
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetRegion(t *testing.T) {
|
func TestGetRegion(t *testing.T) {
|
||||||
@ -144,8 +159,12 @@ func TestGetRegion(t *testing.T) {
|
|||||||
|
|
||||||
region, err := c.Region()
|
region, err := c.Region()
|
||||||
|
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
assert.Equal(t, "us-west-2", region)
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
if e, a := "us-west-2", region; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMetadataAvailable(t *testing.T) {
|
func TestMetadataAvailable(t *testing.T) {
|
||||||
@ -156,9 +175,9 @@ func TestMetadataAvailable(t *testing.T) {
|
|||||||
defer server.Close()
|
defer server.Close()
|
||||||
c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
|
c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
|
||||||
|
|
||||||
available := c.Available()
|
if !c.Available() {
|
||||||
|
t.Errorf("expect available")
|
||||||
assert.True(t, available)
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMetadataIAMInfo_success(t *testing.T) {
|
func TestMetadataIAMInfo_success(t *testing.T) {
|
||||||
@ -170,10 +189,18 @@ func TestMetadataIAMInfo_success(t *testing.T) {
|
|||||||
c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
|
c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
|
||||||
|
|
||||||
iamInfo, err := c.IAMInfo()
|
iamInfo, err := c.IAMInfo()
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
assert.Equal(t, "Success", iamInfo.Code)
|
t.Errorf("expect no error, got %v", err)
|
||||||
assert.Equal(t, "arn:aws:iam::123456789012:instance-profile/my-instance-profile", iamInfo.InstanceProfileArn)
|
}
|
||||||
assert.Equal(t, "AIPAABCDEFGHIJKLMN123", iamInfo.InstanceProfileID)
|
if e, a := "Success", iamInfo.Code; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "arn:aws:iam::123456789012:instance-profile/my-instance-profile", iamInfo.InstanceProfileArn; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "AIPAABCDEFGHIJKLMN123", iamInfo.InstanceProfileID; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMetadataIAMInfo_failure(t *testing.T) {
|
func TestMetadataIAMInfo_failure(t *testing.T) {
|
||||||
@ -185,10 +212,18 @@ func TestMetadataIAMInfo_failure(t *testing.T) {
|
|||||||
c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
|
c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
|
||||||
|
|
||||||
iamInfo, err := c.IAMInfo()
|
iamInfo, err := c.IAMInfo()
|
||||||
assert.NotNil(t, err)
|
if err == nil {
|
||||||
assert.Equal(t, "", iamInfo.Code)
|
t.Errorf("expect error")
|
||||||
assert.Equal(t, "", iamInfo.InstanceProfileArn)
|
}
|
||||||
assert.Equal(t, "", iamInfo.InstanceProfileID)
|
if e, a := "", iamInfo.Code; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "", iamInfo.InstanceProfileArn; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "", iamInfo.InstanceProfileID; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMetadataNotAvailable(t *testing.T) {
|
func TestMetadataNotAvailable(t *testing.T) {
|
||||||
@ -204,9 +239,9 @@ func TestMetadataNotAvailable(t *testing.T) {
|
|||||||
r.Retryable = aws.Bool(true) // network errors are retryable
|
r.Retryable = aws.Bool(true) // network errors are retryable
|
||||||
})
|
})
|
||||||
|
|
||||||
available := c.Available()
|
if c.Available() {
|
||||||
|
t.Errorf("expect not available")
|
||||||
assert.False(t, available)
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMetadataErrorResponse(t *testing.T) {
|
func TestMetadataErrorResponse(t *testing.T) {
|
||||||
@ -222,8 +257,12 @@ func TestMetadataErrorResponse(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
data, err := c.GetMetadata("uri/path")
|
data, err := c.GetMetadata("uri/path")
|
||||||
assert.Empty(t, data)
|
if len(data) != 0 {
|
||||||
assert.Contains(t, err.Error(), "error message text")
|
t.Errorf("expect empty, got %v", data)
|
||||||
|
}
|
||||||
|
if e, a := "error message text", err.Error(); !strings.Contains(a, e) {
|
||||||
|
t.Errorf("expect %v to be in %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEC2RoleProviderInstanceIdentity(t *testing.T) {
|
func TestEC2RoleProviderInstanceIdentity(t *testing.T) {
|
||||||
@ -235,8 +274,16 @@ func TestEC2RoleProviderInstanceIdentity(t *testing.T) {
|
|||||||
c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
|
c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")})
|
||||||
|
|
||||||
doc, err := c.GetInstanceIdentityDocument()
|
doc, err := c.GetInstanceIdentityDocument()
|
||||||
assert.Nil(t, err, "Expect no error, %v", err)
|
if err != nil {
|
||||||
assert.Equal(t, doc.AccountID, "123456789012")
|
t.Errorf("expect no error, got %v", err)
|
||||||
assert.Equal(t, doc.AvailabilityZone, "us-east-1d")
|
}
|
||||||
assert.Equal(t, doc.Region, "us-east-1")
|
if e, a := doc.AccountID, "123456789012"; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := doc.AvailabilityZone, "us-east-1d"; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := doc.Region, "us-east-1"; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
6
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
6
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
@ -1008,6 +1008,7 @@ var awsPartition = partition{
|
|||||||
"ap-northeast-1": endpoint{},
|
"ap-northeast-1": endpoint{},
|
||||||
"ap-northeast-2": endpoint{},
|
"ap-northeast-2": endpoint{},
|
||||||
"ap-south-1": endpoint{},
|
"ap-south-1": endpoint{},
|
||||||
|
"ap-southeast-1": endpoint{},
|
||||||
"ap-southeast-2": endpoint{},
|
"ap-southeast-2": endpoint{},
|
||||||
"ca-central-1": endpoint{},
|
"ca-central-1": endpoint{},
|
||||||
"eu-central-1": endpoint{},
|
"eu-central-1": endpoint{},
|
||||||
@ -1023,6 +1024,8 @@ var awsPartition = partition{
|
|||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
"us-east-1": endpoint{},
|
"us-east-1": endpoint{},
|
||||||
|
"us-east-2": endpoint{},
|
||||||
|
"us-west-2": endpoint{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"greengrass": service{
|
"greengrass": service{
|
||||||
@ -1031,6 +1034,7 @@ var awsPartition = partition{
|
|||||||
Protocols: []string{"https"},
|
Protocols: []string{"https"},
|
||||||
},
|
},
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
|
"ap-northeast-1": endpoint{},
|
||||||
"ap-southeast-2": endpoint{},
|
"ap-southeast-2": endpoint{},
|
||||||
"eu-central-1": endpoint{},
|
"eu-central-1": endpoint{},
|
||||||
"us-east-1": endpoint{},
|
"us-east-1": endpoint{},
|
||||||
@ -1545,6 +1549,7 @@ var awsPartition = partition{
|
|||||||
"snowball": service{
|
"snowball": service{
|
||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
|
"ap-northeast-1": endpoint{},
|
||||||
"ap-south-1": endpoint{},
|
"ap-south-1": endpoint{},
|
||||||
"ap-southeast-2": endpoint{},
|
"ap-southeast-2": endpoint{},
|
||||||
"eu-central-1": endpoint{},
|
"eu-central-1": endpoint{},
|
||||||
@ -1825,6 +1830,7 @@ var awsPartition = partition{
|
|||||||
"ap-southeast-2": endpoint{},
|
"ap-southeast-2": endpoint{},
|
||||||
"eu-central-1": endpoint{},
|
"eu-central-1": endpoint{},
|
||||||
"eu-west-1": endpoint{},
|
"eu-west-1": endpoint{},
|
||||||
|
"eu-west-2": endpoint{},
|
||||||
"us-east-1": endpoint{},
|
"us-east-1": endpoint{},
|
||||||
"us-west-2": endpoint{},
|
"us-west-2": endpoint{},
|
||||||
},
|
},
|
||||||
|
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
@ -5,4 +5,4 @@ package aws
|
|||||||
const SDKName = "aws-sdk-go"
|
const SDKName = "aws-sdk-go"
|
||||||
|
|
||||||
// SDKVersion is the version of this SDK
|
// SDKVersion is the version of this SDK
|
||||||
const SDKVersion = "1.10.48"
|
const SDKVersion = "1.12.6"
|
||||||
|
@ -1,27 +0,0 @@
|
|||||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above
|
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
|
||||||
in the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,22 +0,0 @@
|
|||||||
Additional IP Rights Grant (Patents)
|
|
||||||
|
|
||||||
"This implementation" means the copyrightable works distributed by
|
|
||||||
Google as part of the Go project.
|
|
||||||
|
|
||||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
|
||||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
|
||||||
patent license to make, have made, use, offer to sell, sell, import,
|
|
||||||
transfer and otherwise run, modify and propagate the contents of this
|
|
||||||
implementation of Go, where such license applies only to those patent
|
|
||||||
claims, both currently owned or controlled by Google and acquired in
|
|
||||||
the future, licensable by Google that are necessarily infringed by this
|
|
||||||
implementation of Go. This grant does not include claims that would be
|
|
||||||
infringed only as a consequence of further modification of this
|
|
||||||
implementation. If you or your agent or exclusive licensee institute or
|
|
||||||
order or agree to the institution of patent litigation against any
|
|
||||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
|
||||||
that this implementation of Go or any code incorporated within this
|
|
||||||
implementation of Go constitutes direct or contributory patent
|
|
||||||
infringement, or inducement of patent infringement, then any patent
|
|
||||||
rights granted to you under this License for this implementation of Go
|
|
||||||
shall terminate as of the date such litigation is filed.
|
|
@ -1,624 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package astutil
|
|
||||||
|
|
||||||
// This file defines utilities for working with source positions.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/token"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PathEnclosingInterval returns the node that encloses the source
|
|
||||||
// interval [start, end), and all its ancestors up to the AST root.
|
|
||||||
//
|
|
||||||
// The definition of "enclosing" used by this function considers
|
|
||||||
// additional whitespace abutting a node to be enclosed by it.
|
|
||||||
// In this example:
|
|
||||||
//
|
|
||||||
// z := x + y // add them
|
|
||||||
// <-A->
|
|
||||||
// <----B----->
|
|
||||||
//
|
|
||||||
// the ast.BinaryExpr(+) node is considered to enclose interval B
|
|
||||||
// even though its [Pos()..End()) is actually only interval A.
|
|
||||||
// This behaviour makes user interfaces more tolerant of imperfect
|
|
||||||
// input.
|
|
||||||
//
|
|
||||||
// This function treats tokens as nodes, though they are not included
|
|
||||||
// in the result. e.g. PathEnclosingInterval("+") returns the
|
|
||||||
// enclosing ast.BinaryExpr("x + y").
|
|
||||||
//
|
|
||||||
// If start==end, the 1-char interval following start is used instead.
|
|
||||||
//
|
|
||||||
// The 'exact' result is true if the interval contains only path[0]
|
|
||||||
// and perhaps some adjacent whitespace. It is false if the interval
|
|
||||||
// overlaps multiple children of path[0], or if it contains only
|
|
||||||
// interior whitespace of path[0].
|
|
||||||
// In this example:
|
|
||||||
//
|
|
||||||
// z := x + y // add them
|
|
||||||
// <--C--> <---E-->
|
|
||||||
// ^
|
|
||||||
// D
|
|
||||||
//
|
|
||||||
// intervals C, D and E are inexact. C is contained by the
|
|
||||||
// z-assignment statement, because it spans three of its children (:=,
|
|
||||||
// x, +). So too is the 1-char interval D, because it contains only
|
|
||||||
// interior whitespace of the assignment. E is considered interior
|
|
||||||
// whitespace of the BlockStmt containing the assignment.
|
|
||||||
//
|
|
||||||
// Precondition: [start, end) both lie within the same file as root.
|
|
||||||
// TODO(adonovan): return (nil, false) in this case and remove precond.
|
|
||||||
// Requires FileSet; see loader.tokenFileContainsPos.
|
|
||||||
//
|
|
||||||
// Postcondition: path is never nil; it always contains at least 'root'.
|
|
||||||
//
|
|
||||||
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
|
|
||||||
// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
|
|
||||||
|
|
||||||
// Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
|
|
||||||
var visit func(node ast.Node) bool
|
|
||||||
visit = func(node ast.Node) bool {
|
|
||||||
path = append(path, node)
|
|
||||||
|
|
||||||
nodePos := node.Pos()
|
|
||||||
nodeEnd := node.End()
|
|
||||||
|
|
||||||
// fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging
|
|
||||||
|
|
||||||
// Intersect [start, end) with interval of node.
|
|
||||||
if start < nodePos {
|
|
||||||
start = nodePos
|
|
||||||
}
|
|
||||||
if end > nodeEnd {
|
|
||||||
end = nodeEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find sole child that contains [start, end).
|
|
||||||
children := childrenOf(node)
|
|
||||||
l := len(children)
|
|
||||||
for i, child := range children {
|
|
||||||
// [childPos, childEnd) is unaugmented interval of child.
|
|
||||||
childPos := child.Pos()
|
|
||||||
childEnd := child.End()
|
|
||||||
|
|
||||||
// [augPos, augEnd) is whitespace-augmented interval of child.
|
|
||||||
augPos := childPos
|
|
||||||
augEnd := childEnd
|
|
||||||
if i > 0 {
|
|
||||||
augPos = children[i-1].End() // start of preceding whitespace
|
|
||||||
}
|
|
||||||
if i < l-1 {
|
|
||||||
nextChildPos := children[i+1].Pos()
|
|
||||||
// Does [start, end) lie between child and next child?
|
|
||||||
if start >= augEnd && end <= nextChildPos {
|
|
||||||
return false // inexact match
|
|
||||||
}
|
|
||||||
augEnd = nextChildPos // end of following whitespace
|
|
||||||
}
|
|
||||||
|
|
||||||
// fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
|
|
||||||
// i, augPos, augEnd, start, end) // debugging
|
|
||||||
|
|
||||||
// Does augmented child strictly contain [start, end)?
|
|
||||||
if augPos <= start && end <= augEnd {
|
|
||||||
_, isToken := child.(tokenNode)
|
|
||||||
return isToken || visit(child)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Does [start, end) overlap multiple children?
|
|
||||||
// i.e. left-augmented child contains start
|
|
||||||
// but LR-augmented child does not contain end.
|
|
||||||
if start < childEnd && end > augEnd {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// No single child contained [start, end),
|
|
||||||
// so node is the result. Is it exact?
|
|
||||||
|
|
||||||
// (It's tempting to put this condition before the
|
|
||||||
// child loop, but it gives the wrong result in the
|
|
||||||
// case where a node (e.g. ExprStmt) and its sole
|
|
||||||
// child have equal intervals.)
|
|
||||||
if start == nodePos && end == nodeEnd {
|
|
||||||
return true // exact match
|
|
||||||
}
|
|
||||||
|
|
||||||
return false // inexact: overlaps multiple children
|
|
||||||
}
|
|
||||||
|
|
||||||
if start > end {
|
|
||||||
start, end = end, start
|
|
||||||
}
|
|
||||||
|
|
||||||
if start < root.End() && end > root.Pos() {
|
|
||||||
if start == end {
|
|
||||||
end = start + 1 // empty interval => interval of size 1
|
|
||||||
}
|
|
||||||
exact = visit(root)
|
|
||||||
|
|
||||||
// Reverse the path:
|
|
||||||
for i, l := 0, len(path); i < l/2; i++ {
|
|
||||||
path[i], path[l-1-i] = path[l-1-i], path[i]
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Selection lies within whitespace preceding the
|
|
||||||
// first (or following the last) declaration in the file.
|
|
||||||
// The result nonetheless always includes the ast.File.
|
|
||||||
path = append(path, root)
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// tokenNode is a dummy implementation of ast.Node for a single token.
|
|
||||||
// They are used transiently by PathEnclosingInterval but never escape
|
|
||||||
// this package.
|
|
||||||
//
|
|
||||||
type tokenNode struct {
|
|
||||||
pos token.Pos
|
|
||||||
end token.Pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n tokenNode) Pos() token.Pos {
|
|
||||||
return n.pos
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n tokenNode) End() token.Pos {
|
|
||||||
return n.end
|
|
||||||
}
|
|
||||||
|
|
||||||
func tok(pos token.Pos, len int) ast.Node {
|
|
||||||
return tokenNode{pos, pos + token.Pos(len)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// childrenOf returns the direct non-nil children of ast.Node n.
|
|
||||||
// It may include fake ast.Node implementations for bare tokens.
|
|
||||||
// it is not safe to call (e.g.) ast.Walk on such nodes.
|
|
||||||
//
|
|
||||||
func childrenOf(n ast.Node) []ast.Node {
|
|
||||||
var children []ast.Node
|
|
||||||
|
|
||||||
// First add nodes for all true subtrees.
|
|
||||||
ast.Inspect(n, func(node ast.Node) bool {
|
|
||||||
if node == n { // push n
|
|
||||||
return true // recur
|
|
||||||
}
|
|
||||||
if node != nil { // push child
|
|
||||||
children = append(children, node)
|
|
||||||
}
|
|
||||||
return false // no recursion
|
|
||||||
})
|
|
||||||
|
|
||||||
// Then add fake Nodes for bare tokens.
|
|
||||||
switch n := n.(type) {
|
|
||||||
case *ast.ArrayType:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Lbrack, len("[")),
|
|
||||||
tok(n.Elt.End(), len("]")))
|
|
||||||
|
|
||||||
case *ast.AssignStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.TokPos, len(n.Tok.String())))
|
|
||||||
|
|
||||||
case *ast.BasicLit:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.ValuePos, len(n.Value)))
|
|
||||||
|
|
||||||
case *ast.BinaryExpr:
|
|
||||||
children = append(children, tok(n.OpPos, len(n.Op.String())))
|
|
||||||
|
|
||||||
case *ast.BlockStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Lbrace, len("{")),
|
|
||||||
tok(n.Rbrace, len("}")))
|
|
||||||
|
|
||||||
case *ast.BranchStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.TokPos, len(n.Tok.String())))
|
|
||||||
|
|
||||||
case *ast.CallExpr:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Lparen, len("(")),
|
|
||||||
tok(n.Rparen, len(")")))
|
|
||||||
if n.Ellipsis != 0 {
|
|
||||||
children = append(children, tok(n.Ellipsis, len("...")))
|
|
||||||
}
|
|
||||||
|
|
||||||
case *ast.CaseClause:
|
|
||||||
if n.List == nil {
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Case, len("default")))
|
|
||||||
} else {
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Case, len("case")))
|
|
||||||
}
|
|
||||||
children = append(children, tok(n.Colon, len(":")))
|
|
||||||
|
|
||||||
case *ast.ChanType:
|
|
||||||
switch n.Dir {
|
|
||||||
case ast.RECV:
|
|
||||||
children = append(children, tok(n.Begin, len("<-chan")))
|
|
||||||
case ast.SEND:
|
|
||||||
children = append(children, tok(n.Begin, len("chan<-")))
|
|
||||||
case ast.RECV | ast.SEND:
|
|
||||||
children = append(children, tok(n.Begin, len("chan")))
|
|
||||||
}
|
|
||||||
|
|
||||||
case *ast.CommClause:
|
|
||||||
if n.Comm == nil {
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Case, len("default")))
|
|
||||||
} else {
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Case, len("case")))
|
|
||||||
}
|
|
||||||
children = append(children, tok(n.Colon, len(":")))
|
|
||||||
|
|
||||||
case *ast.Comment:
|
|
||||||
// nop
|
|
||||||
|
|
||||||
case *ast.CommentGroup:
|
|
||||||
// nop
|
|
||||||
|
|
||||||
case *ast.CompositeLit:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Lbrace, len("{")),
|
|
||||||
tok(n.Rbrace, len("{")))
|
|
||||||
|
|
||||||
case *ast.DeclStmt:
|
|
||||||
// nop
|
|
||||||
|
|
||||||
case *ast.DeferStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Defer, len("defer")))
|
|
||||||
|
|
||||||
case *ast.Ellipsis:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Ellipsis, len("...")))
|
|
||||||
|
|
||||||
case *ast.EmptyStmt:
|
|
||||||
// nop
|
|
||||||
|
|
||||||
case *ast.ExprStmt:
|
|
||||||
// nop
|
|
||||||
|
|
||||||
case *ast.Field:
|
|
||||||
// TODO(adonovan): Field.{Doc,Comment,Tag}?
|
|
||||||
|
|
||||||
case *ast.FieldList:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Opening, len("(")),
|
|
||||||
tok(n.Closing, len(")")))
|
|
||||||
|
|
||||||
case *ast.File:
|
|
||||||
// TODO test: Doc
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Package, len("package")))
|
|
||||||
|
|
||||||
case *ast.ForStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.For, len("for")))
|
|
||||||
|
|
||||||
case *ast.FuncDecl:
|
|
||||||
// TODO(adonovan): FuncDecl.Comment?
|
|
||||||
|
|
||||||
// Uniquely, FuncDecl breaks the invariant that
|
|
||||||
// preorder traversal yields tokens in lexical order:
|
|
||||||
// in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
|
|
||||||
//
|
|
||||||
// As a workaround, we inline the case for FuncType
|
|
||||||
// here and order things correctly.
|
|
||||||
//
|
|
||||||
children = nil // discard ast.Walk(FuncDecl) info subtrees
|
|
||||||
children = append(children, tok(n.Type.Func, len("func")))
|
|
||||||
if n.Recv != nil {
|
|
||||||
children = append(children, n.Recv)
|
|
||||||
}
|
|
||||||
children = append(children, n.Name)
|
|
||||||
if n.Type.Params != nil {
|
|
||||||
children = append(children, n.Type.Params)
|
|
||||||
}
|
|
||||||
if n.Type.Results != nil {
|
|
||||||
children = append(children, n.Type.Results)
|
|
||||||
}
|
|
||||||
if n.Body != nil {
|
|
||||||
children = append(children, n.Body)
|
|
||||||
}
|
|
||||||
|
|
||||||
case *ast.FuncLit:
|
|
||||||
// nop
|
|
||||||
|
|
||||||
case *ast.FuncType:
|
|
||||||
if n.Func != 0 {
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Func, len("func")))
|
|
||||||
}
|
|
||||||
|
|
||||||
case *ast.GenDecl:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.TokPos, len(n.Tok.String())))
|
|
||||||
if n.Lparen != 0 {
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Lparen, len("(")),
|
|
||||||
tok(n.Rparen, len(")")))
|
|
||||||
}
|
|
||||||
|
|
||||||
case *ast.GoStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Go, len("go")))
|
|
||||||
|
|
||||||
case *ast.Ident:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.NamePos, len(n.Name)))
|
|
||||||
|
|
||||||
case *ast.IfStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.If, len("if")))
|
|
||||||
|
|
||||||
case *ast.ImportSpec:
|
|
||||||
// TODO(adonovan): ImportSpec.{Doc,EndPos}?
|
|
||||||
|
|
||||||
case *ast.IncDecStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.TokPos, len(n.Tok.String())))
|
|
||||||
|
|
||||||
case *ast.IndexExpr:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Lbrack, len("{")),
|
|
||||||
tok(n.Rbrack, len("}")))
|
|
||||||
|
|
||||||
case *ast.InterfaceType:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Interface, len("interface")))
|
|
||||||
|
|
||||||
case *ast.KeyValueExpr:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Colon, len(":")))
|
|
||||||
|
|
||||||
case *ast.LabeledStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Colon, len(":")))
|
|
||||||
|
|
||||||
case *ast.MapType:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Map, len("map")))
|
|
||||||
|
|
||||||
case *ast.ParenExpr:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Lparen, len("(")),
|
|
||||||
tok(n.Rparen, len(")")))
|
|
||||||
|
|
||||||
case *ast.RangeStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.For, len("for")),
|
|
||||||
tok(n.TokPos, len(n.Tok.String())))
|
|
||||||
|
|
||||||
case *ast.ReturnStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Return, len("return")))
|
|
||||||
|
|
||||||
case *ast.SelectStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Select, len("select")))
|
|
||||||
|
|
||||||
case *ast.SelectorExpr:
|
|
||||||
// nop
|
|
||||||
|
|
||||||
case *ast.SendStmt:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Arrow, len("<-")))
|
|
||||||
|
|
||||||
case *ast.SliceExpr:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Lbrack, len("[")),
|
|
||||||
tok(n.Rbrack, len("]")))
|
|
||||||
|
|
||||||
case *ast.StarExpr:
|
|
||||||
children = append(children, tok(n.Star, len("*")))
|
|
||||||
|
|
||||||
case *ast.StructType:
|
|
||||||
children = append(children, tok(n.Struct, len("struct")))
|
|
||||||
|
|
||||||
case *ast.SwitchStmt:
|
|
||||||
children = append(children, tok(n.Switch, len("switch")))
|
|
||||||
|
|
||||||
case *ast.TypeAssertExpr:
|
|
||||||
children = append(children,
|
|
||||||
tok(n.Lparen-1, len(".")),
|
|
||||||
tok(n.Lparen, len("(")),
|
|
||||||
tok(n.Rparen, len(")")))
|
|
||||||
|
|
||||||
case *ast.TypeSpec:
|
|
||||||
// TODO(adonovan): TypeSpec.{Doc,Comment}?
|
|
||||||
|
|
||||||
case *ast.TypeSwitchStmt:
|
|
||||||
children = append(children, tok(n.Switch, len("switch")))
|
|
||||||
|
|
||||||
case *ast.UnaryExpr:
|
|
||||||
children = append(children, tok(n.OpPos, len(n.Op.String())))
|
|
||||||
|
|
||||||
case *ast.ValueSpec:
|
|
||||||
// TODO(adonovan): ValueSpec.{Doc,Comment}?
|
|
||||||
|
|
||||||
case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
|
|
||||||
// nop
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(adonovan): opt: merge the logic of ast.Inspect() into
|
|
||||||
// the switch above so we can make interleaved callbacks for
|
|
||||||
// both Nodes and Tokens in the right order and avoid the need
|
|
||||||
// to sort.
|
|
||||||
sort.Sort(byPos(children))
|
|
||||||
|
|
||||||
return children
|
|
||||||
}
|
|
||||||
|
|
||||||
type byPos []ast.Node
|
|
||||||
|
|
||||||
func (sl byPos) Len() int {
|
|
||||||
return len(sl)
|
|
||||||
}
|
|
||||||
func (sl byPos) Less(i, j int) bool {
|
|
||||||
return sl[i].Pos() < sl[j].Pos()
|
|
||||||
}
|
|
||||||
func (sl byPos) Swap(i, j int) {
|
|
||||||
sl[i], sl[j] = sl[j], sl[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
// NodeDescription returns a description of the concrete type of n suitable
|
|
||||||
// for a user interface.
|
|
||||||
//
|
|
||||||
// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
|
|
||||||
// StarExpr) we could be much more specific given the path to the AST
|
|
||||||
// root. Perhaps we should do that.
|
|
||||||
//
|
|
||||||
func NodeDescription(n ast.Node) string {
|
|
||||||
switch n := n.(type) {
|
|
||||||
case *ast.ArrayType:
|
|
||||||
return "array type"
|
|
||||||
case *ast.AssignStmt:
|
|
||||||
return "assignment"
|
|
||||||
case *ast.BadDecl:
|
|
||||||
return "bad declaration"
|
|
||||||
case *ast.BadExpr:
|
|
||||||
return "bad expression"
|
|
||||||
case *ast.BadStmt:
|
|
||||||
return "bad statement"
|
|
||||||
case *ast.BasicLit:
|
|
||||||
return "basic literal"
|
|
||||||
case *ast.BinaryExpr:
|
|
||||||
return fmt.Sprintf("binary %s operation", n.Op)
|
|
||||||
case *ast.BlockStmt:
|
|
||||||
return "block"
|
|
||||||
case *ast.BranchStmt:
|
|
||||||
switch n.Tok {
|
|
||||||
case token.BREAK:
|
|
||||||
return "break statement"
|
|
||||||
case token.CONTINUE:
|
|
||||||
return "continue statement"
|
|
||||||
case token.GOTO:
|
|
||||||
return "goto statement"
|
|
||||||
case token.FALLTHROUGH:
|
|
||||||
return "fall-through statement"
|
|
||||||
}
|
|
||||||
case *ast.CallExpr:
|
|
||||||
return "function call (or conversion)"
|
|
||||||
case *ast.CaseClause:
|
|
||||||
return "case clause"
|
|
||||||
case *ast.ChanType:
|
|
||||||
return "channel type"
|
|
||||||
case *ast.CommClause:
|
|
||||||
return "communication clause"
|
|
||||||
case *ast.Comment:
|
|
||||||
return "comment"
|
|
||||||
case *ast.CommentGroup:
|
|
||||||
return "comment group"
|
|
||||||
case *ast.CompositeLit:
|
|
||||||
return "composite literal"
|
|
||||||
case *ast.DeclStmt:
|
|
||||||
return NodeDescription(n.Decl) + " statement"
|
|
||||||
case *ast.DeferStmt:
|
|
||||||
return "defer statement"
|
|
||||||
case *ast.Ellipsis:
|
|
||||||
return "ellipsis"
|
|
||||||
case *ast.EmptyStmt:
|
|
||||||
return "empty statement"
|
|
||||||
case *ast.ExprStmt:
|
|
||||||
return "expression statement"
|
|
||||||
case *ast.Field:
|
|
||||||
// Can be any of these:
|
|
||||||
// struct {x, y int} -- struct field(s)
|
|
||||||
// struct {T} -- anon struct field
|
|
||||||
// interface {I} -- interface embedding
|
|
||||||
// interface {f()} -- interface method
|
|
||||||
// func (A) func(B) C -- receiver, param(s), result(s)
|
|
||||||
return "field/method/parameter"
|
|
||||||
case *ast.FieldList:
|
|
||||||
return "field/method/parameter list"
|
|
||||||
case *ast.File:
|
|
||||||
return "source file"
|
|
||||||
case *ast.ForStmt:
|
|
||||||
return "for loop"
|
|
||||||
case *ast.FuncDecl:
|
|
||||||
return "function declaration"
|
|
||||||
case *ast.FuncLit:
|
|
||||||
return "function literal"
|
|
||||||
case *ast.FuncType:
|
|
||||||
return "function type"
|
|
||||||
case *ast.GenDecl:
|
|
||||||
switch n.Tok {
|
|
||||||
case token.IMPORT:
|
|
||||||
return "import declaration"
|
|
||||||
case token.CONST:
|
|
||||||
return "constant declaration"
|
|
||||||
case token.TYPE:
|
|
||||||
return "type declaration"
|
|
||||||
case token.VAR:
|
|
||||||
return "variable declaration"
|
|
||||||
}
|
|
||||||
case *ast.GoStmt:
|
|
||||||
return "go statement"
|
|
||||||
case *ast.Ident:
|
|
||||||
return "identifier"
|
|
||||||
case *ast.IfStmt:
|
|
||||||
return "if statement"
|
|
||||||
case *ast.ImportSpec:
|
|
||||||
return "import specification"
|
|
||||||
case *ast.IncDecStmt:
|
|
||||||
if n.Tok == token.INC {
|
|
||||||
return "increment statement"
|
|
||||||
}
|
|
||||||
return "decrement statement"
|
|
||||||
case *ast.IndexExpr:
|
|
||||||
return "index expression"
|
|
||||||
case *ast.InterfaceType:
|
|
||||||
return "interface type"
|
|
||||||
case *ast.KeyValueExpr:
|
|
||||||
return "key/value association"
|
|
||||||
case *ast.LabeledStmt:
|
|
||||||
return "statement label"
|
|
||||||
case *ast.MapType:
|
|
||||||
return "map type"
|
|
||||||
case *ast.Package:
|
|
||||||
return "package"
|
|
||||||
case *ast.ParenExpr:
|
|
||||||
return "parenthesized " + NodeDescription(n.X)
|
|
||||||
case *ast.RangeStmt:
|
|
||||||
return "range loop"
|
|
||||||
case *ast.ReturnStmt:
|
|
||||||
return "return statement"
|
|
||||||
case *ast.SelectStmt:
|
|
||||||
return "select statement"
|
|
||||||
case *ast.SelectorExpr:
|
|
||||||
return "selector"
|
|
||||||
case *ast.SendStmt:
|
|
||||||
return "channel send"
|
|
||||||
case *ast.SliceExpr:
|
|
||||||
return "slice expression"
|
|
||||||
case *ast.StarExpr:
|
|
||||||
return "*-operation" // load/store expr or pointer type
|
|
||||||
case *ast.StructType:
|
|
||||||
return "struct type"
|
|
||||||
case *ast.SwitchStmt:
|
|
||||||
return "switch statement"
|
|
||||||
case *ast.TypeAssertExpr:
|
|
||||||
return "type assertion"
|
|
||||||
case *ast.TypeSpec:
|
|
||||||
return "type specification"
|
|
||||||
case *ast.TypeSwitchStmt:
|
|
||||||
return "type switch"
|
|
||||||
case *ast.UnaryExpr:
|
|
||||||
return fmt.Sprintf("unary %s operation", n.Op)
|
|
||||||
case *ast.ValueSpec:
|
|
||||||
return "value specification"
|
|
||||||
|
|
||||||
}
|
|
||||||
panic(fmt.Sprintf("unexpected node type: %T", n))
|
|
||||||
}
|
|
@ -1,400 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package astutil contains common utilities for working with the Go AST.
|
|
||||||
package astutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/token"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AddImport adds the import path to the file f, if absent.
|
|
||||||
func AddImport(fset *token.FileSet, f *ast.File, ipath string) (added bool) {
|
|
||||||
return AddNamedImport(fset, f, "", ipath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddNamedImport adds the import path to the file f, if absent.
|
|
||||||
// If name is not empty, it is used to rename the import.
|
|
||||||
//
|
|
||||||
// For example, calling
|
|
||||||
// AddNamedImport(fset, f, "pathpkg", "path")
|
|
||||||
// adds
|
|
||||||
// import pathpkg "path"
|
|
||||||
func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) {
|
|
||||||
if imports(f, ipath) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
newImport := &ast.ImportSpec{
|
|
||||||
Path: &ast.BasicLit{
|
|
||||||
Kind: token.STRING,
|
|
||||||
Value: strconv.Quote(ipath),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
if name != "" {
|
|
||||||
newImport.Name = &ast.Ident{Name: name}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find an import decl to add to.
|
|
||||||
// The goal is to find an existing import
|
|
||||||
// whose import path has the longest shared
|
|
||||||
// prefix with ipath.
|
|
||||||
var (
|
|
||||||
bestMatch = -1 // length of longest shared prefix
|
|
||||||
lastImport = -1 // index in f.Decls of the file's final import decl
|
|
||||||
impDecl *ast.GenDecl // import decl containing the best match
|
|
||||||
impIndex = -1 // spec index in impDecl containing the best match
|
|
||||||
)
|
|
||||||
for i, decl := range f.Decls {
|
|
||||||
gen, ok := decl.(*ast.GenDecl)
|
|
||||||
if ok && gen.Tok == token.IMPORT {
|
|
||||||
lastImport = i
|
|
||||||
// Do not add to import "C", to avoid disrupting the
|
|
||||||
// association with its doc comment, breaking cgo.
|
|
||||||
if declImports(gen, "C") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Match an empty import decl if that's all that is available.
|
|
||||||
if len(gen.Specs) == 0 && bestMatch == -1 {
|
|
||||||
impDecl = gen
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute longest shared prefix with imports in this group.
|
|
||||||
for j, spec := range gen.Specs {
|
|
||||||
impspec := spec.(*ast.ImportSpec)
|
|
||||||
n := matchLen(importPath(impspec), ipath)
|
|
||||||
if n > bestMatch {
|
|
||||||
bestMatch = n
|
|
||||||
impDecl = gen
|
|
||||||
impIndex = j
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If no import decl found, add one after the last import.
|
|
||||||
if impDecl == nil {
|
|
||||||
impDecl = &ast.GenDecl{
|
|
||||||
Tok: token.IMPORT,
|
|
||||||
}
|
|
||||||
if lastImport >= 0 {
|
|
||||||
impDecl.TokPos = f.Decls[lastImport].End()
|
|
||||||
} else {
|
|
||||||
// There are no existing imports.
|
|
||||||
// Our new import goes after the package declaration and after
|
|
||||||
// the comment, if any, that starts on the same line as the
|
|
||||||
// package declaration.
|
|
||||||
impDecl.TokPos = f.Package
|
|
||||||
|
|
||||||
file := fset.File(f.Package)
|
|
||||||
pkgLine := file.Line(f.Package)
|
|
||||||
for _, c := range f.Comments {
|
|
||||||
if file.Line(c.Pos()) > pkgLine {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
impDecl.TokPos = c.End()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
f.Decls = append(f.Decls, nil)
|
|
||||||
copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
|
|
||||||
f.Decls[lastImport+1] = impDecl
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert new import at insertAt.
|
|
||||||
insertAt := 0
|
|
||||||
if impIndex >= 0 {
|
|
||||||
// insert after the found import
|
|
||||||
insertAt = impIndex + 1
|
|
||||||
}
|
|
||||||
impDecl.Specs = append(impDecl.Specs, nil)
|
|
||||||
copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
|
|
||||||
impDecl.Specs[insertAt] = newImport
|
|
||||||
pos := impDecl.Pos()
|
|
||||||
if insertAt > 0 {
|
|
||||||
// If there is a comment after an existing import, preserve the comment
|
|
||||||
// position by adding the new import after the comment.
|
|
||||||
if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil {
|
|
||||||
pos = spec.Comment.End()
|
|
||||||
} else {
|
|
||||||
// Assign same position as the previous import,
|
|
||||||
// so that the sorter sees it as being in the same block.
|
|
||||||
pos = impDecl.Specs[insertAt-1].Pos()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if newImport.Name != nil {
|
|
||||||
newImport.Name.NamePos = pos
|
|
||||||
}
|
|
||||||
newImport.Path.ValuePos = pos
|
|
||||||
newImport.EndPos = pos
|
|
||||||
|
|
||||||
// Clean up parens. impDecl contains at least one spec.
|
|
||||||
if len(impDecl.Specs) == 1 {
|
|
||||||
// Remove unneeded parens.
|
|
||||||
impDecl.Lparen = token.NoPos
|
|
||||||
} else if !impDecl.Lparen.IsValid() {
|
|
||||||
// impDecl needs parens added.
|
|
||||||
impDecl.Lparen = impDecl.Specs[0].Pos()
|
|
||||||
}
|
|
||||||
|
|
||||||
f.Imports = append(f.Imports, newImport)
|
|
||||||
|
|
||||||
if len(f.Decls) <= 1 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge all the import declarations into the first one.
|
|
||||||
var first *ast.GenDecl
|
|
||||||
for i, decl := range f.Decls {
|
|
||||||
gen, ok := decl.(*ast.GenDecl)
|
|
||||||
if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if first == nil {
|
|
||||||
first = gen
|
|
||||||
continue // Don't touch the first one.
|
|
||||||
}
|
|
||||||
// Move the imports of the other import declaration to the first one.
|
|
||||||
for _, spec := range gen.Specs {
|
|
||||||
spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
|
|
||||||
first.Specs = append(first.Specs, spec)
|
|
||||||
}
|
|
||||||
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteImport deletes the import path from the file f, if present.
|
|
||||||
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
|
|
||||||
return DeleteNamedImport(fset, f, "", path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
//
// Matching rules: an unnamed import spec only matches when name is "";
// a named spec must match name exactly; and the (unquoted) import path
// must equal path. Deleted specs are removed from both f.Decls and
// f.Imports, and the surrounding source layout (parens, blank lines)
// is tidied via the FileSet.
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
	var delspecs []*ast.ImportSpec

	// Find the import nodes that import path, if any.
	// Index loops (not range) are used deliberately: both slices are
	// mutated during iteration, with i--/j-- compensating for deletions.
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j := 0; j < len(gen.Specs); j++ {
			spec := gen.Specs[j]
			impspec := spec.(*ast.ImportSpec)
			if impspec.Name == nil && name != "" {
				continue
			}
			if impspec.Name != nil && impspec.Name.Name != name {
				continue
			}
			if importPath(impspec) != path {
				continue
			}

			// We found an import spec that imports path.
			// Delete it.
			delspecs = append(delspecs, impspec)
			deleted = true
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]

			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
				i--
				break
			} else if len(gen.Specs) == 1 {
				gen.Lparen = token.NoPos // drop parens
			}
			if j > 0 {
				lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
				lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
				line := fset.Position(impspec.Path.ValuePos).Line

				// We deleted an entry but now there may be
				// a blank line-sized hole where the import was.
				if line-lastLine > 1 {
					// There was a blank line immediately preceding the deleted import,
					// so there's no need to close the hole.
					// Do nothing.
				} else {
					// There was no blank line. Close the hole.
					fset.File(gen.Rparen).MergeLine(line)
				}
			}
			j--
		}
	}

	// Delete them from f.Imports.
	// Each matched entry is also removed from delspecs so that any
	// leftover entries indicate an inconsistency (checked below).
	for i := 0; i < len(f.Imports); i++ {
		imp := f.Imports[i]
		for j, del := range delspecs {
			if imp == del {
				copy(f.Imports[i:], f.Imports[i+1:])
				f.Imports = f.Imports[:len(f.Imports)-1]
				copy(delspecs[j:], delspecs[j+1:])
				delspecs = delspecs[:len(delspecs)-1]
				i--
				break
			}
		}
	}

	// f.Decls and f.Imports must stay in sync; a leftover spec means
	// the AST was already inconsistent, which is a programmer error.
	if len(delspecs) > 0 {
		panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
	}

	return
}
|
|
||||||
|
|
||||||
// RewriteImport rewrites any import of path oldPath to path newPath.
|
|
||||||
func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) {
|
|
||||||
for _, imp := range f.Imports {
|
|
||||||
if importPath(imp) == oldPath {
|
|
||||||
rewrote = true
|
|
||||||
// record old End, because the default is to compute
|
|
||||||
// it using the length of imp.Path.Value.
|
|
||||||
imp.EndPos = imp.End()
|
|
||||||
imp.Path.Value = strconv.Quote(newPath)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// UsesImport reports whether a given import is used.
|
|
||||||
func UsesImport(f *ast.File, path string) (used bool) {
|
|
||||||
spec := importSpec(f, path)
|
|
||||||
if spec == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
name := spec.Name.String()
|
|
||||||
switch name {
|
|
||||||
case "<nil>":
|
|
||||||
// If the package name is not explicitly specified,
|
|
||||||
// make an educated guess. This is not guaranteed to be correct.
|
|
||||||
lastSlash := strings.LastIndex(path, "/")
|
|
||||||
if lastSlash == -1 {
|
|
||||||
name = path
|
|
||||||
} else {
|
|
||||||
name = path[lastSlash+1:]
|
|
||||||
}
|
|
||||||
case "_", ".":
|
|
||||||
// Not sure if this import is used - err on the side of caution.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
ast.Walk(visitFn(func(n ast.Node) {
|
|
||||||
sel, ok := n.(*ast.SelectorExpr)
|
|
||||||
if ok && isTopName(sel.X, name) {
|
|
||||||
used = true
|
|
||||||
}
|
|
||||||
}), f)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
type visitFn func(node ast.Node)
|
|
||||||
|
|
||||||
func (fn visitFn) Visit(node ast.Node) ast.Visitor {
|
|
||||||
fn(node)
|
|
||||||
return fn
|
|
||||||
}
|
|
||||||
|
|
||||||
// imports returns true if f imports path.
|
|
||||||
func imports(f *ast.File, path string) bool {
|
|
||||||
return importSpec(f, path) != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// importSpec returns the import spec if f imports path,
|
|
||||||
// or nil otherwise.
|
|
||||||
func importSpec(f *ast.File, path string) *ast.ImportSpec {
|
|
||||||
for _, s := range f.Imports {
|
|
||||||
if importPath(s) == path {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// importPath returns the unquoted import path of s,
|
|
||||||
// or "" if the path is not properly quoted.
|
|
||||||
func importPath(s *ast.ImportSpec) string {
|
|
||||||
t, err := strconv.Unquote(s.Path.Value)
|
|
||||||
if err == nil {
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// declImports reports whether gen contains an import of path.
|
|
||||||
func declImports(gen *ast.GenDecl, path string) bool {
|
|
||||||
if gen.Tok != token.IMPORT {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, spec := range gen.Specs {
|
|
||||||
impspec := spec.(*ast.ImportSpec)
|
|
||||||
if importPath(impspec) == path {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// matchLen returns the length (in path segments) of the longest shared
// path-segment prefix of x and y: it counts the '/' bytes inside the
// longest common byte prefix of the two strings.
func matchLen(x, y string) int {
	segs := 0
	for i := 0; i < len(x) && i < len(y); i++ {
		if x[i] != y[i] {
			break
		}
		if x[i] == '/' {
			segs++
		}
	}
	return segs
}
|
|
||||||
|
|
||||||
// isTopName returns true if n is a top-level unresolved identifier with the given name.
|
|
||||||
func isTopName(n ast.Expr, name string) bool {
|
|
||||||
id, ok := n.(*ast.Ident)
|
|
||||||
return ok && id.Name == name && id.Obj == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Imports returns the file imports grouped by paragraph.
|
|
||||||
func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
|
|
||||||
var groups [][]*ast.ImportSpec
|
|
||||||
|
|
||||||
for _, decl := range f.Decls {
|
|
||||||
genDecl, ok := decl.(*ast.GenDecl)
|
|
||||||
if !ok || genDecl.Tok != token.IMPORT {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
group := []*ast.ImportSpec{}
|
|
||||||
|
|
||||||
var lastLine int
|
|
||||||
for _, spec := range genDecl.Specs {
|
|
||||||
importSpec := spec.(*ast.ImportSpec)
|
|
||||||
pos := importSpec.Path.ValuePos
|
|
||||||
line := fset.Position(pos).Line
|
|
||||||
if lastLine > 0 && pos > 0 && line-lastLine > 1 {
|
|
||||||
groups = append(groups, group)
|
|
||||||
group = []*ast.ImportSpec{}
|
|
||||||
}
|
|
||||||
group = append(group, importSpec)
|
|
||||||
lastLine = line
|
|
||||||
}
|
|
||||||
groups = append(groups, group)
|
|
||||||
}
|
|
||||||
|
|
||||||
return groups
|
|
||||||
}
|
|
@ -1,14 +0,0 @@
|
|||||||
package astutil
|
|
||||||
|
|
||||||
import "go/ast"
|
|
||||||
|
|
||||||
// Unparen returns e with any enclosing parentheses stripped.
|
|
||||||
func Unparen(e ast.Expr) ast.Expr {
|
|
||||||
for {
|
|
||||||
p, ok := e.(*ast.ParenExpr)
|
|
||||||
if !ok {
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
e = p.X
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,195 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package buildutil provides utilities related to the go/build
|
|
||||||
// package in the standard library.
|
|
||||||
//
|
|
||||||
// All I/O is done via the build.Context file system interface, which must
|
|
||||||
// be concurrency-safe.
|
|
||||||
package buildutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"go/build"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AllPackages returns the package path of each Go package in any source
|
|
||||||
// directory of the specified build context (e.g. $GOROOT or an element
|
|
||||||
// of $GOPATH). Errors are ignored. The results are sorted.
|
|
||||||
// All package paths are canonical, and thus may contain "/vendor/".
|
|
||||||
//
|
|
||||||
// The result may include import paths for directories that contain no
|
|
||||||
// *.go files, such as "archive" (in $GOROOT/src).
|
|
||||||
//
|
|
||||||
// All I/O is done via the build.Context file system interface,
|
|
||||||
// which must be concurrency-safe.
|
|
||||||
//
|
|
||||||
func AllPackages(ctxt *build.Context) []string {
|
|
||||||
var list []string
|
|
||||||
ForEachPackage(ctxt, func(pkg string, _ error) {
|
|
||||||
list = append(list, pkg)
|
|
||||||
})
|
|
||||||
sort.Strings(list)
|
|
||||||
return list
|
|
||||||
}
|
|
||||||
|
|
||||||
// ForEachPackage calls the found function with the package path of
|
|
||||||
// each Go package it finds in any source directory of the specified
|
|
||||||
// build context (e.g. $GOROOT or an element of $GOPATH).
|
|
||||||
// All package paths are canonical, and thus may contain "/vendor/".
|
|
||||||
//
|
|
||||||
// If the package directory exists but could not be read, the second
|
|
||||||
// argument to the found function provides the error.
|
|
||||||
//
|
|
||||||
// All I/O is done via the build.Context file system interface,
|
|
||||||
// which must be concurrency-safe.
|
|
||||||
//
|
|
||||||
func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) {
|
|
||||||
ch := make(chan item)
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
for _, root := range ctxt.SrcDirs() {
|
|
||||||
root := root
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
allPackages(ctxt, root, ch)
|
|
||||||
wg.Done()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
go func() {
|
|
||||||
wg.Wait()
|
|
||||||
close(ch)
|
|
||||||
}()
|
|
||||||
|
|
||||||
// All calls to found occur in the caller's goroutine.
|
|
||||||
for i := range ch {
|
|
||||||
found(i.importPath, i.err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// item is one result of the parallel package scan: a discovered import
// path plus, if the directory listing failed, the error encountered.
type item struct {
	importPath string
	err        error // (optional)
}
|
|
||||||
|
|
||||||
// We use a process-wide counting semaphore to limit
// the number of parallel calls to ReadDir.
// Acquire by sending a value, release by receiving one; the buffer
// capacity (20) bounds the number of concurrent directory reads.
var ioLimit = make(chan bool, 20)
|
|
||||||
|
|
||||||
// allPackages walks the directory tree rooted at root, sending an item
// on ch for every package directory found (and for every directory
// whose listing failed). Subdirectories are walked in their own
// goroutines, throttled by ioLimit; the function returns only after
// the entire subtree has been visited.
func allPackages(ctxt *build.Context, root string, ch chan<- item) {
	root = filepath.Clean(root) + string(os.PathSeparator)

	var wg sync.WaitGroup

	var walkDir func(dir string)
	walkDir = func(dir string) {
		// Avoid .foo, _foo, and testdata directory trees.
		base := filepath.Base(dir)
		if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" {
			return
		}

		// The import path is the directory path relative to root.
		pkg := filepath.ToSlash(strings.TrimPrefix(dir, root))

		// Prune search if we encounter any of these import paths.
		switch pkg {
		case "builtin":
			return
		}

		// Throttle concurrent ReadDir calls via the shared semaphore.
		ioLimit <- true
		files, err := ReadDir(ctxt, dir)
		<-ioLimit
		// pkg == "" is the root itself, which is not a package;
		// report it only if listing it failed.
		if pkg != "" || err != nil {
			ch <- item{pkg, err}
		}
		for _, fi := range files {
			fi := fi // capture loop variable for the goroutine below
			if fi.IsDir() {
				wg.Add(1)
				go func() {
					walkDir(filepath.Join(dir, fi.Name()))
					wg.Done()
				}()
			}
		}
	}

	walkDir(root)
	wg.Wait()
}
|
|
||||||
|
|
||||||
// ExpandPatterns returns the set of packages matched by patterns,
|
|
||||||
// which may have the following forms:
|
|
||||||
//
|
|
||||||
// golang.org/x/tools/cmd/guru # a single package
|
|
||||||
// golang.org/x/tools/... # all packages beneath dir
|
|
||||||
// ... # the entire workspace.
|
|
||||||
//
|
|
||||||
// Order is significant: a pattern preceded by '-' removes matching
|
|
||||||
// packages from the set. For example, these patterns match all encoding
|
|
||||||
// packages except encoding/xml:
|
|
||||||
//
|
|
||||||
// encoding/... -encoding/xml
|
|
||||||
//
|
|
||||||
func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool {
|
|
||||||
// TODO(adonovan): support other features of 'go list':
|
|
||||||
// - "std"/"cmd"/"all" meta-packages
|
|
||||||
// - "..." not at the end of a pattern
|
|
||||||
// - relative patterns using "./" or "../" prefix
|
|
||||||
|
|
||||||
pkgs := make(map[string]bool)
|
|
||||||
doPkg := func(pkg string, neg bool) {
|
|
||||||
if neg {
|
|
||||||
delete(pkgs, pkg)
|
|
||||||
} else {
|
|
||||||
pkgs[pkg] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scan entire workspace if wildcards are present.
|
|
||||||
// TODO(adonovan): opt: scan only the necessary subtrees of the workspace.
|
|
||||||
var all []string
|
|
||||||
for _, arg := range patterns {
|
|
||||||
if strings.HasSuffix(arg, "...") {
|
|
||||||
all = AllPackages(ctxt)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, arg := range patterns {
|
|
||||||
if arg == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
neg := arg[0] == '-'
|
|
||||||
if neg {
|
|
||||||
arg = arg[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
if arg == "..." {
|
|
||||||
// ... matches all packages
|
|
||||||
for _, pkg := range all {
|
|
||||||
doPkg(pkg, neg)
|
|
||||||
}
|
|
||||||
} else if dir := strings.TrimSuffix(arg, "/..."); dir != arg {
|
|
||||||
// dir/... matches all packages beneath dir
|
|
||||||
for _, pkg := range all {
|
|
||||||
if strings.HasPrefix(pkg, dir) &&
|
|
||||||
(len(pkg) == len(dir) || pkg[len(dir)] == '/') {
|
|
||||||
doPkg(pkg, neg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// single package
|
|
||||||
doPkg(arg, neg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return pkgs
|
|
||||||
}
|
|
@ -1,108 +0,0 @@
|
|||||||
package buildutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"go/build"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FakeContext returns a build.Context for the fake file tree specified
// by pkgs, which maps package import paths to a mapping from file base
// names to contents.
//
// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides
// the necessary file access methods to read from memory instead of the
// real file system.
//
// Unlike a real file tree, the fake one has only two levels---packages
// and files---so ReadDir("/go/src/") returns all packages under
// /go/src/ including, for instance, "math" and "math/big".
// ReadDir("/go/src/math/big") would return all the files in the
// "math/big" package.
//
func FakeContext(pkgs map[string]map[string]string) *build.Context {
	// clean maps an absolute virtual filename to a key of pkgs:
	// "" for the source root itself, or a package import path.
	clean := func(filename string) string {
		f := path.Clean(filepath.ToSlash(filename))
		// Removing "/go/src" while respecting segment
		// boundaries has this unfortunate corner case:
		if f == "/go/src" {
			return ""
		}
		return strings.TrimPrefix(f, "/go/src/")
	}

	ctxt := build.Default // copy
	ctxt.GOROOT = "/go"
	ctxt.GOPATH = ""
	// A directory "exists" iff it is the root or a known package.
	ctxt.IsDir = func(dir string) bool {
		dir = clean(dir)
		if dir == "" {
			return true // needed by (*build.Context).SrcDirs
		}
		return pkgs[dir] != nil
	}
	ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
		dir = clean(dir)
		var fis []os.FileInfo
		if dir == "" {
			// enumerate packages
			for importPath := range pkgs {
				fis = append(fis, fakeDirInfo(importPath))
			}
		} else {
			// enumerate files of package
			for basename := range pkgs[dir] {
				fis = append(fis, fakeFileInfo(basename))
			}
		}
		// Sort for determinism: map iteration order is random.
		sort.Sort(byName(fis))
		return fis, nil
	}
	ctxt.OpenFile = func(filename string) (io.ReadCloser, error) {
		filename = clean(filename)
		dir, base := path.Split(filename)
		content, ok := pkgs[path.Clean(dir)][base]
		if !ok {
			return nil, fmt.Errorf("file not found: %s", filename)
		}
		return ioutil.NopCloser(strings.NewReader(content)), nil
	}
	ctxt.IsAbsPath = func(path string) bool {
		path = filepath.ToSlash(path)
		// Don't rely on the default (filepath.Path) since on
		// Windows, it reports virtual paths as non-absolute.
		return strings.HasPrefix(path, "/")
	}
	return &ctxt
}
|
|
||||||
|
|
||||||
// byName implements sort.Interface, ordering FileInfos by file name.
type byName []os.FileInfo

func (fis byName) Len() int           { return len(fis) }
func (fis byName) Swap(i, j int)      { fis[i], fis[j] = fis[j], fis[i] }
func (fis byName) Less(i, j int) bool { return fis[i].Name() < fis[j].Name() }
|
|
||||||
|
|
||||||
// fakeFileInfo is an os.FileInfo for a fake regular file;
// the string value is the file's base name.
type fakeFileInfo string

func (fi fakeFileInfo) Name() string       { return string(fi) }
func (fi fakeFileInfo) Size() int64        { return 0 }
func (fi fakeFileInfo) Mode() os.FileMode  { return 0644 }
func (fi fakeFileInfo) ModTime() time.Time { return time.Time{} }
func (fi fakeFileInfo) IsDir() bool        { return false }
func (fi fakeFileInfo) Sys() interface{}   { return nil }
|
|
||||||
|
|
||||||
// fakeDirInfo is an os.FileInfo for a fake directory;
// the string value is the directory's base name.
type fakeDirInfo string

func (fd fakeDirInfo) Name() string       { return string(fd) }
func (fd fakeDirInfo) Size() int64        { return 0 }
func (fd fakeDirInfo) Mode() os.FileMode  { return 0755 }
func (fd fakeDirInfo) ModTime() time.Time { return time.Time{} }
func (fd fakeDirInfo) IsDir() bool        { return true }
func (fd fakeDirInfo) Sys() interface{}   { return nil }
|
|
@ -1,75 +0,0 @@
|
|||||||
package buildutil
|
|
||||||
|
|
||||||
// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go.
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// TagsFlagDoc is the usage string for a -tags flag declared with TagsFlag.
const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " +
	"For more information about build tags, see the description of " +
	"build constraints in the documentation for the go/build package"
|
|
||||||
|
|
||||||
// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses
// a flag value in the same manner as go build's -tags flag and
// populates a []string slice.
//
// See $GOROOT/src/go/build/doc.go for description of build tags.
// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
//
// Example:
//	flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
//
// Use TagsFlagDoc as the flag's usage message.
type TagsFlag []string
|
|
||||||
|
|
||||||
func (v *TagsFlag) Set(s string) error {
|
|
||||||
var err error
|
|
||||||
*v, err = splitQuotedFields(s)
|
|
||||||
if *v == nil {
|
|
||||||
*v = []string{}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get implements flag.Getter, returning the accumulated tags.
func (v *TagsFlag) Get() interface{} { return *v }
|
|
||||||
|
|
||||||
// splitQuotedFields splits s into whitespace-separated fields,
// additionally allowing '' or "" around an element.
// Quotes further inside a field do not count, and there is no
// unescaping. It reports an error for an unterminated quoted field.
func splitQuotedFields(s string) ([]string, error) {
	isSpace := func(c byte) bool {
		return c == ' ' || c == '\t' || c == '\n' || c == '\r'
	}

	var fields []string
	for len(s) > 0 {
		// Skip leading whitespace.
		for len(s) > 0 && isSpace(s[0]) {
			s = s[1:]
		}
		if len(s) == 0 {
			break
		}

		if quote := s[0]; quote == '"' || quote == '\'' {
			// Quoted field: everything up to the matching close quote.
			rest := s[1:]
			end := 0
			for end < len(rest) && rest[end] != quote {
				end++
			}
			if end >= len(rest) {
				return nil, fmt.Errorf("unterminated %c string", quote)
			}
			fields = append(fields, rest[:end])
			s = rest[end+1:]
			continue
		}

		// Unquoted field: up to the next whitespace byte.
		end := 0
		for end < len(s) && !isSpace(s[end]) {
			end++
		}
		fields = append(fields, s[:end])
		s = s[end:]
	}
	return fields, nil
}
|
|
||||||
|
|
||||||
// String implements flag.Value. It returns a fixed placeholder rather
// than reconstructing the original -tags syntax from the slice.
func (v *TagsFlag) String() string {
	return "<tagsFlag>"
}
|
|
||||||
|
|
||||||
// isSpaceByte reports whether c is an ASCII whitespace byte
// (space, tab, newline, or carriage return).
func isSpaceByte(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
|
|
@ -1,167 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package buildutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/build"
|
|
||||||
"go/parser"
|
|
||||||
"go/token"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ParseFile behaves like parser.ParseFile,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
//
|
|
||||||
// If file is not absolute (as defined by IsAbsPath), the (dir, file)
|
|
||||||
// components are joined using JoinPath; dir must be absolute.
|
|
||||||
//
|
|
||||||
// The displayPath function, if provided, is used to transform the
|
|
||||||
// filename that will be attached to the ASTs.
|
|
||||||
//
|
|
||||||
// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws.
|
|
||||||
//
|
|
||||||
func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) {
|
|
||||||
if !IsAbsPath(ctxt, file) {
|
|
||||||
file = JoinPath(ctxt, dir, file)
|
|
||||||
}
|
|
||||||
rd, err := OpenFile(ctxt, file)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rd.Close() // ignore error
|
|
||||||
if displayPath != nil {
|
|
||||||
file = displayPath(file)
|
|
||||||
}
|
|
||||||
return parser.ParseFile(fset, file, rd, mode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainingPackage returns the package containing filename.
|
|
||||||
//
|
|
||||||
// If filename is not absolute, it is interpreted relative to working directory dir.
|
|
||||||
// All I/O is via the build context's file system interface, if any.
|
|
||||||
//
|
|
||||||
// The '...Files []string' fields of the resulting build.Package are not
|
|
||||||
// populated (build.FindOnly mode).
|
|
||||||
//
|
|
||||||
// TODO(adonovan): call this from oracle when the tree thaws.
|
|
||||||
//
|
|
||||||
func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) {
|
|
||||||
if !IsAbsPath(ctxt, filename) {
|
|
||||||
filename = JoinPath(ctxt, dir, filename)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We must not assume the file tree uses
|
|
||||||
// "/" always,
|
|
||||||
// `\` always,
|
|
||||||
// or os.PathSeparator (which varies by platform),
|
|
||||||
// but to make any progress, we are forced to assume that
|
|
||||||
// paths will not use `\` unless the PathSeparator
|
|
||||||
// is also `\`, thus we can rely on filepath.ToSlash for some sanity.
|
|
||||||
|
|
||||||
dirSlash := path.Dir(filepath.ToSlash(filename)) + "/"
|
|
||||||
|
|
||||||
// We assume that no source root (GOPATH[i] or GOROOT) contains any other.
|
|
||||||
for _, srcdir := range ctxt.SrcDirs() {
|
|
||||||
srcdirSlash := filepath.ToSlash(srcdir) + "/"
|
|
||||||
if dirHasPrefix(dirSlash, srcdirSlash) {
|
|
||||||
importPath := dirSlash[len(srcdirSlash) : len(dirSlash)-len("/")]
|
|
||||||
return ctxt.Import(importPath, dir, build.FindOnly)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("can't find package containing %s", filename)
|
|
||||||
}
|
|
||||||
|
|
||||||
// dirHasPrefix reports whether the directory dir begins with prefix.
// On Windows the comparison is case-insensitive (strings.EqualFold);
// on every other platform it is an exact byte-prefix match.
func dirHasPrefix(dir, prefix string) bool {
	if runtime.GOOS == "windows" {
		return len(dir) >= len(prefix) && strings.EqualFold(dir[:len(prefix)], prefix)
	}
	return strings.HasPrefix(dir, prefix)
}
|
|
||||||
|
|
||||||
// -- Effective methods of file system interface -------------------------
|
|
||||||
|
|
||||||
// (go/build.Context defines these as methods, but does not export them.)
|
|
||||||
|
|
||||||
// TODO(adonovan): HasSubdir?
|
|
||||||
|
|
||||||
// FileExists returns true if the specified file exists,
|
|
||||||
// using the build context's file system interface.
|
|
||||||
func FileExists(ctxt *build.Context, path string) bool {
|
|
||||||
if ctxt.OpenFile != nil {
|
|
||||||
r, err := ctxt.OpenFile(path)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
r.Close() // ignore error
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
_, err := os.Stat(path)
|
|
||||||
return err == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenFile behaves like os.Open,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) {
|
|
||||||
if ctxt.OpenFile != nil {
|
|
||||||
return ctxt.OpenFile(path)
|
|
||||||
}
|
|
||||||
return os.Open(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsAbsPath behaves like filepath.IsAbs,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
func IsAbsPath(ctxt *build.Context, path string) bool {
|
|
||||||
if ctxt.IsAbsPath != nil {
|
|
||||||
return ctxt.IsAbsPath(path)
|
|
||||||
}
|
|
||||||
return filepath.IsAbs(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// JoinPath behaves like filepath.Join,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
func JoinPath(ctxt *build.Context, path ...string) string {
|
|
||||||
if ctxt.JoinPath != nil {
|
|
||||||
return ctxt.JoinPath(path...)
|
|
||||||
}
|
|
||||||
return filepath.Join(path...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsDir behaves like os.Stat plus IsDir,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
func IsDir(ctxt *build.Context, path string) bool {
|
|
||||||
if ctxt.IsDir != nil {
|
|
||||||
return ctxt.IsDir(path)
|
|
||||||
}
|
|
||||||
fi, err := os.Stat(path)
|
|
||||||
return err == nil && fi.IsDir()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadDir behaves like ioutil.ReadDir,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) {
|
|
||||||
if ctxt.ReadDir != nil {
|
|
||||||
return ctxt.ReadDir(path)
|
|
||||||
}
|
|
||||||
return ioutil.ReadDir(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SplitPathList behaves like filepath.SplitList,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
func SplitPathList(ctxt *build.Context, s string) []string {
|
|
||||||
if ctxt.SplitPathList != nil {
|
|
||||||
return ctxt.SplitPathList(s)
|
|
||||||
}
|
|
||||||
return filepath.SplitList(s)
|
|
||||||
}
|
|
@ -1,209 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build go1.5
|
|
||||||
|
|
||||||
package loader
|
|
||||||
|
|
||||||
// This file handles cgo preprocessing of files containing `import "C"`.
|
|
||||||
//
|
|
||||||
// DESIGN
|
|
||||||
//
|
|
||||||
// The approach taken is to run the cgo processor on the package's
|
|
||||||
// CgoFiles and parse the output, faking the filenames of the
|
|
||||||
// resulting ASTs so that the synthetic file containing the C types is
|
|
||||||
// called "C" (e.g. "~/go/src/net/C") and the preprocessed files
|
|
||||||
// have their original names (e.g. "~/go/src/net/cgo_unix.go"),
|
|
||||||
// not the names of the actual temporary files.
|
|
||||||
//
|
|
||||||
// The advantage of this approach is its fidelity to 'go build'. The
|
|
||||||
// downside is that the token.Position.Offset for each AST node is
|
|
||||||
// incorrect, being an offset within the temporary file. Line numbers
|
|
||||||
// should still be correct because of the //line comments.
|
|
||||||
//
|
|
||||||
// The logic of this file is mostly plundered from the 'go build'
|
|
||||||
// tool, which also invokes the cgo preprocessor.
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// REJECTED ALTERNATIVE
|
|
||||||
//
|
|
||||||
// An alternative approach that we explored is to extend go/types'
|
|
||||||
// Importer mechanism to provide the identity of the importing package
|
|
||||||
// so that each time `import "C"` appears it resolves to a different
|
|
||||||
// synthetic package containing just the objects needed in that case.
|
|
||||||
// The loader would invoke cgo but parse only the cgo_types.go file
|
|
||||||
// defining the package-level objects, discarding the other files
|
|
||||||
// resulting from preprocessing.
|
|
||||||
//
|
|
||||||
// The benefit of this approach would have been that source-level
|
|
||||||
// syntax information would correspond exactly to the original cgo
|
|
||||||
// file, with no preprocessing involved, making source tools like
|
|
||||||
// godoc, oracle, and eg happy. However, the approach was rejected
|
|
||||||
// due to the additional complexity it would impose on go/types. (It
|
|
||||||
// made for a beautiful demo, though.)
|
|
||||||
//
|
|
||||||
// cgo files, despite their *.go extension, are not legal Go source
|
|
||||||
// files per the specification since they may refer to unexported
|
|
||||||
// members of package "C" such as C.int. Also, a function such as
|
|
||||||
// C.getpwent has in effect two types, one matching its C type and one
|
|
||||||
// which additionally returns (errno C.int). The cgo preprocessor
|
|
||||||
// uses name mangling to distinguish these two functions in the
|
|
||||||
// processed code, but go/types would need to duplicate this logic in
|
|
||||||
// its handling of function calls, analogous to the treatment of map
|
|
||||||
// lookups in which y=m[k] and y,ok=m[k] are both legal.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/build"
|
|
||||||
"go/parser"
|
|
||||||
"go/token"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// processCgoFiles invokes the cgo preprocessor on bp.CgoFiles, parses
|
|
||||||
// the output and returns the resulting ASTs.
|
|
||||||
//
|
|
||||||
func processCgoFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
|
|
||||||
tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(tmpdir)
|
|
||||||
|
|
||||||
pkgdir := bp.Dir
|
|
||||||
if DisplayPath != nil {
|
|
||||||
pkgdir = DisplayPath(pkgdir)
|
|
||||||
}
|
|
||||||
|
|
||||||
cgoFiles, cgoDisplayFiles, err := runCgo(bp, pkgdir, tmpdir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var files []*ast.File
|
|
||||||
for i := range cgoFiles {
|
|
||||||
rd, err := os.Open(cgoFiles[i])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
display := filepath.Join(bp.Dir, cgoDisplayFiles[i])
|
|
||||||
f, err := parser.ParseFile(fset, display, rd, mode)
|
|
||||||
rd.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
files = append(files, f)
|
|
||||||
}
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var cgoRe = regexp.MustCompile(`[/\\:]`)
|
|
||||||
|
|
||||||
// runCgo invokes the cgo preprocessor on bp.CgoFiles and returns two
|
|
||||||
// lists of files: the resulting processed files (in temporary
|
|
||||||
// directory tmpdir) and the corresponding names of the unprocessed files.
|
|
||||||
//
|
|
||||||
// runCgo is adapted from (*builder).cgo in
|
|
||||||
// $GOROOT/src/cmd/go/build.go, but these features are unsupported:
|
|
||||||
// Objective C, CGOPKGPATH, CGO_FLAGS.
|
|
||||||
//
|
|
||||||
func runCgo(bp *build.Package, pkgdir, tmpdir string) (files, displayFiles []string, err error) {
|
|
||||||
cgoCPPFLAGS, _, _, _ := cflags(bp, true)
|
|
||||||
_, cgoexeCFLAGS, _, _ := cflags(bp, false)
|
|
||||||
|
|
||||||
if len(bp.CgoPkgConfig) > 0 {
|
|
||||||
pcCFLAGS, err := pkgConfigFlags(bp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Allows including _cgo_export.h from .[ch] files in the package.
|
|
||||||
cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir)
|
|
||||||
|
|
||||||
// _cgo_gotypes.go (displayed "C") contains the type definitions.
|
|
||||||
files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go"))
|
|
||||||
displayFiles = append(displayFiles, "C")
|
|
||||||
for _, fn := range bp.CgoFiles {
|
|
||||||
// "foo.cgo1.go" (displayed "foo.go") is the processed Go source.
|
|
||||||
f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_")
|
|
||||||
files = append(files, filepath.Join(tmpdir, f+"cgo1.go"))
|
|
||||||
displayFiles = append(displayFiles, fn)
|
|
||||||
}
|
|
||||||
|
|
||||||
var cgoflags []string
|
|
||||||
if bp.Goroot && bp.ImportPath == "runtime/cgo" {
|
|
||||||
cgoflags = append(cgoflags, "-import_runtime_cgo=false")
|
|
||||||
}
|
|
||||||
if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" {
|
|
||||||
cgoflags = append(cgoflags, "-import_syscall=false")
|
|
||||||
}
|
|
||||||
|
|
||||||
args := stringList(
|
|
||||||
"go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
|
|
||||||
cgoCPPFLAGS, cgoexeCFLAGS, bp.CgoFiles,
|
|
||||||
)
|
|
||||||
if false {
|
|
||||||
log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
|
|
||||||
}
|
|
||||||
cmd := exec.Command(args[0], args[1:]...)
|
|
||||||
cmd.Dir = pkgdir
|
|
||||||
cmd.Stdout = os.Stderr
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return files, displayFiles, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// -- unmodified from 'go build' ---------------------------------------
|
|
||||||
|
|
||||||
// Return the flags to use when invoking the C or C++ compilers, or cgo.
|
|
||||||
func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
|
|
||||||
var defaults string
|
|
||||||
if def {
|
|
||||||
defaults = "-g -O2"
|
|
||||||
}
|
|
||||||
|
|
||||||
cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
|
|
||||||
cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
|
|
||||||
cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
|
|
||||||
ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// envList returns the value of the given environment variable broken
|
|
||||||
// into fields, using the default value when the variable is empty.
|
|
||||||
func envList(key, def string) []string {
|
|
||||||
v := os.Getenv(key)
|
|
||||||
if v == "" {
|
|
||||||
v = def
|
|
||||||
}
|
|
||||||
return strings.Fields(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringList's arguments should be a sequence of string or []string values.
|
|
||||||
// stringList flattens them into a single []string.
|
|
||||||
func stringList(args ...interface{}) []string {
|
|
||||||
var x []string
|
|
||||||
for _, arg := range args {
|
|
||||||
switch arg := arg.(type) {
|
|
||||||
case []string:
|
|
||||||
x = append(x, arg...)
|
|
||||||
case string:
|
|
||||||
x = append(x, arg)
|
|
||||||
default:
|
|
||||||
panic("stringList: invalid argument")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return x
|
|
||||||
}
|
|
@ -1,39 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package loader
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"go/build"
|
|
||||||
"os/exec"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints.
|
|
||||||
func pkgConfig(mode string, pkgs []string) (flags []string, err error) {
|
|
||||||
cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...)
|
|
||||||
out, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err)
|
|
||||||
if len(out) > 0 {
|
|
||||||
s = fmt.Sprintf("%s: %s", s, out)
|
|
||||||
}
|
|
||||||
return nil, errors.New(s)
|
|
||||||
}
|
|
||||||
if len(out) > 0 {
|
|
||||||
flags = strings.Fields(string(out))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// pkgConfigFlags calls pkg-config if needed and returns the cflags
|
|
||||||
// needed to build the package.
|
|
||||||
func pkgConfigFlags(p *build.Package) (cflags []string, err error) {
|
|
||||||
if len(p.CgoPkgConfig) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return pkgConfig("--cflags", p.CgoPkgConfig)
|
|
||||||
}
|
|
@ -1,205 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package loader loads a complete Go program from source code, parsing
|
|
||||||
// and type-checking the initial packages plus their transitive closure
|
|
||||||
// of dependencies. The ASTs and the derived facts are retained for
|
|
||||||
// later use.
|
|
||||||
//
|
|
||||||
// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
|
|
||||||
//
|
|
||||||
// The package defines two primary types: Config, which specifies a
|
|
||||||
// set of initial packages to load and various other options; and
|
|
||||||
// Program, which is the result of successfully loading the packages
|
|
||||||
// specified by a configuration.
|
|
||||||
//
|
|
||||||
// The configuration can be set directly, but *Config provides various
|
|
||||||
// convenience methods to simplify the common cases, each of which can
|
|
||||||
// be called any number of times. Finally, these are followed by a
|
|
||||||
// call to Load() to actually load and type-check the program.
|
|
||||||
//
|
|
||||||
// var conf loader.Config
|
|
||||||
//
|
|
||||||
// // Use the command-line arguments to specify
|
|
||||||
// // a set of initial packages to load from source.
|
|
||||||
// // See FromArgsUsage for help.
|
|
||||||
// rest, err := conf.FromArgs(os.Args[1:], wantTests)
|
|
||||||
//
|
|
||||||
// // Parse the specified files and create an ad hoc package with path "foo".
|
|
||||||
// // All files must have the same 'package' declaration.
|
|
||||||
// conf.CreateFromFilenames("foo", "foo.go", "bar.go")
|
|
||||||
//
|
|
||||||
// // Create an ad hoc package with path "foo" from
|
|
||||||
// // the specified already-parsed files.
|
|
||||||
// // All ASTs must have the same 'package' declaration.
|
|
||||||
// conf.CreateFromFiles("foo", parsedFiles)
|
|
||||||
//
|
|
||||||
// // Add "runtime" to the set of packages to be loaded.
|
|
||||||
// conf.Import("runtime")
|
|
||||||
//
|
|
||||||
// // Adds "fmt" and "fmt_test" to the set of packages
|
|
||||||
// // to be loaded. "fmt" will include *_test.go files.
|
|
||||||
// conf.ImportWithTests("fmt")
|
|
||||||
//
|
|
||||||
// // Finally, load all the packages specified by the configuration.
|
|
||||||
// prog, err := conf.Load()
|
|
||||||
//
|
|
||||||
// See examples_test.go for examples of API usage.
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// CONCEPTS AND TERMINOLOGY
|
|
||||||
//
|
|
||||||
// The WORKSPACE is the set of packages accessible to the loader. The
|
|
||||||
// workspace is defined by Config.Build, a *build.Context. The
|
|
||||||
// default context treats subdirectories of $GOROOT and $GOPATH as
|
|
||||||
// packages, but this behavior may be overridden.
|
|
||||||
//
|
|
||||||
// An AD HOC package is one specified as a set of source files on the
|
|
||||||
// command line. In the simplest case, it may consist of a single file
|
|
||||||
// such as $GOROOT/src/net/http/triv.go.
|
|
||||||
//
|
|
||||||
// EXTERNAL TEST packages are those comprised of a set of *_test.go
|
|
||||||
// files all with the same 'package foo_test' declaration, all in the
|
|
||||||
// same directory. (go/build.Package calls these files XTestFiles.)
|
|
||||||
//
|
|
||||||
// An IMPORTABLE package is one that can be referred to by some import
|
|
||||||
// spec. Every importable package is uniquely identified by its
|
|
||||||
// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json",
|
|
||||||
// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path
|
|
||||||
// typically denotes a subdirectory of the workspace.
|
|
||||||
//
|
|
||||||
// An import declaration uses an IMPORT PATH to refer to a package.
|
|
||||||
// Most import declarations use the package path as the import path.
|
|
||||||
//
|
|
||||||
// Due to VENDORING (https://golang.org/s/go15vendor), the
|
|
||||||
// interpretation of an import path may depend on the directory in which
|
|
||||||
// it appears. To resolve an import path to a package path, go/build
|
|
||||||
// must search the enclosing directories for a subdirectory named
|
|
||||||
// "vendor".
|
|
||||||
//
|
|
||||||
// ad hoc packages and external test packages are NON-IMPORTABLE. The
|
|
||||||
// path of an ad hoc package is inferred from the package
|
|
||||||
// declarations of its files and is therefore not a unique package key.
|
|
||||||
// For example, Config.CreatePkgs may specify two initial ad hoc
|
|
||||||
// packages, both with path "main".
|
|
||||||
//
|
|
||||||
// An AUGMENTED package is an importable package P plus all the
|
|
||||||
// *_test.go files with same 'package foo' declaration as P.
|
|
||||||
// (go/build.Package calls these files TestFiles.)
|
|
||||||
//
|
|
||||||
// The INITIAL packages are those specified in the configuration. A
|
|
||||||
// DEPENDENCY is a package loaded to satisfy an import in an initial
|
|
||||||
// package or another dependency.
|
|
||||||
//
|
|
||||||
package loader
|
|
||||||
|
|
||||||
// IMPLEMENTATION NOTES
|
|
||||||
//
|
|
||||||
// 'go test', in-package test files, and import cycles
|
|
||||||
// ---------------------------------------------------
|
|
||||||
//
|
|
||||||
// An external test package may depend upon members of the augmented
|
|
||||||
// package that are not in the unaugmented package, such as functions
|
|
||||||
// that expose internals. (See bufio/export_test.go for an example.)
|
|
||||||
// So, the loader must ensure that for each external test package
|
|
||||||
// it loads, it also augments the corresponding non-test package.
|
|
||||||
//
|
|
||||||
// The import graph over n unaugmented packages must be acyclic; the
|
|
||||||
// import graph over n-1 unaugmented packages plus one augmented
|
|
||||||
// package must also be acyclic. ('go test' relies on this.) But the
|
|
||||||
// import graph over n augmented packages may contain cycles.
|
|
||||||
//
|
|
||||||
// First, all the (unaugmented) non-test packages and their
|
|
||||||
// dependencies are imported in the usual way; the loader reports an
|
|
||||||
// error if it detects an import cycle.
|
|
||||||
//
|
|
||||||
// Then, each package P for which testing is desired is augmented by
|
|
||||||
// the list P' of its in-package test files, by calling
|
|
||||||
// (*types.Checker).Files. This arrangement ensures that P' may
|
|
||||||
// reference definitions within P, but P may not reference definitions
|
|
||||||
// within P'. Furthermore, P' may import any other package, including
|
|
||||||
// ones that depend upon P, without an import cycle error.
|
|
||||||
//
|
|
||||||
// Consider two packages A and B, both of which have lists of
|
|
||||||
// in-package test files we'll call A' and B', and which have the
|
|
||||||
// following import graph edges:
|
|
||||||
// B imports A
|
|
||||||
// B' imports A
|
|
||||||
// A' imports B
|
|
||||||
// This last edge would be expected to create an error were it not
|
|
||||||
// for the special type-checking discipline above.
|
|
||||||
// Cycles of size greater than two are possible. For example:
|
|
||||||
// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil"
|
|
||||||
// io/ioutil/tempfile_test.go (package ioutil) imports "regexp"
|
|
||||||
// regexp/exec_test.go (package regexp) imports "compress/bzip2"
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// Concurrency
|
|
||||||
// -----------
|
|
||||||
//
|
|
||||||
// Let us define the import dependency graph as follows. Each node is a
|
|
||||||
// list of files passed to (Checker).Files at once. Many of these lists
|
|
||||||
// are the production code of an importable Go package, so those nodes
|
|
||||||
// are labelled by the package's path. The remaining nodes are
|
|
||||||
// ad hoc packages and lists of in-package *_test.go files that augment
|
|
||||||
// an importable package; those nodes have no label.
|
|
||||||
//
|
|
||||||
// The edges of the graph represent import statements appearing within a
|
|
||||||
// file. An edge connects a node (a list of files) to the node it
|
|
||||||
// imports, which is importable and thus always labelled.
|
|
||||||
//
|
|
||||||
// Loading is controlled by this dependency graph.
|
|
||||||
//
|
|
||||||
// To reduce I/O latency, we start loading a package's dependencies
|
|
||||||
// asynchronously as soon as we've parsed its files and enumerated its
|
|
||||||
// imports (scanImports). This performs a preorder traversal of the
|
|
||||||
// import dependency graph.
|
|
||||||
//
|
|
||||||
// To exploit hardware parallelism, we type-check unrelated packages in
|
|
||||||
// parallel, where "unrelated" means not ordered by the partial order of
|
|
||||||
// the import dependency graph.
|
|
||||||
//
|
|
||||||
// We use a concurrency-safe non-blocking cache (importer.imported) to
|
|
||||||
// record the results of type-checking, whether success or failure. An
|
|
||||||
// entry is created in this cache by startLoad the first time the
|
|
||||||
// package is imported. The first goroutine to request an entry becomes
|
|
||||||
// responsible for completing the task and broadcasting completion to
|
|
||||||
// subsequent requestors, which block until then.
|
|
||||||
//
|
|
||||||
// Type checking occurs in (parallel) postorder: we cannot type-check a
|
|
||||||
// set of files until we have loaded and type-checked all of their
|
|
||||||
// immediate dependencies (and thus all of their transitive
|
|
||||||
// dependencies). If the input were guaranteed free of import cycles,
|
|
||||||
// this would be trivial: we could simply wait for completion of the
|
|
||||||
// dependencies and then invoke the typechecker.
|
|
||||||
//
|
|
||||||
// But as we saw in the 'go test' section above, some cycles in the
|
|
||||||
// import graph over packages are actually legal, so long as the
|
|
||||||
// cycle-forming edge originates in the in-package test files that
|
|
||||||
// augment the package. This explains why the nodes of the import
|
|
||||||
// dependency graph are not packages, but lists of files: the unlabelled
|
|
||||||
// nodes avoid the cycles. Consider packages A and B where B imports A
|
|
||||||
// and A's in-package tests AT import B. The naively constructed import
|
|
||||||
// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but
|
|
||||||
// the graph over lists of files is AT --> B --> A, where AT is an
|
|
||||||
// unlabelled node.
|
|
||||||
//
|
|
||||||
// Awaiting completion of the dependencies in a cyclic graph would
|
|
||||||
// deadlock, so we must materialize the import dependency graph (as
|
|
||||||
// importer.graph) and check whether each import edge forms a cycle. If
|
|
||||||
// x imports y, and the graph already contains a path from y to x, then
|
|
||||||
// there is an import cycle, in which case the processing of x must not
|
|
||||||
// wait for the completion of processing of y.
|
|
||||||
//
|
|
||||||
// When the type-checker makes a callback (doImport) to the loader for a
|
|
||||||
// given import edge, there are two possible cases. In the normal case,
|
|
||||||
// the dependency has already been completely type-checked; doImport
|
|
||||||
// does a cache lookup and returns it. In the cyclic case, the entry in
|
|
||||||
// the cache is still necessarily incomplete, indicating a cycle. We
|
|
||||||
// perform the cycle check again to obtain the error message, and return
|
|
||||||
// the error.
|
|
||||||
//
|
|
||||||
// The result of using concurrency is about a 2.5x speedup for stdlib_test.
|
|
||||||
|
|
||||||
// TODO(adonovan): overhaul the package documentation.
|
|
@ -1,13 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build go1.6
|
|
||||||
|
|
||||||
package loader
|
|
||||||
|
|
||||||
import "go/build"
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
ignoreVendor = build.IgnoreVendor
|
|
||||||
}
|
|
File diff suppressed because it is too large
Load Diff
@ -1,124 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package loader
|
|
||||||
|
|
||||||
import (
|
|
||||||
"go/ast"
|
|
||||||
"go/build"
|
|
||||||
"go/parser"
|
|
||||||
"go/token"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/buildutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
// We use a counting semaphore to limit
|
|
||||||
// the number of parallel I/O calls per process.
|
|
||||||
var ioLimit = make(chan bool, 10)
|
|
||||||
|
|
||||||
// parseFiles parses the Go source files within directory dir and
|
|
||||||
// returns the ASTs of the ones that could be at least partially parsed,
|
|
||||||
// along with a list of I/O and parse errors encountered.
|
|
||||||
//
|
|
||||||
// I/O is done via ctxt, which may specify a virtual file system.
|
|
||||||
// displayPath is used to transform the filenames attached to the ASTs.
|
|
||||||
//
|
|
||||||
func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) {
|
|
||||||
if displayPath == nil {
|
|
||||||
displayPath = func(path string) string { return path }
|
|
||||||
}
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
n := len(files)
|
|
||||||
parsed := make([]*ast.File, n)
|
|
||||||
errors := make([]error, n)
|
|
||||||
for i, file := range files {
|
|
||||||
if !buildutil.IsAbsPath(ctxt, file) {
|
|
||||||
file = buildutil.JoinPath(ctxt, dir, file)
|
|
||||||
}
|
|
||||||
wg.Add(1)
|
|
||||||
go func(i int, file string) {
|
|
||||||
ioLimit <- true // wait
|
|
||||||
defer func() {
|
|
||||||
wg.Done()
|
|
||||||
<-ioLimit // signal
|
|
||||||
}()
|
|
||||||
var rd io.ReadCloser
|
|
||||||
var err error
|
|
||||||
if ctxt.OpenFile != nil {
|
|
||||||
rd, err = ctxt.OpenFile(file)
|
|
||||||
} else {
|
|
||||||
rd, err = os.Open(file)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
errors[i] = err // open failed
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseFile may return both an AST and an error.
|
|
||||||
parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode)
|
|
||||||
rd.Close()
|
|
||||||
}(i, file)
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
// Eliminate nils, preserving order.
|
|
||||||
var o int
|
|
||||||
for _, f := range parsed {
|
|
||||||
if f != nil {
|
|
||||||
parsed[o] = f
|
|
||||||
o++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
parsed = parsed[:o]
|
|
||||||
|
|
||||||
o = 0
|
|
||||||
for _, err := range errors {
|
|
||||||
if err != nil {
|
|
||||||
errors[o] = err
|
|
||||||
o++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
errors = errors[:o]
|
|
||||||
|
|
||||||
return parsed, errors
|
|
||||||
}
|
|
||||||
|
|
||||||
// scanImports returns the set of all import paths from all
|
|
||||||
// import specs in the specified files.
|
|
||||||
func scanImports(files []*ast.File) map[string]bool {
|
|
||||||
imports := make(map[string]bool)
|
|
||||||
for _, f := range files {
|
|
||||||
for _, decl := range f.Decls {
|
|
||||||
if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT {
|
|
||||||
for _, spec := range decl.Specs {
|
|
||||||
spec := spec.(*ast.ImportSpec)
|
|
||||||
|
|
||||||
// NB: do not assume the program is well-formed!
|
|
||||||
path, err := strconv.Unquote(spec.Path.Value)
|
|
||||||
if err != nil {
|
|
||||||
continue // quietly ignore the error
|
|
||||||
}
|
|
||||||
if path == "C" {
|
|
||||||
continue // skip pseudopackage
|
|
||||||
}
|
|
||||||
imports[path] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return imports
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------- Internal helpers ----------
|
|
||||||
|
|
||||||
// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
|
|
||||||
func tokenFileContainsPos(f *token.File, pos token.Pos) bool {
|
|
||||||
p := int(pos)
|
|
||||||
base := f.Base()
|
|
||||||
return base <= p && p < base+f.Size()
|
|
||||||
}
|
|
16
vendor/github.com/aws/aws-sdk-go/awstesting/assert.go
generated
vendored
16
vendor/github.com/aws/aws-sdk-go/awstesting/assert.go
generated
vendored
@ -125,6 +125,22 @@ func AssertXML(t *testing.T, expect, actual string, container interface{}, msgAn
|
|||||||
return equal(t, expectVal, actualVal, msgAndArgs...)
|
return equal(t, expectVal, actualVal, msgAndArgs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DidPanic returns if the function paniced and returns true if the function paniced.
|
||||||
|
func DidPanic(fn func()) (bool, interface{}) {
|
||||||
|
var paniced bool
|
||||||
|
var msg interface{}
|
||||||
|
func() {
|
||||||
|
defer func() {
|
||||||
|
if msg = recover(); msg != nil {
|
||||||
|
paniced = true
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
fn()
|
||||||
|
}()
|
||||||
|
|
||||||
|
return paniced, msg
|
||||||
|
}
|
||||||
|
|
||||||
// objectsAreEqual determines if two objects are considered equal.
|
// objectsAreEqual determines if two objects are considered equal.
|
||||||
//
|
//
|
||||||
// This function does no assertion of any kind.
|
// This function does no assertion of any kind.
|
||||||
|
@ -9,11 +9,10 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/awstesting/integration"
|
"github.com/aws/aws-sdk-go/awstesting/integration"
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
@ -67,16 +66,22 @@ func TestWriteToObject(t *testing.T) {
|
|||||||
Key: aws.String("key name"),
|
Key: aws.String("key name"),
|
||||||
Body: bytes.NewReader([]byte("hello world")),
|
Body: bytes.NewReader([]byte("hello world")),
|
||||||
})
|
})
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
resp, err := svc.GetObject(&s3.GetObjectInput{
|
resp, err := svc.GetObject(&s3.GetObjectInput{
|
||||||
Bucket: bucketName,
|
Bucket: bucketName,
|
||||||
Key: aws.String("key name"),
|
Key: aws.String("key name"),
|
||||||
})
|
})
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
b, _ := ioutil.ReadAll(resp.Body)
|
b, _ := ioutil.ReadAll(resp.Body)
|
||||||
assert.Equal(t, []byte("hello world"), b)
|
if e, a := []byte("hello world"), b; !reflect.DeepEqual(e, a) {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPresignedGetPut(t *testing.T) {
|
func TestPresignedGetPut(t *testing.T) {
|
||||||
@ -89,18 +94,26 @@ func TestPresignedGetPut(t *testing.T) {
|
|||||||
// Presign a PUT request
|
// Presign a PUT request
|
||||||
var puturl string
|
var puturl string
|
||||||
puturl, err = putreq.Presign(300 * time.Second)
|
puturl, err = putreq.Presign(300 * time.Second)
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
// PUT to the presigned URL with a body
|
// PUT to the presigned URL with a body
|
||||||
var puthttpreq *http.Request
|
var puthttpreq *http.Request
|
||||||
buf := bytes.NewReader([]byte("hello world"))
|
buf := bytes.NewReader([]byte("hello world"))
|
||||||
puthttpreq, err = http.NewRequest("PUT", puturl, buf)
|
puthttpreq, err = http.NewRequest("PUT", puturl, buf)
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
var putresp *http.Response
|
var putresp *http.Response
|
||||||
putresp, err = http.DefaultClient.Do(puthttpreq)
|
putresp, err = http.DefaultClient.Do(puthttpreq)
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
assert.Equal(t, 200, putresp.StatusCode)
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
if e, a := 200, putresp.StatusCode; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
// Presign a GET on the same URL
|
// Presign a GET on the same URL
|
||||||
getreq, _ := svc.GetObjectRequest(&s3.GetObjectInput{
|
getreq, _ := svc.GetObjectRequest(&s3.GetObjectInput{
|
||||||
@ -110,15 +123,21 @@ func TestPresignedGetPut(t *testing.T) {
|
|||||||
|
|
||||||
var geturl string
|
var geturl string
|
||||||
geturl, err = getreq.Presign(300 * time.Second)
|
geturl, err = getreq.Presign(300 * time.Second)
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
// Get the body
|
// Get the body
|
||||||
var getresp *http.Response
|
var getresp *http.Response
|
||||||
getresp, err = http.Get(geturl)
|
getresp, err = http.Get(geturl)
|
||||||
assert.NoError(t, err)
|
if err != nil {
|
||||||
|
t.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
var b []byte
|
var b []byte
|
||||||
defer getresp.Body.Close()
|
defer getresp.Body.Close()
|
||||||
b, err = ioutil.ReadAll(getresp.Body)
|
b, err = ioutil.ReadAll(getresp.Body)
|
||||||
assert.Equal(t, "hello world", string(b))
|
if e, a := "hello world", string(b); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -11,7 +11,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/gucumber/gucumber"
|
"github.com/gucumber/gucumber"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
@ -31,7 +30,9 @@ func init() {
|
|||||||
Bucket: aws.String(bucket),
|
Bucket: aws.String(bucket),
|
||||||
Prefix: aws.String(baseFolder + "/" + prefix),
|
Prefix: aws.String(baseFolder + "/" + prefix),
|
||||||
})
|
})
|
||||||
assert.NoError(gucumber.T, err)
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
plaintexts := make(map[string][]byte)
|
plaintexts := make(map[string][]byte)
|
||||||
for _, obj := range out.Contents {
|
for _, obj := range out.Contents {
|
||||||
@ -40,10 +41,14 @@ func init() {
|
|||||||
Bucket: aws.String(bucket),
|
Bucket: aws.String(bucket),
|
||||||
Key: plaintextKey,
|
Key: plaintextKey,
|
||||||
})
|
})
|
||||||
assert.NoError(gucumber.T, err)
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
caseKey := strings.TrimPrefix(*plaintextKey, baseFolder+"/"+prefix)
|
caseKey := strings.TrimPrefix(*plaintextKey, baseFolder+"/"+prefix)
|
||||||
plaintext, err := ioutil.ReadAll(ptObj.Body)
|
plaintext, err := ioutil.ReadAll(ptObj.Body)
|
||||||
assert.NoError(gucumber.T, err)
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
plaintexts[caseKey] = plaintext
|
plaintexts[caseKey] = plaintext
|
||||||
}
|
}
|
||||||
@ -84,10 +89,14 @@ func init() {
|
|||||||
Key: &cipherKey,
|
Key: &cipherKey,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
assert.NoError(gucumber.T, err)
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
ciphertext, err := ioutil.ReadAll(ctObj.Body)
|
ciphertext, err := ioutil.ReadAll(ctObj.Body)
|
||||||
assert.NoError(gucumber.T, err)
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
ciphertexts[caseKey] = ciphertext
|
ciphertexts[caseKey] = ciphertext
|
||||||
}
|
}
|
||||||
gucumber.World["decrypted"] = ciphertexts
|
gucumber.World["decrypted"] = ciphertexts
|
||||||
@ -97,8 +106,12 @@ func init() {
|
|||||||
plaintexts := gucumber.World["plaintexts"].(map[string][]byte)
|
plaintexts := gucumber.World["plaintexts"].(map[string][]byte)
|
||||||
ciphertexts := gucumber.World["decrypted"].(map[string][]byte)
|
ciphertexts := gucumber.World["decrypted"].(map[string][]byte)
|
||||||
for caseKey, ciphertext := range ciphertexts {
|
for caseKey, ciphertext := range ciphertexts {
|
||||||
assert.Equal(gucumber.T, len(plaintexts[caseKey]), len(ciphertext))
|
if e, a := len(plaintexts[caseKey]), len(ciphertext); e != a {
|
||||||
assert.True(gucumber.T, bytes.Equal(plaintexts[caseKey], ciphertext))
|
gucumber.T.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := plaintexts[caseKey], ciphertext; !bytes.Equal(e, a) {
|
||||||
|
gucumber.T.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -108,16 +121,22 @@ func init() {
|
|||||||
switch kek {
|
switch kek {
|
||||||
case "kms":
|
case "kms":
|
||||||
arn, err := getAliasInformation(v1, v2)
|
arn, err := getAliasInformation(v1, v2)
|
||||||
assert.Nil(gucumber.T, err)
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect nil, got %v", nil)
|
||||||
|
}
|
||||||
|
|
||||||
b64Arn := base64.StdEncoding.EncodeToString([]byte(arn))
|
b64Arn := base64.StdEncoding.EncodeToString([]byte(arn))
|
||||||
assert.Nil(gucumber.T, err)
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect nil, got %v", nil)
|
||||||
|
}
|
||||||
gucumber.World["Masterkey"] = b64Arn
|
gucumber.World["Masterkey"] = b64Arn
|
||||||
|
|
||||||
handler = s3crypto.NewKMSKeyGenerator(kms.New(session.New(&aws.Config{
|
handler = s3crypto.NewKMSKeyGenerator(kms.New(session.New(&aws.Config{
|
||||||
Region: &v2,
|
Region: &v2,
|
||||||
})), arn)
|
})), arn)
|
||||||
assert.Nil(gucumber.T, err)
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect nil, got %v", nil)
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
gucumber.T.Skip()
|
gucumber.T.Skip()
|
||||||
}
|
}
|
||||||
@ -157,7 +176,9 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
_, err := c.PutObject(input)
|
_, err := c.PutObject(input)
|
||||||
assert.Nil(gucumber.T, err)
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect nil, got %v", nil)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
86
vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go
generated
vendored
86
vendor/github.com/aws/aws-sdk-go/awstesting/integration/smoke/shared.go
generated
vendored
@ -5,7 +5,6 @@ package smoke
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
|
||||||
"os"
|
"os"
|
||||||
"reflect"
|
"reflect"
|
||||||
"regexp"
|
"regexp"
|
||||||
@ -13,7 +12,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/gucumber/gucumber"
|
"github.com/gucumber/gucumber"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
@ -47,12 +45,16 @@ func init() {
|
|||||||
|
|
||||||
gucumber.Then(`^the value at "(.+?)" should be a list$`, func(member string) {
|
gucumber.Then(`^the value at "(.+?)" should be a list$`, func(member string) {
|
||||||
vals, _ := awsutil.ValuesAtPath(gucumber.World["response"], member)
|
vals, _ := awsutil.ValuesAtPath(gucumber.World["response"], member)
|
||||||
assert.NotNil(gucumber.T, vals)
|
if vals == nil {
|
||||||
|
gucumber.T.Errorf("expect not nil, was")
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
gucumber.Then(`^the response should contain a "(.+?)"$`, func(member string) {
|
gucumber.Then(`^the response should contain a "(.+?)"$`, func(member string) {
|
||||||
vals, _ := awsutil.ValuesAtPath(gucumber.World["response"], member)
|
vals, _ := awsutil.ValuesAtPath(gucumber.World["response"], member)
|
||||||
assert.NotEmpty(gucumber.T, vals)
|
if len(vals) == 0 {
|
||||||
|
gucumber.T.Errorf("expect values, got none")
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
gucumber.When(`^I attempt to call the "(.+?)" API with:$`, func(op string, args [][]string) {
|
gucumber.When(`^I attempt to call the "(.+?)" API with:$`, func(op string, args [][]string) {
|
||||||
@ -61,23 +63,33 @@ func init() {
|
|||||||
|
|
||||||
gucumber.Then(`^I expect the response error code to be "(.+?)"$`, func(code string) {
|
gucumber.Then(`^I expect the response error code to be "(.+?)"$`, func(code string) {
|
||||||
err, ok := gucumber.World["error"].(awserr.Error)
|
err, ok := gucumber.World["error"].(awserr.Error)
|
||||||
assert.True(gucumber.T, ok, "no error returned")
|
if !ok {
|
||||||
|
gucumber.T.Errorf("no error returned")
|
||||||
|
}
|
||||||
if ok {
|
if ok {
|
||||||
assert.Equal(gucumber.T, code, err.Code(), "Error: %v", err)
|
if e, a := code, err.Code(); e != a {
|
||||||
|
gucumber.T.Errorf("Error: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
gucumber.And(`^I expect the response error message to include:$`, func(data string) {
|
gucumber.And(`^I expect the response error message to include:$`, func(data string) {
|
||||||
err, ok := gucumber.World["error"].(awserr.Error)
|
err, ok := gucumber.World["error"].(awserr.Error)
|
||||||
assert.True(gucumber.T, ok, "no error returned")
|
if !ok {
|
||||||
|
gucumber.T.Errorf("no error returned")
|
||||||
|
}
|
||||||
if ok {
|
if ok {
|
||||||
assert.Contains(gucumber.T, err.Error(), data)
|
if e, a := data, err.Error(); !strings.Contains(a, e) {
|
||||||
|
gucumber.T.Errorf("expect %v to be in %v, was not", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
gucumber.And(`^I expect the response error message to include one of:$`, func(table [][]string) {
|
gucumber.And(`^I expect the response error message to include one of:$`, func(table [][]string) {
|
||||||
err, ok := gucumber.World["error"].(awserr.Error)
|
err, ok := gucumber.World["error"].(awserr.Error)
|
||||||
assert.True(gucumber.T, ok, "no error returned")
|
if !ok {
|
||||||
|
gucumber.T.Errorf("no error returned")
|
||||||
|
}
|
||||||
if ok {
|
if ok {
|
||||||
found := false
|
found := false
|
||||||
for _, row := range table {
|
for _, row := range table {
|
||||||
@ -87,14 +99,20 @@ func init() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.True(gucumber.T, found, fmt.Sprintf("no error messages matched: \"%s\"", err.Error()))
|
if !found {
|
||||||
|
gucumber.T.Errorf("no error messages matched: \"%s\"", err.Error())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
gucumber.And(`^I expect the response error message not be empty$`, func() {
|
gucumber.And(`^I expect the response error message not be empty$`, func() {
|
||||||
err, ok := gucumber.World["error"].(awserr.Error)
|
err, ok := gucumber.World["error"].(awserr.Error)
|
||||||
assert.True(gucumber.T, ok, "no error returned")
|
if !ok {
|
||||||
assert.NotEmpty(gucumber.T, err.Message())
|
gucumber.T.Errorf("no error returned")
|
||||||
|
}
|
||||||
|
if len(err.Message()) == 0 {
|
||||||
|
gucumber.T.Errorf("expect values, got none")
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
gucumber.When(`^I call the "(.+?)" API with JSON:$`, func(s1 string, data string) {
|
gucumber.When(`^I call the "(.+?)" API with JSON:$`, func(s1 string, data string) {
|
||||||
@ -107,26 +125,42 @@ func init() {
|
|||||||
|
|
||||||
gucumber.Then(`^the error code should be "(.+?)"$`, func(s1 string) {
|
gucumber.Then(`^the error code should be "(.+?)"$`, func(s1 string) {
|
||||||
err, ok := gucumber.World["error"].(awserr.Error)
|
err, ok := gucumber.World["error"].(awserr.Error)
|
||||||
assert.True(gucumber.T, ok, "no error returned")
|
if !ok {
|
||||||
assert.Equal(gucumber.T, s1, err.Code())
|
gucumber.T.Errorf("no error returned")
|
||||||
|
}
|
||||||
|
if e, a := s1, err.Code(); e != a {
|
||||||
|
gucumber.T.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
gucumber.And(`^the error message should contain:$`, func(data string) {
|
gucumber.And(`^the error message should contain:$`, func(data string) {
|
||||||
err, ok := gucumber.World["error"].(awserr.Error)
|
err, ok := gucumber.World["error"].(awserr.Error)
|
||||||
assert.True(gucumber.T, ok, "no error returned")
|
if !ok {
|
||||||
assert.Contains(gucumber.T, err.Error(), data)
|
gucumber.T.Errorf("no error returned")
|
||||||
|
}
|
||||||
|
if e, a := data, err.Error(); !strings.Contains(a, e) {
|
||||||
|
gucumber.T.Errorf("expect %v to be in %v, was not", e, a)
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
gucumber.Then(`^the request should fail$`, func() {
|
gucumber.Then(`^the request should fail$`, func() {
|
||||||
err, ok := gucumber.World["error"].(awserr.Error)
|
err, ok := gucumber.World["error"].(awserr.Error)
|
||||||
assert.True(gucumber.T, ok, "no error returned")
|
if !ok {
|
||||||
assert.Error(gucumber.T, err)
|
gucumber.T.Errorf("no error returned")
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
gucumber.T.Errorf("expect error, got none")
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
gucumber.Then(`^the request should be successful$`, func() {
|
gucumber.Then(`^the request should be successful$`, func() {
|
||||||
err, ok := gucumber.World["error"].(awserr.Error)
|
err, ok := gucumber.World["error"].(awserr.Error)
|
||||||
assert.False(gucumber.T, ok, "error returned")
|
if ok {
|
||||||
assert.NoError(gucumber.T, err)
|
gucumber.T.Errorf("error returned")
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -160,10 +194,12 @@ func call(op string, args [][]string, allowError bool) {
|
|||||||
|
|
||||||
if !allowError {
|
if !allowError {
|
||||||
err, _ := gucumber.World["error"].(error)
|
err, _ := gucumber.World["error"].(error)
|
||||||
assert.NoError(gucumber.T, err)
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Fail(gucumber.T, "failed to find operation "+op)
|
gucumber.T.Errorf("failed to find operation " + op)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -215,10 +251,12 @@ func callWithJSON(op, j string, allowError bool) {
|
|||||||
|
|
||||||
if !allowError {
|
if !allowError {
|
||||||
err, _ := gucumber.World["error"].(error)
|
err, _ := gucumber.World["error"].(error)
|
||||||
assert.NoError(gucumber.T, err)
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
assert.Fail(gucumber.T, "failed to find operation "+op)
|
gucumber.T.Errorf("failed to find operation " + op)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
9
vendor/github.com/aws/aws-sdk-go/awstesting/performance/init.go
generated
vendored
9
vendor/github.com/aws/aws-sdk-go/awstesting/performance/init.go
generated
vendored
@ -9,7 +9,6 @@ import (
|
|||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
"github.com/gucumber/gucumber"
|
"github.com/gucumber/gucumber"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
@ -32,8 +31,12 @@ func init() {
|
|||||||
gucumber.Then(`^I should not have leaked any resources$`, func() {
|
gucumber.Then(`^I should not have leaked any resources$`, func() {
|
||||||
runtime.GC()
|
runtime.GC()
|
||||||
err, ok := gucumber.World["error"].(awserr.Error)
|
err, ok := gucumber.World["error"].(awserr.Error)
|
||||||
assert.False(gucumber.T, ok, "error returned")
|
if ok {
|
||||||
assert.NoError(gucumber.T, err)
|
gucumber.T.Errorf("error returned")
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
gucumber.T.Errorf("expect no error, got %v", err)
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
gucumber.And(`^I have a list of services$`, func() {
|
gucumber.And(`^I have a list of services$`, func() {
|
||||||
|
58
vendor/github.com/aws/aws-sdk-go/awstesting/util_test.go
generated
vendored
58
vendor/github.com/aws/aws-sdk-go/awstesting/util_test.go
generated
vendored
@ -4,8 +4,6 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/awstesting"
|
"github.com/aws/aws-sdk-go/awstesting"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -13,9 +11,15 @@ func TestReadCloserClose(t *testing.T) {
|
|||||||
rc := awstesting.ReadCloser{Size: 1}
|
rc := awstesting.ReadCloser{Size: 1}
|
||||||
err := rc.Close()
|
err := rc.Close()
|
||||||
|
|
||||||
assert.Nil(t, err)
|
if err != nil {
|
||||||
assert.True(t, rc.Closed)
|
t.Errorf("expect nil, got %v", err)
|
||||||
assert.Equal(t, rc.Size, 1)
|
}
|
||||||
|
if !rc.Closed {
|
||||||
|
t.Errorf("expect closed, was not")
|
||||||
|
}
|
||||||
|
if e, a := rc.Size, 1; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReadCloserRead(t *testing.T) {
|
func TestReadCloserRead(t *testing.T) {
|
||||||
@ -24,16 +28,30 @@ func TestReadCloserRead(t *testing.T) {
|
|||||||
|
|
||||||
n, err := rc.Read(b)
|
n, err := rc.Read(b)
|
||||||
|
|
||||||
assert.Nil(t, err)
|
if err != nil {
|
||||||
assert.Equal(t, n, 2)
|
t.Errorf("expect nil, got %v", err)
|
||||||
assert.False(t, rc.Closed)
|
}
|
||||||
assert.Equal(t, rc.Size, 3)
|
if e, a := n, 2; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if rc.Closed {
|
||||||
|
t.Errorf("expect not to be closed")
|
||||||
|
}
|
||||||
|
if e, a := rc.Size, 3; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
err = rc.Close()
|
err = rc.Close()
|
||||||
assert.Nil(t, err)
|
if err != nil {
|
||||||
|
t.Errorf("expect nil, got %v", err)
|
||||||
|
}
|
||||||
n, err = rc.Read(b)
|
n, err = rc.Read(b)
|
||||||
assert.Equal(t, err, io.EOF)
|
if e, a := err, io.EOF; e != a {
|
||||||
assert.Equal(t, n, 0)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := n, 0; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReadCloserReadAll(t *testing.T) {
|
func TestReadCloserReadAll(t *testing.T) {
|
||||||
@ -42,8 +60,16 @@ func TestReadCloserReadAll(t *testing.T) {
|
|||||||
|
|
||||||
n, err := rc.Read(b)
|
n, err := rc.Read(b)
|
||||||
|
|
||||||
assert.Equal(t, err, io.EOF)
|
if e, a := err, io.EOF; e != a {
|
||||||
assert.Equal(t, n, 5)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.False(t, rc.Closed)
|
}
|
||||||
assert.Equal(t, rc.Size, 0)
|
if e, a := n, 5; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if rc.Closed {
|
||||||
|
t.Errorf("expect not to be closed")
|
||||||
|
}
|
||||||
|
if e, a := rc.Size, 0; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
21
vendor/github.com/aws/aws-sdk-go/buildspec.yml
generated
vendored
Normal file
21
vendor/github.com/aws/aws-sdk-go/buildspec.yml
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
version: 0.2
|
||||||
|
|
||||||
|
phases:
|
||||||
|
build:
|
||||||
|
commands:
|
||||||
|
- echo Build started on `date`
|
||||||
|
- export GOPATH=/go
|
||||||
|
- export SDK_CB_ROOT=`pwd`
|
||||||
|
- export SDK_GO_ROOT=/go/src/github.com/aws/aws-sdk-go
|
||||||
|
- mkdir -p /go/src/github.com/aws
|
||||||
|
- ln -s $SDK_CB_ROOT $SDK_GO_ROOT
|
||||||
|
- cd $SDK_GO_ROOT
|
||||||
|
- make unit
|
||||||
|
- cd $SDK_CB_ROOT
|
||||||
|
- #echo Compiling the Go code...
|
||||||
|
post_build:
|
||||||
|
commands:
|
||||||
|
- echo Build completed on `date`
|
||||||
|
#artifacts:
|
||||||
|
# files:
|
||||||
|
# - hello
|
102
vendor/github.com/aws/aws-sdk-go/example/service/dynamodb/expression/readme.md
generated
vendored
Normal file
102
vendor/github.com/aws/aws-sdk-go/example/service/dynamodb/expression/readme.md
generated
vendored
Normal file
@ -0,0 +1,102 @@
|
|||||||
|
# Example
|
||||||
|
|
||||||
|
`scan` is an example how to use Amazon DynamoDB's `expression` package to fill
|
||||||
|
the member fields of Amazon DynamoDB's Operation input types.
|
||||||
|
|
||||||
|
## Representing DynamoDB Expressions
|
||||||
|
|
||||||
|
In the example, the variable `filt` represents a `FilterExpression`. Note that
|
||||||
|
DynamoDB item attributes are represented using the function `Name()` and
|
||||||
|
DynamoDB item values are similarly represented using the function `Value()`. In
|
||||||
|
this context, the string `"Artist"` represents the name of the item attribute
|
||||||
|
that we want to evaluate and the string `"No One You Know"` represents the value
|
||||||
|
we want to evaluate the item attribute against. The relationship between the two
|
||||||
|
[operands](http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.OperatorsAndFunctions.html#Expressions.OperatorsAndFunctions.Syntax)
|
||||||
|
are specified using the method `Equal()`.
|
||||||
|
|
||||||
|
Similarly, the variable `proj` represents a `ProjectionExpression`. The list of
|
||||||
|
item attribute names comprising the `ProjectionExpression` are specified as
|
||||||
|
arguments to the function `NamesList()`. The `expression` package utilizes the
|
||||||
|
type safety of Go and if an item value were to be used as an argument to the
|
||||||
|
function `NamesList()`, a compile time error is returned. The pattern of
|
||||||
|
representing DynamoDB Expressions by indicating relationships between `operands`
|
||||||
|
with functions is consistent throughout the whole `expression` package.
|
||||||
|
|
||||||
|
```go
|
||||||
|
filt := expression.Name("Artist").Equal(expression.Value("No One You Know"))
|
||||||
|
// let :a be an ExpressionAttributeValue representing the string "No One You Know"
|
||||||
|
// equivalent FilterExpression: "Artist = :a"
|
||||||
|
|
||||||
|
proj := expression.NamesList(expression.Name("SongTitle"), expression.Name("AlbumTitle"))
|
||||||
|
// equivalent ProjectionExpression: "SongTitle, AlbumTitle"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Creating an `Expression`
|
||||||
|
|
||||||
|
In the example, the variable `expr` is an instance of an `Expression` type. An
|
||||||
|
`Expression` is built using a builder pattern. First, a new `Builder` is
|
||||||
|
initialized by the `NewBuilder()` function. Then, types representing DynamoDB
|
||||||
|
Expressions are added to the `Builder` by methods `WithFilter()` and
|
||||||
|
`WithProjection()`. The `Build()` method returns an instance of an `Expression`
|
||||||
|
and an error. The error will be either an `InvalidParameterError` or an
|
||||||
|
`UnsetParameterError`.
|
||||||
|
|
||||||
|
```go
|
||||||
|
filt := expression.Name("Artist").Equal(expression.Value("No One You Know"))
|
||||||
|
proj := expression.NamesList(expression.Name("SongTitle"), expression.Name("AlbumTitle"))
|
||||||
|
|
||||||
|
expr, err := expression.NewBuilder().WithFilter(filt).WithProjection(proj).Build()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Filling in the fields of a DynamoDB `Scan` API
|
||||||
|
|
||||||
|
In the example, the getter methods of the `Expression` type is used to get the
|
||||||
|
formatted DynamoDB Expression strings. The `ExpressionAttributeNames` and
|
||||||
|
`ExpressionAttributeValues` member field of the DynamoDB API must always be
|
||||||
|
assigned when using an `Expression` since all item attribute names and values
|
||||||
|
are aliased. That means that if the `ExpressionAttributeNames` and
|
||||||
|
`ExpressionAttributeValues` member is not assigned with the corresponding
|
||||||
|
`Names()` and `Values()` methods, the DynamoDB operation will run into a logic
|
||||||
|
error.
|
||||||
|
|
||||||
|
```go
|
||||||
|
filt := expression.Name("Artist").Equal(expression.Value("No One You Know"))
|
||||||
|
proj := expression.NamesList(expression.Name("SongTitle"), expression.Name("AlbumTitle"))
|
||||||
|
expr, err := expression.NewBuilder().WithFilter(filt).WithProjection(proj).Build()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
input := &dynamodb.ScanInput{
|
||||||
|
ExpressionAttributeNames: expr.Names(),
|
||||||
|
ExpressionAttributeValues: expr.Values(),
|
||||||
|
FilterExpression: expr.Filter(),
|
||||||
|
ProjectionExpression: expr.Projection(),
|
||||||
|
TableName: aws.String("Music"),
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
`go run -tags example scan.go -table "<table_name>" -region "<optional_region>"`
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
```
|
||||||
|
{
|
||||||
|
Count: #SomeNumber,
|
||||||
|
Items: [{
|
||||||
|
AlbumTitle: {
|
||||||
|
#SomeAlbumTitle
|
||||||
|
},
|
||||||
|
SongTitle: {
|
||||||
|
#SomeSongTitle
|
||||||
|
}
|
||||||
|
}],
|
||||||
|
...
|
||||||
|
ScannedCount: #SomeNumber,
|
||||||
|
}
|
||||||
|
```
|
88
vendor/github.com/aws/aws-sdk-go/example/service/dynamodb/expression/scan.go
generated
vendored
Normal file
88
vendor/github.com/aws/aws-sdk-go/example/service/dynamodb/expression/scan.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
|
|||||||
|
// +build example
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
|
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||||
|
"github.com/aws/aws-sdk-go/service/dynamodb/expression"
|
||||||
|
)
|
||||||
|
|
||||||
|
func exitWithError(err error) {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
cfg := Config{}
|
||||||
|
if err := cfg.Load(); err != nil {
|
||||||
|
exitWithError(fmt.Errorf("failed to load config, %v", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the config specifying the Region for the DynamoDB table.
|
||||||
|
// If Config.Region is not set the region must come from the shared
|
||||||
|
// config or AWS_REGION environment variable.
|
||||||
|
awscfg := &aws.Config{}
|
||||||
|
if len(cfg.Region) > 0 {
|
||||||
|
awscfg.WithRegion(cfg.Region)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the session that the DynamoDB service will use.
|
||||||
|
sess := session.Must(session.NewSession(awscfg))
|
||||||
|
|
||||||
|
// Create the DynamoDB service client to make the query request with.
|
||||||
|
svc := dynamodb.New(sess)
|
||||||
|
|
||||||
|
// Create the Expression to fill the input struct with.
|
||||||
|
filt := expression.Name("Artist").Equal(expression.Value("No One You Know"))
|
||||||
|
proj := expression.NamesList(expression.Name("SongTitle"), expression.Name("AlbumTitle"))
|
||||||
|
expr, err := expression.NewBuilder().WithFilter(filt).WithProjection(proj).Build()
|
||||||
|
if err != nil {
|
||||||
|
exitWithError(fmt.Errorf("failed to create the Expression, %v", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the query input parameters
|
||||||
|
params := &dynamodb.ScanInput{
|
||||||
|
ExpressionAttributeNames: expr.Names(),
|
||||||
|
ExpressionAttributeValues: expr.Values(),
|
||||||
|
FilterExpression: expr.Filter(),
|
||||||
|
ProjectionExpression: expr.Projection(),
|
||||||
|
TableName: aws.String(cfg.Table),
|
||||||
|
}
|
||||||
|
if cfg.Limit > 0 {
|
||||||
|
params.Limit = aws.Int64(cfg.Limit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make the DynamoDB Query API call
|
||||||
|
result, err := svc.Scan(params)
|
||||||
|
if err != nil {
|
||||||
|
exitWithError(fmt.Errorf("failed to make Query API call, %v", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
Table string // required
|
||||||
|
Region string // optional
|
||||||
|
Limit int64 // optional
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Config) Load() error {
|
||||||
|
flag.Int64Var(&c.Limit, "limit", 0, "Limit is the max items to be returned, 0 is no limit")
|
||||||
|
flag.StringVar(&c.Table, "table", "", "Table to Query on")
|
||||||
|
flag.StringVar(&c.Region, "region", "", "AWS Region the table is in")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if len(c.Table) == 0 {
|
||||||
|
flag.PrintDefaults()
|
||||||
|
return fmt.Errorf("table name is required.")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
1
vendor/github.com/aws/aws-sdk-go/models/apis/AWS
generated
vendored
1
vendor/github.com/aws/aws-sdk-go/models/apis/AWS
generated
vendored
@ -1 +0,0 @@
|
|||||||
|
|
280
vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/api-2.json
generated
vendored
280
vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/api-2.json
generated
vendored
@ -60,6 +60,38 @@
|
|||||||
{"shape":"IncompatibleImageException"}
|
{"shape":"IncompatibleImageException"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"CreateImageBuilder":{
|
||||||
|
"name":"CreateImageBuilder",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"CreateImageBuilderRequest"},
|
||||||
|
"output":{"shape":"CreateImageBuilderResult"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"LimitExceededException"},
|
||||||
|
{"shape":"ResourceAlreadyExistsException"},
|
||||||
|
{"shape":"ResourceNotAvailableException"},
|
||||||
|
{"shape":"ResourceNotFoundException"},
|
||||||
|
{"shape":"InvalidRoleException"},
|
||||||
|
{"shape":"ConcurrentModificationException"},
|
||||||
|
{"shape":"InvalidParameterCombinationException"},
|
||||||
|
{"shape":"IncompatibleImageException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"CreateImageBuilderStreamingURL":{
|
||||||
|
"name":"CreateImageBuilderStreamingURL",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"CreateImageBuilderStreamingURLRequest"},
|
||||||
|
"output":{"shape":"CreateImageBuilderStreamingURLResult"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"OperationNotPermittedException"},
|
||||||
|
{"shape":"ResourceNotFoundException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
"CreateStack":{
|
"CreateStack":{
|
||||||
"name":"CreateStack",
|
"name":"CreateStack",
|
||||||
"http":{
|
"http":{
|
||||||
@ -119,6 +151,35 @@
|
|||||||
{"shape":"ConcurrentModificationException"}
|
{"shape":"ConcurrentModificationException"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"DeleteImage":{
|
||||||
|
"name":"DeleteImage",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"DeleteImageRequest"},
|
||||||
|
"output":{"shape":"DeleteImageResult"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"ResourceInUseException"},
|
||||||
|
{"shape":"ResourceNotFoundException"},
|
||||||
|
{"shape":"OperationNotPermittedException"},
|
||||||
|
{"shape":"ConcurrentModificationException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"DeleteImageBuilder":{
|
||||||
|
"name":"DeleteImageBuilder",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"DeleteImageBuilderRequest"},
|
||||||
|
"output":{"shape":"DeleteImageBuilderResult"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"ResourceNotFoundException"},
|
||||||
|
{"shape":"OperationNotPermittedException"},
|
||||||
|
{"shape":"ConcurrentModificationException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
"DeleteStack":{
|
"DeleteStack":{
|
||||||
"name":"DeleteStack",
|
"name":"DeleteStack",
|
||||||
"http":{
|
"http":{
|
||||||
@ -157,6 +218,18 @@
|
|||||||
{"shape":"ResourceNotFoundException"}
|
{"shape":"ResourceNotFoundException"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"DescribeImageBuilders":{
|
||||||
|
"name":"DescribeImageBuilders",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"DescribeImageBuildersRequest"},
|
||||||
|
"output":{"shape":"DescribeImageBuildersResult"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"ResourceNotFoundException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
"DescribeImages":{
|
"DescribeImages":{
|
||||||
"name":"DescribeImages",
|
"name":"DescribeImages",
|
||||||
"http":{
|
"http":{
|
||||||
@ -249,6 +322,20 @@
|
|||||||
{"shape":"ConcurrentModificationException"}
|
{"shape":"ConcurrentModificationException"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"StartImageBuilder":{
|
||||||
|
"name":"StartImageBuilder",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"StartImageBuilderRequest"},
|
||||||
|
"output":{"shape":"StartImageBuilderResult"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"ResourceNotAvailableException"},
|
||||||
|
{"shape":"ResourceNotFoundException"},
|
||||||
|
{"shape":"ConcurrentModificationException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
"StopFleet":{
|
"StopFleet":{
|
||||||
"name":"StopFleet",
|
"name":"StopFleet",
|
||||||
"http":{
|
"http":{
|
||||||
@ -262,6 +349,20 @@
|
|||||||
{"shape":"ConcurrentModificationException"}
|
{"shape":"ConcurrentModificationException"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"StopImageBuilder":{
|
||||||
|
"name":"StopImageBuilder",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"StopImageBuilderRequest"},
|
||||||
|
"output":{"shape":"StopImageBuilderResult"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"ResourceNotFoundException"},
|
||||||
|
{"shape":"OperationNotPermittedException"},
|
||||||
|
{"shape":"ConcurrentModificationException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
"UpdateDirectoryConfig":{
|
"UpdateDirectoryConfig":{
|
||||||
"name":"UpdateDirectoryConfig",
|
"name":"UpdateDirectoryConfig",
|
||||||
"http":{
|
"http":{
|
||||||
@ -427,6 +528,7 @@
|
|||||||
"Name":{"shape":"Name"},
|
"Name":{"shape":"Name"},
|
||||||
"ImageName":{"shape":"String"},
|
"ImageName":{"shape":"String"},
|
||||||
"InstanceType":{"shape":"String"},
|
"InstanceType":{"shape":"String"},
|
||||||
|
"FleetType":{"shape":"FleetType"},
|
||||||
"ComputeCapacity":{"shape":"ComputeCapacity"},
|
"ComputeCapacity":{"shape":"ComputeCapacity"},
|
||||||
"VpcConfig":{"shape":"VpcConfig"},
|
"VpcConfig":{"shape":"VpcConfig"},
|
||||||
"MaxUserDurationInSeconds":{"shape":"Integer"},
|
"MaxUserDurationInSeconds":{"shape":"Integer"},
|
||||||
@ -443,6 +545,45 @@
|
|||||||
"Fleet":{"shape":"Fleet"}
|
"Fleet":{"shape":"Fleet"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"CreateImageBuilderRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":[
|
||||||
|
"Name",
|
||||||
|
"ImageName",
|
||||||
|
"InstanceType"
|
||||||
|
],
|
||||||
|
"members":{
|
||||||
|
"Name":{"shape":"Name"},
|
||||||
|
"ImageName":{"shape":"String"},
|
||||||
|
"InstanceType":{"shape":"String"},
|
||||||
|
"Description":{"shape":"Description"},
|
||||||
|
"DisplayName":{"shape":"DisplayName"},
|
||||||
|
"VpcConfig":{"shape":"VpcConfig"},
|
||||||
|
"EnableDefaultInternetAccess":{"shape":"BooleanObject"},
|
||||||
|
"DomainJoinInfo":{"shape":"DomainJoinInfo"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"CreateImageBuilderResult":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"ImageBuilder":{"shape":"ImageBuilder"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"CreateImageBuilderStreamingURLRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["Name"],
|
||||||
|
"members":{
|
||||||
|
"Name":{"shape":"String"},
|
||||||
|
"Validity":{"shape":"Long"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"CreateImageBuilderStreamingURLResult":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"StreamingURL":{"shape":"String"},
|
||||||
|
"Expires":{"shape":"Timestamp"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"CreateStackRequest":{
|
"CreateStackRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":["Name"],
|
"required":["Name"],
|
||||||
@ -506,6 +647,32 @@
|
|||||||
"members":{
|
"members":{
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DeleteImageBuilderRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["Name"],
|
||||||
|
"members":{
|
||||||
|
"Name":{"shape":"Name"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DeleteImageBuilderResult":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"ImageBuilder":{"shape":"ImageBuilder"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DeleteImageRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["Name"],
|
||||||
|
"members":{
|
||||||
|
"Name":{"shape":"Name"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DeleteImageResult":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"Image":{"shape":"Image"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"DeleteStackRequest":{
|
"DeleteStackRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":["Name"],
|
"required":["Name"],
|
||||||
@ -547,6 +714,21 @@
|
|||||||
"NextToken":{"shape":"String"}
|
"NextToken":{"shape":"String"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DescribeImageBuildersRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"Names":{"shape":"StringList"},
|
||||||
|
"MaxResults":{"shape":"Integer"},
|
||||||
|
"NextToken":{"shape":"String"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DescribeImageBuildersResult":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"ImageBuilders":{"shape":"ImageBuilderList"},
|
||||||
|
"NextToken":{"shape":"String"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"DescribeImagesRequest":{
|
"DescribeImagesRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -675,6 +857,7 @@
|
|||||||
"Description":{"shape":"String"},
|
"Description":{"shape":"String"},
|
||||||
"ImageName":{"shape":"String"},
|
"ImageName":{"shape":"String"},
|
||||||
"InstanceType":{"shape":"String"},
|
"InstanceType":{"shape":"String"},
|
||||||
|
"FleetType":{"shape":"FleetType"},
|
||||||
"ComputeCapacityStatus":{"shape":"ComputeCapacityStatus"},
|
"ComputeCapacityStatus":{"shape":"ComputeCapacityStatus"},
|
||||||
"MaxUserDurationInSeconds":{"shape":"Integer"},
|
"MaxUserDurationInSeconds":{"shape":"Integer"},
|
||||||
"DisconnectTimeoutInSeconds":{"shape":"Integer"},
|
"DisconnectTimeoutInSeconds":{"shape":"Integer"},
|
||||||
@ -720,6 +903,7 @@
|
|||||||
"IMAGE_NOT_FOUND",
|
"IMAGE_NOT_FOUND",
|
||||||
"INVALID_SUBNET_CONFIGURATION",
|
"INVALID_SUBNET_CONFIGURATION",
|
||||||
"SECURITY_GROUPS_NOT_FOUND",
|
"SECURITY_GROUPS_NOT_FOUND",
|
||||||
|
"IGW_NOT_ATTACHED",
|
||||||
"IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION",
|
"IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION",
|
||||||
"DOMAIN_JOIN_ERROR_FILE_NOT_FOUND",
|
"DOMAIN_JOIN_ERROR_FILE_NOT_FOUND",
|
||||||
"DOMAIN_JOIN_ERROR_ACCESS_DENIED",
|
"DOMAIN_JOIN_ERROR_ACCESS_DENIED",
|
||||||
@ -752,6 +936,13 @@
|
|||||||
"STOPPED"
|
"STOPPED"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"FleetType":{
|
||||||
|
"type":"string",
|
||||||
|
"enum":[
|
||||||
|
"ALWAYS_ON",
|
||||||
|
"ON_DEMAND"
|
||||||
|
]
|
||||||
|
},
|
||||||
"Image":{
|
"Image":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":["Name"],
|
"required":["Name"],
|
||||||
@ -771,6 +962,57 @@
|
|||||||
"PublicBaseImageReleasedDate":{"shape":"Timestamp"}
|
"PublicBaseImageReleasedDate":{"shape":"Timestamp"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"ImageBuilder":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["Name"],
|
||||||
|
"members":{
|
||||||
|
"Name":{"shape":"String"},
|
||||||
|
"Arn":{"shape":"Arn"},
|
||||||
|
"ImageArn":{"shape":"Arn"},
|
||||||
|
"Description":{"shape":"String"},
|
||||||
|
"DisplayName":{"shape":"String"},
|
||||||
|
"VpcConfig":{"shape":"VpcConfig"},
|
||||||
|
"InstanceType":{"shape":"String"},
|
||||||
|
"Platform":{"shape":"PlatformType"},
|
||||||
|
"State":{"shape":"ImageBuilderState"},
|
||||||
|
"StateChangeReason":{"shape":"ImageBuilderStateChangeReason"},
|
||||||
|
"CreatedTime":{"shape":"Timestamp"},
|
||||||
|
"EnableDefaultInternetAccess":{"shape":"BooleanObject"},
|
||||||
|
"DomainJoinInfo":{"shape":"DomainJoinInfo"},
|
||||||
|
"ImageBuilderErrors":{"shape":"ResourceErrors"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ImageBuilderList":{
|
||||||
|
"type":"list",
|
||||||
|
"member":{"shape":"ImageBuilder"}
|
||||||
|
},
|
||||||
|
"ImageBuilderState":{
|
||||||
|
"type":"string",
|
||||||
|
"enum":[
|
||||||
|
"PENDING",
|
||||||
|
"RUNNING",
|
||||||
|
"STOPPING",
|
||||||
|
"STOPPED",
|
||||||
|
"REBOOTING",
|
||||||
|
"SNAPSHOTTING",
|
||||||
|
"DELETING",
|
||||||
|
"FAILED"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"ImageBuilderStateChangeReason":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"Code":{"shape":"ImageBuilderStateChangeReasonCode"},
|
||||||
|
"Message":{"shape":"String"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ImageBuilderStateChangeReasonCode":{
|
||||||
|
"type":"string",
|
||||||
|
"enum":[
|
||||||
|
"INTERNAL_ERROR",
|
||||||
|
"IMAGE_UNAVAILABLE"
|
||||||
|
]
|
||||||
|
},
|
||||||
"ImageList":{
|
"ImageList":{
|
||||||
"type":"list",
|
"type":"list",
|
||||||
"member":{"shape":"Image"}
|
"member":{"shape":"Image"}
|
||||||
@ -893,6 +1135,18 @@
|
|||||||
},
|
},
|
||||||
"exception":true
|
"exception":true
|
||||||
},
|
},
|
||||||
|
"ResourceError":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"ErrorCode":{"shape":"FleetErrorCode"},
|
||||||
|
"ErrorMessage":{"shape":"String"},
|
||||||
|
"ErrorTimestamp":{"shape":"Timestamp"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ResourceErrors":{
|
||||||
|
"type":"list",
|
||||||
|
"member":{"shape":"ResourceError"}
|
||||||
|
},
|
||||||
"ResourceIdentifier":{
|
"ResourceIdentifier":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"min":1
|
"min":1
|
||||||
@ -1011,6 +1265,19 @@
|
|||||||
"members":{
|
"members":{
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"StartImageBuilderRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["Name"],
|
||||||
|
"members":{
|
||||||
|
"Name":{"shape":"String"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"StartImageBuilderResult":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"ImageBuilder":{"shape":"ImageBuilder"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"StopFleetRequest":{
|
"StopFleetRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":["Name"],
|
"required":["Name"],
|
||||||
@ -1023,6 +1290,19 @@
|
|||||||
"members":{
|
"members":{
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"StopImageBuilderRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["Name"],
|
||||||
|
"members":{
|
||||||
|
"Name":{"shape":"String"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"StopImageBuilderResult":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"ImageBuilder":{"shape":"ImageBuilder"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"StorageConnector":{
|
"StorageConnector":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":["ConnectorType"],
|
"required":["ConnectorType"],
|
||||||
|
534
vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/docs-2.json
generated
vendored
534
vendor/github.com/aws/aws-sdk-go/models/apis/appstream/2016-12-01/docs-2.json
generated
vendored
@ -1,45 +1,52 @@
|
|||||||
{
|
{
|
||||||
"version": "2.0",
|
"version": "2.0",
|
||||||
"service": "<fullname>Amazon AppStream 2.0</fullname> <p>API documentation for Amazon AppStream 2.0.</p>",
|
"service": "<fullname>Amazon AppStream 2.0</fullname> <p>You can use Amazon AppStream 2.0 to stream desktop applications to any device running a web browser, without rewriting them.</p>",
|
||||||
"operations": {
|
"operations": {
|
||||||
"AssociateFleet": "<p>Associate a fleet to a stack.</p>",
|
"AssociateFleet": "<p>Associates the specified fleet with the specified stack.</p>",
|
||||||
"CreateDirectoryConfig": "<p>Creates a directory configuration with the given parameters.</p>",
|
"CreateDirectoryConfig": "<p>Creates a directory configuration.</p>",
|
||||||
"CreateFleet": "<p>Creates a new fleet.</p>",
|
"CreateFleet": "<p>Creates a fleet.</p>",
|
||||||
"CreateStack": "<p>Create a new stack.</p>",
|
"CreateImageBuilder": null,
|
||||||
"CreateStreamingURL": "<p>Creates a URL to start an AppStream 2.0 streaming session for a user. By default, the URL is valid only for 1 minute from the time that it is generated.</p>",
|
"CreateImageBuilderStreamingURL": null,
|
||||||
"DeleteDirectoryConfig": "<p>Deletes the directory configuration with the given parameters.</p>",
|
"CreateStack": "<p>Creates a stack.</p>",
|
||||||
"DeleteFleet": "<p>Deletes a fleet.</p>",
|
"CreateStreamingURL": "<p>Creates a URL to start a streaming session for the specified user.</p> <p>By default, the URL is valid only for one minute from the time that it is generated.</p>",
|
||||||
"DeleteStack": "<p>Deletes the stack. After this operation completes, the environment can no longer be activated, and any reservations made for the stack are released.</p>",
|
"DeleteDirectoryConfig": "<p>Deletes the specified directory configuration.</p>",
|
||||||
"DescribeDirectoryConfigs": "<p>Returns a list describing the specified directory configurations.</p>",
|
"DeleteFleet": "<p>Deletes the specified fleet.</p>",
|
||||||
"DescribeFleets": "<p>If fleet names are provided, this operation describes the specified fleets; otherwise, all the fleets in the account are described.</p>",
|
"DeleteImage": null,
|
||||||
"DescribeImages": "<p>Describes the images. If a list of names is not provided, all images in your account are returned. This operation does not return a paginated result.</p>",
|
"DeleteImageBuilder": null,
|
||||||
"DescribeSessions": "<p>Describes the streaming sessions for a stack and a fleet. If a user ID is provided, this operation returns streaming sessions for only that user. To retrieve the next set of items, pass this value for the <code>nextToken</code> parameter in a subsequent call to this operation. If an authentication type is not provided, the operation defaults to users authenticated using a streaming URL.</p>",
|
"DeleteStack": "<p>Deletes the specified stack. After this operation completes, the environment can no longer be activated and any reservations made for the stack are released.</p>",
|
||||||
"DescribeStacks": "<p>If stack names are not provided, this operation describes the specified stacks; otherwise, all stacks in the account are described. To retrieve the next set of items, pass the <code>nextToken</code> value in a subsequent call to this operation.</p>",
|
"DescribeDirectoryConfigs": "<p>Describes the specified directory configurations.</p>",
|
||||||
"DisassociateFleet": "<p>Disassociates a fleet from a stack.</p>",
|
"DescribeFleets": "<p>Describes the specified fleets or all fleets in the account.</p>",
|
||||||
"ExpireSession": "<p>This operation immediately stops a streaming session.</p>",
|
"DescribeImageBuilders": null,
|
||||||
"ListAssociatedFleets": "<p>Lists all fleets associated with the stack.</p>",
|
"DescribeImages": "<p>Describes the specified images or all images in the account.</p>",
|
||||||
"ListAssociatedStacks": "<p>Lists all stacks to which the specified fleet is associated.</p>",
|
"DescribeSessions": "<p>Describes the streaming sessions for the specified stack and fleet. If a user ID is provided, only the streaming sessions for only that user are returned. If an authentication type is not provided, the default is to authenticate users using a streaming URL.</p>",
|
||||||
"StartFleet": "<p>Starts a fleet.</p>",
|
"DescribeStacks": "<p>Describes the specified stacks or all stacks in the account.</p>",
|
||||||
"StopFleet": "<p>Stops a fleet.</p>",
|
"DisassociateFleet": "<p>Disassociates the specified fleet from the specified stack.</p>",
|
||||||
"UpdateDirectoryConfig": "<p>Updates the directory configuration with the given parameters.</p>",
|
"ExpireSession": "<p>Stops the specified streaming session.</p>",
|
||||||
"UpdateFleet": "<p>Updates an existing fleet. All the attributes except the fleet name can be updated in the <b>STOPPED</b> state. When a fleet is in the <b>RUNNING</b> state, only <code>DisplayName</code> and <code>ComputeCapacity</code> can be updated. A fleet cannot be updated in a status of <b>STARTING</b> or <b>STOPPING</b>.</p>",
|
"ListAssociatedFleets": "<p>Lists the fleets associated with the specified stack.</p>",
|
||||||
"UpdateStack": "<p>Updates the specified fields in the stack with the specified name.</p>"
|
"ListAssociatedStacks": "<p>Lists the stacks associated with the specified fleet.</p>",
|
||||||
|
"StartFleet": "<p>Starts the specified fleet.</p>",
|
||||||
|
"StartImageBuilder": null,
|
||||||
|
"StopFleet": "<p>Stops the specified fleet.</p>",
|
||||||
|
"StopImageBuilder": null,
|
||||||
|
"UpdateDirectoryConfig": "<p>Updates the specified directory configuration.</p>",
|
||||||
|
"UpdateFleet": "<p>Updates the specified fleet.</p> <p>If the fleet is in the <code>STOPPED</code> state, you can update any attribute except the fleet name. If the fleet is in the <code>RUNNING</code> state, you can update the <code>DisplayName</code> and <code>ComputeCapacity</code> attributes. If the fleet is in the <code>STARTING</code> or <code>STOPPING</code> state, you can't update it.</p>",
|
||||||
|
"UpdateStack": "<p>Updates the specified stack.</p>"
|
||||||
},
|
},
|
||||||
"shapes": {
|
"shapes": {
|
||||||
"AccountName": {
|
"AccountName": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"ServiceAccountCredentials$AccountName": "<p>The user name of an account in the directory that is used by AppStream 2.0 streaming instances to connect to the directory. This account must have the following privileges: create computer objects, join computers to the domain, change/reset the password on descendant computer objects for the organizational units specified.</p>"
|
"ServiceAccountCredentials$AccountName": "<p>The user name of the account. This account must have the following privileges: create computer objects, join computers to the domain, and change/reset the password on descendant computer objects for the organizational units specified.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"AccountPassword": {
|
"AccountPassword": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"ServiceAccountCredentials$AccountPassword": "<p>The password for the user account for directory actions.</p>"
|
"ServiceAccountCredentials$AccountPassword": "<p>The password for the account.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Application": {
|
"Application": {
|
||||||
"base": "<p>An entry for a single application in the application catalog.</p>",
|
"base": "<p>Describes an application in the application catalog.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"Applications$member": null
|
"Applications$member": null
|
||||||
}
|
}
|
||||||
@ -47,15 +54,17 @@
|
|||||||
"Applications": {
|
"Applications": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"Image$Applications": "<p>The applications associated with an image.</p>"
|
"Image$Applications": "<p>The applications associated with the image.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Arn": {
|
"Arn": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"Fleet$Arn": "<p>The ARN for the fleet.</p>",
|
"Fleet$Arn": "<p>The ARN for the fleet.</p>",
|
||||||
"Image$Arn": "<p>The ARN for the image.</p>",
|
"Image$Arn": "<p>The ARN of the image.</p>",
|
||||||
"Image$BaseImageArn": "<p>The source image ARN from which this image was created.</p>",
|
"Image$BaseImageArn": "<p>The ARN of the image from which this image was created.</p>",
|
||||||
|
"ImageBuilder$Arn": null,
|
||||||
|
"ImageBuilder$ImageArn": null,
|
||||||
"Stack$Arn": "<p>The ARN of the stack.</p>"
|
"Stack$Arn": "<p>The ARN of the stack.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -72,38 +81,40 @@
|
|||||||
"AuthenticationType": {
|
"AuthenticationType": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeSessionsRequest$AuthenticationType": "<p>The authentication method of the user. It can be <code>API</code> for a user authenticated using a streaming URL, or <code>SAML</code> for a SAML federated user. If an authentication type is not provided, the operation defaults to users authenticated using a streaming URL.</p>",
|
"DescribeSessionsRequest$AuthenticationType": "<p>The authentication method. Specify <code>API</code> for a user authenticated using a streaming URL or <code>SAML</code> for a SAML federated user. The default is to authenticate users using a streaming URL.</p>",
|
||||||
"Session$AuthenticationType": "<p>The authentication method of the user for whom the session was created. It can be <code>API</code> for a user authenticated using a streaming URL or <code>SAML</code> for a SAML federated user.</p>"
|
"Session$AuthenticationType": "<p>The authentication method. The user is authenticated using a streaming URL (<code>API</code>) or SAML federation (<code>SAML</code>).</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Boolean": {
|
"Boolean": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"Application$Enabled": "<p>If there is a problem, an application can be disabled after image creation.</p>",
|
"Application$Enabled": "<p>If there is a problem, the application can be disabled after image creation.</p>",
|
||||||
"Image$ImageBuilderSupported": "<p>Whether an image builder can be launched from this image.</p>",
|
"Image$ImageBuilderSupported": "<p>Indicates whether an image builder can be launched from this image.</p>",
|
||||||
"UpdateFleetRequest$DeleteVpcConfig": "<p>Delete the VPC association for the specified fleet.</p>",
|
"UpdateFleetRequest$DeleteVpcConfig": "<p>Deletes the VPC association for the specified fleet.</p>",
|
||||||
"UpdateStackRequest$DeleteStorageConnectors": "<p>Remove all the storage connectors currently enabled for the stack.</p>"
|
"UpdateStackRequest$DeleteStorageConnectors": "<p>Deletes the storage connectors currently enabled for the stack.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"BooleanObject": {
|
"BooleanObject": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateFleetRequest$EnableDefaultInternetAccess": "<p>Enables or disables default internet access for the fleet.</p>",
|
"CreateFleetRequest$EnableDefaultInternetAccess": "<p>Enables or disables default internet access for the fleet.</p>",
|
||||||
"Fleet$EnableDefaultInternetAccess": "<p>Whether default internet access is enabled for the fleet. </p>",
|
"CreateImageBuilderRequest$EnableDefaultInternetAccess": null,
|
||||||
|
"Fleet$EnableDefaultInternetAccess": "<p>Indicates whether default internet access is enabled for the fleet.</p>",
|
||||||
|
"ImageBuilder$EnableDefaultInternetAccess": null,
|
||||||
"UpdateFleetRequest$EnableDefaultInternetAccess": "<p>Enables or disables default internet access for the fleet.</p>"
|
"UpdateFleetRequest$EnableDefaultInternetAccess": "<p>Enables or disables default internet access for the fleet.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ComputeCapacity": {
|
"ComputeCapacity": {
|
||||||
"base": "<p>The capacity configuration for the fleet.</p>",
|
"base": "<p>Describes the capacity for a fleet.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateFleetRequest$ComputeCapacity": "<p>The parameters for the capacity allocated to the fleet.</p>",
|
"CreateFleetRequest$ComputeCapacity": "<p>The desired capacity for the fleet.</p>",
|
||||||
"UpdateFleetRequest$ComputeCapacity": "<p>The parameters for the capacity allocated to the fleet. </p>"
|
"UpdateFleetRequest$ComputeCapacity": "<p>The desired capacity for the fleet.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ComputeCapacityStatus": {
|
"ComputeCapacityStatus": {
|
||||||
"base": "<p>The capacity information for the fleet.</p>",
|
"base": "<p>Describes the capacity status for a fleet.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"Fleet$ComputeCapacityStatus": "<p>The capacity information for the fleet.</p>"
|
"Fleet$ComputeCapacityStatus": "<p>The capacity status for the fleet.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ConcurrentModificationException": {
|
"ConcurrentModificationException": {
|
||||||
@ -122,7 +133,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"CreateFleetRequest": {
|
"CreateFleetRequest": {
|
||||||
"base": "<p>Contains the parameters for the new fleet to create.</p>",
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -131,6 +142,26 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"CreateImageBuilderRequest": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"CreateImageBuilderResult": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"CreateImageBuilderStreamingURLRequest": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"CreateImageBuilderStreamingURLResult": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
"CreateStackRequest": {
|
"CreateStackRequest": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -171,6 +202,26 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DeleteImageBuilderRequest": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DeleteImageBuilderResult": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DeleteImageRequest": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DeleteImageResult": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
"DeleteStackRequest": {
|
"DeleteStackRequest": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -201,6 +252,16 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DescribeImageBuildersRequest": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DescribeImageBuildersResult": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
"DescribeImagesRequest": {
|
"DescribeImagesRequest": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -234,41 +295,42 @@
|
|||||||
"Description": {
|
"Description": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateFleetRequest$Description": "<p>The description of the fleet.</p>",
|
"CreateFleetRequest$Description": "<p>The description displayed to end users.</p>",
|
||||||
"CreateStackRequest$Description": "<p>The description displayed to end users on the AppStream 2.0 portal.</p>",
|
"CreateImageBuilderRequest$Description": null,
|
||||||
"UpdateFleetRequest$Description": "<p>The description displayed to end users on the AppStream 2.0 portal.</p>",
|
"CreateStackRequest$Description": "<p>The description displayed to end users.</p>",
|
||||||
"UpdateStackRequest$Description": "<p>The description displayed to end users on the AppStream 2.0 portal.</p>"
|
"UpdateFleetRequest$Description": "<p>The description displayed to end users.</p>",
|
||||||
|
"UpdateStackRequest$Description": "<p>The description displayed to end users.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DirectoryConfig": {
|
"DirectoryConfig": {
|
||||||
"base": "<p>Full directory configuration details, which are used to join domains for the AppStream 2.0 streaming instances.</p>",
|
"base": "<p>Configuration information for the directory used to join domains.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateDirectoryConfigResult$DirectoryConfig": "<p>Directory configuration details.</p>",
|
"CreateDirectoryConfigResult$DirectoryConfig": "<p>Information about the directory configuration.</p>",
|
||||||
"DirectoryConfigList$member": null,
|
"DirectoryConfigList$member": null,
|
||||||
"UpdateDirectoryConfigResult$DirectoryConfig": "<p>The updated directory configuration details.</p>"
|
"UpdateDirectoryConfigResult$DirectoryConfig": "<p>Information about the directory configuration.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DirectoryConfigList": {
|
"DirectoryConfigList": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeDirectoryConfigsResult$DirectoryConfigs": "<p>The list of directory configurations.</p>"
|
"DescribeDirectoryConfigsResult$DirectoryConfigs": "<p>Information about the directory configurations.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DirectoryName": {
|
"DirectoryName": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateDirectoryConfigRequest$DirectoryName": "<p>The fully qualified name of the directory, such as corp.example.com</p>",
|
"CreateDirectoryConfigRequest$DirectoryName": "<p>The fully qualified name of the directory (for example, corp.example.com).</p>",
|
||||||
"DeleteDirectoryConfigRequest$DirectoryName": "<p>The name of the directory configuration to be deleted.</p>",
|
"DeleteDirectoryConfigRequest$DirectoryName": "<p>The name of the directory configuration.</p>",
|
||||||
"DirectoryConfig$DirectoryName": "<p>The fully qualified name of the directory, such as corp.example.com</p>",
|
"DirectoryConfig$DirectoryName": "<p>The fully qualified name of the directory (for example, corp.example.com).</p>",
|
||||||
"DirectoryNameList$member": null,
|
"DirectoryNameList$member": null,
|
||||||
"DomainJoinInfo$DirectoryName": "<p>The fully qualified name of the directory, such as corp.example.com</p>",
|
"DomainJoinInfo$DirectoryName": "<p>The fully qualified name of the directory (for example, corp.example.com).</p>",
|
||||||
"UpdateDirectoryConfigRequest$DirectoryName": "<p>The name of the existing directory configuration to be updated.</p>"
|
"UpdateDirectoryConfigRequest$DirectoryName": "<p>The name of the directory configuration.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DirectoryNameList": {
|
"DirectoryNameList": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeDirectoryConfigsRequest$DirectoryNames": "<p>A specific list of directory names.</p>"
|
"DescribeDirectoryConfigsRequest$DirectoryNames": "<p>The directory names.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DisassociateFleetRequest": {
|
"DisassociateFleetRequest": {
|
||||||
@ -284,18 +346,21 @@
|
|||||||
"DisplayName": {
|
"DisplayName": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateFleetRequest$DisplayName": "<p>The display name of the fleet.</p>",
|
"CreateFleetRequest$DisplayName": "<p>The fleet name displayed to end users.</p>",
|
||||||
"CreateStackRequest$DisplayName": "<p>The name displayed to end users on the AppStream 2.0 portal.</p>",
|
"CreateImageBuilderRequest$DisplayName": null,
|
||||||
"UpdateFleetRequest$DisplayName": "<p>The name displayed to end users on the AppStream 2.0 portal.</p>",
|
"CreateStackRequest$DisplayName": "<p>The stack name displayed to end users.</p>",
|
||||||
"UpdateStackRequest$DisplayName": "<p>The name displayed to end users on the AppStream 2.0 portal.</p>"
|
"UpdateFleetRequest$DisplayName": "<p>The fleet name displayed to end users.</p>",
|
||||||
|
"UpdateStackRequest$DisplayName": "<p>The stack name displayed to end users.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DomainJoinInfo": {
|
"DomainJoinInfo": {
|
||||||
"base": "<p>The <i>DirectoryName</i> and <i>OrganizationalUnitDistinguishedName</i> values, which are used to join domains for the AppStream 2.0 streaming instances.</p>",
|
"base": "<p>Contains the information needed for streaming instances to join a domain.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateFleetRequest$DomainJoinInfo": "<p>The <i>DirectoryName</i> and <i>OrganizationalUnitDistinguishedName</i> values, which are used to join domains for the AppStream 2.0 streaming instances.</p>",
|
"CreateFleetRequest$DomainJoinInfo": "<p>The information needed for streaming instances to join a domain.</p>",
|
||||||
"Fleet$DomainJoinInfo": "<p>The <i>DirectoryName</i> and <i>OrganizationalUnitDistinguishedName</i> values, which are used to join domains for the AppStream 2.0 streaming instances.</p>",
|
"CreateImageBuilderRequest$DomainJoinInfo": null,
|
||||||
"UpdateFleetRequest$DomainJoinInfo": "<p>The <i>DirectoryName</i> and <i>OrganizationalUnitDistinguishedName</i> values, which are used to join domains for the AppStream 2.0 streaming instances.</p>"
|
"Fleet$DomainJoinInfo": "<p>The information needed for streaming instances to join a domain.</p>",
|
||||||
|
"ImageBuilder$DomainJoinInfo": null,
|
||||||
|
"UpdateFleetRequest$DomainJoinInfo": "<p>The information needed for streaming instances to join a domain.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ErrorMessage": {
|
"ErrorMessage": {
|
||||||
@ -326,25 +391,25 @@
|
|||||||
"Fleet": {
|
"Fleet": {
|
||||||
"base": "<p>Contains the parameters for a fleet.</p>",
|
"base": "<p>Contains the parameters for a fleet.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateFleetResult$Fleet": "<p>The details for the created fleet.</p>",
|
"CreateFleetResult$Fleet": "<p>Information about the fleet.</p>",
|
||||||
"FleetList$member": null,
|
"FleetList$member": null,
|
||||||
"UpdateFleetResult$Fleet": "<p>A list of fleet details.</p>"
|
"UpdateFleetResult$Fleet": "<p>Information about the fleet.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"FleetAttribute": {
|
"FleetAttribute": {
|
||||||
"base": "<p>Fleet attribute.</p>",
|
"base": "<p>The fleet attribute.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"FleetAttributes$member": null
|
"FleetAttributes$member": null
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"FleetAttributes": {
|
"FleetAttributes": {
|
||||||
"base": "<p>A list of fleet attributes.</p>",
|
"base": "<p>The fleet attributes.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"UpdateFleetRequest$AttributesToDelete": "<p>Fleet attributes to be deleted.</p>"
|
"UpdateFleetRequest$AttributesToDelete": "<p>The fleet attributes to delete.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"FleetError": {
|
"FleetError": {
|
||||||
"base": "<p>The details of the fleet error.</p>",
|
"base": "<p>Describes a fleet error.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"FleetErrors$member": null
|
"FleetErrors$member": null
|
||||||
}
|
}
|
||||||
@ -352,19 +417,20 @@
|
|||||||
"FleetErrorCode": {
|
"FleetErrorCode": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"FleetError$ErrorCode": "<p>The error code for the fleet error.</p>"
|
"FleetError$ErrorCode": "<p>The error code.</p>",
|
||||||
|
"ResourceError$ErrorCode": null
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"FleetErrors": {
|
"FleetErrors": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"Fleet$FleetErrors": "<p>The list of fleet errors is appended to this list.</p>"
|
"Fleet$FleetErrors": "<p>The fleet errors.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"FleetList": {
|
"FleetList": {
|
||||||
"base": "<p>A list of fleets.</p>",
|
"base": "<p>The fleets.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeFleetsResult$Fleets": "<p>The list of fleet details.</p>"
|
"DescribeFleetsResult$Fleets": "<p>Information about the fleets.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"FleetState": {
|
"FleetState": {
|
||||||
@ -373,26 +439,68 @@
|
|||||||
"Fleet$State": "<p>The current state for the fleet.</p>"
|
"Fleet$State": "<p>The current state for the fleet.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Image": {
|
"FleetType": {
|
||||||
"base": "<p>New streaming instances are booted from images. The image stores the application catalog and is connected to fleets.</p>",
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
|
"CreateFleetRequest$FleetType": null,
|
||||||
|
"Fleet$FleetType": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Image": {
|
||||||
|
"base": "<p>Describes an image.</p>",
|
||||||
|
"refs": {
|
||||||
|
"DeleteImageResult$Image": null,
|
||||||
"ImageList$member": null
|
"ImageList$member": null
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"ImageBuilder": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"CreateImageBuilderResult$ImageBuilder": null,
|
||||||
|
"DeleteImageBuilderResult$ImageBuilder": null,
|
||||||
|
"ImageBuilderList$member": null,
|
||||||
|
"StartImageBuilderResult$ImageBuilder": null,
|
||||||
|
"StopImageBuilderResult$ImageBuilder": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ImageBuilderList": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"DescribeImageBuildersResult$ImageBuilders": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ImageBuilderState": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"ImageBuilder$State": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ImageBuilderStateChangeReason": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"ImageBuilder$StateChangeReason": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ImageBuilderStateChangeReasonCode": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"ImageBuilderStateChangeReason$Code": null
|
||||||
|
}
|
||||||
|
},
|
||||||
"ImageList": {
|
"ImageList": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeImagesResult$Images": "<p>The list of images.</p>"
|
"DescribeImagesResult$Images": "<p>Information about the images.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ImageState": {
|
"ImageState": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"Image$State": "<p>The image starts in the <b>PENDING</b> state. If image creation succeeds, it moves to <b>AVAILABLE</b>. If image creation fails, it moves to <b>FAILED</b>.</p>"
|
"Image$State": "<p>The image starts in the <code>PENDING</code> state. If image creation succeeds, the state is <code>AVAILABLE</code>. If image creation fails, the state is <code>FAILED</code>.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ImageStateChangeReason": {
|
"ImageStateChangeReason": {
|
||||||
"base": "<p>The reason why the last state change occurred.</p>",
|
"base": "<p>Describes the reason why the last state change occurred.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"Image$StateChangeReason": "<p>The reason why the last state change occurred.</p>"
|
"Image$StateChangeReason": "<p>The reason why the last state change occurred.</p>"
|
||||||
}
|
}
|
||||||
@ -400,7 +508,7 @@
|
|||||||
"ImageStateChangeReasonCode": {
|
"ImageStateChangeReasonCode": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"ImageStateChangeReason$Code": "<p>The state change reason code of the image.</p>"
|
"ImageStateChangeReason$Code": "<p>The state change reason code.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"IncompatibleImageException": {
|
"IncompatibleImageException": {
|
||||||
@ -414,16 +522,17 @@
|
|||||||
"ComputeCapacity$DesiredInstances": "<p>The desired number of streaming instances.</p>",
|
"ComputeCapacity$DesiredInstances": "<p>The desired number of streaming instances.</p>",
|
||||||
"ComputeCapacityStatus$Desired": "<p>The desired number of streaming instances.</p>",
|
"ComputeCapacityStatus$Desired": "<p>The desired number of streaming instances.</p>",
|
||||||
"ComputeCapacityStatus$Running": "<p>The total number of simultaneous streaming instances that are running.</p>",
|
"ComputeCapacityStatus$Running": "<p>The total number of simultaneous streaming instances that are running.</p>",
|
||||||
"ComputeCapacityStatus$InUse": "<p>The number of instances that are being used for streaming.</p>",
|
"ComputeCapacityStatus$InUse": "<p>The number of instances in use for streaming.</p>",
|
||||||
"ComputeCapacityStatus$Available": "<p>The number of currently available instances that can be used to stream sessions.</p>",
|
"ComputeCapacityStatus$Available": "<p>The number of currently available instances that can be used to stream sessions.</p>",
|
||||||
"CreateFleetRequest$MaxUserDurationInSeconds": "<p>The maximum time for which a streaming session can run. The input can be any numeric value in seconds between 600 and 57600.</p>",
|
"CreateFleetRequest$MaxUserDurationInSeconds": "<p>The maximum time that a streaming session can run, in seconds. Specify a value between 600 and 57600.</p>",
|
||||||
"CreateFleetRequest$DisconnectTimeoutInSeconds": "<p>The time after disconnection when a session is considered to have ended. If a user who got disconnected reconnects within this timeout interval, the user is connected back to their previous session. The input can be any numeric value in seconds between 60 and 57600. </p>",
|
"CreateFleetRequest$DisconnectTimeoutInSeconds": "<p>The time after disconnection when a session is considered to have ended, in seconds. If a user who was disconnected reconnects within this time interval, the user is connected to their previous session. Specify a value between 60 and 57600.</p>",
|
||||||
"DescribeDirectoryConfigsRequest$MaxResults": "<p>The size of each page of results.</p>",
|
"DescribeDirectoryConfigsRequest$MaxResults": "<p>The maximum size of each page of results.</p>",
|
||||||
"DescribeSessionsRequest$Limit": "<p>The size of each page of results. The default value is 20 and the maximum supported value is 50.</p>",
|
"DescribeImageBuildersRequest$MaxResults": null,
|
||||||
"Fleet$MaxUserDurationInSeconds": "<p>The maximum time for which a streaming session can run. The value can be any numeric value in seconds between 600 and 57600.</p>",
|
"DescribeSessionsRequest$Limit": "<p>The size of each page of results. The default value is 20 and the maximum value is 50.</p>",
|
||||||
"Fleet$DisconnectTimeoutInSeconds": "<p>The time after disconnection when a session is considered to have ended. If a user who got disconnected reconnects within this timeout interval, the user is connected back to their previous session. The input can be any numeric value in seconds between 60 and 57600.</p>",
|
"Fleet$MaxUserDurationInSeconds": "<p>The maximum time that a streaming session can run, in seconds. Specify a value between 600 and 57600.</p>",
|
||||||
"UpdateFleetRequest$MaxUserDurationInSeconds": "<p>The maximum time for which a streaming session can run. The input can be any numeric value in seconds between 600 and 57600.</p>",
|
"Fleet$DisconnectTimeoutInSeconds": "<p>The time after disconnection when a session is considered to have ended, in seconds. If a user who was disconnected reconnects within this time interval, the user is connected to their previous session. Specify a value between 60 and 57600.</p>",
|
||||||
"UpdateFleetRequest$DisconnectTimeoutInSeconds": "<p>The time after disconnection when a session is considered to have ended. If a user who got disconnected reconnects within this timeout interval, the user is connected back to their previous session. The input can be any numeric value in seconds between 60 and 57600.</p>"
|
"UpdateFleetRequest$MaxUserDurationInSeconds": "<p>The maximum time that a streaming session can run, in seconds. Specify a value between 600 and 57600.</p>",
|
||||||
|
"UpdateFleetRequest$DisconnectTimeoutInSeconds": "<p>The time after disconnection when a session is considered to have ended, in seconds. If a user who was disconnected reconnects within this time interval, the user is connected to their previous session. Specify a value between 60 and 57600.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"InvalidParameterCombinationException": {
|
"InvalidParameterCombinationException": {
|
||||||
@ -447,7 +556,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ListAssociatedFleetsResult": {
|
"ListAssociatedFleetsResult": {
|
||||||
"base": "<p>The response from a successful operation.</p>",
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -457,14 +566,15 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ListAssociatedStacksResult": {
|
"ListAssociatedStacksResult": {
|
||||||
"base": "<p>The response from a successful operation.</p>",
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Long": {
|
"Long": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateStreamingURLRequest$Validity": "<p>The duration up to which the URL returned by this action is valid. The input can be any numeric value in seconds between 1 and 604800 seconds.</p>"
|
"CreateImageBuilderStreamingURLRequest$Validity": null,
|
||||||
|
"CreateStreamingURLRequest$Validity": "<p>The time that the streaming URL will be valid, in seconds. Specify a value between 1 and 604800 seconds.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Metadata": {
|
"Metadata": {
|
||||||
@ -476,7 +586,10 @@
|
|||||||
"Name": {
|
"Name": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateFleetRequest$Name": "<p>A unique identifier for the fleet.</p>"
|
"CreateFleetRequest$Name": "<p>A unique name for the fleet.</p>",
|
||||||
|
"CreateImageBuilderRequest$Name": null,
|
||||||
|
"DeleteImageBuilderRequest$Name": null,
|
||||||
|
"DeleteImageRequest$Name": null
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"OperationNotPermittedException": {
|
"OperationNotPermittedException": {
|
||||||
@ -487,22 +600,23 @@
|
|||||||
"OrganizationalUnitDistinguishedName": {
|
"OrganizationalUnitDistinguishedName": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DomainJoinInfo$OrganizationalUnitDistinguishedName": "<p>The distinguished name of the organizational unit to place the computer account in.</p>",
|
"DomainJoinInfo$OrganizationalUnitDistinguishedName": "<p>The distinguished name of the organizational unit for computer accounts.</p>",
|
||||||
"OrganizationalUnitDistinguishedNamesList$member": null
|
"OrganizationalUnitDistinguishedNamesList$member": null
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"OrganizationalUnitDistinguishedNamesList": {
|
"OrganizationalUnitDistinguishedNamesList": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateDirectoryConfigRequest$OrganizationalUnitDistinguishedNames": "<p>The list of the distinguished names of organizational units to place computer accounts in.</p>",
|
"CreateDirectoryConfigRequest$OrganizationalUnitDistinguishedNames": "<p>The distinguished names of the organizational units for computer accounts.</p>",
|
||||||
"DirectoryConfig$OrganizationalUnitDistinguishedNames": "<p>The list of the distinguished names of organizational units in which to place computer accounts.</p>",
|
"DirectoryConfig$OrganizationalUnitDistinguishedNames": "<p>The distinguished names of the organizational units for computer accounts.</p>",
|
||||||
"UpdateDirectoryConfigRequest$OrganizationalUnitDistinguishedNames": "<p>The list of the distinguished names of organizational units to place computer accounts in.</p>"
|
"UpdateDirectoryConfigRequest$OrganizationalUnitDistinguishedNames": "<p>The distinguished names of the organizational units for computer accounts.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"PlatformType": {
|
"PlatformType": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"Image$Platform": "<p>The operating system platform of the image.</p>"
|
"Image$Platform": "<p>The operating system platform of the image.</p>",
|
||||||
|
"ImageBuilder$Platform": null
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ResourceAlreadyExistsException": {
|
"ResourceAlreadyExistsException": {
|
||||||
@ -510,10 +624,22 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"ResourceError": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"ResourceErrors$member": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ResourceErrors": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"ImageBuilder$ImageBuilderErrors": null
|
||||||
|
}
|
||||||
|
},
|
||||||
"ResourceIdentifier": {
|
"ResourceIdentifier": {
|
||||||
"base": "<p>The ARN of the resource.</p>",
|
"base": "<p>The ARN of the resource.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"StorageConnector$ResourceIdentifier": "<p>The ARN associated with the storage connector.</p>"
|
"StorageConnector$ResourceIdentifier": "<p>The ARN of the storage connector.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ResourceInUseException": {
|
"ResourceInUseException": {
|
||||||
@ -532,21 +658,21 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"SecurityGroupIdList": {
|
"SecurityGroupIdList": {
|
||||||
"base": "<p>A list of security groups.</p>",
|
"base": "<p>The security group IDs.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"VpcConfig$SecurityGroupIds": "<p>Security groups associated with the fleet.</p>"
|
"VpcConfig$SecurityGroupIds": "<p>The security groups for the fleet.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ServiceAccountCredentials": {
|
"ServiceAccountCredentials": {
|
||||||
"base": "<p>The <i>AccountName</i> and <i>AccountPassword</i> of the service account, to be used by the streaming instance to connect to the directory.</p>",
|
"base": "<p>Describes the credentials for the service account used by the streaming instance to connect to the directory.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateDirectoryConfigRequest$ServiceAccountCredentials": "<p>The <i>AccountName</i> and <i>AccountPassword</i> values for the service account, which are used by the streaming instance to connect to the directory.</p>",
|
"CreateDirectoryConfigRequest$ServiceAccountCredentials": "<p>The credentials for the service account used by the streaming instance to connect to the directory.</p>",
|
||||||
"DirectoryConfig$ServiceAccountCredentials": "<p>The <i>AccountName</i> and <i>AccountPassword</i> of the service account, to be used by the streaming instance to connect to the directory.</p>",
|
"DirectoryConfig$ServiceAccountCredentials": "<p>The credentials for the service account used by the streaming instance to connect to the directory.</p>",
|
||||||
"UpdateDirectoryConfigRequest$ServiceAccountCredentials": "<p>The <i>AccountName</i> and <i>AccountPassword</i> values for the service account, which are used by the streaming instance to connect to the directory</p>"
|
"UpdateDirectoryConfigRequest$ServiceAccountCredentials": "<p>The credentials for the service account used by the streaming instance to connect to the directory.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Session": {
|
"Session": {
|
||||||
"base": "<p>Contains the parameters for a streaming session.</p>",
|
"base": "<p>Describes a streaming session.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"SessionList$member": null
|
"SessionList$member": null
|
||||||
}
|
}
|
||||||
@ -554,7 +680,7 @@
|
|||||||
"SessionList": {
|
"SessionList": {
|
||||||
"base": "<p>List of sessions.</p>",
|
"base": "<p>List of sessions.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeSessionsResult$Sessions": "<p>The list of streaming sessions.</p>"
|
"DescribeSessionsResult$Sessions": "<p>Information about the streaming sessions.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"SessionState": {
|
"SessionState": {
|
||||||
@ -564,15 +690,15 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Stack": {
|
"Stack": {
|
||||||
"base": "<p>Details about a stack.</p>",
|
"base": "<p>Describes a stack.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateStackResult$Stack": "<p>The details for the created stack.</p>",
|
"CreateStackResult$Stack": "<p>Information about the stack.</p>",
|
||||||
"StackList$member": null,
|
"StackList$member": null,
|
||||||
"UpdateStackResult$Stack": "<p>A list of stack details.</p>"
|
"UpdateStackResult$Stack": "<p>Information about the stack.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"StackError": {
|
"StackError": {
|
||||||
"base": "<p>Contains the parameters for a stack error.</p>",
|
"base": "<p>Describes a stack error.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"StackErrors$member": null
|
"StackErrors$member": null
|
||||||
}
|
}
|
||||||
@ -580,19 +706,19 @@
|
|||||||
"StackErrorCode": {
|
"StackErrorCode": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"StackError$ErrorCode": "<p>The error code of a stack error.</p>"
|
"StackError$ErrorCode": "<p>The error code.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"StackErrors": {
|
"StackErrors": {
|
||||||
"base": "<p>A list of stack errors.</p>",
|
"base": "<p>The stack errors.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"Stack$StackErrors": "<p>The list of errors associated with the stack.</p>"
|
"Stack$StackErrors": "<p>The errors for the stack.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"StackList": {
|
"StackList": {
|
||||||
"base": "<p>A list of stacks.</p>",
|
"base": "<p>The stacks.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeStacksResult$Stacks": "<p>The list of stack details.</p>"
|
"DescribeStacksResult$Stacks": "<p>Information about the stacks.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"StartFleetRequest": {
|
"StartFleetRequest": {
|
||||||
@ -605,6 +731,16 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"StartImageBuilderRequest": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"StartImageBuilderResult": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
"StopFleetRequest": {
|
"StopFleetRequest": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -615,126 +751,154 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"StopImageBuilderRequest": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"StopImageBuilderResult": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
"StorageConnector": {
|
"StorageConnector": {
|
||||||
"base": "<p>Contains the parameters for a storage connector.</p>",
|
"base": "<p>Describes a storage connector.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"StorageConnectorList$member": null
|
"StorageConnectorList$member": null
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"StorageConnectorList": {
|
"StorageConnectorList": {
|
||||||
"base": "<p>A list of storage connectors.</p>",
|
"base": "<p>The storage connectors.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateStackRequest$StorageConnectors": "<p>The storage connectors to be enabled for the stack.</p>",
|
"CreateStackRequest$StorageConnectors": "<p>The storage connectors to enable.</p>",
|
||||||
"Stack$StorageConnectors": "<p>The storage connectors to be enabled for the stack.</p>",
|
"Stack$StorageConnectors": "<p>The storage connectors to enable.</p>",
|
||||||
"UpdateStackRequest$StorageConnectors": "<p>The storage connectors to be enabled for the stack.</p>"
|
"UpdateStackRequest$StorageConnectors": "<p>The storage connectors to enable.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"StorageConnectorType": {
|
"StorageConnectorType": {
|
||||||
"base": "<p>The type of storage connector. The possible values include: HOMEFOLDERS.</p>",
|
"base": "<p>The type of storage connector.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"StorageConnector$ConnectorType": "<p>The type of storage connector. The possible values include: HOMEFOLDERS.</p>"
|
"StorageConnector$ConnectorType": "<p>The type of storage connector.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"StreamingUrlUserId": {
|
"StreamingUrlUserId": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateStreamingURLRequest$UserId": "<p>A unique user ID for whom the URL is generated.</p>"
|
"CreateStreamingURLRequest$UserId": "<p>The ID of the user.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"String": {
|
"String": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"Application$Name": "<p>The unique identifier for the application.</p>",
|
"Application$Name": "<p>The name of the application.</p>",
|
||||||
"Application$DisplayName": "<p>The name of the application shown to the end users.</p>",
|
"Application$DisplayName": "<p>The application name displayed to end users.</p>",
|
||||||
"Application$IconURL": "<p>The URL for the application icon. This URL may be time-limited.</p>",
|
"Application$IconURL": "<p>The URL for the application icon. This URL might be time-limited.</p>",
|
||||||
"Application$LaunchPath": "<p>The path to the application executable in the instance.</p>",
|
"Application$LaunchPath": "<p>The path to the application executable in the instance.</p>",
|
||||||
"Application$LaunchParameters": "<p>A list of arguments that are passed to the application at launch.</p>",
|
"Application$LaunchParameters": "<p>The arguments that are passed to the application at launch.</p>",
|
||||||
"AssociateFleetRequest$FleetName": "<p>The name of the fleet to associate.</p>",
|
"AssociateFleetRequest$FleetName": "<p>The name of the fleet.</p>",
|
||||||
"AssociateFleetRequest$StackName": "<p>The name of the stack to which the fleet is associated.</p>",
|
"AssociateFleetRequest$StackName": "<p>The name of the stack.</p>",
|
||||||
"CreateFleetRequest$ImageName": "<p>Unique name of the image used by the fleet.</p>",
|
"CreateFleetRequest$ImageName": "<p>The name of the image used by the fleet.</p>",
|
||||||
"CreateFleetRequest$InstanceType": "<p>The instance type of compute resources for the fleet. Fleet instances are launched from this instance type. Available instance types are:</p> <ul> <li> <p>stream.standard.medium</p> </li> <li> <p>stream.standard.large</p> </li> <li> <p>stream.compute.large</p> </li> <li> <p>stream.compute.xlarge</p> </li> <li> <p>stream.compute.2xlarge</p> </li> <li> <p>stream.compute.4xlarge</p> </li> <li> <p>stream.compute.8xlarge</p> </li> <li> <p>stream.memory.large</p> </li> <li> <p>stream.memory.xlarge</p> </li> <li> <p>stream.memory.2xlarge</p> </li> <li> <p>stream.memory.4xlarge</p> </li> <li> <p>stream.memory.8xlarge</p> </li> <li> <p>stream.graphics-pro.4xlarge</p> </li> <li> <p>stream.graphics-pro.8xlarge</p> </li> <li> <p>stream.graphics-pro.16xlarge</p> </li> <li> <p>stream.graphics-desktop.2xlarge</p> </li> </ul>",
|
"CreateFleetRequest$InstanceType": "<p>The instance type to use when launching fleet instances. The following instance types are available:</p> <ul> <li> <p>stream.standard.medium</p> </li> <li> <p>stream.standard.large</p> </li> <li> <p>stream.compute.large</p> </li> <li> <p>stream.compute.xlarge</p> </li> <li> <p>stream.compute.2xlarge</p> </li> <li> <p>stream.compute.4xlarge</p> </li> <li> <p>stream.compute.8xlarge</p> </li> <li> <p>stream.memory.large</p> </li> <li> <p>stream.memory.xlarge</p> </li> <li> <p>stream.memory.2xlarge</p> </li> <li> <p>stream.memory.4xlarge</p> </li> <li> <p>stream.memory.8xlarge</p> </li> <li> <p>stream.graphics-design.large</p> </li> <li> <p>stream.graphics-design.xlarge</p> </li> <li> <p>stream.graphics-design.2xlarge</p> </li> <li> <p>stream.graphics-design.4xlarge</p> </li> <li> <p>stream.graphics-desktop.2xlarge</p> </li> <li> <p>stream.graphics-pro.4xlarge</p> </li> <li> <p>stream.graphics-pro.8xlarge</p> </li> <li> <p>stream.graphics-pro.16xlarge</p> </li> </ul>",
|
||||||
"CreateStackRequest$Name": "<p>The unique identifier for this stack.</p>",
|
"CreateImageBuilderRequest$ImageName": null,
|
||||||
"CreateStreamingURLRequest$StackName": "<p>The stack for which the URL is generated.</p>",
|
"CreateImageBuilderRequest$InstanceType": null,
|
||||||
"CreateStreamingURLRequest$FleetName": "<p>The fleet for which the URL is generated.</p>",
|
"CreateImageBuilderStreamingURLRequest$Name": null,
|
||||||
|
"CreateImageBuilderStreamingURLResult$StreamingURL": null,
|
||||||
|
"CreateStackRequest$Name": "<p>The name of the stack.</p>",
|
||||||
|
"CreateStreamingURLRequest$StackName": "<p>The name of the stack.</p>",
|
||||||
|
"CreateStreamingURLRequest$FleetName": "<p>The name of the fleet.</p>",
|
||||||
"CreateStreamingURLRequest$ApplicationId": "<p>The ID of the application that must be launched after the session starts.</p>",
|
"CreateStreamingURLRequest$ApplicationId": "<p>The ID of the application that must be launched after the session starts.</p>",
|
||||||
"CreateStreamingURLRequest$SessionContext": "<p>The sessionContext of the streaming URL.</p>",
|
"CreateStreamingURLRequest$SessionContext": "<p>The session context of the streaming URL.</p>",
|
||||||
"CreateStreamingURLResult$StreamingURL": "<p>The URL to start the AppStream 2.0 streaming session.</p>",
|
"CreateStreamingURLResult$StreamingURL": "<p>The URL to start the AppStream 2.0 streaming session.</p>",
|
||||||
"DeleteFleetRequest$Name": "<p>The name of the fleet to be deleted.</p>",
|
"DeleteFleetRequest$Name": "<p>The name of the fleet.</p>",
|
||||||
"DeleteStackRequest$Name": "<p>The name of the stack to delete.</p>",
|
"DeleteStackRequest$Name": "<p>The name of the stack.</p>",
|
||||||
"DescribeDirectoryConfigsRequest$NextToken": "<p>The DescribeDirectoryConfigsResult.NextToken from a previous call to DescribeDirectoryConfigs. If this is the first call, pass null.</p>",
|
"DescribeDirectoryConfigsRequest$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.</p>",
|
||||||
"DescribeDirectoryConfigsResult$NextToken": "<p>If not null, more results are available. To retrieve the next set of items, pass this value for the NextToken parameter in a subsequent call to DescribeDirectoryConfigs.</p>",
|
"DescribeDirectoryConfigsResult$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.</p>",
|
||||||
"DescribeFleetsRequest$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.</p>",
|
"DescribeFleetsRequest$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.</p>",
|
||||||
"DescribeFleetsResult$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.</p>",
|
"DescribeFleetsResult$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.</p>",
|
||||||
"DescribeSessionsRequest$StackName": "<p>The name of the stack for which to list sessions.</p>",
|
"DescribeImageBuildersRequest$NextToken": null,
|
||||||
"DescribeSessionsRequest$FleetName": "<p>The name of the fleet for which to list sessions.</p>",
|
"DescribeImageBuildersResult$NextToken": null,
|
||||||
|
"DescribeSessionsRequest$StackName": "<p>The name of the stack.</p>",
|
||||||
|
"DescribeSessionsRequest$FleetName": "<p>The name of the fleet.</p>",
|
||||||
"DescribeSessionsRequest$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.</p>",
|
"DescribeSessionsRequest$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.</p>",
|
||||||
"DescribeSessionsResult$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.</p>",
|
"DescribeSessionsResult$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.</p>",
|
||||||
"DescribeStacksRequest$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.</p>",
|
"DescribeStacksRequest$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.</p>",
|
||||||
"DescribeStacksResult$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.</p>",
|
"DescribeStacksResult$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.</p>",
|
||||||
"DisassociateFleetRequest$FleetName": "<p>The name of the fleet to disassociate.</p>",
|
"DisassociateFleetRequest$FleetName": "<p>The name of the fleet.</p>",
|
||||||
"DisassociateFleetRequest$StackName": "<p>The name of the stack with which the fleet is associated.</p>",
|
"DisassociateFleetRequest$StackName": "<p>The name of the stack.</p>",
|
||||||
"ExpireSessionRequest$SessionId": "<p>The unique identifier of the streaming session to be stopped.</p>",
|
"ExpireSessionRequest$SessionId": "<p>The ID of the streaming session.</p>",
|
||||||
"Fleet$Name": "<p>The name of the fleet.</p>",
|
"Fleet$Name": "<p>The name of the fleet.</p>",
|
||||||
"Fleet$DisplayName": "<p>The name displayed to end users on the AppStream 2.0 portal.</p>",
|
"Fleet$DisplayName": "<p>The fleet name displayed to end users.</p>",
|
||||||
"Fleet$Description": "<p>The description displayed to end users on the AppStream 2.0 portal.</p>",
|
"Fleet$Description": "<p>The description displayed to end users.</p>",
|
||||||
"Fleet$ImageName": "<p>The image used by the fleet.</p>",
|
"Fleet$ImageName": "<p>The image used by the fleet.</p>",
|
||||||
"Fleet$InstanceType": "<p>The instance type of compute resources for the fleet. The fleet instances are launched from this instance type. </p>",
|
"Fleet$InstanceType": "<p>The instance type to use when launching fleet instances.</p>",
|
||||||
"FleetError$ErrorMessage": "<p>The error message generated when the fleet has errors.</p>",
|
"FleetError$ErrorMessage": "<p>The error message.</p>",
|
||||||
"Image$Name": "<p>The unique identifier for the image.</p>",
|
"Image$Name": "<p>The name of the image.</p>",
|
||||||
"Image$DisplayName": "<p>The display name for the image.</p>",
|
"Image$DisplayName": "<p>The image name displayed to end users.</p>",
|
||||||
"Image$Description": "<p>A meaningful description for the image.</p>",
|
"Image$Description": "<p>The description displayed to end users.</p>",
|
||||||
"ImageStateChangeReason$Message": "<p>The state change reason message to the end user.</p>",
|
"ImageBuilder$Name": null,
|
||||||
"ListAssociatedFleetsRequest$StackName": "<p>The name of the stack whose associated fleets are listed.</p>",
|
"ImageBuilder$Description": null,
|
||||||
|
"ImageBuilder$DisplayName": null,
|
||||||
|
"ImageBuilder$InstanceType": null,
|
||||||
|
"ImageBuilderStateChangeReason$Message": null,
|
||||||
|
"ImageStateChangeReason$Message": "<p>The state change reason message.</p>",
|
||||||
|
"ListAssociatedFleetsRequest$StackName": "<p>The name of the stack.</p>",
|
||||||
"ListAssociatedFleetsRequest$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.</p>",
|
"ListAssociatedFleetsRequest$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.</p>",
|
||||||
"ListAssociatedFleetsResult$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.</p>",
|
"ListAssociatedFleetsResult$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.</p>",
|
||||||
"ListAssociatedStacksRequest$FleetName": "<p>The name of the fleet whose associated stacks are listed.</p>",
|
"ListAssociatedStacksRequest$FleetName": "<p>The name of the fleet.</p>",
|
||||||
"ListAssociatedStacksRequest$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.</p>",
|
"ListAssociatedStacksRequest$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.</p>",
|
||||||
"ListAssociatedStacksResult$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.</p>",
|
"ListAssociatedStacksResult$NextToken": "<p>The pagination token to use to retrieve the next page of results for this operation. If there are no more pages, this value is null.</p>",
|
||||||
"Metadata$key": null,
|
"Metadata$key": null,
|
||||||
"Metadata$value": null,
|
"Metadata$value": null,
|
||||||
|
"ResourceError$ErrorMessage": null,
|
||||||
"SecurityGroupIdList$member": null,
|
"SecurityGroupIdList$member": null,
|
||||||
"Session$Id": "<p>The unique ID for a streaming session.</p>",
|
"Session$Id": "<p>The ID of the streaming session.</p>",
|
||||||
"Session$StackName": "<p>The name of the stack for which the streaming session was created.</p>",
|
"Session$StackName": "<p>The name of the stack for the streaming session.</p>",
|
||||||
"Session$FleetName": "<p>The name of the fleet for which the streaming session was created.</p>",
|
"Session$FleetName": "<p>The name of the fleet for the streaming session.</p>",
|
||||||
"Stack$Name": "<p>The unique identifier of the stack.</p>",
|
"Stack$Name": "<p>The name of the stack.</p>",
|
||||||
"Stack$Description": "<p>A meaningful description for the stack.</p>",
|
"Stack$Description": "<p>The description displayed to end users.</p>",
|
||||||
"Stack$DisplayName": "<p>A display name for the stack.</p>",
|
"Stack$DisplayName": "<p>The stack name displayed to end users.</p>",
|
||||||
"StackError$ErrorMessage": "<p>The error message of a stack error.</p>",
|
"StackError$ErrorMessage": "<p>The error message.</p>",
|
||||||
"StartFleetRequest$Name": "<p>The name of the fleet to start.</p>",
|
"StartFleetRequest$Name": "<p>The name of the fleet.</p>",
|
||||||
"StopFleetRequest$Name": "<p>The name of the fleet to stop.</p>",
|
"StartImageBuilderRequest$Name": null,
|
||||||
|
"StopFleetRequest$Name": "<p>The name of the fleet.</p>",
|
||||||
|
"StopImageBuilderRequest$Name": null,
|
||||||
"StringList$member": null,
|
"StringList$member": null,
|
||||||
"SubnetIdList$member": null,
|
"SubnetIdList$member": null,
|
||||||
"UpdateFleetRequest$ImageName": "<p>The image name from which a fleet is created.</p>",
|
"UpdateFleetRequest$ImageName": "<p>The name of the image used by the fleet.</p>",
|
||||||
"UpdateFleetRequest$Name": "<p>The name of the fleet.</p>",
|
"UpdateFleetRequest$Name": "<p>A unique name for the fleet.</p>",
|
||||||
"UpdateFleetRequest$InstanceType": "<p>The instance type of compute resources for the fleet. Fleet instances are launched from this instance type. Available instance types are:</p> <ul> <li> <p>stream.standard.medium</p> </li> <li> <p>stream.standard.large</p> </li> <li> <p>stream.compute.large</p> </li> <li> <p>stream.compute.xlarge</p> </li> <li> <p>stream.compute.2xlarge</p> </li> <li> <p>stream.compute.4xlarge</p> </li> <li> <p>stream.compute.8xlarge</p> </li> <li> <p>stream.memory.large</p> </li> <li> <p>stream.memory.xlarge</p> </li> <li> <p>stream.memory.2xlarge</p> </li> <li> <p>stream.memory.4xlarge</p> </li> <li> <p>stream.memory.8xlarge</p> </li> <li> <p>stream.graphics-pro.4xlarge</p> </li> <li> <p>stream.graphics-pro.8xlarge</p> </li> <li> <p>stream.graphics-pro.16xlarge</p> </li> <li> <p>stream.graphics-desktop.2xlarge</p> </li> </ul>",
|
"UpdateFleetRequest$InstanceType": "<p>The instance type to use when launching fleet instances. The following instance types are available:</p> <ul> <li> <p>stream.standard.medium</p> </li> <li> <p>stream.standard.large</p> </li> <li> <p>stream.compute.large</p> </li> <li> <p>stream.compute.xlarge</p> </li> <li> <p>stream.compute.2xlarge</p> </li> <li> <p>stream.compute.4xlarge</p> </li> <li> <p>stream.compute.8xlarge</p> </li> <li> <p>stream.memory.large</p> </li> <li> <p>stream.memory.xlarge</p> </li> <li> <p>stream.memory.2xlarge</p> </li> <li> <p>stream.memory.4xlarge</p> </li> <li> <p>stream.memory.8xlarge</p> </li> <li> <p>stream.graphics-design.large</p> </li> <li> <p>stream.graphics-design.xlarge</p> </li> <li> <p>stream.graphics-design.2xlarge</p> </li> <li> <p>stream.graphics-design.4xlarge</p> </li> <li> <p>stream.graphics-desktop.2xlarge</p> </li> <li> <p>stream.graphics-pro.4xlarge</p> </li> <li> <p>stream.graphics-pro.8xlarge</p> </li> <li> <p>stream.graphics-pro.16xlarge</p> </li> </ul>",
|
||||||
"UpdateStackRequest$Name": "<p>The name of the stack to update.</p>"
|
"UpdateStackRequest$Name": "<p>The name of the stack.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"StringList": {
|
"StringList": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeFleetsRequest$Names": "<p>The fleet names to describe. Use null to describe all the fleets for the AWS account.</p>",
|
"DescribeFleetsRequest$Names": "<p>The names of the fleets to describe.</p>",
|
||||||
"DescribeImagesRequest$Names": "<p>A specific list of images to describe.</p>",
|
"DescribeImageBuildersRequest$Names": null,
|
||||||
"DescribeStacksRequest$Names": "<p>The stack names to describe. Use null to describe all the stacks for the AWS account.</p>",
|
"DescribeImagesRequest$Names": "<p>The names of the images to describe.</p>",
|
||||||
"ListAssociatedFleetsResult$Names": "<p>The names of associated fleets.</p>",
|
"DescribeStacksRequest$Names": "<p>The names of the stacks to describe.</p>",
|
||||||
"ListAssociatedStacksResult$Names": "<p>The names of associated stacks.</p>"
|
"ListAssociatedFleetsResult$Names": "<p>The names of the fleets.</p>",
|
||||||
|
"ListAssociatedStacksResult$Names": "<p>The names of the stacks.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"SubnetIdList": {
|
"SubnetIdList": {
|
||||||
"base": "<p>A list of subnet IDs.</p>",
|
"base": "<p>The subnet IDs.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"VpcConfig$SubnetIds": "<p>The list of subnets to which a network interface is established from the fleet instance.</p>"
|
"VpcConfig$SubnetIds": "<p>The subnets to which a network interface is established from the fleet instance.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Timestamp": {
|
"Timestamp": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateStreamingURLResult$Expires": "<p>Elapsed seconds after the Unix epoch, when this URL expires.</p>",
|
"CreateImageBuilderStreamingURLResult$Expires": null,
|
||||||
"DirectoryConfig$CreatedTime": "<p>The time stamp when the directory configuration was created within AppStream 2.0.</p>",
|
"CreateStreamingURLResult$Expires": "<p>The elapsed time, in seconds after the Unix epoch, when this URL expires.</p>",
|
||||||
"Fleet$CreatedTime": "<p>The time at which the fleet was created.</p>",
|
"DirectoryConfig$CreatedTime": "<p>The time the directory configuration was created.</p>",
|
||||||
"Image$CreatedTime": "<p>The time stamp when the image was created.</p>",
|
"Fleet$CreatedTime": "<p>The time the fleet was created.</p>",
|
||||||
"Image$PublicBaseImageReleasedDate": "<p>The AWS release date of the public base image. For private images, this date is the release date of the base image from which the image was created.</p>",
|
"Image$CreatedTime": "<p>The time the image was created.</p>",
|
||||||
"Stack$CreatedTime": "<p>The time stamp when the stack was created.</p>"
|
"Image$PublicBaseImageReleasedDate": "<p>The release date of the public base image. For private images, this date is the release date of the base image from which the image was created.</p>",
|
||||||
|
"ImageBuilder$CreatedTime": null,
|
||||||
|
"ResourceError$ErrorTimestamp": null,
|
||||||
|
"Stack$CreatedTime": "<p>The time the stack was created.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"UpdateDirectoryConfigRequest": {
|
"UpdateDirectoryConfigRequest": {
|
||||||
@ -770,21 +934,23 @@
|
|||||||
"UserId": {
|
"UserId": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeSessionsRequest$UserId": "<p>The user for whom to list sessions. Use null to describe all the sessions for the stack and fleet.</p>",
|
"DescribeSessionsRequest$UserId": "<p>The user ID.</p>",
|
||||||
"Session$UserId": "<p>The identifier of the user for whom the session was created.</p>"
|
"Session$UserId": "<p>The identifier of the user for whom the session was created.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"VisibilityType": {
|
"VisibilityType": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"Image$Visibility": "<p>The visibility of an image to the user; images can be public or private.</p>"
|
"Image$Visibility": "<p>Indicates whether the image is public or private.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"VpcConfig": {
|
"VpcConfig": {
|
||||||
"base": "<p>VPC configuration information.</p>",
|
"base": "<p>Describes VPC configuration information.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateFleetRequest$VpcConfig": "<p>The VPC configuration for the fleet.</p>",
|
"CreateFleetRequest$VpcConfig": "<p>The VPC configuration for the fleet.</p>",
|
||||||
|
"CreateImageBuilderRequest$VpcConfig": null,
|
||||||
"Fleet$VpcConfig": "<p>The VPC configuration for the fleet.</p>",
|
"Fleet$VpcConfig": "<p>The VPC configuration for the fleet.</p>",
|
||||||
|
"ImageBuilder$VpcConfig": null,
|
||||||
"UpdateFleetRequest$VpcConfig": "<p>The VPC configuration for the fleet.</p>"
|
"UpdateFleetRequest$VpcConfig": "<p>The VPC configuration for the fleet.</p>"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
6
vendor/github.com/aws/aws-sdk-go/models/apis/budgets/2016-10-20/api-2.json
generated
vendored
6
vendor/github.com/aws/aws-sdk-go/models/apis/budgets/2016-10-20/api-2.json
generated
vendored
@ -188,7 +188,8 @@
|
|||||||
"errors":[
|
"errors":[
|
||||||
{"shape":"InternalErrorException"},
|
{"shape":"InternalErrorException"},
|
||||||
{"shape":"InvalidParameterException"},
|
{"shape":"InvalidParameterException"},
|
||||||
{"shape":"NotFoundException"}
|
{"shape":"NotFoundException"},
|
||||||
|
{"shape":"DuplicateRecordException"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"UpdateSubscriber":{
|
"UpdateSubscriber":{
|
||||||
@ -202,7 +203,8 @@
|
|||||||
"errors":[
|
"errors":[
|
||||||
{"shape":"InternalErrorException"},
|
{"shape":"InternalErrorException"},
|
||||||
{"shape":"InvalidParameterException"},
|
{"shape":"InvalidParameterException"},
|
||||||
{"shape":"NotFoundException"}
|
{"shape":"NotFoundException"},
|
||||||
|
{"shape":"DuplicateRecordException"}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
43
vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json
generated
vendored
43
vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/api-2.json
generated
vendored
@ -549,6 +549,18 @@
|
|||||||
{"shape":"InvalidOperationException"}
|
{"shape":"InvalidOperationException"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"UpdateTerminationProtection":{
|
||||||
|
"name":"UpdateTerminationProtection",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"UpdateTerminationProtectionInput"},
|
||||||
|
"output":{
|
||||||
|
"shape":"UpdateTerminationProtectionOutput",
|
||||||
|
"resultWrapper":"UpdateTerminationProtectionResult"
|
||||||
|
}
|
||||||
|
},
|
||||||
"ValidateTemplate":{
|
"ValidateTemplate":{
|
||||||
"name":"ValidateTemplate",
|
"name":"ValidateTemplate",
|
||||||
"http":{
|
"http":{
|
||||||
@ -809,7 +821,8 @@
|
|||||||
"StackPolicyBody":{"shape":"StackPolicyBody"},
|
"StackPolicyBody":{"shape":"StackPolicyBody"},
|
||||||
"StackPolicyURL":{"shape":"StackPolicyURL"},
|
"StackPolicyURL":{"shape":"StackPolicyURL"},
|
||||||
"Tags":{"shape":"Tags"},
|
"Tags":{"shape":"Tags"},
|
||||||
"ClientRequestToken":{"shape":"ClientRequestToken"}
|
"ClientRequestToken":{"shape":"ClientRequestToken"},
|
||||||
|
"EnableTerminationProtection":{"shape":"EnableTerminationProtection"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"CreateStackInstancesInput":{
|
"CreateStackInstancesInput":{
|
||||||
@ -1096,6 +1109,7 @@
|
|||||||
"min":1
|
"min":1
|
||||||
},
|
},
|
||||||
"DisableRollback":{"type":"boolean"},
|
"DisableRollback":{"type":"boolean"},
|
||||||
|
"EnableTerminationProtection":{"type":"boolean"},
|
||||||
"EstimateTemplateCostInput":{
|
"EstimateTemplateCostInput":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -1734,6 +1748,7 @@
|
|||||||
"Description":{"shape":"Description"},
|
"Description":{"shape":"Description"},
|
||||||
"Parameters":{"shape":"Parameters"},
|
"Parameters":{"shape":"Parameters"},
|
||||||
"CreationTime":{"shape":"CreationTime"},
|
"CreationTime":{"shape":"CreationTime"},
|
||||||
|
"DeletionTime":{"shape":"DeletionTime"},
|
||||||
"LastUpdatedTime":{"shape":"LastUpdatedTime"},
|
"LastUpdatedTime":{"shape":"LastUpdatedTime"},
|
||||||
"RollbackConfiguration":{"shape":"RollbackConfiguration"},
|
"RollbackConfiguration":{"shape":"RollbackConfiguration"},
|
||||||
"StackStatus":{"shape":"StackStatus"},
|
"StackStatus":{"shape":"StackStatus"},
|
||||||
@ -1744,7 +1759,10 @@
|
|||||||
"Capabilities":{"shape":"Capabilities"},
|
"Capabilities":{"shape":"Capabilities"},
|
||||||
"Outputs":{"shape":"Outputs"},
|
"Outputs":{"shape":"Outputs"},
|
||||||
"RoleARN":{"shape":"RoleARN"},
|
"RoleARN":{"shape":"RoleARN"},
|
||||||
"Tags":{"shape":"Tags"}
|
"Tags":{"shape":"Tags"},
|
||||||
|
"EnableTerminationProtection":{"shape":"EnableTerminationProtection"},
|
||||||
|
"ParentId":{"shape":"StackId"},
|
||||||
|
"RootId":{"shape":"StackId"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"StackEvent":{
|
"StackEvent":{
|
||||||
@ -2098,7 +2116,9 @@
|
|||||||
"LastUpdatedTime":{"shape":"LastUpdatedTime"},
|
"LastUpdatedTime":{"shape":"LastUpdatedTime"},
|
||||||
"DeletionTime":{"shape":"DeletionTime"},
|
"DeletionTime":{"shape":"DeletionTime"},
|
||||||
"StackStatus":{"shape":"StackStatus"},
|
"StackStatus":{"shape":"StackStatus"},
|
||||||
"StackStatusReason":{"shape":"StackStatusReason"}
|
"StackStatusReason":{"shape":"StackStatusReason"},
|
||||||
|
"ParentId":{"shape":"StackId"},
|
||||||
|
"RootId":{"shape":"StackId"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Stacks":{
|
"Stacks":{
|
||||||
@ -2267,6 +2287,23 @@
|
|||||||
"OperationId":{"shape":"ClientRequestToken"}
|
"OperationId":{"shape":"ClientRequestToken"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"UpdateTerminationProtectionInput":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":[
|
||||||
|
"EnableTerminationProtection",
|
||||||
|
"StackName"
|
||||||
|
],
|
||||||
|
"members":{
|
||||||
|
"EnableTerminationProtection":{"shape":"EnableTerminationProtection"},
|
||||||
|
"StackName":{"shape":"StackNameOrId"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"UpdateTerminationProtectionOutput":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"StackId":{"shape":"StackId"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"Url":{"type":"string"},
|
"Url":{"type":"string"},
|
||||||
"UsePreviousTemplate":{"type":"boolean"},
|
"UsePreviousTemplate":{"type":"boolean"},
|
||||||
"UsePreviousValue":{"type":"boolean"},
|
"UsePreviousValue":{"type":"boolean"},
|
||||||
|
34
vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json
generated
vendored
34
vendor/github.com/aws/aws-sdk-go/models/apis/cloudformation/2010-05-15/docs-2.json
generated
vendored
@ -40,6 +40,7 @@
|
|||||||
"StopStackSetOperation": "<p>Stops an in-progress operation on a stack set and its associated stack instances. </p>",
|
"StopStackSetOperation": "<p>Stops an in-progress operation on a stack set and its associated stack instances. </p>",
|
||||||
"UpdateStack": "<p>Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack via the <a>DescribeStacks</a> action.</p> <p>To get a copy of the template for an existing stack, you can use the <a>GetTemplate</a> action.</p> <p>For more information about creating an update template, updating a stack, and monitoring the progress of the update, see <a href=\"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks.html\">Updating a Stack</a>.</p>",
|
"UpdateStack": "<p>Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack via the <a>DescribeStacks</a> action.</p> <p>To get a copy of the template for an existing stack, you can use the <a>GetTemplate</a> action.</p> <p>For more information about creating an update template, updating a stack, and monitoring the progress of the update, see <a href=\"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks.html\">Updating a Stack</a>.</p>",
|
||||||
"UpdateStackSet": "<p>Updates the stack set and <i>all</i> associated stack instances.</p> <p>Even if the stack set operation created by updating the stack set fails (completely or partially, below or above a specified failure tolerance), the stack set is updated with your changes. Subsequent <a>CreateStackInstances</a> calls on the specified stack set use the updated stack set.</p>",
|
"UpdateStackSet": "<p>Updates the stack set and <i>all</i> associated stack instances.</p> <p>Even if the stack set operation created by updating the stack set fails (completely or partially, below or above a specified failure tolerance), the stack set is updated with your changes. Subsequent <a>CreateStackInstances</a> calls on the specified stack set use the updated stack set.</p>",
|
||||||
|
"UpdateTerminationProtection": "<p>Updates termination protection for the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see <a href=\"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-protect-stacks.html\">Protecting a Stack From Being Deleted</a> in the <i>AWS CloudFormation User Guide</i>.</p> <p> For <a href=\"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html\">nested stacks</a>, termination protection is set on the root stack and cannot be changed directly on the nested stack.</p>",
|
||||||
"ValidateTemplate": "<p>Validates a specified template. AWS CloudFormation first checks if the template is valid JSON. If it isn't, AWS CloudFormation checks if the template is valid YAML. If both these checks fail, AWS CloudFormation returns a template validation error.</p>"
|
"ValidateTemplate": "<p>Validates a specified template. AWS CloudFormation first checks if the template is valid JSON. If it isn't, AWS CloudFormation checks if the template is valid YAML. If both these checks fail, AWS CloudFormation returns a template validation error.</p>"
|
||||||
},
|
},
|
||||||
"shapes": {
|
"shapes": {
|
||||||
@ -378,6 +379,7 @@
|
|||||||
"DeletionTime": {
|
"DeletionTime": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
|
"Stack$DeletionTime": "<p>The time the stack was deleted.</p>",
|
||||||
"StackSummary$DeletionTime": "<p>The time the stack was deleted.</p>"
|
"StackSummary$DeletionTime": "<p>The time the stack was deleted.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -498,6 +500,14 @@
|
|||||||
"Stack$DisableRollback": "<p>Boolean to enable or disable rollback on stack creation failures:</p> <ul> <li> <p> <code>true</code>: disable rollback</p> </li> <li> <p> <code>false</code>: enable rollback</p> </li> </ul>"
|
"Stack$DisableRollback": "<p>Boolean to enable or disable rollback on stack creation failures:</p> <ul> <li> <p> <code>true</code>: disable rollback</p> </li> <li> <p> <code>false</code>: enable rollback</p> </li> </ul>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"EnableTerminationProtection": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"CreateStackInput$EnableTerminationProtection": "<p>Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see <a href=\"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-protect-stacks.html\">Protecting a Stack From Being Deleted</a> in the <i>AWS CloudFormation User Guide</i>. Termination protection is disabled on stacks by default. </p> <p> For <a href=\"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html\">nested stacks</a>, termination protection is set on the root stack and cannot be changed directly on the nested stack.</p>",
|
||||||
|
"Stack$EnableTerminationProtection": "<p>Whether termination protection is enabled for the stack.</p> <p> For <a href=\"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html\">nested stacks</a>, termination protection is set on the root stack and cannot be changed directly on the nested stack. For more information, see <a href=\"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-protect-stacks.html\">Protecting a Stack From Being Deleted</a> in the <i>AWS CloudFormation User Guide</i>.</p>",
|
||||||
|
"UpdateTerminationProtectionInput$EnableTerminationProtection": "<p>Whether to enable termination protection on the specified stack.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
"EstimateTemplateCostInput": {
|
"EstimateTemplateCostInput": {
|
||||||
"base": "<p>The input for an <a>EstimateTemplateCost</a> action.</p>",
|
"base": "<p>The input for an <a>EstimateTemplateCost</a> action.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -757,13 +767,13 @@
|
|||||||
"MaxConcurrentCount": {
|
"MaxConcurrentCount": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"StackSetOperationPreferences$MaxConcurrentCount": "<p>The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of <code>FailureToleranceCount</code>—<code>MaxConcurrentCount</code> is at most one more than the <code>FailureToleranceCount</code> .</p> <p>Conditional: You must specify either <code>MaxConcurrentCount</code> or <code>MaxConcurrentPercentage</code>, but not both.</p>"
|
"StackSetOperationPreferences$MaxConcurrentCount": "<p>The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of <code>FailureToleranceCount</code>—<code>MaxConcurrentCount</code> is at most one more than the <code>FailureToleranceCount</code> .</p> <p>Note that this setting lets you specify the <i>maximum</i> for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.</p> <p>Conditional: You must specify either <code>MaxConcurrentCount</code> or <code>MaxConcurrentPercentage</code>, but not both.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"MaxConcurrentPercentage": {
|
"MaxConcurrentPercentage": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"StackSetOperationPreferences$MaxConcurrentPercentage": "<p>The maximum percentage of accounts in which to perform this operation at one time.</p> <p>When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result is zero. In this case, CloudFormation sets the number as one instead.</p> <p>Conditional: You must specify either <code>MaxConcurrentCount</code> or <code>MaxConcurrentPercentage</code>, but not both.</p>"
|
"StackSetOperationPreferences$MaxConcurrentPercentage": "<p>The maximum percentage of accounts in which to perform this operation at one time.</p> <p>When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result is zero. In this case, CloudFormation sets the number as one instead.</p> <p>Note that this setting lets you specify the <i>maximum</i> for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.</p> <p>Conditional: You must specify either <code>MaxConcurrentCount</code> or <code>MaxConcurrentPercentage</code>, but not both.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"MaxResults": {
|
"MaxResults": {
|
||||||
@ -1201,13 +1211,18 @@
|
|||||||
"DescribeChangeSetOutput$StackId": "<p>The ARN of the stack that is associated with the change set.</p>",
|
"DescribeChangeSetOutput$StackId": "<p>The ARN of the stack that is associated with the change set.</p>",
|
||||||
"Export$ExportingStackId": "<p>The stack that contains the exported output name and value.</p>",
|
"Export$ExportingStackId": "<p>The stack that contains the exported output name and value.</p>",
|
||||||
"Stack$StackId": "<p>Unique identifier of the stack.</p>",
|
"Stack$StackId": "<p>Unique identifier of the stack.</p>",
|
||||||
|
"Stack$ParentId": "<p>For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.</p> <p>For more information, see <a href=\"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html\">Working with Nested Stacks</a> in the <i>AWS CloudFormation User Guide</i>.</p>",
|
||||||
|
"Stack$RootId": "<p>For nested stacks--stacks created as resources for another stack--the stack ID of the the top-level stack to which the nested stack ultimately belongs.</p> <p>For more information, see <a href=\"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html\">Working with Nested Stacks</a> in the <i>AWS CloudFormation User Guide</i>.</p>",
|
||||||
"StackEvent$StackId": "<p>The unique ID name of the instance of the stack.</p>",
|
"StackEvent$StackId": "<p>The unique ID name of the instance of the stack.</p>",
|
||||||
"StackInstance$StackId": "<p>The ID of the stack instance.</p>",
|
"StackInstance$StackId": "<p>The ID of the stack instance.</p>",
|
||||||
"StackInstanceSummary$StackId": "<p>The ID of the stack instance.</p>",
|
"StackInstanceSummary$StackId": "<p>The ID of the stack instance.</p>",
|
||||||
"StackResource$StackId": "<p>Unique identifier of the stack.</p>",
|
"StackResource$StackId": "<p>Unique identifier of the stack.</p>",
|
||||||
"StackResourceDetail$StackId": "<p>Unique identifier of the stack.</p>",
|
"StackResourceDetail$StackId": "<p>Unique identifier of the stack.</p>",
|
||||||
"StackSummary$StackId": "<p>Unique stack identifier.</p>",
|
"StackSummary$StackId": "<p>Unique stack identifier.</p>",
|
||||||
"UpdateStackOutput$StackId": "<p>Unique identifier of the stack.</p>"
|
"StackSummary$ParentId": "<p>For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.</p> <p>For more information, see <a href=\"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html\">Working with Nested Stacks</a> in the <i>AWS CloudFormation User Guide</i>.</p>",
|
||||||
|
"StackSummary$RootId": "<p>For nested stacks--stacks created as resources for another stack--the stack ID of the the top-level stack to which the nested stack ultimately belongs.</p> <p>For more information, see <a href=\"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html\">Working with Nested Stacks</a> in the <i>AWS CloudFormation User Guide</i>.</p>",
|
||||||
|
"UpdateStackOutput$StackId": "<p>Unique identifier of the stack.</p>",
|
||||||
|
"UpdateTerminationProtectionOutput$StackId": "<p>The unique ID of the stack.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"StackInstance": {
|
"StackInstance": {
|
||||||
@ -1275,7 +1290,8 @@
|
|||||||
"ExecuteChangeSetInput$StackName": "<p>If you specified the name of a change set, specify the stack name or ID (ARN) that is associated with the change set you want to execute.</p>",
|
"ExecuteChangeSetInput$StackName": "<p>If you specified the name of a change set, specify the stack name or ID (ARN) that is associated with the change set you want to execute.</p>",
|
||||||
"GetTemplateSummaryInput$StackName": "<p>The name or the stack ID that is associated with the stack, which are not always interchangeable. For running stacks, you can specify either the stack's name or its unique stack ID. For deleted stack, you must specify the unique stack ID.</p> <p>Conditional: You must specify only one of the following parameters: <code>StackName</code>, <code>StackSetName</code>, <code>TemplateBody</code>, or <code>TemplateURL</code>.</p>",
|
"GetTemplateSummaryInput$StackName": "<p>The name or the stack ID that is associated with the stack, which are not always interchangeable. For running stacks, you can specify either the stack's name or its unique stack ID. For deleted stack, you must specify the unique stack ID.</p> <p>Conditional: You must specify only one of the following parameters: <code>StackName</code>, <code>StackSetName</code>, <code>TemplateBody</code>, or <code>TemplateURL</code>.</p>",
|
||||||
"ListChangeSetsInput$StackName": "<p>The name or the Amazon Resource Name (ARN) of the stack for which you want to list change sets.</p>",
|
"ListChangeSetsInput$StackName": "<p>The name or the Amazon Resource Name (ARN) of the stack for which you want to list change sets.</p>",
|
||||||
"SignalResourceInput$StackName": "<p>The stack name or unique stack ID that includes the resource that you want to signal.</p>"
|
"SignalResourceInput$StackName": "<p>The stack name or unique stack ID that includes the resource that you want to signal.</p>",
|
||||||
|
"UpdateTerminationProtectionInput$StackName": "<p>The name or unique ID of the stack for which you want to set termination protection.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"StackPolicyBody": {
|
"StackPolicyBody": {
|
||||||
@ -1676,6 +1692,16 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"UpdateTerminationProtectionInput": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"UpdateTerminationProtectionOutput": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
"Url": {
|
"Url": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
|
44
vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/docs-2.json
generated
vendored
44
vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/docs-2.json
generated
vendored
@ -1,10 +1,10 @@
|
|||||||
{
|
{
|
||||||
"version": "2.0",
|
"version": "2.0",
|
||||||
"service": "<fullname>AWS CloudHSM Service</fullname>",
|
"service": "<fullname>AWS CloudHSM Service</fullname> <p>This is the reference for AWS CloudHSM Classic. For more information, see <a href=\"http://aws.amazon.com/cloudhsm/faqs-classic/\">AWS CloudHSM Classic FAQs</a> and the <a href=\"http://docs.aws.amazon.com/cloudhsm/classic/userguide/\">AWS CloudHSM Classic User Guide</a>.</p> <p>For more information about AWS CloudHSM, see <a href=\"http://aws.amazon.com/cloudhsm/\">AWS CloudHSM</a> and the <a href=\"http://docs.aws.amazon.com/cloudhsm/latest/userguide/\">AWS CloudHSM User Guide</a>.</p>",
|
||||||
"operations": {
|
"operations": {
|
||||||
"AddTagsToResource": "<p>Adds or overwrites one or more tags for the specified AWS CloudHSM resource.</p> <p>Each tag consists of a key and a value. Tag keys must be unique to each resource.</p>",
|
"AddTagsToResource": "<p>Adds or overwrites one or more tags for the specified AWS CloudHSM resource.</p> <p>Each tag consists of a key and a value. Tag keys must be unique to each resource.</p>",
|
||||||
"CreateHapg": "<p>Creates a high-availability partition group. A high-availability partition group is a group of partitions that spans multiple physical HSMs.</p>",
|
"CreateHapg": "<p>Creates a high-availability partition group. A high-availability partition group is a group of partitions that spans multiple physical HSMs.</p>",
|
||||||
"CreateHsm": "<p>Creates an uninitialized HSM instance.</p> <p>There is an upfront fee charged for each HSM instance that you create with the <a>CreateHsm</a> operation. If you accidentally provision an HSM and want to request a refund, delete the instance using the <a>DeleteHsm</a> operation, go to the <a href=\"https://console.aws.amazon.com/support/home#/\">AWS Support Center</a>, create a new case, and select <b>Account and Billing Support</b>.</p> <important> <p>It can take up to 20 minutes to create and provision an HSM. You can monitor the status of the HSM with the <a>DescribeHsm</a> operation. The HSM is ready to be initialized when the status changes to <code>RUNNING</code>.</p> </important>",
|
"CreateHsm": "<p>Creates an uninitialized HSM instance.</p> <p>There is an upfront fee charged for each HSM instance that you create with the <code>CreateHsm</code> operation. If you accidentally provision an HSM and want to request a refund, delete the instance using the <a>DeleteHsm</a> operation, go to the <a href=\"https://console.aws.amazon.com/support/home\">AWS Support Center</a>, create a new case, and select <b>Account and Billing Support</b>.</p> <important> <p>It can take up to 20 minutes to create and provision an HSM. You can monitor the status of the HSM with the <a>DescribeHsm</a> operation. The HSM is ready to be initialized when the status changes to <code>RUNNING</code>.</p> </important>",
|
||||||
"CreateLunaClient": "<p>Creates an HSM client.</p>",
|
"CreateLunaClient": "<p>Creates an HSM client.</p>",
|
||||||
"DeleteHapg": "<p>Deletes a high-availability partition group.</p>",
|
"DeleteHapg": "<p>Deletes a high-availability partition group.</p>",
|
||||||
"DeleteHsm": "<p>Deletes an HSM. After completion, this operation cannot be undone and your key material cannot be recovered.</p>",
|
"DeleteHsm": "<p>Deletes an HSM. After completion, this operation cannot be undone and your key material cannot be recovered.</p>",
|
||||||
@ -14,9 +14,9 @@
|
|||||||
"DescribeLunaClient": "<p>Retrieves information about an HSM client.</p>",
|
"DescribeLunaClient": "<p>Retrieves information about an HSM client.</p>",
|
||||||
"GetConfig": "<p>Gets the configuration files necessary to connect to all high availability partition groups the client is associated with.</p>",
|
"GetConfig": "<p>Gets the configuration files necessary to connect to all high availability partition groups the client is associated with.</p>",
|
||||||
"ListAvailableZones": "<p>Lists the Availability Zones that have available AWS CloudHSM capacity.</p>",
|
"ListAvailableZones": "<p>Lists the Availability Zones that have available AWS CloudHSM capacity.</p>",
|
||||||
"ListHapgs": "<p>Lists the high-availability partition groups for the account.</p> <p>This operation supports pagination with the use of the <i>NextToken</i> member. If more results are available, the <i>NextToken</i> member of the response contains a token that you pass in the next call to <a>ListHapgs</a> to retrieve the next set of items.</p>",
|
"ListHapgs": "<p>Lists the high-availability partition groups for the account.</p> <p>This operation supports pagination with the use of the <code>NextToken</code> member. If more results are available, the <code>NextToken</code> member of the response contains a token that you pass in the next call to <code>ListHapgs</code> to retrieve the next set of items.</p>",
|
||||||
"ListHsms": "<p>Retrieves the identifiers of all of the HSMs provisioned for the current customer.</p> <p>This operation supports pagination with the use of the <i>NextToken</i> member. If more results are available, the <i>NextToken</i> member of the response contains a token that you pass in the next call to <a>ListHsms</a> to retrieve the next set of items.</p>",
|
"ListHsms": "<p>Retrieves the identifiers of all of the HSMs provisioned for the current customer.</p> <p>This operation supports pagination with the use of the <code>NextToken</code> member. If more results are available, the <code>NextToken</code> member of the response contains a token that you pass in the next call to <code>ListHsms</code> to retrieve the next set of items.</p>",
|
||||||
"ListLunaClients": "<p>Lists all of the clients.</p> <p>This operation supports pagination with the use of the <i>NextToken</i> member. If more results are available, the <i>NextToken</i> member of the response contains a token that you pass in the next call to <a>ListLunaClients</a> to retrieve the next set of items.</p>",
|
"ListLunaClients": "<p>Lists all of the clients.</p> <p>This operation supports pagination with the use of the <code>NextToken</code> member. If more results are available, the <code>NextToken</code> member of the response contains a token that you pass in the next call to <code>ListLunaClients</code> to retrieve the next set of items.</p>",
|
||||||
"ListTagsForResource": "<p>Returns a list of all tags for the specified AWS CloudHSM resource.</p>",
|
"ListTagsForResource": "<p>Returns a list of all tags for the specified AWS CloudHSM resource.</p>",
|
||||||
"ModifyHapg": "<p>Modifies an existing high-availability partition group.</p>",
|
"ModifyHapg": "<p>Modifies an existing high-availability partition group.</p>",
|
||||||
"ModifyHsm": "<p>Modifies an HSM.</p> <important> <p>This operation can result in the HSM being offline for up to 15 minutes while the AWS CloudHSM service is reconfigured. If you are modifying a production HSM, you should ensure that your AWS CloudHSM service is configured for high availability, and consider executing this operation during a maintenance window.</p> </important>",
|
"ModifyHsm": "<p>Modifies an HSM.</p> <important> <p>This operation can result in the HSM being offline for up to 15 minutes while the AWS CloudHSM service is reconfigured. If you are modifying a production HSM, you should ensure that your AWS CloudHSM service is configured for high availability, and consider executing this operation during a maintenance window.</p> </important>",
|
||||||
@ -132,12 +132,12 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"CreateHsmRequest": {
|
"CreateHsmRequest": {
|
||||||
"base": "<p>Contains the inputs for the <a>CreateHsm</a> operation.</p>",
|
"base": "<p>Contains the inputs for the <code>CreateHsm</code> operation.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"CreateHsmResponse": {
|
"CreateHsmResponse": {
|
||||||
"base": "<p>Contains the output of the <a>CreateHsm</a> operation.</p>",
|
"base": "<p>Contains the output of the <code>CreateHsm</code> operation.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -192,7 +192,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DescribeHsmRequest": {
|
"DescribeHsmRequest": {
|
||||||
"base": "<p>Contains the inputs for the <a>DescribeHsm</a> operation. </p>",
|
"base": "<p>Contains the inputs for the <a>DescribeHsm</a> operation.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -220,7 +220,7 @@
|
|||||||
"ExternalId": {
|
"ExternalId": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateHsmRequest$ExternalId": "<p>The external ID from <b>IamRoleArn</b>, if present.</p>",
|
"CreateHsmRequest$ExternalId": "<p>The external ID from <code>IamRoleArn</code>, if present.</p>",
|
||||||
"ModifyHsmRequest$ExternalId": "<p>The new external ID.</p>"
|
"ModifyHsmRequest$ExternalId": "<p>The new external ID.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -258,7 +258,7 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
"CreateHsmResponse$HsmArn": "<p>The ARN of the HSM.</p>",
|
"CreateHsmResponse$HsmArn": "<p>The ARN of the HSM.</p>",
|
||||||
"DeleteHsmRequest$HsmArn": "<p>The ARN of the HSM to delete.</p>",
|
"DeleteHsmRequest$HsmArn": "<p>The ARN of the HSM to delete.</p>",
|
||||||
"DescribeHsmRequest$HsmArn": "<p>The ARN of the HSM. Either the <i>HsmArn</i> or the <i>SerialNumber</i> parameter must be specified.</p>",
|
"DescribeHsmRequest$HsmArn": "<p>The ARN of the HSM. Either the <code>HsmArn</code> or the <code>SerialNumber</code> parameter must be specified.</p>",
|
||||||
"DescribeHsmResponse$HsmArn": "<p>The ARN of the HSM.</p>",
|
"DescribeHsmResponse$HsmArn": "<p>The ARN of the HSM.</p>",
|
||||||
"HsmList$member": null,
|
"HsmList$member": null,
|
||||||
"ModifyHsmRequest$HsmArn": "<p>The ARN of the HSM to modify.</p>",
|
"ModifyHsmRequest$HsmArn": "<p>The ARN of the HSM to modify.</p>",
|
||||||
@ -268,16 +268,16 @@
|
|||||||
"HsmList": {
|
"HsmList": {
|
||||||
"base": "<p>Contains a list of ARNs that identify the HSMs.</p>",
|
"base": "<p>Contains a list of ARNs that identify the HSMs.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeHapgResponse$HsmsLastActionFailed": null,
|
"DescribeHapgResponse$HsmsLastActionFailed": "<p/>",
|
||||||
"DescribeHapgResponse$HsmsPendingDeletion": null,
|
"DescribeHapgResponse$HsmsPendingDeletion": "<p/>",
|
||||||
"DescribeHapgResponse$HsmsPendingRegistration": null,
|
"DescribeHapgResponse$HsmsPendingRegistration": "<p/>",
|
||||||
"ListHsmsResponse$HsmList": "<p>The list of ARNs that identify the HSMs.</p>"
|
"ListHsmsResponse$HsmList": "<p>The list of ARNs that identify the HSMs.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"HsmSerialNumber": {
|
"HsmSerialNumber": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeHsmRequest$HsmSerialNumber": "<p>The serial number of the HSM. Either the <i>HsmArn</i> or the <i>HsmSerialNumber</i> parameter must be specified.</p>",
|
"DescribeHsmRequest$HsmSerialNumber": "<p>The serial number of the HSM. Either the <code>HsmArn</code> or the <code>HsmSerialNumber</code> parameter must be specified.</p>",
|
||||||
"DescribeHsmResponse$SerialNumber": "<p>The serial number of the HSM.</p>"
|
"DescribeHsmResponse$SerialNumber": "<p>The serial number of the HSM.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -345,7 +345,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ListHsmsResponse": {
|
"ListHsmsResponse": {
|
||||||
"base": "<p>Contains the output of the <a>ListHsms</a> operation.</p>",
|
"base": "<p>Contains the output of the <code>ListHsms</code> operation.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -402,12 +402,12 @@
|
|||||||
"PaginationToken": {
|
"PaginationToken": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"ListHapgsRequest$NextToken": "<p>The <i>NextToken</i> value from a previous call to <a>ListHapgs</a>. Pass null if this is the first call.</p>",
|
"ListHapgsRequest$NextToken": "<p>The <code>NextToken</code> value from a previous call to <code>ListHapgs</code>. Pass null if this is the first call.</p>",
|
||||||
"ListHapgsResponse$NextToken": "<p>If not null, more results are available. Pass this value to <a>ListHapgs</a> to retrieve the next set of items.</p>",
|
"ListHapgsResponse$NextToken": "<p>If not null, more results are available. Pass this value to <code>ListHapgs</code> to retrieve the next set of items.</p>",
|
||||||
"ListHsmsRequest$NextToken": "<p>The <i>NextToken</i> value from a previous call to <a>ListHsms</a>. Pass null if this is the first call.</p>",
|
"ListHsmsRequest$NextToken": "<p>The <code>NextToken</code> value from a previous call to <code>ListHsms</code>. Pass null if this is the first call.</p>",
|
||||||
"ListHsmsResponse$NextToken": "<p>If not null, more results are available. Pass this value to <a>ListHsms</a> to retrieve the next set of items.</p>",
|
"ListHsmsResponse$NextToken": "<p>If not null, more results are available. Pass this value to <code>ListHsms</code> to retrieve the next set of items.</p>",
|
||||||
"ListLunaClientsRequest$NextToken": "<p>The <i>NextToken</i> value from a previous call to <a>ListLunaClients</a>. Pass null if this is the first call.</p>",
|
"ListLunaClientsRequest$NextToken": "<p>The <code>NextToken</code> value from a previous call to <code>ListLunaClients</code>. Pass null if this is the first call.</p>",
|
||||||
"ListLunaClientsResponse$NextToken": "<p>If not null, more results are available. Pass this to <a>ListLunaClients</a> to retrieve the next set of items.</p>"
|
"ListLunaClientsResponse$NextToken": "<p>If not null, more results are available. Pass this to <code>ListLunaClients</code> to retrieve the next set of items.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"PartitionArn": {
|
"PartitionArn": {
|
||||||
@ -484,7 +484,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"SubscriptionType": {
|
"SubscriptionType": {
|
||||||
"base": "<p>Specifies the type of subscription for the HSM.</p> <ul> <li><b>PRODUCTION</b> - The HSM is being used in a production environment.</li> <li><b>TRIAL</b> - The HSM is being used in a product trial.</li> </ul>",
|
"base": "<p>Specifies the type of subscription for the HSM.</p> <ul> <li> <p> <b>PRODUCTION</b> - The HSM is being used in a production environment.</p> </li> <li> <p> <b>TRIAL</b> - The HSM is being used in a product trial.</p> </li> </ul>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateHsmRequest$SubscriptionType": null,
|
"CreateHsmRequest$SubscriptionType": null,
|
||||||
"DescribeHsmResponse$SubscriptionType": null
|
"DescribeHsmResponse$SubscriptionType": null
|
||||||
|
4
vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/paginators-1.json
generated
vendored
Normal file
4
vendor/github.com/aws/aws-sdk-go/models/apis/cloudhsm/2014-05-30/paginators-1.json
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
{
|
||||||
|
"pagination": {
|
||||||
|
}
|
||||||
|
}
|
69
vendor/github.com/aws/aws-sdk-go/models/apis/codebuild/2016-10-06/api-2.json
generated
vendored
69
vendor/github.com/aws/aws-sdk-go/models/apis/codebuild/2016-10-06/api-2.json
generated
vendored
@ -61,6 +61,21 @@
|
|||||||
{"shape":"AccountLimitExceededException"}
|
{"shape":"AccountLimitExceededException"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"CreateWebhook":{
|
||||||
|
"name":"CreateWebhook",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"CreateWebhookInput"},
|
||||||
|
"output":{"shape":"CreateWebhookOutput"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"InvalidInputException"},
|
||||||
|
{"shape":"OAuthProviderException"},
|
||||||
|
{"shape":"ResourceAlreadyExistsException"},
|
||||||
|
{"shape":"ResourceNotFoundException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
"DeleteProject":{
|
"DeleteProject":{
|
||||||
"name":"DeleteProject",
|
"name":"DeleteProject",
|
||||||
"http":{
|
"http":{
|
||||||
@ -73,6 +88,20 @@
|
|||||||
{"shape":"InvalidInputException"}
|
{"shape":"InvalidInputException"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"DeleteWebhook":{
|
||||||
|
"name":"DeleteWebhook",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"DeleteWebhookInput"},
|
||||||
|
"output":{"shape":"DeleteWebhookOutput"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"InvalidInputException"},
|
||||||
|
{"shape":"ResourceNotFoundException"},
|
||||||
|
{"shape":"OAuthProviderException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
"ListBuilds":{
|
"ListBuilds":{
|
||||||
"name":"ListBuilds",
|
"name":"ListBuilds",
|
||||||
"http":{
|
"http":{
|
||||||
@ -346,6 +375,19 @@
|
|||||||
"project":{"shape":"Project"}
|
"project":{"shape":"Project"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"CreateWebhookInput":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["projectName"],
|
||||||
|
"members":{
|
||||||
|
"projectName":{"shape":"ProjectName"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"CreateWebhookOutput":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"webhook":{"shape":"Webhook"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"DeleteProjectInput":{
|
"DeleteProjectInput":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":["name"],
|
"required":["name"],
|
||||||
@ -358,6 +400,18 @@
|
|||||||
"members":{
|
"members":{
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DeleteWebhookInput":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["projectName"],
|
||||||
|
"members":{
|
||||||
|
"projectName":{"shape":"ProjectName"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DeleteWebhookOutput":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
}
|
||||||
|
},
|
||||||
"EnvironmentImage":{
|
"EnvironmentImage":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -512,6 +566,12 @@
|
|||||||
"type":"string",
|
"type":"string",
|
||||||
"min":1
|
"min":1
|
||||||
},
|
},
|
||||||
|
"OAuthProviderException":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
},
|
||||||
|
"exception":true
|
||||||
|
},
|
||||||
"PhaseContext":{
|
"PhaseContext":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -545,7 +605,8 @@
|
|||||||
"encryptionKey":{"shape":"NonEmptyString"},
|
"encryptionKey":{"shape":"NonEmptyString"},
|
||||||
"tags":{"shape":"TagList"},
|
"tags":{"shape":"TagList"},
|
||||||
"created":{"shape":"Timestamp"},
|
"created":{"shape":"Timestamp"},
|
||||||
"lastModified":{"shape":"Timestamp"}
|
"lastModified":{"shape":"Timestamp"},
|
||||||
|
"webhook":{"shape":"Webhook"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ProjectArtifacts":{
|
"ProjectArtifacts":{
|
||||||
@ -744,6 +805,12 @@
|
|||||||
"min":1,
|
"min":1,
|
||||||
"pattern":"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=@+\\\\-]*)$"
|
"pattern":"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=@+\\\\-]*)$"
|
||||||
},
|
},
|
||||||
|
"Webhook":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"url":{"shape":"NonEmptyString"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"WrapperBoolean":{"type":"boolean"},
|
"WrapperBoolean":{"type":"boolean"},
|
||||||
"WrapperInt":{"type":"integer"},
|
"WrapperInt":{"type":"integer"},
|
||||||
"WrapperLong":{"type":"long"}
|
"WrapperLong":{"type":"long"}
|
||||||
|
41
vendor/github.com/aws/aws-sdk-go/models/apis/codebuild/2016-10-06/docs-2.json
generated
vendored
41
vendor/github.com/aws/aws-sdk-go/models/apis/codebuild/2016-10-06/docs-2.json
generated
vendored
@ -1,12 +1,14 @@
|
|||||||
{
|
{
|
||||||
"version": "2.0",
|
"version": "2.0",
|
||||||
"service": "<fullname>AWS CodeBuild</fullname> <p>AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apach Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests, and you pay only for the build time you consume. For more information about AWS CodeBuild, see the <i>AWS CodeBuild User Guide</i>.</p> <p>AWS CodeBuild supports these operations:</p> <ul> <li> <p> <code>BatchDeleteBuilds</code>: Deletes one or more builds.</p> </li> <li> <p> <code>BatchGetProjects</code>: Gets information about one or more build projects. A <i>build project</i> defines how AWS CodeBuild will run a build. This includes information such as where to get the source code to build, the build environment to use, the build commands to run, and where to store the build output. A <i>build environment</i> represents a combination of operating system, programming language runtime, and tools that AWS CodeBuild will use to run a build. 
Also, you can add tags to build projects to help manage your resources and costs.</p> </li> <li> <p> <code>CreateProject</code>: Creates a build project.</p> </li> <li> <p> <code>DeleteProject</code>: Deletes a build project.</p> </li> <li> <p> <code>ListProjects</code>: Gets a list of build project names, with each build project name representing a single build project.</p> </li> <li> <p> <code>UpdateProject</code>: Changes the settings of an existing build project.</p> </li> <li> <p> <code>BatchGetBuilds</code>: Gets information about one or more builds.</p> </li> <li> <p> <code>ListBuilds</code>: Gets a list of build IDs, with each build ID representing a single build.</p> </li> <li> <p> <code>ListBuildsForProject</code>: Gets a list of build IDs for the specified build project, with each build ID representing a single build.</p> </li> <li> <p> <code>StartBuild</code>: Starts running a build.</p> </li> <li> <p> <code>StopBuild</code>: Attempts to stop running a build.</p> </li> <li> <p> <code>ListCuratedEnvironmentImages</code>: Gets information about Docker images that are managed by AWS CodeBuild.</p> </li> </ul>",
|
"service": "<fullname>AWS CodeBuild</fullname> <p>AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests, and you pay only for the build time you consume. For more information about AWS CodeBuild, see the <i>AWS CodeBuild User Guide</i>.</p> <p>AWS CodeBuild supports these operations:</p> <ul> <li> <p> <code>BatchDeleteBuilds</code>: Deletes one or more builds.</p> </li> <li> <p> <code>BatchGetProjects</code>: Gets information about one or more build projects. A <i>build project</i> defines how AWS CodeBuild will run a build. This includes information such as where to get the source code to build, the build environment to use, the build commands to run, and where to store the build output. A <i>build environment</i> represents a combination of operating system, programming language runtime, and tools that AWS CodeBuild will use to run a build. 
Also, you can add tags to build projects to help manage your resources and costs.</p> </li> <li> <p> <code>CreateProject</code>: Creates a build project.</p> </li> <li> <p> <code>CreateWebhook</code>: For an existing AWS CodeBuild build project that has its source code stored in a GitHub repository, enables AWS CodeBuild to begin automatically rebuilding the source code every time a code change is pushed to the repository.</p> </li> <li> <p> <code>DeleteProject</code>: Deletes a build project.</p> </li> <li> <p> <code>DeleteWebhook</code>: For an existing AWS CodeBuild build project that has its source code stored in a GitHub repository, stops AWS CodeBuild from automatically rebuilding the source code every time a code change is pushed to the repository.</p> </li> <li> <p> <code>ListProjects</code>: Gets a list of build project names, with each build project name representing a single build project.</p> </li> <li> <p> <code>UpdateProject</code>: Changes the settings of an existing build project.</p> </li> <li> <p> <code>BatchGetBuilds</code>: Gets information about one or more builds.</p> </li> <li> <p> <code>ListBuilds</code>: Gets a list of build IDs, with each build ID representing a single build.</p> </li> <li> <p> <code>ListBuildsForProject</code>: Gets a list of build IDs for the specified build project, with each build ID representing a single build.</p> </li> <li> <p> <code>StartBuild</code>: Starts running a build.</p> </li> <li> <p> <code>StopBuild</code>: Attempts to stop running a build.</p> </li> <li> <p> <code>ListCuratedEnvironmentImages</code>: Gets information about Docker images that are managed by AWS CodeBuild.</p> </li> </ul>",
|
||||||
"operations": {
|
"operations": {
|
||||||
"BatchDeleteBuilds": "<p>Deletes one or more builds.</p>",
|
"BatchDeleteBuilds": "<p>Deletes one or more builds.</p>",
|
||||||
"BatchGetBuilds": "<p>Gets information about builds.</p>",
|
"BatchGetBuilds": "<p>Gets information about builds.</p>",
|
||||||
"BatchGetProjects": "<p>Gets information about build projects.</p>",
|
"BatchGetProjects": "<p>Gets information about build projects.</p>",
|
||||||
"CreateProject": "<p>Creates a build project.</p>",
|
"CreateProject": "<p>Creates a build project.</p>",
|
||||||
|
"CreateWebhook": "<p>For an existing AWS CodeBuild build project that has its source code stored in a GitHub repository, enables AWS CodeBuild to begin automatically rebuilding the source code every time a code change is pushed to the repository.</p> <important> <p>If you enable webhooks for an AWS CodeBuild project, and the project is used as a build step in AWS CodePipeline, then two identical builds will be created for each commit. One build is triggered through webhooks, and one through AWS CodePipeline. Because billing is on a per-build basis, you will be billed for both builds. Therefore, if you are using AWS CodePipeline, we recommend that you disable webhooks in CodeBuild. In the AWS CodeBuild console, clear the Webhook box. For more information, see step 9 in <a href=\"http://docs.aws.amazon.com/codebuild/latest/userguide/change-project.html#change-project-console\">Change a Build Project’s Settings</a>.</p> </important>",
|
||||||
"DeleteProject": "<p>Deletes a build project.</p>",
|
"DeleteProject": "<p>Deletes a build project.</p>",
|
||||||
|
"DeleteWebhook": "<p>For an existing AWS CodeBuild build project that has its source code stored in a GitHub repository, stops AWS CodeBuild from automatically rebuilding the source code every time a code change is pushed to the repository.</p>",
|
||||||
"ListBuilds": "<p>Gets a list of build IDs, with each build ID representing a single build.</p>",
|
"ListBuilds": "<p>Gets a list of build IDs, with each build ID representing a single build.</p>",
|
||||||
"ListBuildsForProject": "<p>Gets a list of build IDs for the specified build project, with each build ID representing a single build.</p>",
|
"ListBuildsForProject": "<p>Gets a list of build IDs for the specified build project, with each build ID representing a single build.</p>",
|
||||||
"ListCuratedEnvironmentImages": "<p>Gets information about Docker images that are managed by AWS CodeBuild.</p>",
|
"ListCuratedEnvironmentImages": "<p>Gets information about Docker images that are managed by AWS CodeBuild.</p>",
|
||||||
@ -152,6 +154,16 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"CreateWebhookInput": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"CreateWebhookOutput": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
"DeleteProjectInput": {
|
"DeleteProjectInput": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -162,6 +174,16 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DeleteWebhookInput": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DeleteWebhookOutput": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
"EnvironmentImage": {
|
"EnvironmentImage": {
|
||||||
"base": "<p>Information about a Docker image that is managed by AWS CodeBuild.</p>",
|
"base": "<p>Information about a Docker image that is managed by AWS CodeBuild.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -309,7 +331,13 @@
|
|||||||
"StopBuildInput$id": "<p>The ID of the build.</p>",
|
"StopBuildInput$id": "<p>The ID of the build.</p>",
|
||||||
"UpdateProjectInput$name": "<p>The name of the build project.</p> <note> <p>You cannot change a build project's name.</p> </note>",
|
"UpdateProjectInput$name": "<p>The name of the build project.</p> <note> <p>You cannot change a build project's name.</p> </note>",
|
||||||
"UpdateProjectInput$serviceRole": "<p>The replacement ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.</p>",
|
"UpdateProjectInput$serviceRole": "<p>The replacement ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.</p>",
|
||||||
"UpdateProjectInput$encryptionKey": "<p>The replacement AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.</p> <p>You can specify either the CMK's Amazon Resource Name (ARN) or, if available, the CMK's alias (using the format <code>alias/<i>alias-name</i> </code>).</p>"
|
"UpdateProjectInput$encryptionKey": "<p>The replacement AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.</p> <p>You can specify either the CMK's Amazon Resource Name (ARN) or, if available, the CMK's alias (using the format <code>alias/<i>alias-name</i> </code>).</p>",
|
||||||
|
"Webhook$url": "<p>The URL to the webhook.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"OAuthProviderException": {
|
||||||
|
"base": "<p>There was a problem with the underlying OAuth provider.</p>",
|
||||||
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"PhaseContext": {
|
"PhaseContext": {
|
||||||
@ -368,6 +396,8 @@
|
|||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateProjectInput$name": "<p>The name of the build project.</p>",
|
"CreateProjectInput$name": "<p>The name of the build project.</p>",
|
||||||
|
"CreateWebhookInput$projectName": "<p>The name of the build project.</p>",
|
||||||
|
"DeleteWebhookInput$projectName": "<p>The name of the build project.</p>",
|
||||||
"Project$name": "<p>The name of the build project.</p>"
|
"Project$name": "<p>The name of the build project.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -546,6 +576,13 @@
|
|||||||
"Tag$value": "<p>The tag's value.</p>"
|
"Tag$value": "<p>The tag's value.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"Webhook": {
|
||||||
|
"base": "<p>Information about a webhook in GitHub that connects repository events to a build project in AWS CodeBuild.</p>",
|
||||||
|
"refs": {
|
||||||
|
"CreateWebhookOutput$webhook": "<p>Information about a webhook in GitHub that connects repository events to a build project in AWS CodeBuild.</p>",
|
||||||
|
"Project$webhook": "<p>Information about a webhook in GitHub that connects repository events to a build project in AWS CodeBuild.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
"WrapperBoolean": {
|
"WrapperBoolean": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
|
27
vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/api-2.json
generated
vendored
27
vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/api-2.json
generated
vendored
@ -798,7 +798,11 @@
|
|||||||
"type":"string",
|
"type":"string",
|
||||||
"pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
|
"pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
|
||||||
},
|
},
|
||||||
"ClientToken":{"type":"string"},
|
"ClientToken":{
|
||||||
|
"type":"string",
|
||||||
|
"max":256,
|
||||||
|
"min":1
|
||||||
|
},
|
||||||
"Code":{"type":"string"},
|
"Code":{"type":"string"},
|
||||||
"ContinuationToken":{"type":"string"},
|
"ContinuationToken":{"type":"string"},
|
||||||
"CreateCustomActionTypeInput":{
|
"CreateCustomActionTypeInput":{
|
||||||
@ -1018,7 +1022,8 @@
|
|||||||
"GetPipelineOutput":{
|
"GetPipelineOutput":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
"pipeline":{"shape":"PipelineDeclaration"}
|
"pipeline":{"shape":"PipelineDeclaration"},
|
||||||
|
"metadata":{"shape":"PipelineMetadata"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"GetPipelineStateInput":{
|
"GetPipelineStateInput":{
|
||||||
@ -1255,7 +1260,11 @@
|
|||||||
"max":5,
|
"max":5,
|
||||||
"min":0
|
"min":0
|
||||||
},
|
},
|
||||||
"NextToken":{"type":"string"},
|
"NextToken":{
|
||||||
|
"type":"string",
|
||||||
|
"max":2048,
|
||||||
|
"min":1
|
||||||
|
},
|
||||||
"Nonce":{"type":"string"},
|
"Nonce":{"type":"string"},
|
||||||
"NotLatestPipelineExecutionException":{
|
"NotLatestPipelineExecutionException":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
@ -1279,6 +1288,10 @@
|
|||||||
"max":100,
|
"max":100,
|
||||||
"min":0
|
"min":0
|
||||||
},
|
},
|
||||||
|
"PipelineArn":{
|
||||||
|
"type":"string",
|
||||||
|
"pattern":"arn:aws(-[\\w]+)*:codepipeline:.+:[0-9]{12}:.+"
|
||||||
|
},
|
||||||
"PipelineContext":{
|
"PipelineContext":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -1349,6 +1362,14 @@
|
|||||||
"type":"list",
|
"type":"list",
|
||||||
"member":{"shape":"PipelineSummary"}
|
"member":{"shape":"PipelineSummary"}
|
||||||
},
|
},
|
||||||
|
"PipelineMetadata":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"pipelineArn":{"shape":"PipelineArn"},
|
||||||
|
"created":{"shape":"Timestamp"},
|
||||||
|
"updated":{"shape":"Timestamp"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"PipelineName":{
|
"PipelineName":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"max":100,
|
"max":100,
|
||||||
|
122
vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/docs-2.json
generated
vendored
122
vendor/github.com/aws/aws-sdk-go/models/apis/codepipeline/2015-07-09/docs-2.json
generated
vendored
File diff suppressed because one or more lines are too long
7
vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json
generated
vendored
7
vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/api-2.json
generated
vendored
@ -1343,7 +1343,12 @@
|
|||||||
"AWS::Redshift::ClusterSubnetGroup",
|
"AWS::Redshift::ClusterSubnetGroup",
|
||||||
"AWS::Redshift::EventSubscription",
|
"AWS::Redshift::EventSubscription",
|
||||||
"AWS::CloudWatch::Alarm",
|
"AWS::CloudWatch::Alarm",
|
||||||
"AWS::CloudFormation::Stack"
|
"AWS::CloudFormation::Stack",
|
||||||
|
"AWS::DynamoDB::Table",
|
||||||
|
"AWS::AutoScaling::AutoScalingGroup",
|
||||||
|
"AWS::AutoScaling::LaunchConfiguration",
|
||||||
|
"AWS::AutoScaling::ScalingPolicy",
|
||||||
|
"AWS::AutoScaling::ScheduledAction"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"ResourceTypeList":{
|
"ResourceTypeList":{
|
||||||
|
4
vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/docs-2.json
generated
vendored
4
vendor/github.com/aws/aws-sdk-go/models/apis/config/2014-11-12/docs-2.json
generated
vendored
@ -180,7 +180,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ConfigExportDeliveryInfo": {
|
"ConfigExportDeliveryInfo": {
|
||||||
"base": "<p>A list that contains the status of the delivery of either the snapshot or the configuration history to the specified Amazon S3 bucket.</p>",
|
"base": "<p>Provides status of the delivery of the snapshot or the configuration history to the specified Amazon S3 bucket. Also provides the status of notifications about the Amazon S3 delivery to the specified Amazon SNS topic.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"DeliveryChannelStatus$configSnapshotDeliveryInfo": "<p>A list containing the status of the delivery of the snapshot to the specified Amazon S3 bucket.</p>",
|
"DeliveryChannelStatus$configSnapshotDeliveryInfo": "<p>A list containing the status of the delivery of the snapshot to the specified Amazon S3 bucket.</p>",
|
||||||
"DeliveryChannelStatus$configHistoryDeliveryInfo": "<p>A list that contains the status of the delivery of the configuration history to the specified Amazon S3 bucket.</p>"
|
"DeliveryChannelStatus$configHistoryDeliveryInfo": "<p>A list that contains the status of the delivery of the configuration history to the specified Amazon S3 bucket.</p>"
|
||||||
@ -1086,7 +1086,7 @@
|
|||||||
"StringWithCharLimit128": {
|
"StringWithCharLimit128": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"Scope$TagKey": "<p>The tag key that is applied to only those AWS resources that you want you want to trigger an evaluation for the rule.</p>"
|
"Scope$TagKey": "<p>The tag key that is applied to only those AWS resources that you want to trigger an evaluation for the rule.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"StringWithCharLimit256": {
|
"StringWithCharLimit256": {
|
||||||
|
268
vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/api-2.json
generated
vendored
268
vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/api-2.json
generated
vendored
@ -256,6 +256,15 @@
|
|||||||
"input":{"shape":"ConfirmProductInstanceRequest"},
|
"input":{"shape":"ConfirmProductInstanceRequest"},
|
||||||
"output":{"shape":"ConfirmProductInstanceResult"}
|
"output":{"shape":"ConfirmProductInstanceResult"}
|
||||||
},
|
},
|
||||||
|
"CopyFpgaImage":{
|
||||||
|
"name":"CopyFpgaImage",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"CopyFpgaImageRequest"},
|
||||||
|
"output":{"shape":"CopyFpgaImageResult"}
|
||||||
|
},
|
||||||
"CopyImage":{
|
"CopyImage":{
|
||||||
"name":"CopyImage",
|
"name":"CopyImage",
|
||||||
"http":{
|
"http":{
|
||||||
@ -583,6 +592,15 @@
|
|||||||
"input":{"shape":"DeleteFlowLogsRequest"},
|
"input":{"shape":"DeleteFlowLogsRequest"},
|
||||||
"output":{"shape":"DeleteFlowLogsResult"}
|
"output":{"shape":"DeleteFlowLogsResult"}
|
||||||
},
|
},
|
||||||
|
"DeleteFpgaImage":{
|
||||||
|
"name":"DeleteFpgaImage",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"DeleteFpgaImageRequest"},
|
||||||
|
"output":{"shape":"DeleteFpgaImageResult"}
|
||||||
|
},
|
||||||
"DeleteInternetGateway":{
|
"DeleteInternetGateway":{
|
||||||
"name":"DeleteInternetGateway",
|
"name":"DeleteInternetGateway",
|
||||||
"http":{
|
"http":{
|
||||||
@ -879,6 +897,15 @@
|
|||||||
"input":{"shape":"DescribeFlowLogsRequest"},
|
"input":{"shape":"DescribeFlowLogsRequest"},
|
||||||
"output":{"shape":"DescribeFlowLogsResult"}
|
"output":{"shape":"DescribeFlowLogsResult"}
|
||||||
},
|
},
|
||||||
|
"DescribeFpgaImageAttribute":{
|
||||||
|
"name":"DescribeFpgaImageAttribute",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"DescribeFpgaImageAttributeRequest"},
|
||||||
|
"output":{"shape":"DescribeFpgaImageAttributeResult"}
|
||||||
|
},
|
||||||
"DescribeFpgaImages":{
|
"DescribeFpgaImages":{
|
||||||
"name":"DescribeFpgaImages",
|
"name":"DescribeFpgaImages",
|
||||||
"http":{
|
"http":{
|
||||||
@ -1636,6 +1663,15 @@
|
|||||||
"input":{"shape":"ImportVolumeRequest"},
|
"input":{"shape":"ImportVolumeRequest"},
|
||||||
"output":{"shape":"ImportVolumeResult"}
|
"output":{"shape":"ImportVolumeResult"}
|
||||||
},
|
},
|
||||||
|
"ModifyFpgaImageAttribute":{
|
||||||
|
"name":"ModifyFpgaImageAttribute",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"ModifyFpgaImageAttributeRequest"},
|
||||||
|
"output":{"shape":"ModifyFpgaImageAttributeResult"}
|
||||||
|
},
|
||||||
"ModifyHosts":{
|
"ModifyHosts":{
|
||||||
"name":"ModifyHosts",
|
"name":"ModifyHosts",
|
||||||
"http":{
|
"http":{
|
||||||
@ -1928,6 +1964,15 @@
|
|||||||
"input":{"shape":"RequestSpotInstancesRequest"},
|
"input":{"shape":"RequestSpotInstancesRequest"},
|
||||||
"output":{"shape":"RequestSpotInstancesResult"}
|
"output":{"shape":"RequestSpotInstancesResult"}
|
||||||
},
|
},
|
||||||
|
"ResetFpgaImageAttribute":{
|
||||||
|
"name":"ResetFpgaImageAttribute",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"ResetFpgaImageAttributeRequest"},
|
||||||
|
"output":{"shape":"ResetFpgaImageAttributeResult"}
|
||||||
|
},
|
||||||
"ResetImageAttribute":{
|
"ResetImageAttribute":{
|
||||||
"name":"ResetImageAttribute",
|
"name":"ResetImageAttribute",
|
||||||
"http":{
|
"http":{
|
||||||
@ -3458,6 +3503,30 @@
|
|||||||
"completed"
|
"completed"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"CopyFpgaImageRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":[
|
||||||
|
"SourceFpgaImageId",
|
||||||
|
"SourceRegion"
|
||||||
|
],
|
||||||
|
"members":{
|
||||||
|
"DryRun":{"shape":"Boolean"},
|
||||||
|
"SourceFpgaImageId":{"shape":"String"},
|
||||||
|
"Description":{"shape":"String"},
|
||||||
|
"Name":{"shape":"String"},
|
||||||
|
"SourceRegion":{"shape":"String"},
|
||||||
|
"ClientToken":{"shape":"String"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"CopyFpgaImageResult":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"FpgaImageId":{
|
||||||
|
"shape":"String",
|
||||||
|
"locationName":"fpgaImageId"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"CopyImageRequest":{
|
"CopyImageRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":[
|
"required":[
|
||||||
@ -4527,6 +4596,23 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DeleteFpgaImageRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["FpgaImageId"],
|
||||||
|
"members":{
|
||||||
|
"DryRun":{"shape":"Boolean"},
|
||||||
|
"FpgaImageId":{"shape":"String"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DeleteFpgaImageResult":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"Return":{
|
||||||
|
"shape":"Boolean",
|
||||||
|
"locationName":"return"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"DeleteInternetGatewayRequest":{
|
"DeleteInternetGatewayRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":["InternetGatewayId"],
|
"required":["InternetGatewayId"],
|
||||||
@ -5183,6 +5269,27 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DescribeFpgaImageAttributeRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":[
|
||||||
|
"FpgaImageId",
|
||||||
|
"Attribute"
|
||||||
|
],
|
||||||
|
"members":{
|
||||||
|
"DryRun":{"shape":"Boolean"},
|
||||||
|
"FpgaImageId":{"shape":"String"},
|
||||||
|
"Attribute":{"shape":"FpgaImageAttributeName"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DescribeFpgaImageAttributeResult":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"FpgaImageAttribute":{
|
||||||
|
"shape":"FpgaImageAttribute",
|
||||||
|
"locationName":"fpgaImageAttribute"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"DescribeFpgaImagesRequest":{
|
"DescribeFpgaImagesRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -7854,9 +7961,47 @@
|
|||||||
"Tags":{
|
"Tags":{
|
||||||
"shape":"TagList",
|
"shape":"TagList",
|
||||||
"locationName":"tags"
|
"locationName":"tags"
|
||||||
|
},
|
||||||
|
"Public":{
|
||||||
|
"shape":"Boolean",
|
||||||
|
"locationName":"public"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"FpgaImageAttribute":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"FpgaImageId":{
|
||||||
|
"shape":"String",
|
||||||
|
"locationName":"fpgaImageId"
|
||||||
|
},
|
||||||
|
"Name":{
|
||||||
|
"shape":"String",
|
||||||
|
"locationName":"name"
|
||||||
|
},
|
||||||
|
"Description":{
|
||||||
|
"shape":"String",
|
||||||
|
"locationName":"description"
|
||||||
|
},
|
||||||
|
"LoadPermissions":{
|
||||||
|
"shape":"LoadPermissionList",
|
||||||
|
"locationName":"loadPermissions"
|
||||||
|
},
|
||||||
|
"ProductCodes":{
|
||||||
|
"shape":"ProductCodeList",
|
||||||
|
"locationName":"productCodes"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"FpgaImageAttributeName":{
|
||||||
|
"type":"string",
|
||||||
|
"enum":[
|
||||||
|
"description",
|
||||||
|
"name",
|
||||||
|
"loadPermission",
|
||||||
|
"productCodes"
|
||||||
|
]
|
||||||
|
},
|
||||||
"FpgaImageIdList":{
|
"FpgaImageIdList":{
|
||||||
"type":"list",
|
"type":"list",
|
||||||
"member":{
|
"member":{
|
||||||
@ -10281,12 +10426,87 @@
|
|||||||
"closed"
|
"closed"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"LoadPermission":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"UserId":{
|
||||||
|
"shape":"String",
|
||||||
|
"locationName":"userId"
|
||||||
|
},
|
||||||
|
"Group":{
|
||||||
|
"shape":"PermissionGroup",
|
||||||
|
"locationName":"group"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"LoadPermissionList":{
|
||||||
|
"type":"list",
|
||||||
|
"member":{
|
||||||
|
"shape":"LoadPermission",
|
||||||
|
"locationName":"item"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"LoadPermissionListRequest":{
|
||||||
|
"type":"list",
|
||||||
|
"member":{
|
||||||
|
"shape":"LoadPermissionRequest",
|
||||||
|
"locationName":"item"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"LoadPermissionModifications":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"Add":{"shape":"LoadPermissionListRequest"},
|
||||||
|
"Remove":{"shape":"LoadPermissionListRequest"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"LoadPermissionRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"Group":{"shape":"PermissionGroup"},
|
||||||
|
"UserId":{"shape":"String"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"Long":{"type":"long"},
|
"Long":{"type":"long"},
|
||||||
"MaxResults":{
|
"MaxResults":{
|
||||||
"type":"integer",
|
"type":"integer",
|
||||||
"max":255,
|
"max":255,
|
||||||
"min":5
|
"min":5
|
||||||
},
|
},
|
||||||
|
"ModifyFpgaImageAttributeRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["FpgaImageId"],
|
||||||
|
"members":{
|
||||||
|
"DryRun":{"shape":"Boolean"},
|
||||||
|
"FpgaImageId":{"shape":"String"},
|
||||||
|
"Attribute":{"shape":"FpgaImageAttributeName"},
|
||||||
|
"OperationType":{"shape":"OperationType"},
|
||||||
|
"UserIds":{
|
||||||
|
"shape":"UserIdStringList",
|
||||||
|
"locationName":"UserId"
|
||||||
|
},
|
||||||
|
"UserGroups":{
|
||||||
|
"shape":"UserGroupStringList",
|
||||||
|
"locationName":"UserGroup"
|
||||||
|
},
|
||||||
|
"ProductCodes":{
|
||||||
|
"shape":"ProductCodeStringList",
|
||||||
|
"locationName":"ProductCode"
|
||||||
|
},
|
||||||
|
"LoadPermission":{"shape":"LoadPermissionModifications"},
|
||||||
|
"Description":{"shape":"String"},
|
||||||
|
"Name":{"shape":"String"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ModifyFpgaImageAttributeResult":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"FpgaImageAttribute":{
|
||||||
|
"shape":"FpgaImageAttribute",
|
||||||
|
"locationName":"fpgaImageAttribute"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"ModifyHostsRequest":{
|
"ModifyHostsRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":[
|
"required":[
|
||||||
@ -11882,7 +12102,10 @@
|
|||||||
},
|
},
|
||||||
"PurchaseSet":{
|
"PurchaseSet":{
|
||||||
"type":"list",
|
"type":"list",
|
||||||
"member":{"shape":"Purchase"}
|
"member":{
|
||||||
|
"shape":"Purchase",
|
||||||
|
"locationName":"item"
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"PurchasedScheduledInstanceSet":{
|
"PurchasedScheduledInstanceSet":{
|
||||||
"type":"list",
|
"type":"list",
|
||||||
@ -12943,6 +13166,28 @@
|
|||||||
"locationName":"item"
|
"locationName":"item"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"ResetFpgaImageAttributeName":{
|
||||||
|
"type":"string",
|
||||||
|
"enum":["loadPermission"]
|
||||||
|
},
|
||||||
|
"ResetFpgaImageAttributeRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["FpgaImageId"],
|
||||||
|
"members":{
|
||||||
|
"DryRun":{"shape":"Boolean"},
|
||||||
|
"FpgaImageId":{"shape":"String"},
|
||||||
|
"Attribute":{"shape":"ResetFpgaImageAttributeName"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ResetFpgaImageAttributeResult":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"Return":{
|
||||||
|
"shape":"Boolean",
|
||||||
|
"locationName":"return"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"ResetImageAttributeName":{
|
"ResetImageAttributeName":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"enum":["launchPermission"]
|
"enum":["launchPermission"]
|
||||||
@ -14951,6 +15196,13 @@
|
|||||||
"ALL"
|
"ALL"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"TunnelOptionsList":{
|
||||||
|
"type":"list",
|
||||||
|
"member":{
|
||||||
|
"shape":"VpnTunnelOptionsSpecification",
|
||||||
|
"locationName":"item"
|
||||||
|
}
|
||||||
|
},
|
||||||
"UnassignIpv6AddressesRequest":{
|
"UnassignIpv6AddressesRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":[
|
"required":[
|
||||||
@ -15951,6 +16203,10 @@
|
|||||||
"shape":"String",
|
"shape":"String",
|
||||||
"locationName":"customerGatewayId"
|
"locationName":"customerGatewayId"
|
||||||
},
|
},
|
||||||
|
"Category":{
|
||||||
|
"shape":"String",
|
||||||
|
"locationName":"category"
|
||||||
|
},
|
||||||
"State":{
|
"State":{
|
||||||
"shape":"VpnState",
|
"shape":"VpnState",
|
||||||
"locationName":"state"
|
"locationName":"state"
|
||||||
@ -16014,7 +16270,8 @@
|
|||||||
"StaticRoutesOnly":{
|
"StaticRoutesOnly":{
|
||||||
"shape":"Boolean",
|
"shape":"Boolean",
|
||||||
"locationName":"staticRoutesOnly"
|
"locationName":"staticRoutesOnly"
|
||||||
}
|
},
|
||||||
|
"TunnelOptions":{"shape":"TunnelOptionsList"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"VpnGateway":{
|
"VpnGateway":{
|
||||||
@ -16097,6 +16354,13 @@
|
|||||||
"type":"string",
|
"type":"string",
|
||||||
"enum":["Static"]
|
"enum":["Static"]
|
||||||
},
|
},
|
||||||
|
"VpnTunnelOptionsSpecification":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"TunnelInsideCidr":{"shape":"String"},
|
||||||
|
"PreSharedKey":{"shape":"String"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"ZoneNameStringList":{
|
"ZoneNameStringList":{
|
||||||
"type":"list",
|
"type":"list",
|
||||||
"member":{
|
"member":{
|
||||||
|
209
vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/docs-2.json
generated
vendored
209
vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/docs-2.json
generated
vendored
File diff suppressed because one or more lines are too long
6
vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/waiters-2.json
generated
vendored
6
vendor/github.com/aws/aws-sdk-go/models/apis/ec2/2016-11-15/waiters-2.json
generated
vendored
@ -390,6 +390,12 @@
|
|||||||
"argument": "SpotInstanceRequests[].Status.Code",
|
"argument": "SpotInstanceRequests[].Status.Code",
|
||||||
"expected": "fulfilled"
|
"expected": "fulfilled"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"state": "success",
|
||||||
|
"matcher": "pathAll",
|
||||||
|
"argument": "SpotInstanceRequests[].Status.Code",
|
||||||
|
"expected": "request-canceled-and-instance-running"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"state": "failure",
|
"state": "failure",
|
||||||
"matcher": "pathAny",
|
"matcher": "pathAny",
|
||||||
|
14
vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/api-2.json
generated
vendored
14
vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/api-2.json
generated
vendored
@ -578,6 +578,7 @@
|
|||||||
"environment":{"shape":"EnvironmentVariables"},
|
"environment":{"shape":"EnvironmentVariables"},
|
||||||
"mountPoints":{"shape":"MountPointList"},
|
"mountPoints":{"shape":"MountPointList"},
|
||||||
"volumesFrom":{"shape":"VolumeFromList"},
|
"volumesFrom":{"shape":"VolumeFromList"},
|
||||||
|
"linuxParameters":{"shape":"LinuxParameters"},
|
||||||
"hostname":{"shape":"String"},
|
"hostname":{"shape":"String"},
|
||||||
"user":{"shape":"String"},
|
"user":{"shape":"String"},
|
||||||
"workingDirectory":{"shape":"String"},
|
"workingDirectory":{"shape":"String"},
|
||||||
@ -918,6 +919,13 @@
|
|||||||
},
|
},
|
||||||
"exception":true
|
"exception":true
|
||||||
},
|
},
|
||||||
|
"KernelCapabilities":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"add":{"shape":"StringList"},
|
||||||
|
"drop":{"shape":"StringList"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"KeyValuePair":{
|
"KeyValuePair":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -925,6 +933,12 @@
|
|||||||
"value":{"shape":"String"}
|
"value":{"shape":"String"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"LinuxParameters":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"capabilities":{"shape":"KernelCapabilities"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"ListAttributesRequest":{
|
"ListAttributesRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":["targetType"],
|
"required":["targetType"],
|
||||||
|
22
vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/docs-2.json
generated
vendored
22
vendor/github.com/aws/aws-sdk-go/models/apis/ecs/2014-11-13/docs-2.json
generated
vendored
@ -78,7 +78,7 @@
|
|||||||
"ContainerDefinition$disableNetworking": "<p>When this parameter is true, networking is disabled within the container. This parameter maps to <code>NetworkDisabled</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a>.</p>",
|
"ContainerDefinition$disableNetworking": "<p>When this parameter is true, networking is disabled within the container. This parameter maps to <code>NetworkDisabled</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a>.</p>",
|
||||||
"ContainerDefinition$privileged": "<p>When this parameter is true, the container is given elevated privileges on the host container instance (similar to the <code>root</code> user). This parameter maps to <code>Privileged</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--privileged</code> option to <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>.</p>",
|
"ContainerDefinition$privileged": "<p>When this parameter is true, the container is given elevated privileges on the host container instance (similar to the <code>root</code> user). This parameter maps to <code>Privileged</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--privileged</code> option to <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>.</p>",
|
||||||
"ContainerDefinition$readonlyRootFilesystem": "<p>When this parameter is true, the container is given read-only access to its root file system. This parameter maps to <code>ReadonlyRootfs</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--read-only</code> option to <code>docker run</code>.</p>",
|
"ContainerDefinition$readonlyRootFilesystem": "<p>When this parameter is true, the container is given read-only access to its root file system. This parameter maps to <code>ReadonlyRootfs</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--read-only</code> option to <code>docker run</code>.</p>",
|
||||||
"DeregisterContainerInstanceRequest$force": "<p>Forces the deregistration of the container instance. If you have tasks running on the container instance when you deregister it with the <code>force</code> option, these tasks remain running until you terminate the instance or the tasks stop through some other means, but they are orphaned (no longer monitored or accounted for by Amazon ECS). If an orphaned task on your container instance is part of an Amazon ECS service, then the service scheduler starts another copy of that task, on a different container instance if possible. </p> <p>Any containers in orphaned service tasks that are registered with a Classic load balancer or an Application load balancer target group are deregistered, and they will begin connection draining according to the settings on the load balancer or target group.</p>",
|
"DeregisterContainerInstanceRequest$force": "<p>Forces the deregistration of the container instance. If you have tasks running on the container instance when you deregister it with the <code>force</code> option, these tasks remain running until you terminate the instance or the tasks stop through some other means, but they are orphaned (no longer monitored or accounted for by Amazon ECS). If an orphaned task on your container instance is part of an Amazon ECS service, then the service scheduler starts another copy of that task, on a different container instance if possible. </p> <p>Any containers in orphaned service tasks that are registered with a Classic Load Balancer or an Application Load Balancer target group are deregistered, and they will begin connection draining according to the settings on the load balancer or target group.</p>",
|
||||||
"MountPoint$readOnly": "<p>If this value is <code>true</code>, the container has read-only access to the volume. If this value is <code>false</code>, then the container can write to the volume. The default value is <code>false</code>.</p>",
|
"MountPoint$readOnly": "<p>If this value is <code>true</code>, the container has read-only access to the volume. If this value is <code>false</code>, then the container can write to the volume. The default value is <code>false</code>.</p>",
|
||||||
"VolumeFrom$readOnly": "<p>If this value is <code>true</code>, the container has read-only access to the volume. If this value is <code>false</code>, then the container can write to the volume. The default value is <code>false</code>.</p>"
|
"VolumeFrom$readOnly": "<p>If this value is <code>true</code>, the container has read-only access to the volume. If this value is <code>false</code>, then the container can write to the volume. The default value is <code>false</code>.</p>"
|
||||||
}
|
}
|
||||||
@ -444,12 +444,24 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"KernelCapabilities": {
|
||||||
|
"base": "<p>The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker. For more information on the default capabilities and the non-default available capabilities, see <a href=\"https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities\">Runtime privilege and Linux capabilities</a> in the <i>Docker run reference</i>. For more detailed information on these Linux capabilities, see the <a href=\"http://man7.org/linux/man-pages/man7/capabilities.7.html\">capabilities(7)</a> Linux manual page.</p>",
|
||||||
|
"refs": {
|
||||||
|
"LinuxParameters$capabilities": "<p>The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
"KeyValuePair": {
|
"KeyValuePair": {
|
||||||
"base": "<p>A key and value pair object.</p>",
|
"base": "<p>A key and value pair object.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"EnvironmentVariables$member": null
|
"EnvironmentVariables$member": null
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"LinuxParameters": {
|
||||||
|
"base": "<p>Linux-specific options that are applied to the container, such as Linux <a>KernelCapabilities</a>.</p>",
|
||||||
|
"refs": {
|
||||||
|
"ContainerDefinition$linuxParameters": "<p>Linux-specific modifications that are applied to the container, such as Linux <a>KernelCapabilities</a>.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
"ListAttributesRequest": {
|
"ListAttributesRequest": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -529,7 +541,7 @@
|
|||||||
"LoadBalancers": {
|
"LoadBalancers": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateServiceRequest$loadBalancers": "<p>A load balancer object representing the load balancer to use with your service. Currently, you are limited to one load balancer or target group per service. After you create a service, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable.</p> <p>For Elastic Load Balancing Classic load balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.</p> <p>For Elastic Load Balancing Application load balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.</p>",
|
"CreateServiceRequest$loadBalancers": "<p>A load balancer object representing the load balancer to use with your service. Currently, you are limited to one load balancer or target group per service. After you create a service, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable.</p> <p>For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.</p> <p>For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.</p>",
|
||||||
"Service$loadBalancers": "<p>A list of Elastic Load Balancing load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.</p>"
|
"Service$loadBalancers": "<p>A list of Elastic Load Balancing load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -797,7 +809,7 @@
|
|||||||
"Container$lastStatus": "<p>The last known status of the container.</p>",
|
"Container$lastStatus": "<p>The last known status of the container.</p>",
|
||||||
"Container$reason": "<p>A short (255 max characters) human-readable string to provide additional details about a running or stopped container.</p>",
|
"Container$reason": "<p>A short (255 max characters) human-readable string to provide additional details about a running or stopped container.</p>",
|
||||||
"ContainerDefinition$name": "<p>The name of a container. If you are linking multiple containers together in a task definition, the <code>name</code> of one container can be entered in the <code>links</code> of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This parameter maps to <code>name</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--name</code> option to <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>. </p>",
|
"ContainerDefinition$name": "<p>The name of a container. If you are linking multiple containers together in a task definition, the <code>name</code> of one container can be entered in the <code>links</code> of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This parameter maps to <code>name</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--name</code> option to <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>. </p>",
|
||||||
"ContainerDefinition$image": "<p>The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with <code> <i>repository-url</i>/<i>image</i>:<i>tag</i> </code>. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to <code>Image</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>IMAGE</code> parameter of <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>.</p> <ul> <li> <p>Images in Amazon ECR repositories use the full registry and repository URI (for example, <code>012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name></code>). </p> </li> <li> <p>Images in official repositories on Docker Hub use a single name (for example, <code>ubuntu</code> or <code>mongo</code>).</p> </li> <li> <p>Images in other repositories on Docker Hub are qualified with an organization name (for example, <code>amazon/amazon-ecs-agent</code>).</p> </li> <li> <p>Images in other online repositories are qualified further by a domain name (for example, <code>quay.io/assemblyline/ubuntu</code>).</p> </li> </ul>",
|
"ContainerDefinition$image": "<p>The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with either <code> <i>repository-url</i>/<i>image</i>:<i>tag</i> </code> or <code> <i>repository-url</i>/<i>image</i>@<i>digest</i> </code>. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to <code>Image</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>IMAGE</code> parameter of <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>.</p> <ul> <li> <p>Images in Amazon ECR repositories can be specified by either using the full <code>registry/repository:tag</code> or <code>registry/repository@digest</code>. For example, <code>012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest</code> or <code>012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE</code>. </p> </li> <li> <p>Images in official repositories on Docker Hub use a single name (for example, <code>ubuntu</code> or <code>mongo</code>).</p> </li> <li> <p>Images in other repositories on Docker Hub are qualified with an organization name (for example, <code>amazon/amazon-ecs-agent</code>).</p> </li> <li> <p>Images in other online repositories are qualified further by a domain name (for example, <code>quay.io/assemblyline/ubuntu</code>).</p> </li> </ul>",
|
||||||
"ContainerDefinition$hostname": "<p>The hostname to use for your container. This parameter maps to <code>Hostname</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--hostname</code> option to <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>.</p>",
|
"ContainerDefinition$hostname": "<p>The hostname to use for your container. This parameter maps to <code>Hostname</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--hostname</code> option to <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>.</p>",
|
||||||
"ContainerDefinition$user": "<p>The user name to use inside the container. This parameter maps to <code>User</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--user</code> option to <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>.</p>",
|
"ContainerDefinition$user": "<p>The user name to use inside the container. This parameter maps to <code>User</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--user</code> option to <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>.</p>",
|
||||||
"ContainerDefinition$workingDirectory": "<p>The working directory in which to run commands inside the container. This parameter maps to <code>WorkingDir</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--workdir</code> option to <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>.</p>",
|
"ContainerDefinition$workingDirectory": "<p>The working directory in which to run commands inside the container. This parameter maps to <code>WorkingDir</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--workdir</code> option to <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>.</p>",
|
||||||
@ -866,7 +878,7 @@
|
|||||||
"ListTasksRequest$serviceName": "<p>The name of the service with which to filter the <code>ListTasks</code> results. Specifying a <code>serviceName</code> limits the results to tasks that belong to that service.</p>",
|
"ListTasksRequest$serviceName": "<p>The name of the service with which to filter the <code>ListTasks</code> results. Specifying a <code>serviceName</code> limits the results to tasks that belong to that service.</p>",
|
||||||
"ListTasksResponse$nextToken": "<p>The <code>nextToken</code> value to include in a future <code>ListTasks</code> request. When the results of a <code>ListTasks</code> request exceed <code>maxResults</code>, this value can be used to retrieve the next page of results. This value is <code>null</code> when there are no more results to return.</p>",
|
"ListTasksResponse$nextToken": "<p>The <code>nextToken</code> value to include in a future <code>ListTasks</code> request. When the results of a <code>ListTasks</code> request exceed <code>maxResults</code>, this value can be used to retrieve the next page of results. This value is <code>null</code> when there are no more results to return.</p>",
|
||||||
"LoadBalancer$targetGroupArn": "<p>The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group associated with a service.</p>",
|
"LoadBalancer$targetGroupArn": "<p>The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group associated with a service.</p>",
|
||||||
"LoadBalancer$loadBalancerName": "<p>The name of a Classic load balancer.</p>",
|
"LoadBalancer$loadBalancerName": "<p>The name of a load balancer.</p>",
|
||||||
"LoadBalancer$containerName": "<p>The name of the container (as it appears in a container definition) to associate with the load balancer.</p>",
|
"LoadBalancer$containerName": "<p>The name of the container (as it appears in a container definition) to associate with the load balancer.</p>",
|
||||||
"LogConfigurationOptionsMap$key": null,
|
"LogConfigurationOptionsMap$key": null,
|
||||||
"LogConfigurationOptionsMap$value": null,
|
"LogConfigurationOptionsMap$value": null,
|
||||||
@ -957,6 +969,8 @@
|
|||||||
"DescribeContainerInstancesRequest$containerInstances": "<p>A list of container instance IDs or full Amazon Resource Name (ARN) entries.</p>",
|
"DescribeContainerInstancesRequest$containerInstances": "<p>A list of container instance IDs or full Amazon Resource Name (ARN) entries.</p>",
|
||||||
"DescribeServicesRequest$services": "<p>A list of services to describe. You may specify up to 10 services to describe in a single operation.</p>",
|
"DescribeServicesRequest$services": "<p>A list of services to describe. You may specify up to 10 services to describe in a single operation.</p>",
|
||||||
"DescribeTasksRequest$tasks": "<p>A list of up to 100 task IDs or full Amazon Resource Name (ARN) entries.</p>",
|
"DescribeTasksRequest$tasks": "<p>A list of up to 100 task IDs or full Amazon Resource Name (ARN) entries.</p>",
|
||||||
|
"KernelCapabilities$add": "<p>The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to <code>CapAdd</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--cap-add</code> option to <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>.</p>",
|
||||||
|
"KernelCapabilities$drop": "<p>The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to <code>CapDrop</code> in the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container\">Create a container</a> section of the <a href=\"https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\">Docker Remote API</a> and the <code>--cap-drop</code> option to <a href=\"https://docs.docker.com/engine/reference/run/\">docker run</a>.</p>",
|
||||||
"ListClustersResponse$clusterArns": "<p>The list of full Amazon Resource Name (ARN) entries for each cluster associated with your account.</p>",
|
"ListClustersResponse$clusterArns": "<p>The list of full Amazon Resource Name (ARN) entries for each cluster associated with your account.</p>",
|
||||||
"ListContainerInstancesResponse$containerInstanceArns": "<p>The list of container instances with full Amazon Resource Name (ARN) entries for each container instance associated with the specified cluster.</p>",
|
"ListContainerInstancesResponse$containerInstanceArns": "<p>The list of container instances with full Amazon Resource Name (ARN) entries for each container instance associated with the specified cluster.</p>",
|
||||||
"ListServicesResponse$serviceArns": "<p>The list of full Amazon Resource Name (ARN) entries for each service associated with the specified cluster.</p>",
|
"ListServicesResponse$serviceArns": "<p>The list of full Amazon Resource Name (ARN) entries for each service associated with the specified cluster.</p>",
|
||||||
|
58
vendor/github.com/aws/aws-sdk-go/models/apis/greengrass/2017-06-07/api-2.json
generated
vendored
58
vendor/github.com/aws/aws-sdk-go/models/apis/greengrass/2017-06-07/api-2.json
generated
vendored
@ -968,6 +968,23 @@
|
|||||||
},
|
},
|
||||||
"errors" : [ ]
|
"errors" : [ ]
|
||||||
},
|
},
|
||||||
|
"ResetDeployments" : {
|
||||||
|
"name" : "ResetDeployments",
|
||||||
|
"http" : {
|
||||||
|
"method" : "POST",
|
||||||
|
"requestUri" : "/greengrass/groups/{GroupId}/deployments/$reset",
|
||||||
|
"responseCode" : 200
|
||||||
|
},
|
||||||
|
"input" : {
|
||||||
|
"shape" : "ResetDeploymentsRequest"
|
||||||
|
},
|
||||||
|
"output" : {
|
||||||
|
"shape" : "ResetDeploymentsResponse"
|
||||||
|
},
|
||||||
|
"errors" : [ {
|
||||||
|
"shape" : "BadRequestException"
|
||||||
|
} ]
|
||||||
|
},
|
||||||
"UpdateConnectivityInfo" : {
|
"UpdateConnectivityInfo" : {
|
||||||
"name" : "UpdateConnectivityInfo",
|
"name" : "UpdateConnectivityInfo",
|
||||||
"http" : {
|
"http" : {
|
||||||
@ -1873,6 +1890,9 @@
|
|||||||
"DeploymentId" : {
|
"DeploymentId" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
|
"DeploymentType" : {
|
||||||
|
"shape" : "DeploymentType"
|
||||||
|
},
|
||||||
"GroupArn" : {
|
"GroupArn" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
}
|
}
|
||||||
@ -1880,7 +1900,7 @@
|
|||||||
},
|
},
|
||||||
"DeploymentType" : {
|
"DeploymentType" : {
|
||||||
"type" : "string",
|
"type" : "string",
|
||||||
"enum" : [ "NewDeployment", "Redeployment" ]
|
"enum" : [ "NewDeployment", "Redeployment", "ResetDeployment", "ForceResetDeployment" ]
|
||||||
},
|
},
|
||||||
"Deployments" : {
|
"Deployments" : {
|
||||||
"type" : "list",
|
"type" : "list",
|
||||||
@ -2169,6 +2189,12 @@
|
|||||||
"DeploymentStatus" : {
|
"DeploymentStatus" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
|
"DeploymentType" : {
|
||||||
|
"shape" : "DeploymentType"
|
||||||
|
},
|
||||||
|
"ErrorDetails" : {
|
||||||
|
"shape" : "ErrorDetails"
|
||||||
|
},
|
||||||
"ErrorMessage" : {
|
"ErrorMessage" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
@ -3228,6 +3254,36 @@
|
|||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"ResetDeploymentsRequest" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"AmznClientToken" : {
|
||||||
|
"shape" : "__string",
|
||||||
|
"location" : "header",
|
||||||
|
"locationName" : "X-Amzn-Client-Token"
|
||||||
|
},
|
||||||
|
"Force" : {
|
||||||
|
"shape" : "__boolean"
|
||||||
|
},
|
||||||
|
"GroupId" : {
|
||||||
|
"shape" : "__string",
|
||||||
|
"location" : "uri",
|
||||||
|
"locationName" : "GroupId"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "GroupId" ]
|
||||||
|
},
|
||||||
|
"ResetDeploymentsResponse" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"DeploymentArn" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"DeploymentId" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"Subscription" : {
|
"Subscription" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
|
43
vendor/github.com/aws/aws-sdk-go/models/apis/greengrass/2017-06-07/docs-2.json
generated
vendored
43
vendor/github.com/aws/aws-sdk-go/models/apis/greengrass/2017-06-07/docs-2.json
generated
vendored
@ -1,5 +1,5 @@
|
|||||||
{
|
{
|
||||||
"version" : "1.0",
|
"version" : "2.0",
|
||||||
"service" : "AWS Greengrass seamlessly extends AWS onto physical devices so they can act locally on the data they generate, while still using the cloud for management, analytics, and durable storage. AWS Greengrass ensures your devices can respond quickly to local events and operate with intermittent connectivity. AWS Greengrass minimizes the cost of transmitting data to the cloud by allowing you to author AWS Lambda functions that execute locally.",
|
"service" : "AWS Greengrass seamlessly extends AWS onto physical devices so they can act locally on the data they generate, while still using the cloud for management, analytics, and durable storage. AWS Greengrass ensures your devices can respond quickly to local events and operate with intermittent connectivity. AWS Greengrass minimizes the cost of transmitting data to the cloud by allowing you to author AWS Lambda functions that execute locally.",
|
||||||
"operations" : {
|
"operations" : {
|
||||||
"AssociateRoleToGroup" : "Associates a role with a group. The role will be used by the AWS Greengrass core in order to access AWS cloud services. The role's permissions will allow Greengrass core Lambda functions to perform actions against the cloud.",
|
"AssociateRoleToGroup" : "Associates a role with a group. The role will be used by the AWS Greengrass core in order to access AWS cloud services. The role's permissions will allow Greengrass core Lambda functions to perform actions against the cloud.",
|
||||||
@ -58,6 +58,7 @@
|
|||||||
"ListLoggerDefinitions" : "Retrieves a list of logger definitions.",
|
"ListLoggerDefinitions" : "Retrieves a list of logger definitions.",
|
||||||
"ListSubscriptionDefinitionVersions" : "Lists the versions of a subscription definition.",
|
"ListSubscriptionDefinitionVersions" : "Lists the versions of a subscription definition.",
|
||||||
"ListSubscriptionDefinitions" : "Retrieves a list of subscription definitions.",
|
"ListSubscriptionDefinitions" : "Retrieves a list of subscription definitions.",
|
||||||
|
"ResetDeployments" : "Resets a group's deployments.",
|
||||||
"UpdateConnectivityInfo" : "Updates the connectivity information for the core. Any devices that belong to the group which has this core will receive this information in order to find the location of the core and connect to it.",
|
"UpdateConnectivityInfo" : "Updates the connectivity information for the core. Any devices that belong to the group which has this core will receive this information in order to find the location of the core and connect to it.",
|
||||||
"UpdateCoreDefinition" : "Updates a core definition.",
|
"UpdateCoreDefinition" : "Updates a core definition.",
|
||||||
"UpdateDeviceDefinition" : "Updates a device definition.",
|
"UpdateDeviceDefinition" : "Updates a device definition.",
|
||||||
@ -91,8 +92,8 @@
|
|||||||
"ConnectivityInfo" : {
|
"ConnectivityInfo" : {
|
||||||
"base" : "Connectivity Info",
|
"base" : "Connectivity Info",
|
||||||
"refs" : {
|
"refs" : {
|
||||||
"GetConnectivityInfoResponse$ConnectivityInfo" : "Connectivity info array",
|
"GetConnectivityInfoResponse$ConnectivityInfo" : "Connectivity info list",
|
||||||
"UpdateConnectivityInfoRequest$ConnectivityInfo" : "Connectivity info array"
|
"UpdateConnectivityInfoRequest$ConnectivityInfo" : "Connectivity info list"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Core" : {
|
"Core" : {
|
||||||
@ -134,13 +135,15 @@
|
|||||||
"DeploymentType" : {
|
"DeploymentType" : {
|
||||||
"base" : null,
|
"base" : null,
|
||||||
"refs" : {
|
"refs" : {
|
||||||
"CreateDeploymentRequest$DeploymentType" : "Type of deployment"
|
"CreateDeploymentRequest$DeploymentType" : "Type of deployment. When used in CreateDeployment, only NewDeployment and Redeployment are valid. ",
|
||||||
|
"Deployment$DeploymentType" : "The type of deployment.",
|
||||||
|
"GetDeploymentStatusResponse$DeploymentType" : "The type of the deployment."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Deployments" : {
|
"Deployments" : {
|
||||||
"base" : null,
|
"base" : null,
|
||||||
"refs" : {
|
"refs" : {
|
||||||
"ListDeploymentsResponse$Deployments" : "Information on deployments"
|
"ListDeploymentsResponse$Deployments" : "List of deployments for the requested groups"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Device" : {
|
"Device" : {
|
||||||
@ -176,7 +179,8 @@
|
|||||||
"ErrorDetails" : {
|
"ErrorDetails" : {
|
||||||
"base" : "Error Details",
|
"base" : "Error Details",
|
||||||
"refs" : {
|
"refs" : {
|
||||||
"GeneralError$ErrorDetails" : "Error Details"
|
"GeneralError$ErrorDetails" : "Error Details",
|
||||||
|
"GetDeploymentStatusResponse$ErrorDetails" : "The error Details"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Function" : {
|
"Function" : {
|
||||||
@ -262,7 +266,7 @@
|
|||||||
"refs" : { }
|
"refs" : { }
|
||||||
},
|
},
|
||||||
"GroupInformation" : {
|
"GroupInformation" : {
|
||||||
"base" : "Information of a group",
|
"base" : "Information on the group",
|
||||||
"refs" : {
|
"refs" : {
|
||||||
"ListGroupsResponse$Groups" : "Groups"
|
"ListGroupsResponse$Groups" : "Groups"
|
||||||
}
|
}
|
||||||
@ -274,11 +278,11 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"InternalServerErrorException" : {
|
"InternalServerErrorException" : {
|
||||||
"base" : "This request was invalid.",
|
"base" : "Server Error",
|
||||||
"refs" : { }
|
"refs" : { }
|
||||||
},
|
},
|
||||||
"ListDefinitionsResponse" : {
|
"ListDefinitionsResponse" : {
|
||||||
"base" : "List of definition response",
|
"base" : "List of definition responses",
|
||||||
"refs" : { }
|
"refs" : { }
|
||||||
},
|
},
|
||||||
"ListDeploymentsResponse" : {
|
"ListDeploymentsResponse" : {
|
||||||
@ -371,6 +375,14 @@
|
|||||||
"base" : null,
|
"base" : null,
|
||||||
"refs" : { }
|
"refs" : { }
|
||||||
},
|
},
|
||||||
|
"ResetDeploymentsRequest" : {
|
||||||
|
"base" : "Information needed to perform a reset of a group's deployments.",
|
||||||
|
"refs" : { }
|
||||||
|
},
|
||||||
|
"ResetDeploymentsResponse" : {
|
||||||
|
"base" : null,
|
||||||
|
"refs" : { }
|
||||||
|
},
|
||||||
"Subscription" : {
|
"Subscription" : {
|
||||||
"base" : "Information on subscription",
|
"base" : "Information on subscription",
|
||||||
"refs" : {
|
"refs" : {
|
||||||
@ -406,7 +418,8 @@
|
|||||||
"refs" : {
|
"refs" : {
|
||||||
"Core$SyncShadow" : "If true, the local shadow value automatically syncs with the cloud's shadow state.",
|
"Core$SyncShadow" : "If true, the local shadow value automatically syncs with the cloud's shadow state.",
|
||||||
"Device$SyncShadow" : "If true, the local shadow value automatically syncs with the cloud's shadow state.",
|
"Device$SyncShadow" : "If true, the local shadow value automatically syncs with the cloud's shadow state.",
|
||||||
"FunctionConfiguration$Pinned" : "Whether the function is pinned or not. Pinned means the function is long-lived and starts when the core starts."
|
"FunctionConfiguration$Pinned" : "Whether the function is pinned or not. Pinned means the function is long-lived and starts when the core starts.",
|
||||||
|
"ResetDeploymentsRequest$Force" : "When set to true, perform a best-effort only core reset."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"__integer" : {
|
"__integer" : {
|
||||||
@ -433,8 +446,8 @@
|
|||||||
"Core$ThingArn" : "Thing arn of the core.",
|
"Core$ThingArn" : "Thing arn of the core.",
|
||||||
"CreateDeploymentRequest$DeploymentId" : "Id of the deployment if you wish to redeploy a previous deployment.",
|
"CreateDeploymentRequest$DeploymentId" : "Id of the deployment if you wish to redeploy a previous deployment.",
|
||||||
"CreateDeploymentRequest$GroupVersionId" : "Group Version you wish to deploy.",
|
"CreateDeploymentRequest$GroupVersionId" : "Group Version you wish to deploy.",
|
||||||
"CreateDeploymentResponse$DeploymentArn" : "Arn of the deployment.",
|
"CreateDeploymentResponse$DeploymentArn" : "The arn of the deployment.",
|
||||||
"CreateDeploymentResponse$DeploymentId" : "Id of the deployment.",
|
"CreateDeploymentResponse$DeploymentId" : "The id of the deployment.",
|
||||||
"CreateGroupCertificateAuthorityResponse$GroupCertificateAuthorityArn" : "Arn of the group certificate authority.",
|
"CreateGroupCertificateAuthorityResponse$GroupCertificateAuthorityArn" : "Arn of the group certificate authority.",
|
||||||
"DefinitionInformation$Arn" : "Arn of the definition.",
|
"DefinitionInformation$Arn" : "Arn of the definition.",
|
||||||
"DefinitionInformation$CreationTimestamp" : "Timestamp of when the definition was created.",
|
"DefinitionInformation$CreationTimestamp" : "Timestamp of when the definition was created.",
|
||||||
@ -458,7 +471,7 @@
|
|||||||
"Function$Id" : "Id of the function in this version.",
|
"Function$Id" : "Id of the function in this version.",
|
||||||
"FunctionConfiguration$ExecArgs" : "Execution Arguments",
|
"FunctionConfiguration$ExecArgs" : "Execution Arguments",
|
||||||
"FunctionConfiguration$Executable" : "Executable",
|
"FunctionConfiguration$Executable" : "Executable",
|
||||||
"GeneralError$Message" : "Message",
|
"GeneralError$Message" : "Message containing information about the error",
|
||||||
"GetAssociatedRoleResponse$AssociatedAt" : "Time when the role was associated for the group.",
|
"GetAssociatedRoleResponse$AssociatedAt" : "Time when the role was associated for the group.",
|
||||||
"GetAssociatedRoleResponse$RoleArn" : "Arn of the role that is associated with the group.",
|
"GetAssociatedRoleResponse$RoleArn" : "Arn of the role that is associated with the group.",
|
||||||
"GetConnectivityInfoResponse$message" : "Response Text",
|
"GetConnectivityInfoResponse$message" : "Response Text",
|
||||||
@ -516,6 +529,8 @@
|
|||||||
"ListGroupsResponse$NextToken" : "The token for the next set of results, or ''null'' if there are no additional results.",
|
"ListGroupsResponse$NextToken" : "The token for the next set of results, or ''null'' if there are no additional results.",
|
||||||
"ListVersionsResponse$NextToken" : "The token for the next set of results, or ''null'' if there are no additional results.",
|
"ListVersionsResponse$NextToken" : "The token for the next set of results, or ''null'' if there are no additional results.",
|
||||||
"Logger$Id" : "Element Id for this entry in the list.",
|
"Logger$Id" : "Element Id for this entry in the list.",
|
||||||
|
"ResetDeploymentsResponse$DeploymentArn" : "The arn of the reset deployment.",
|
||||||
|
"ResetDeploymentsResponse$DeploymentId" : "The id of the reset deployment.",
|
||||||
"Subscription$Id" : "Element Id for this entry in the list.",
|
"Subscription$Id" : "Element Id for this entry in the list.",
|
||||||
"Subscription$Source" : "Source of the subscription. Can be a thing arn, lambda arn or word 'cloud'",
|
"Subscription$Source" : "Source of the subscription. Can be a thing arn, lambda arn or word 'cloud'",
|
||||||
"Subscription$Subject" : "Subject of the message.",
|
"Subscription$Subject" : "Subject of the message.",
|
||||||
@ -530,4 +545,4 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
168
vendor/github.com/aws/aws-sdk-go/models/apis/kinesisanalytics/2015-08-14/api-2.json
generated
vendored
168
vendor/github.com/aws/aws-sdk-go/models/apis/kinesisanalytics/2015-08-14/api-2.json
generated
vendored
@ -44,6 +44,21 @@
|
|||||||
{"shape":"CodeValidationException"}
|
{"shape":"CodeValidationException"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"AddApplicationInputProcessingConfiguration":{
|
||||||
|
"name":"AddApplicationInputProcessingConfiguration",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"AddApplicationInputProcessingConfigurationRequest"},
|
||||||
|
"output":{"shape":"AddApplicationInputProcessingConfigurationResponse"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"ResourceNotFoundException"},
|
||||||
|
{"shape":"ResourceInUseException"},
|
||||||
|
{"shape":"InvalidArgumentException"},
|
||||||
|
{"shape":"ConcurrentModificationException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
"AddApplicationOutput":{
|
"AddApplicationOutput":{
|
||||||
"name":"AddApplicationOutput",
|
"name":"AddApplicationOutput",
|
||||||
"http":{
|
"http":{
|
||||||
@ -118,6 +133,21 @@
|
|||||||
{"shape":"ConcurrentModificationException"}
|
{"shape":"ConcurrentModificationException"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"DeleteApplicationInputProcessingConfiguration":{
|
||||||
|
"name":"DeleteApplicationInputProcessingConfiguration",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"DeleteApplicationInputProcessingConfigurationRequest"},
|
||||||
|
"output":{"shape":"DeleteApplicationInputProcessingConfigurationResponse"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"ResourceNotFoundException"},
|
||||||
|
{"shape":"ResourceInUseException"},
|
||||||
|
{"shape":"InvalidArgumentException"},
|
||||||
|
{"shape":"ConcurrentModificationException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
"DeleteApplicationOutput":{
|
"DeleteApplicationOutput":{
|
||||||
"name":"DeleteApplicationOutput",
|
"name":"DeleteApplicationOutput",
|
||||||
"http":{
|
"http":{
|
||||||
@ -248,6 +278,26 @@
|
|||||||
"members":{
|
"members":{
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"AddApplicationInputProcessingConfigurationRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":[
|
||||||
|
"ApplicationName",
|
||||||
|
"CurrentApplicationVersionId",
|
||||||
|
"InputId",
|
||||||
|
"InputProcessingConfiguration"
|
||||||
|
],
|
||||||
|
"members":{
|
||||||
|
"ApplicationName":{"shape":"ApplicationName"},
|
||||||
|
"CurrentApplicationVersionId":{"shape":"ApplicationVersionId"},
|
||||||
|
"InputId":{"shape":"Id"},
|
||||||
|
"InputProcessingConfiguration":{"shape":"InputProcessingConfiguration"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"AddApplicationInputProcessingConfigurationResponse":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
}
|
||||||
|
},
|
||||||
"AddApplicationInputRequest":{
|
"AddApplicationInputRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":[
|
"required":[
|
||||||
@ -497,6 +547,24 @@
|
|||||||
"members":{
|
"members":{
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DeleteApplicationInputProcessingConfigurationRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":[
|
||||||
|
"ApplicationName",
|
||||||
|
"CurrentApplicationVersionId",
|
||||||
|
"InputId"
|
||||||
|
],
|
||||||
|
"members":{
|
||||||
|
"ApplicationName":{"shape":"ApplicationName"},
|
||||||
|
"CurrentApplicationVersionId":{"shape":"ApplicationVersionId"},
|
||||||
|
"InputId":{"shape":"Id"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DeleteApplicationInputProcessingConfigurationResponse":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
}
|
||||||
|
},
|
||||||
"DeleteApplicationOutputRequest":{
|
"DeleteApplicationOutputRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":[
|
"required":[
|
||||||
@ -571,15 +639,12 @@
|
|||||||
},
|
},
|
||||||
"DiscoverInputSchemaRequest":{
|
"DiscoverInputSchemaRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":[
|
|
||||||
"ResourceARN",
|
|
||||||
"RoleARN",
|
|
||||||
"InputStartingPositionConfiguration"
|
|
||||||
],
|
|
||||||
"members":{
|
"members":{
|
||||||
"ResourceARN":{"shape":"ResourceARN"},
|
"ResourceARN":{"shape":"ResourceARN"},
|
||||||
"RoleARN":{"shape":"RoleARN"},
|
"RoleARN":{"shape":"RoleARN"},
|
||||||
"InputStartingPositionConfiguration":{"shape":"InputStartingPositionConfiguration"}
|
"InputStartingPositionConfiguration":{"shape":"InputStartingPositionConfiguration"},
|
||||||
|
"S3Configuration":{"shape":"S3Configuration"},
|
||||||
|
"InputProcessingConfiguration":{"shape":"InputProcessingConfiguration"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DiscoverInputSchemaResponse":{
|
"DiscoverInputSchemaResponse":{
|
||||||
@ -587,11 +652,16 @@
|
|||||||
"members":{
|
"members":{
|
||||||
"InputSchema":{"shape":"SourceSchema"},
|
"InputSchema":{"shape":"SourceSchema"},
|
||||||
"ParsedInputRecords":{"shape":"ParsedInputRecords"},
|
"ParsedInputRecords":{"shape":"ParsedInputRecords"},
|
||||||
|
"ProcessedInputRecords":{"shape":"ProcessedInputRecords"},
|
||||||
"RawInputRecords":{"shape":"RawInputRecords"}
|
"RawInputRecords":{"shape":"RawInputRecords"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ErrorMessage":{"type":"string"},
|
"ErrorMessage":{"type":"string"},
|
||||||
"FileKey":{"type":"string"},
|
"FileKey":{
|
||||||
|
"type":"string",
|
||||||
|
"max":1024,
|
||||||
|
"min":1
|
||||||
|
},
|
||||||
"Id":{
|
"Id":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"max":50,
|
"max":50,
|
||||||
@ -622,6 +692,7 @@
|
|||||||
],
|
],
|
||||||
"members":{
|
"members":{
|
||||||
"NamePrefix":{"shape":"InAppStreamName"},
|
"NamePrefix":{"shape":"InAppStreamName"},
|
||||||
|
"InputProcessingConfiguration":{"shape":"InputProcessingConfiguration"},
|
||||||
"KinesisStreamsInput":{"shape":"KinesisStreamsInput"},
|
"KinesisStreamsInput":{"shape":"KinesisStreamsInput"},
|
||||||
"KinesisFirehoseInput":{"shape":"KinesisFirehoseInput"},
|
"KinesisFirehoseInput":{"shape":"KinesisFirehoseInput"},
|
||||||
"InputParallelism":{"shape":"InputParallelism"},
|
"InputParallelism":{"shape":"InputParallelism"},
|
||||||
@ -649,6 +720,7 @@
|
|||||||
"InputId":{"shape":"Id"},
|
"InputId":{"shape":"Id"},
|
||||||
"NamePrefix":{"shape":"InAppStreamName"},
|
"NamePrefix":{"shape":"InAppStreamName"},
|
||||||
"InAppStreamNames":{"shape":"InAppStreamNames"},
|
"InAppStreamNames":{"shape":"InAppStreamNames"},
|
||||||
|
"InputProcessingConfigurationDescription":{"shape":"InputProcessingConfigurationDescription"},
|
||||||
"KinesisStreamsInputDescription":{"shape":"KinesisStreamsInputDescription"},
|
"KinesisStreamsInputDescription":{"shape":"KinesisStreamsInputDescription"},
|
||||||
"KinesisFirehoseInputDescription":{"shape":"KinesisFirehoseInputDescription"},
|
"KinesisFirehoseInputDescription":{"shape":"KinesisFirehoseInputDescription"},
|
||||||
"InputSchema":{"shape":"SourceSchema"},
|
"InputSchema":{"shape":"SourceSchema"},
|
||||||
@ -660,6 +732,31 @@
|
|||||||
"type":"list",
|
"type":"list",
|
||||||
"member":{"shape":"InputDescription"}
|
"member":{"shape":"InputDescription"}
|
||||||
},
|
},
|
||||||
|
"InputLambdaProcessor":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":[
|
||||||
|
"ResourceARN",
|
||||||
|
"RoleARN"
|
||||||
|
],
|
||||||
|
"members":{
|
||||||
|
"ResourceARN":{"shape":"ResourceARN"},
|
||||||
|
"RoleARN":{"shape":"RoleARN"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"InputLambdaProcessorDescription":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"ResourceARN":{"shape":"ResourceARN"},
|
||||||
|
"RoleARN":{"shape":"RoleARN"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"InputLambdaProcessorUpdate":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"ResourceARNUpdate":{"shape":"ResourceARN"},
|
||||||
|
"RoleARNUpdate":{"shape":"RoleARN"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"InputParallelism":{
|
"InputParallelism":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -677,6 +774,26 @@
|
|||||||
"CountUpdate":{"shape":"InputParallelismCount"}
|
"CountUpdate":{"shape":"InputParallelismCount"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"InputProcessingConfiguration":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["InputLambdaProcessor"],
|
||||||
|
"members":{
|
||||||
|
"InputLambdaProcessor":{"shape":"InputLambdaProcessor"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"InputProcessingConfigurationDescription":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"InputLambdaProcessorDescription":{"shape":"InputLambdaProcessorDescription"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"InputProcessingConfigurationUpdate":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["InputLambdaProcessorUpdate"],
|
||||||
|
"members":{
|
||||||
|
"InputLambdaProcessorUpdate":{"shape":"InputLambdaProcessorUpdate"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"InputSchemaUpdate":{
|
"InputSchemaUpdate":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -705,6 +822,7 @@
|
|||||||
"members":{
|
"members":{
|
||||||
"InputId":{"shape":"Id"},
|
"InputId":{"shape":"Id"},
|
||||||
"NamePrefixUpdate":{"shape":"InAppStreamName"},
|
"NamePrefixUpdate":{"shape":"InAppStreamName"},
|
||||||
|
"InputProcessingConfigurationUpdate":{"shape":"InputProcessingConfigurationUpdate"},
|
||||||
"KinesisStreamsInputUpdate":{"shape":"KinesisStreamsInputUpdate"},
|
"KinesisStreamsInputUpdate":{"shape":"KinesisStreamsInputUpdate"},
|
||||||
"KinesisFirehoseInputUpdate":{"shape":"KinesisFirehoseInputUpdate"},
|
"KinesisFirehoseInputUpdate":{"shape":"KinesisFirehoseInputUpdate"},
|
||||||
"InputSchemaUpdate":{"shape":"InputSchemaUpdate"},
|
"InputSchemaUpdate":{"shape":"InputSchemaUpdate"},
|
||||||
@ -938,6 +1056,11 @@
|
|||||||
"type":"list",
|
"type":"list",
|
||||||
"member":{"shape":"ParsedInputRecord"}
|
"member":{"shape":"ParsedInputRecord"}
|
||||||
},
|
},
|
||||||
|
"ProcessedInputRecord":{"type":"string"},
|
||||||
|
"ProcessedInputRecords":{
|
||||||
|
"type":"list",
|
||||||
|
"member":{"shape":"ProcessedInputRecord"}
|
||||||
|
},
|
||||||
"RawInputRecord":{"type":"string"},
|
"RawInputRecord":{"type":"string"},
|
||||||
"RawInputRecords":{
|
"RawInputRecords":{
|
||||||
"type":"list",
|
"type":"list",
|
||||||
@ -964,7 +1087,10 @@
|
|||||||
"type":"string",
|
"type":"string",
|
||||||
"pattern":"[a-zA-Z_][a-zA-Z0-9_]*"
|
"pattern":"[a-zA-Z_][a-zA-Z0-9_]*"
|
||||||
},
|
},
|
||||||
"RecordColumnSqlType":{"type":"string"},
|
"RecordColumnSqlType":{
|
||||||
|
"type":"string",
|
||||||
|
"min":1
|
||||||
|
},
|
||||||
"RecordColumns":{
|
"RecordColumns":{
|
||||||
"type":"list",
|
"type":"list",
|
||||||
"member":{"shape":"RecordColumn"},
|
"member":{"shape":"RecordColumn"},
|
||||||
@ -994,7 +1120,10 @@
|
|||||||
"type":"string",
|
"type":"string",
|
||||||
"min":1
|
"min":1
|
||||||
},
|
},
|
||||||
"RecordRowPath":{"type":"string"},
|
"RecordRowPath":{
|
||||||
|
"type":"string",
|
||||||
|
"min":1
|
||||||
|
},
|
||||||
"ReferenceDataSource":{
|
"ReferenceDataSource":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":[
|
"required":[
|
||||||
@ -1043,7 +1172,7 @@
|
|||||||
"type":"string",
|
"type":"string",
|
||||||
"max":2048,
|
"max":2048,
|
||||||
"min":1,
|
"min":1,
|
||||||
"pattern":"arn:[a-zA-Z0-9\\-]+:[a-zA-Z0-9\\-]+:[a-zA-Z0-9\\-]*:\\d{12}:[a-zA-Z_0-9+=,.@\\-_/:]+"
|
"pattern":"arn:.*"
|
||||||
},
|
},
|
||||||
"ResourceInUseException":{
|
"ResourceInUseException":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
@ -1072,6 +1201,19 @@
|
|||||||
"min":1,
|
"min":1,
|
||||||
"pattern":"arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+"
|
"pattern":"arn:aws:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+"
|
||||||
},
|
},
|
||||||
|
"S3Configuration":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":[
|
||||||
|
"RoleARN",
|
||||||
|
"BucketARN",
|
||||||
|
"FileKey"
|
||||||
|
],
|
||||||
|
"members":{
|
||||||
|
"RoleARN":{"shape":"RoleARN"},
|
||||||
|
"BucketARN":{"shape":"BucketARN"},
|
||||||
|
"FileKey":{"shape":"FileKey"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"S3ReferenceDataSource":{
|
"S3ReferenceDataSource":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":[
|
"required":[
|
||||||
@ -1111,7 +1253,8 @@
|
|||||||
"members":{
|
"members":{
|
||||||
"message":{"shape":"ErrorMessage"}
|
"message":{"shape":"ErrorMessage"}
|
||||||
},
|
},
|
||||||
"exception":true
|
"exception":true,
|
||||||
|
"fault":true
|
||||||
},
|
},
|
||||||
"SourceSchema":{
|
"SourceSchema":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
@ -1158,7 +1301,8 @@
|
|||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
"message":{"shape":"ErrorMessage"},
|
"message":{"shape":"ErrorMessage"},
|
||||||
"RawInputRecords":{"shape":"RawInputRecords"}
|
"RawInputRecords":{"shape":"RawInputRecords"},
|
||||||
|
"ProcessedInputRecords":{"shape":"ProcessedInputRecords"}
|
||||||
},
|
},
|
||||||
"exception":true
|
"exception":true
|
||||||
},
|
},
|
||||||
|
102
vendor/github.com/aws/aws-sdk-go/models/apis/kinesisanalytics/2015-08-14/docs-2.json
generated
vendored
102
vendor/github.com/aws/aws-sdk-go/models/apis/kinesisanalytics/2015-08-14/docs-2.json
generated
vendored
@ -4,11 +4,13 @@
|
|||||||
"operations": {
|
"operations": {
|
||||||
"AddApplicationCloudWatchLoggingOption": "<p>Adds a CloudWatch log stream to monitor application configuration errors. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html\">Working with Amazon CloudWatch Logs</a>.</p>",
|
"AddApplicationCloudWatchLoggingOption": "<p>Adds a CloudWatch log stream to monitor application configuration errors. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html\">Working with Amazon CloudWatch Logs</a>.</p>",
|
||||||
"AddApplicationInput": "<p> Adds a streaming source to your Amazon Kinesis application. For conceptual information, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html\">Configuring Application Input</a>. </p> <p>You can add a streaming source either when you create an application or you can use this operation to add a streaming source after you create an application. For more information, see <a>CreateApplication</a>.</p> <p>Any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the <a>DescribeApplication</a> operation to find the current application version. </p> <p>This operation requires permissions to perform the <code>kinesisanalytics:AddApplicationInput</code> action.</p>",
|
"AddApplicationInput": "<p> Adds a streaming source to your Amazon Kinesis application. For conceptual information, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html\">Configuring Application Input</a>. </p> <p>You can add a streaming source either when you create an application or you can use this operation to add a streaming source after you create an application. For more information, see <a>CreateApplication</a>.</p> <p>Any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the <a>DescribeApplication</a> operation to find the current application version. </p> <p>This operation requires permissions to perform the <code>kinesisanalytics:AddApplicationInput</code> action.</p>",
|
||||||
|
"AddApplicationInputProcessingConfiguration": "<p>Adds an <a>InputProcessingConfiguration</a> to an application. An input processor preprocesses records on the input stream before the application's SQL code executes. Currently, the only input processor available is <a href=\"https://aws.amazon.com/documentation/lambda/\">AWS Lambda</a>.</p>",
|
||||||
"AddApplicationOutput": "<p>Adds an external destination to your Amazon Kinesis Analytics application.</p> <p>If you want Amazon Kinesis Analytics to deliver data from an in-application stream within your application to an external destination (such as an Amazon Kinesis stream or a Firehose delivery stream), you add the relevant configuration to your application using this operation. You can configure one or more outputs for your application. Each output configuration maps an in-application stream and an external destination.</p> <p> You can use one of the output configurations to deliver data from your in-application error stream to an external destination so that you can analyze the errors. For conceptual information, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html\">Understanding Application Output (Destination)</a>. </p> <p> Note that any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the <a>DescribeApplication</a> operation to find the current application version.</p> <p>For the limits on the number of application inputs and outputs you can configure, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html\">Limits</a>.</p> <p>This operation requires permissions to perform the <code>kinesisanalytics:AddApplicationOutput</code> action.</p>",
|
"AddApplicationOutput": "<p>Adds an external destination to your Amazon Kinesis Analytics application.</p> <p>If you want Amazon Kinesis Analytics to deliver data from an in-application stream within your application to an external destination (such as an Amazon Kinesis stream or a Firehose delivery stream), you add the relevant configuration to your application using this operation. You can configure one or more outputs for your application. Each output configuration maps an in-application stream and an external destination.</p> <p> You can use one of the output configurations to deliver data from your in-application error stream to an external destination so that you can analyze the errors. For conceptual information, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html\">Understanding Application Output (Destination)</a>. </p> <p> Note that any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the <a>DescribeApplication</a> operation to find the current application version.</p> <p>For the limits on the number of application inputs and outputs you can configure, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html\">Limits</a>.</p> <p>This operation requires permissions to perform the <code>kinesisanalytics:AddApplicationOutput</code> action.</p>",
|
||||||
"AddApplicationReferenceDataSource": "<p>Adds a reference data source to an existing application.</p> <p>Amazon Kinesis Analytics reads reference data (that is, an Amazon S3 object) and creates an in-application table within your application. In the request, you provide the source (S3 bucket name and object key name), name of the in-application table to create, and the necessary mapping information that describes how data in Amazon S3 object maps to columns in the resulting in-application table.</p> <p> For conceptual information, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html\">Configuring Application Input</a>. For the limits on data sources you can add to your application, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html\">Limits</a>. </p> <p> This operation requires permissions to perform the <code>kinesisanalytics:AddApplicationOutput</code> action. </p>",
|
"AddApplicationReferenceDataSource": "<p>Adds a reference data source to an existing application.</p> <p>Amazon Kinesis Analytics reads reference data (that is, an Amazon S3 object) and creates an in-application table within your application. In the request, you provide the source (S3 bucket name and object key name), name of the in-application table to create, and the necessary mapping information that describes how data in Amazon S3 object maps to columns in the resulting in-application table.</p> <p> For conceptual information, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html\">Configuring Application Input</a>. For the limits on data sources you can add to your application, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html\">Limits</a>. </p> <p> This operation requires permissions to perform the <code>kinesisanalytics:AddApplicationOutput</code> action. </p>",
|
||||||
"CreateApplication": "<p> Creates an Amazon Kinesis Analytics application. You can configure each application with one streaming source as input, application code to process the input, and up to five streaming destinations where you want Amazon Kinesis Analytics to write the output data from your application. For an overview, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works.html\">How it Works</a>. </p> <p>In the input configuration, you map the streaming source to an in-application stream, which you can think of as a constantly updating table. In the mapping, you must provide a schema for the in-application stream and map each data column in the in-application stream to a data element in the streaming source.</p> <p>Your application code is one or more SQL statements that read input data, transform it, and generate output. Your application code can create one or more SQL artifacts like SQL streams or pumps.</p> <p>In the output configuration, you can configure the application to write data from in-application streams created in your applications to up to five streaming destinations.</p> <p> To read data from your source stream or write data to destination streams, Amazon Kinesis Analytics needs your permissions. You grant these permissions by creating IAM roles. This operation requires permissions to perform the <code>kinesisanalytics:CreateApplication</code> action. </p> <p> For introductory exercises to create an Amazon Kinesis Analytics application, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/getting-started.html\">Getting Started</a>. </p>",
|
"CreateApplication": "<p> Creates an Amazon Kinesis Analytics application. You can configure each application with one streaming source as input, application code to process the input, and up to five streaming destinations where you want Amazon Kinesis Analytics to write the output data from your application. For an overview, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works.html\">How it Works</a>. </p> <p>In the input configuration, you map the streaming source to an in-application stream, which you can think of as a constantly updating table. In the mapping, you must provide a schema for the in-application stream and map each data column in the in-application stream to a data element in the streaming source.</p> <p>Your application code is one or more SQL statements that read input data, transform it, and generate output. Your application code can create one or more SQL artifacts like SQL streams or pumps.</p> <p>In the output configuration, you can configure the application to write data from in-application streams created in your applications to up to five streaming destinations.</p> <p> To read data from your source stream or write data to destination streams, Amazon Kinesis Analytics needs your permissions. You grant these permissions by creating IAM roles. This operation requires permissions to perform the <code>kinesisanalytics:CreateApplication</code> action. </p> <p> For introductory exercises to create an Amazon Kinesis Analytics application, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/getting-started.html\">Getting Started</a>. </p>",
|
||||||
"DeleteApplication": "<p>Deletes the specified application. Amazon Kinesis Analytics halts application execution and deletes the application, including any application artifacts (such as in-application streams, reference table, and application code).</p> <p>This operation requires permissions to perform the <code>kinesisanalytics:DeleteApplication</code> action.</p>",
|
"DeleteApplication": "<p>Deletes the specified application. Amazon Kinesis Analytics halts application execution and deletes the application, including any application artifacts (such as in-application streams, reference table, and application code).</p> <p>This operation requires permissions to perform the <code>kinesisanalytics:DeleteApplication</code> action.</p>",
|
||||||
"DeleteApplicationCloudWatchLoggingOption": "<p>Deletes a CloudWatch log stream from an application. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html\">Working with Amazon CloudWatch Logs</a>.</p>",
|
"DeleteApplicationCloudWatchLoggingOption": "<p>Deletes a CloudWatch log stream from an application. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html\">Working with Amazon CloudWatch Logs</a>.</p>",
|
||||||
|
"DeleteApplicationInputProcessingConfiguration": "<p>Deletes an <a>InputProcessingConfiguration</a> from an input.</p>",
|
||||||
"DeleteApplicationOutput": "<p>Deletes output destination configuration from your application configuration. Amazon Kinesis Analytics will no longer write data from the corresponding in-application stream to the external output destination.</p> <p>This operation requires permissions to perform the <code>kinesisanalytics:DeleteApplicationOutput</code> action.</p>",
|
"DeleteApplicationOutput": "<p>Deletes output destination configuration from your application configuration. Amazon Kinesis Analytics will no longer write data from the corresponding in-application stream to the external output destination.</p> <p>This operation requires permissions to perform the <code>kinesisanalytics:DeleteApplicationOutput</code> action.</p>",
|
||||||
"DeleteApplicationReferenceDataSource": "<p>Deletes a reference data source configuration from the specified application configuration.</p> <p>If the application is running, Amazon Kinesis Analytics immediately removes the in-application table that you created using the <a>AddApplicationReferenceDataSource</a> operation. </p> <p>This operation requires permissions to perform the <code>kinesisanalytics.DeleteApplicationReferenceDataSource</code> action.</p>",
|
"DeleteApplicationReferenceDataSource": "<p>Deletes a reference data source configuration from the specified application configuration.</p> <p>If the application is running, Amazon Kinesis Analytics immediately removes the in-application table that you created using the <a>AddApplicationReferenceDataSource</a> operation. </p> <p>This operation requires permissions to perform the <code>kinesisanalytics.DeleteApplicationReferenceDataSource</code> action.</p>",
|
||||||
"DescribeApplication": "<p>Returns information about a specific Amazon Kinesis Analytics application.</p> <p>If you want to retrieve a list of all applications in your account, use the <a>ListApplications</a> operation.</p> <p>This operation requires permissions to perform the <code>kinesisanalytics:DescribeApplication</code> action. You can use <code>DescribeApplication</code> to get the current application versionId, which you need to call other operations such as <code>Update</code>. </p>",
|
"DescribeApplication": "<p>Returns information about a specific Amazon Kinesis Analytics application.</p> <p>If you want to retrieve a list of all applications in your account, use the <a>ListApplications</a> operation.</p> <p>This operation requires permissions to perform the <code>kinesisanalytics:DescribeApplication</code> action. You can use <code>DescribeApplication</code> to get the current application versionId, which you need to call other operations such as <code>Update</code>. </p>",
|
||||||
@ -29,6 +31,16 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"AddApplicationInputProcessingConfigurationRequest": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"AddApplicationInputProcessingConfigurationResponse": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
"AddApplicationInputRequest": {
|
"AddApplicationInputRequest": {
|
||||||
"base": "<p/>",
|
"base": "<p/>",
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -84,6 +96,7 @@
|
|||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"AddApplicationCloudWatchLoggingOptionRequest$ApplicationName": "<p>The Kinesis Analytics application name.</p>",
|
"AddApplicationCloudWatchLoggingOptionRequest$ApplicationName": "<p>The Kinesis Analytics application name.</p>",
|
||||||
|
"AddApplicationInputProcessingConfigurationRequest$ApplicationName": "<p>Name of the application to which you want to add the input processing configuration.</p>",
|
||||||
"AddApplicationInputRequest$ApplicationName": "<p>Name of your existing Amazon Kinesis Analytics application to which you want to add the streaming source.</p>",
|
"AddApplicationInputRequest$ApplicationName": "<p>Name of your existing Amazon Kinesis Analytics application to which you want to add the streaming source.</p>",
|
||||||
"AddApplicationOutputRequest$ApplicationName": "<p>Name of the application to which you want to add the output configuration.</p>",
|
"AddApplicationOutputRequest$ApplicationName": "<p>Name of the application to which you want to add the output configuration.</p>",
|
||||||
"AddApplicationReferenceDataSourceRequest$ApplicationName": "<p>Name of an existing application.</p>",
|
"AddApplicationReferenceDataSourceRequest$ApplicationName": "<p>Name of an existing application.</p>",
|
||||||
@ -91,6 +104,7 @@
|
|||||||
"ApplicationSummary$ApplicationName": "<p>Name of the application.</p>",
|
"ApplicationSummary$ApplicationName": "<p>Name of the application.</p>",
|
||||||
"CreateApplicationRequest$ApplicationName": "<p>Name of your Amazon Kinesis Analytics application (for example, <code>sample-app</code>).</p>",
|
"CreateApplicationRequest$ApplicationName": "<p>Name of your Amazon Kinesis Analytics application (for example, <code>sample-app</code>).</p>",
|
||||||
"DeleteApplicationCloudWatchLoggingOptionRequest$ApplicationName": "<p>The Kinesis Analytics application name.</p>",
|
"DeleteApplicationCloudWatchLoggingOptionRequest$ApplicationName": "<p>The Kinesis Analytics application name.</p>",
|
||||||
|
"DeleteApplicationInputProcessingConfigurationRequest$ApplicationName": "<p>The Kinesis Analytics application name.</p>",
|
||||||
"DeleteApplicationOutputRequest$ApplicationName": "<p>Amazon Kinesis Analytics application name.</p>",
|
"DeleteApplicationOutputRequest$ApplicationName": "<p>Amazon Kinesis Analytics application name.</p>",
|
||||||
"DeleteApplicationReferenceDataSourceRequest$ApplicationName": "<p>Name of an existing application.</p>",
|
"DeleteApplicationReferenceDataSourceRequest$ApplicationName": "<p>Name of an existing application.</p>",
|
||||||
"DeleteApplicationRequest$ApplicationName": "<p>Name of the Amazon Kinesis Analytics application to delete.</p>",
|
"DeleteApplicationRequest$ApplicationName": "<p>Name of the Amazon Kinesis Analytics application to delete.</p>",
|
||||||
@ -131,11 +145,13 @@
|
|||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"AddApplicationCloudWatchLoggingOptionRequest$CurrentApplicationVersionId": "<p>The version ID of the Kinesis Analytics application.</p>",
|
"AddApplicationCloudWatchLoggingOptionRequest$CurrentApplicationVersionId": "<p>The version ID of the Kinesis Analytics application.</p>",
|
||||||
|
"AddApplicationInputProcessingConfigurationRequest$CurrentApplicationVersionId": "<p>Version of the application to which you want to add the input processing configuration. You can use the <a>DescribeApplication</a> operation to get the current application version. If the version specified is not the current version, the <code>ConcurrentModificationException</code> is returned.</p>",
|
||||||
"AddApplicationInputRequest$CurrentApplicationVersionId": "<p>Current version of your Amazon Kinesis Analytics application. You can use the <a>DescribeApplication</a> operation to find the current application version.</p>",
|
"AddApplicationInputRequest$CurrentApplicationVersionId": "<p>Current version of your Amazon Kinesis Analytics application. You can use the <a>DescribeApplication</a> operation to find the current application version.</p>",
|
||||||
"AddApplicationOutputRequest$CurrentApplicationVersionId": "<p>Version of the application to which you want add the output configuration. You can use the <a>DescribeApplication</a> operation to get the current application version. If the version specified is not the current version, the <code>ConcurrentModificationException</code> is returned. </p>",
|
"AddApplicationOutputRequest$CurrentApplicationVersionId": "<p>Version of the application to which you want add the output configuration. You can use the <a>DescribeApplication</a> operation to get the current application version. If the version specified is not the current version, the <code>ConcurrentModificationException</code> is returned. </p>",
|
||||||
"AddApplicationReferenceDataSourceRequest$CurrentApplicationVersionId": "<p>Version of the application for which you are adding the reference data source. You can use the <a>DescribeApplication</a> operation to get the current application version. If the version specified is not the current version, the <code>ConcurrentModificationException</code> is returned.</p>",
|
"AddApplicationReferenceDataSourceRequest$CurrentApplicationVersionId": "<p>Version of the application for which you are adding the reference data source. You can use the <a>DescribeApplication</a> operation to get the current application version. If the version specified is not the current version, the <code>ConcurrentModificationException</code> is returned.</p>",
|
||||||
"ApplicationDetail$ApplicationVersionId": "<p>Provides the current application version.</p>",
|
"ApplicationDetail$ApplicationVersionId": "<p>Provides the current application version.</p>",
|
||||||
"DeleteApplicationCloudWatchLoggingOptionRequest$CurrentApplicationVersionId": "<p>The version ID of the Kinesis Analytics application.</p>",
|
"DeleteApplicationCloudWatchLoggingOptionRequest$CurrentApplicationVersionId": "<p>The version ID of the Kinesis Analytics application.</p>",
|
||||||
|
"DeleteApplicationInputProcessingConfigurationRequest$CurrentApplicationVersionId": "<p>The version ID of the Kinesis Analytics application.</p>",
|
||||||
"DeleteApplicationOutputRequest$CurrentApplicationVersionId": "<p>Amazon Kinesis Analytics application version. You can use the <a>DescribeApplication</a> operation to get the current application version. If the version specified is not the current version, the <code>ConcurrentModificationException</code> is returned. </p>",
|
"DeleteApplicationOutputRequest$CurrentApplicationVersionId": "<p>Amazon Kinesis Analytics application version. You can use the <a>DescribeApplication</a> operation to get the current application version. If the version specified is not the current version, the <code>ConcurrentModificationException</code> is returned. </p>",
|
||||||
"DeleteApplicationReferenceDataSourceRequest$CurrentApplicationVersionId": "<p>Version of the application. You can use the <a>DescribeApplication</a> operation to get the current application version. If the version specified is not the current version, the <code>ConcurrentModificationException</code> is returned.</p>",
|
"DeleteApplicationReferenceDataSourceRequest$CurrentApplicationVersionId": "<p>Version of the application. You can use the <a>DescribeApplication</a> operation to get the current application version. If the version specified is not the current version, the <code>ConcurrentModificationException</code> is returned.</p>",
|
||||||
"UpdateApplicationRequest$CurrentApplicationVersionId": "<p>The current application version ID. You can use the <a>DescribeApplication</a> operation to get this value.</p>"
|
"UpdateApplicationRequest$CurrentApplicationVersionId": "<p>The current application version ID. You can use the <a>DescribeApplication</a> operation to get this value.</p>"
|
||||||
@ -150,6 +166,7 @@
|
|||||||
"BucketARN": {
|
"BucketARN": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
|
"S3Configuration$BucketARN": null,
|
||||||
"S3ReferenceDataSource$BucketARN": "<p>Amazon Resource Name (ARN) of the S3 bucket.</p>",
|
"S3ReferenceDataSource$BucketARN": "<p>Amazon Resource Name (ARN) of the S3 bucket.</p>",
|
||||||
"S3ReferenceDataSourceDescription$BucketARN": "<p>Amazon Resource Name (ARN) of the S3 bucket.</p>",
|
"S3ReferenceDataSourceDescription$BucketARN": "<p>Amazon Resource Name (ARN) of the S3 bucket.</p>",
|
||||||
"S3ReferenceDataSourceUpdate$BucketARNUpdate": "<p>Amazon Resource Name (ARN) of the S3 bucket.</p>"
|
"S3ReferenceDataSourceUpdate$BucketARNUpdate": "<p>Amazon Resource Name (ARN) of the S3 bucket.</p>"
|
||||||
@ -228,6 +245,16 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DeleteApplicationInputProcessingConfigurationRequest": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DeleteApplicationInputProcessingConfigurationResponse": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
"DeleteApplicationOutputRequest": {
|
"DeleteApplicationOutputRequest": {
|
||||||
"base": "<p/>",
|
"base": "<p/>",
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -277,7 +304,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DiscoverInputSchemaRequest": {
|
"DiscoverInputSchemaRequest": {
|
||||||
"base": "<p/>",
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -304,6 +331,7 @@
|
|||||||
"FileKey": {
|
"FileKey": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
|
"S3Configuration$FileKey": null,
|
||||||
"S3ReferenceDataSource$FileKey": "<p>Object key name containing reference data.</p>",
|
"S3ReferenceDataSource$FileKey": "<p>Object key name containing reference data.</p>",
|
||||||
"S3ReferenceDataSourceDescription$FileKey": "<p>Amazon S3 object key name.</p>",
|
"S3ReferenceDataSourceDescription$FileKey": "<p>Amazon S3 object key name.</p>",
|
||||||
"S3ReferenceDataSourceUpdate$FileKeyUpdate": "<p>Object key name.</p>"
|
"S3ReferenceDataSourceUpdate$FileKeyUpdate": "<p>Object key name.</p>"
|
||||||
@ -312,9 +340,11 @@
|
|||||||
"Id": {
|
"Id": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
|
"AddApplicationInputProcessingConfigurationRequest$InputId": "<p>The ID of the input configuration to which to add the input configuration. You can get a list of the input IDs for an application using the <a>DescribeApplication</a> operation.</p>",
|
||||||
"CloudWatchLoggingOptionDescription$CloudWatchLoggingOptionId": "<p>ID of the CloudWatch logging option description.</p>",
|
"CloudWatchLoggingOptionDescription$CloudWatchLoggingOptionId": "<p>ID of the CloudWatch logging option description.</p>",
|
||||||
"CloudWatchLoggingOptionUpdate$CloudWatchLoggingOptionId": "<p>ID of the CloudWatch logging option to update</p>",
|
"CloudWatchLoggingOptionUpdate$CloudWatchLoggingOptionId": "<p>ID of the CloudWatch logging option to update</p>",
|
||||||
"DeleteApplicationCloudWatchLoggingOptionRequest$CloudWatchLoggingOptionId": "<p>The <code>CloudWatchLoggingOptionId</code> of the CloudWatch logging option to delete. You can use the <a>DescribeApplication</a> operation to get the <code>CloudWatchLoggingOptionId</code>. </p>",
|
"DeleteApplicationCloudWatchLoggingOptionRequest$CloudWatchLoggingOptionId": "<p>The <code>CloudWatchLoggingOptionId</code> of the CloudWatch logging option to delete. You can use the <a>DescribeApplication</a> operation to get the <code>CloudWatchLoggingOptionId</code>. </p>",
|
||||||
|
"DeleteApplicationInputProcessingConfigurationRequest$InputId": "<p>The ID of the input configuration from which to delete the input configuration. You can get a list of the input IDs for an application using the <a>DescribeApplication</a> operation.</p>",
|
||||||
"DeleteApplicationOutputRequest$OutputId": "<p>The ID of the configuration to delete. Each output configuration that is added to the application, either when the application is created or later using the <a>AddApplicationOutput</a> operation, has a unique ID. You need to provide the ID to uniquely identify the output configuration that you want to delete from the application configuration. You can use the <a>DescribeApplication</a> operation to get the specific <code>OutputId</code>. </p>",
|
"DeleteApplicationOutputRequest$OutputId": "<p>The ID of the configuration to delete. Each output configuration that is added to the application, either when the application is created or later using the <a>AddApplicationOutput</a> operation, has a unique ID. You need to provide the ID to uniquely identify the output configuration that you want to delete from the application configuration. You can use the <a>DescribeApplication</a> operation to get the specific <code>OutputId</code>. </p>",
|
||||||
"DeleteApplicationReferenceDataSourceRequest$ReferenceId": "<p>ID of the reference data source. When you add a reference data source to your application using the <a>AddApplicationReferenceDataSource</a>, Amazon Kinesis Analytics assigns an ID. You can use the <a>DescribeApplication</a> operation to get the reference ID. </p>",
|
"DeleteApplicationReferenceDataSourceRequest$ReferenceId": "<p>ID of the reference data source. When you add a reference data source to your application using the <a>AddApplicationReferenceDataSource</a>, Amazon Kinesis Analytics assigns an ID. You can use the <a>DescribeApplication</a> operation to get the reference ID. </p>",
|
||||||
"InputConfiguration$Id": "<p>Input source ID. You can get this ID by calling the <a>DescribeApplication</a> operation.</p>",
|
"InputConfiguration$Id": "<p>Input source ID. You can get this ID by calling the <a>DescribeApplication</a> operation.</p>",
|
||||||
@ -355,7 +385,7 @@
|
|||||||
"Input": {
|
"Input": {
|
||||||
"base": "<p>When you configure the application input, you specify the streaming source, the in-application stream name that is created, and the mapping between the two. For more information, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html\">Configuring Application Input</a>. </p>",
|
"base": "<p>When you configure the application input, you specify the streaming source, the in-application stream name that is created, and the mapping between the two. For more information, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html\">Configuring Application Input</a>. </p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"AddApplicationInputRequest$Input": "<p/>",
|
"AddApplicationInputRequest$Input": "<p>The <a>Input</a> to add.</p>",
|
||||||
"Inputs$member": null
|
"Inputs$member": null
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -383,6 +413,24 @@
|
|||||||
"ApplicationDetail$InputDescriptions": "<p>Describes the application input configuration. For more information, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html\">Configuring Application Input</a>. </p>"
|
"ApplicationDetail$InputDescriptions": "<p>Describes the application input configuration. For more information, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html\">Configuring Application Input</a>. </p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"InputLambdaProcessor": {
|
||||||
|
"base": "<p>An object that contains the ARN of the <a href=\"https://aws.amazon.com/documentation/lambda/\">AWS Lambda</a> function that is used to preprocess records in the stream, and the ARN of the IAM role used to access the AWS Lambda function. </p>",
|
||||||
|
"refs": {
|
||||||
|
"InputProcessingConfiguration$InputLambdaProcessor": "<p>The <a>InputLambdaProcessor</a> that is used to preprocess the records in the stream prior to being processed by your application code.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"InputLambdaProcessorDescription": {
|
||||||
|
"base": "<p>An object that contains the ARN of the <a href=\"https://aws.amazon.com/documentation/lambda/\">AWS Lambda</a> function that is used to preprocess records in the stream, and the ARN of the IAM role used to access the AWS Lambda expression.</p>",
|
||||||
|
"refs": {
|
||||||
|
"InputProcessingConfigurationDescription$InputLambdaProcessorDescription": "<p>Provides configuration information about the associated <a>InputLambdaProcessorDescription</a>.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"InputLambdaProcessorUpdate": {
|
||||||
|
"base": "<p>Represents an update to the <a>InputLambdaProcessor</a> that is used to preprocess the records in the stream.</p>",
|
||||||
|
"refs": {
|
||||||
|
"InputProcessingConfigurationUpdate$InputLambdaProcessorUpdate": "<p>Provides update information for an <a>InputLambdaProcessor</a>.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
"InputParallelism": {
|
"InputParallelism": {
|
||||||
"base": "<p>Describes the number of in-application streams to create for a given streaming source. For information about parallelism, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html\">Configuring Application Input</a>. </p>",
|
"base": "<p>Describes the number of in-application streams to create for a given streaming source. For information about parallelism, see <a href=\"http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html\">Configuring Application Input</a>. </p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -403,8 +451,28 @@
|
|||||||
"InputUpdate$InputParallelismUpdate": "<p>Describes the parallelism updates (the number in-application streams Amazon Kinesis Analytics creates for the specific streaming source).</p>"
|
"InputUpdate$InputParallelismUpdate": "<p>Describes the parallelism updates (the number in-application streams Amazon Kinesis Analytics creates for the specific streaming source).</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"InputProcessingConfiguration": {
|
||||||
|
"base": "<p>Provides a description of a processor that is used to preprocess the records in the stream prior to being processed by your application code. Currently, the only input processor available is <a href=\"https://aws.amazon.com/documentation/lambda/\">AWS Lambda</a>.</p>",
|
||||||
|
"refs": {
|
||||||
|
"AddApplicationInputProcessingConfigurationRequest$InputProcessingConfiguration": "<p>The <a>InputProcessingConfiguration</a> to add to the application.</p>",
|
||||||
|
"DiscoverInputSchemaRequest$InputProcessingConfiguration": "<p>The <a>InputProcessingConfiguration</a> to use to preprocess the records before discovering the schema of the records.</p>",
|
||||||
|
"Input$InputProcessingConfiguration": "<p>The <a>InputProcessingConfiguration</a> for the Input. An input processor transforms records as they are received from the stream, before the application's SQL code executes. Currently, the only input processing configuration available is <a>InputLambdaProcessor</a>.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"InputProcessingConfigurationDescription": {
|
||||||
|
"base": "<p>Provides configuration information about an input processor. Currently, the only input processor available is <a href=\"https://aws.amazon.com/documentation/lambda/\">AWS Lambda</a>.</p>",
|
||||||
|
"refs": {
|
||||||
|
"InputDescription$InputProcessingConfigurationDescription": "<p>The description of the preprocessor that executes on records in this input before the application's code is run.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"InputProcessingConfigurationUpdate": {
|
||||||
|
"base": "<p>Describes updates to an <a>InputProcessingConfiguration</a>. </p>",
|
||||||
|
"refs": {
|
||||||
|
"InputUpdate$InputProcessingConfigurationUpdate": "<p>Describes updates for an input processing configuration.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
"InputSchemaUpdate": {
|
"InputSchemaUpdate": {
|
||||||
"base": "<p> Describes updates for the application's input schema. </p>",
|
"base": "<p>Describes updates for the application's input schema.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"InputUpdate$InputSchemaUpdate": "<p>Describes the data format on the streaming source, and how record elements on the streaming source map to columns of the in-application stream that is created.</p>"
|
"InputUpdate$InputSchemaUpdate": "<p>Describes the data format on the streaming source, and how record elements on the streaming source map to columns of the in-application stream that is created.</p>"
|
||||||
}
|
}
|
||||||
@ -619,6 +687,19 @@
|
|||||||
"DiscoverInputSchemaResponse$ParsedInputRecords": "<p>An array of elements, where each element corresponds to a row in a stream record (a stream record can have more than one row).</p>"
|
"DiscoverInputSchemaResponse$ParsedInputRecords": "<p>An array of elements, where each element corresponds to a row in a stream record (a stream record can have more than one row).</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"ProcessedInputRecord": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"ProcessedInputRecords$member": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ProcessedInputRecords": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"DiscoverInputSchemaResponse$ProcessedInputRecords": "<p>Stream data that was modified by the processor specified in the <code>InputProcessingConfiguration</code> parameter.</p>",
|
||||||
|
"UnableToDetectSchemaException$ProcessedInputRecords": null
|
||||||
|
}
|
||||||
|
},
|
||||||
"RawInputRecord": {
|
"RawInputRecord": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -738,6 +819,9 @@
|
|||||||
"ApplicationDetail$ApplicationARN": "<p>ARN of the application.</p>",
|
"ApplicationDetail$ApplicationARN": "<p>ARN of the application.</p>",
|
||||||
"ApplicationSummary$ApplicationARN": "<p>ARN of the application.</p>",
|
"ApplicationSummary$ApplicationARN": "<p>ARN of the application.</p>",
|
||||||
"DiscoverInputSchemaRequest$ResourceARN": "<p>Amazon Resource Name (ARN) of the streaming source.</p>",
|
"DiscoverInputSchemaRequest$ResourceARN": "<p>Amazon Resource Name (ARN) of the streaming source.</p>",
|
||||||
|
"InputLambdaProcessor$ResourceARN": "<p>The ARN of the <a href=\"https://aws.amazon.com/documentation/lambda/\">AWS Lambda</a> function that operates on records in the stream.</p>",
|
||||||
|
"InputLambdaProcessorDescription$ResourceARN": "<p>The ARN of the <a href=\"https://aws.amazon.com/documentation/lambda/\">AWS Lambda</a> function that is used to preprocess the records in the stream.</p>",
|
||||||
|
"InputLambdaProcessorUpdate$ResourceARNUpdate": "<p>The ARN of the new <a href=\"https://aws.amazon.com/documentation/lambda/\">AWS Lambda</a> function that is used to preprocess the records in the stream.</p>",
|
||||||
"KinesisFirehoseInput$ResourceARN": "<p>ARN of the input Firehose delivery stream.</p>",
|
"KinesisFirehoseInput$ResourceARN": "<p>ARN of the input Firehose delivery stream.</p>",
|
||||||
"KinesisFirehoseInputDescription$ResourceARN": "<p>Amazon Resource Name (ARN) of the Amazon Kinesis Firehose delivery stream.</p>",
|
"KinesisFirehoseInputDescription$ResourceARN": "<p>Amazon Resource Name (ARN) of the Amazon Kinesis Firehose delivery stream.</p>",
|
||||||
"KinesisFirehoseInputUpdate$ResourceARNUpdate": "<p>ARN of the input Amazon Kinesis Firehose delivery stream to read.</p>",
|
"KinesisFirehoseInputUpdate$ResourceARNUpdate": "<p>ARN of the input Amazon Kinesis Firehose delivery stream to read.</p>",
|
||||||
@ -774,6 +858,9 @@
|
|||||||
"CloudWatchLoggingOptionDescription$RoleARN": "<p>IAM ARN of the role to use to send application messages. Note: To write application messages to CloudWatch, the IAM role used must have the <code>PutLogEvents</code> policy action enabled.</p>",
|
"CloudWatchLoggingOptionDescription$RoleARN": "<p>IAM ARN of the role to use to send application messages. Note: To write application messages to CloudWatch, the IAM role used must have the <code>PutLogEvents</code> policy action enabled.</p>",
|
||||||
"CloudWatchLoggingOptionUpdate$RoleARNUpdate": "<p>IAM ARN of the role to use to send application messages. Note: To write application messages to CloudWatch, the IAM role used must have the <code>PutLogEvents</code> policy action enabled.</p>",
|
"CloudWatchLoggingOptionUpdate$RoleARNUpdate": "<p>IAM ARN of the role to use to send application messages. Note: To write application messages to CloudWatch, the IAM role used must have the <code>PutLogEvents</code> policy action enabled.</p>",
|
||||||
"DiscoverInputSchemaRequest$RoleARN": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf.</p>",
|
"DiscoverInputSchemaRequest$RoleARN": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf.</p>",
|
||||||
|
"InputLambdaProcessor$RoleARN": "<p>The ARN of the IAM role used to access the AWS Lambda function.</p>",
|
||||||
|
"InputLambdaProcessorDescription$RoleARN": "<p>The ARN of the IAM role used to access the AWS Lambda function.</p>",
|
||||||
|
"InputLambdaProcessorUpdate$RoleARNUpdate": "<p>The ARN of the new IAM role used to access the AWS Lambda function.</p>",
|
||||||
"KinesisFirehoseInput$RoleARN": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to make sure the role has necessary permissions to access the stream.</p>",
|
"KinesisFirehoseInput$RoleARN": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to make sure the role has necessary permissions to access the stream.</p>",
|
||||||
"KinesisFirehoseInputDescription$RoleARN": "<p>ARN of the IAM role that Amazon Kinesis Analytics assumes to access the stream.</p>",
|
"KinesisFirehoseInputDescription$RoleARN": "<p>ARN of the IAM role that Amazon Kinesis Analytics assumes to access the stream.</p>",
|
||||||
"KinesisFirehoseInputUpdate$RoleARNUpdate": "<p>Amazon Resource Name (ARN) of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to grant necessary permissions to this role.</p>",
|
"KinesisFirehoseInputUpdate$RoleARNUpdate": "<p>Amazon Resource Name (ARN) of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to grant necessary permissions to this role.</p>",
|
||||||
@ -786,11 +873,18 @@
|
|||||||
"KinesisStreamsOutput$RoleARN": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to write to the destination stream on your behalf. You need to grant the necessary permissions to this role.</p>",
|
"KinesisStreamsOutput$RoleARN": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to write to the destination stream on your behalf. You need to grant the necessary permissions to this role.</p>",
|
||||||
"KinesisStreamsOutputDescription$RoleARN": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream.</p>",
|
"KinesisStreamsOutputDescription$RoleARN": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream.</p>",
|
||||||
"KinesisStreamsOutputUpdate$RoleARNUpdate": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to grant the necessary permissions to this role.</p>",
|
"KinesisStreamsOutputUpdate$RoleARNUpdate": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to grant the necessary permissions to this role.</p>",
|
||||||
|
"S3Configuration$RoleARN": null,
|
||||||
"S3ReferenceDataSource$ReferenceRoleARN": "<p>ARN of the IAM role that the service can assume to read data on your behalf. This role must have permission for the <code>s3:GetObject</code> action on the object and trust policy that allows Amazon Kinesis Analytics service principal to assume this role.</p>",
|
"S3ReferenceDataSource$ReferenceRoleARN": "<p>ARN of the IAM role that the service can assume to read data on your behalf. This role must have permission for the <code>s3:GetObject</code> action on the object and trust policy that allows Amazon Kinesis Analytics service principal to assume this role.</p>",
|
||||||
"S3ReferenceDataSourceDescription$ReferenceRoleARN": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to read the Amazon S3 object on your behalf to populate the in-application reference table.</p>",
|
"S3ReferenceDataSourceDescription$ReferenceRoleARN": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to read the Amazon S3 object on your behalf to populate the in-application reference table.</p>",
|
||||||
"S3ReferenceDataSourceUpdate$ReferenceRoleARNUpdate": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to read the Amazon S3 object and populate the in-application.</p>"
|
"S3ReferenceDataSourceUpdate$ReferenceRoleARNUpdate": "<p>ARN of the IAM role that Amazon Kinesis Analytics can assume to read the Amazon S3 object and populate the in-application.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"S3Configuration": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"DiscoverInputSchemaRequest$S3Configuration": null
|
||||||
|
}
|
||||||
|
},
|
||||||
"S3ReferenceDataSource": {
|
"S3ReferenceDataSource": {
|
||||||
"base": "<p>Identifies the S3 bucket and object that contains the reference data. Also identifies the IAM role Amazon Kinesis Analytics can assume to read this object on your behalf.</p> <p>An Amazon Kinesis Analytics application loads reference data only once. If the data changes, you call the <a>UpdateApplication</a> operation to trigger reloading of data into your application.</p>",
|
"base": "<p>Identifies the S3 bucket and object that contains the reference data. Also identifies the IAM role Amazon Kinesis Analytics can assume to read this object on your behalf.</p> <p>An Amazon Kinesis Analytics application loads reference data only once. If the data changes, you call the <a>UpdateApplication</a> operation to trigger reloading of data into your application.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -819,7 +913,7 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
"DiscoverInputSchemaResponse$InputSchema": "<p>Schema inferred from the streaming source. It identifies the format of the data in the streaming source and how each data element maps to corresponding columns in the in-application stream that you can create.</p>",
|
"DiscoverInputSchemaResponse$InputSchema": "<p>Schema inferred from the streaming source. It identifies the format of the data in the streaming source and how each data element maps to corresponding columns in the in-application stream that you can create.</p>",
|
||||||
"Input$InputSchema": "<p>Describes the format of the data in the streaming source, and how each data element maps to corresponding columns in the in-application stream that is being created.</p> <p>Also used to describe the format of the reference data source.</p>",
|
"Input$InputSchema": "<p>Describes the format of the data in the streaming source, and how each data element maps to corresponding columns in the in-application stream that is being created.</p> <p>Also used to describe the format of the reference data source.</p>",
|
||||||
"InputDescription$InputSchema": null,
|
"InputDescription$InputSchema": "<p>Describes the format of the data in the streaming source, and how each data element maps to corresponding columns in the in-application stream that is being created. </p>",
|
||||||
"ReferenceDataSource$ReferenceSchema": null,
|
"ReferenceDataSource$ReferenceSchema": null,
|
||||||
"ReferenceDataSourceDescription$ReferenceSchema": null,
|
"ReferenceDataSourceDescription$ReferenceSchema": null,
|
||||||
"ReferenceDataSourceUpdate$ReferenceSchemaUpdate": null
|
"ReferenceDataSourceUpdate$ReferenceSchemaUpdate": null
|
||||||
|
54
vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/api-2.json
generated
vendored
54
vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/api-2.json
generated
vendored
@ -11,6 +11,20 @@
|
|||||||
"uid":"logs-2014-03-28"
|
"uid":"logs-2014-03-28"
|
||||||
},
|
},
|
||||||
"operations":{
|
"operations":{
|
||||||
|
"AssociateKmsKey":{
|
||||||
|
"name":"AssociateKmsKey",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"AssociateKmsKeyRequest"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"InvalidParameterException"},
|
||||||
|
{"shape":"ResourceNotFoundException"},
|
||||||
|
{"shape":"OperationAbortedException"},
|
||||||
|
{"shape":"ServiceUnavailableException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
"CancelExportTask":{
|
"CancelExportTask":{
|
||||||
"name":"CancelExportTask",
|
"name":"CancelExportTask",
|
||||||
"http":{
|
"http":{
|
||||||
@ -262,6 +276,20 @@
|
|||||||
{"shape":"ServiceUnavailableException"}
|
{"shape":"ServiceUnavailableException"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"DisassociateKmsKey":{
|
||||||
|
"name":"DisassociateKmsKey",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"DisassociateKmsKeyRequest"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"InvalidParameterException"},
|
||||||
|
{"shape":"ResourceNotFoundException"},
|
||||||
|
{"shape":"OperationAbortedException"},
|
||||||
|
{"shape":"ServiceUnavailableException"}
|
||||||
|
]
|
||||||
|
},
|
||||||
"FilterLogEvents":{
|
"FilterLogEvents":{
|
||||||
"name":"FilterLogEvents",
|
"name":"FilterLogEvents",
|
||||||
"http":{
|
"http":{
|
||||||
@ -447,6 +475,17 @@
|
|||||||
"min":1
|
"min":1
|
||||||
},
|
},
|
||||||
"Arn":{"type":"string"},
|
"Arn":{"type":"string"},
|
||||||
|
"AssociateKmsKeyRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":[
|
||||||
|
"logGroupName",
|
||||||
|
"kmsKeyId"
|
||||||
|
],
|
||||||
|
"members":{
|
||||||
|
"logGroupName":{"shape":"LogGroupName"},
|
||||||
|
"kmsKeyId":{"shape":"KmsKeyId"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"CancelExportTaskRequest":{
|
"CancelExportTaskRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":["taskId"],
|
"required":["taskId"],
|
||||||
@ -483,6 +522,7 @@
|
|||||||
"required":["logGroupName"],
|
"required":["logGroupName"],
|
||||||
"members":{
|
"members":{
|
||||||
"logGroupName":{"shape":"LogGroupName"},
|
"logGroupName":{"shape":"LogGroupName"},
|
||||||
|
"kmsKeyId":{"shape":"KmsKeyId"},
|
||||||
"tags":{"shape":"Tags"}
|
"tags":{"shape":"Tags"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -711,6 +751,13 @@
|
|||||||
"type":"list",
|
"type":"list",
|
||||||
"member":{"shape":"Destination"}
|
"member":{"shape":"Destination"}
|
||||||
},
|
},
|
||||||
|
"DisassociateKmsKeyRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["logGroupName"],
|
||||||
|
"members":{
|
||||||
|
"logGroupName":{"shape":"LogGroupName"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"Distribution":{
|
"Distribution":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"enum":[
|
"enum":[
|
||||||
@ -909,6 +956,10 @@
|
|||||||
},
|
},
|
||||||
"exception":true
|
"exception":true
|
||||||
},
|
},
|
||||||
|
"KmsKeyId":{
|
||||||
|
"type":"string",
|
||||||
|
"max":256
|
||||||
|
},
|
||||||
"LimitExceededException":{
|
"LimitExceededException":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -937,7 +988,8 @@
|
|||||||
"retentionInDays":{"shape":"Days"},
|
"retentionInDays":{"shape":"Days"},
|
||||||
"metricFilterCount":{"shape":"FilterCount"},
|
"metricFilterCount":{"shape":"FilterCount"},
|
||||||
"arn":{"shape":"Arn"},
|
"arn":{"shape":"Arn"},
|
||||||
"storedBytes":{"shape":"StoredBytes"}
|
"storedBytes":{"shape":"StoredBytes"},
|
||||||
|
"kmsKeyId":{"shape":"KmsKeyId"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"LogGroupName":{
|
"LogGroupName":{
|
||||||
|
40
vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/docs-2.json
generated
vendored
40
vendor/github.com/aws/aws-sdk-go/models/apis/logs/2014-03-28/docs-2.json
generated
vendored
@ -2,9 +2,10 @@
|
|||||||
"version": "2.0",
|
"version": "2.0",
|
||||||
"service": "<p>You can use Amazon CloudWatch Logs to monitor, store, and access your log files from Amazon EC2 instances, AWS CloudTrail, or other sources. You can then retrieve the associated log data from CloudWatch Logs using the CloudWatch console, CloudWatch Logs commands in the AWS CLI, CloudWatch Logs API, or CloudWatch Logs SDK.</p> <p>You can use CloudWatch Logs to:</p> <ul> <li> <p> <b>Monitor logs from EC2 instances in real-time</b>: You can use CloudWatch Logs to monitor applications and systems using log data. For example, CloudWatch Logs can track the number of errors that occur in your application logs and send you a notification whenever the rate of errors exceeds a threshold that you specify. CloudWatch Logs uses your log data for monitoring; so, no code changes are required. For example, you can monitor application logs for specific literal terms (such as \"NullReferenceException\") or count the number of occurrences of a literal term at a particular position in log data (such as \"404\" status codes in an Apache access log). When the term you are searching for is found, CloudWatch Logs reports the data to a CloudWatch metric that you specify.</p> </li> <li> <p> <b>Monitor AWS CloudTrail logged events</b>: You can create alarms in CloudWatch and receive notifications of particular API activity as captured by CloudTrail and use the notification to perform troubleshooting.</p> </li> <li> <p> <b>Archive log data</b>: You can use CloudWatch Logs to store your log data in highly durable storage. You can change the log retention setting so that any log events older than this setting are automatically deleted. The CloudWatch Logs agent makes it easy to quickly send both rotated and non-rotated log data off of a host and into the log service. You can then access the raw log data when you need it.</p> </li> </ul>",
|
"service": "<p>You can use Amazon CloudWatch Logs to monitor, store, and access your log files from Amazon EC2 instances, AWS CloudTrail, or other sources. You can then retrieve the associated log data from CloudWatch Logs using the CloudWatch console, CloudWatch Logs commands in the AWS CLI, CloudWatch Logs API, or CloudWatch Logs SDK.</p> <p>You can use CloudWatch Logs to:</p> <ul> <li> <p> <b>Monitor logs from EC2 instances in real-time</b>: You can use CloudWatch Logs to monitor applications and systems using log data. For example, CloudWatch Logs can track the number of errors that occur in your application logs and send you a notification whenever the rate of errors exceeds a threshold that you specify. CloudWatch Logs uses your log data for monitoring; so, no code changes are required. For example, you can monitor application logs for specific literal terms (such as \"NullReferenceException\") or count the number of occurrences of a literal term at a particular position in log data (such as \"404\" status codes in an Apache access log). When the term you are searching for is found, CloudWatch Logs reports the data to a CloudWatch metric that you specify.</p> </li> <li> <p> <b>Monitor AWS CloudTrail logged events</b>: You can create alarms in CloudWatch and receive notifications of particular API activity as captured by CloudTrail and use the notification to perform troubleshooting.</p> </li> <li> <p> <b>Archive log data</b>: You can use CloudWatch Logs to store your log data in highly durable storage. You can change the log retention setting so that any log events older than this setting are automatically deleted. The CloudWatch Logs agent makes it easy to quickly send both rotated and non-rotated log data off of a host and into the log service. You can then access the raw log data when you need it.</p> </li> </ul>",
|
||||||
"operations": {
|
"operations": {
|
||||||
|
"AssociateKmsKey": "<p>Associates the specified AWS Key Management Service (AWS KMS) customer master key (CMK) with the specified log group.</p> <p>Associating an AWS KMS CMK with a log group overrides any existing associations between the log group and a CMK. After a CMK is associated with a log group, all newly ingested data for the log group is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.</p> <p>Note that it can take up to 5 minutes for this operation to take effect.</p> <p>If you attempt to associate a CMK with a log group but the CMK does not exist or the CMK is disabled, you will receive an <code>InvalidParameterException</code> error. </p>",
|
||||||
"CancelExportTask": "<p>Cancels the specified export task.</p> <p>The task must be in the <code>PENDING</code> or <code>RUNNING</code> state.</p>",
|
"CancelExportTask": "<p>Cancels the specified export task.</p> <p>The task must be in the <code>PENDING</code> or <code>RUNNING</code> state.</p>",
|
||||||
"CreateExportTask": "<p>Creates an export task, which allows you to efficiently export data from a log group to an Amazon S3 bucket.</p> <p>This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use <a>DescribeExportTasks</a> to get the status of the export task. Each account can only have one active (<code>RUNNING</code> or <code>PENDING</code>) export task at a time. To cancel an export task, use <a>CancelExportTask</a>.</p> <p>You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate out log data for each export task, you can specify a prefix to be used as the Amazon S3 key prefix for all exported objects.</p>",
|
"CreateExportTask": "<p>Creates an export task, which allows you to efficiently export data from a log group to an Amazon S3 bucket.</p> <p>This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use <a>DescribeExportTasks</a> to get the status of the export task. Each account can only have one active (<code>RUNNING</code> or <code>PENDING</code>) export task at a time. To cancel an export task, use <a>CancelExportTask</a>.</p> <p>You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate out log data for each export task, you can specify a prefix to be used as the Amazon S3 key prefix for all exported objects.</p>",
|
||||||
"CreateLogGroup": "<p>Creates a log group with the specified name.</p> <p>You can create up to 5000 log groups per account.</p> <p>You must use the following guidelines when naming a log group:</p> <ul> <li> <p>Log group names must be unique within a region for an AWS account.</p> </li> <li> <p>Log group names can be between 1 and 512 characters long.</p> </li> <li> <p>Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period).</p> </li> </ul>",
|
"CreateLogGroup": "<p>Creates a log group with the specified name.</p> <p>You can create up to 5000 log groups per account.</p> <p>You must use the following guidelines when naming a log group:</p> <ul> <li> <p>Log group names must be unique within a region for an AWS account.</p> </li> <li> <p>Log group names can be between 1 and 512 characters long.</p> </li> <li> <p>Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period).</p> </li> </ul> <p>If you associate a AWS Key Management Service (AWS KMS) customer master key (CMK) with the log group, ingested data is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.</p> <p>If you attempt to associate a CMK with the log group but the CMK does not exist or the CMK is disabled, you will receive an <code>InvalidParameterException</code> error. </p>",
|
||||||
"CreateLogStream": "<p>Creates a log stream for the specified log group.</p> <p>There is no limit on the number of log streams that you can create for a log group.</p> <p>You must use the following guidelines when naming a log stream:</p> <ul> <li> <p>Log stream names must be unique within the log group.</p> </li> <li> <p>Log stream names can be between 1 and 512 characters long.</p> </li> <li> <p>The ':' (colon) and '*' (asterisk) characters are not allowed.</p> </li> </ul>",
|
"CreateLogStream": "<p>Creates a log stream for the specified log group.</p> <p>There is no limit on the number of log streams that you can create for a log group.</p> <p>You must use the following guidelines when naming a log stream:</p> <ul> <li> <p>Log stream names must be unique within the log group.</p> </li> <li> <p>Log stream names can be between 1 and 512 characters long.</p> </li> <li> <p>The ':' (colon) and '*' (asterisk) characters are not allowed.</p> </li> </ul>",
|
||||||
"DeleteDestination": "<p>Deletes the specified destination, and eventually disables all the subscription filters that publish to it. This operation does not delete the physical resource encapsulated by the destination.</p>",
|
"DeleteDestination": "<p>Deletes the specified destination, and eventually disables all the subscription filters that publish to it. This operation does not delete the physical resource encapsulated by the destination.</p>",
|
||||||
"DeleteLogGroup": "<p>Deletes the specified log group and permanently deletes all the archived log events associated with the log group.</p>",
|
"DeleteLogGroup": "<p>Deletes the specified log group and permanently deletes all the archived log events associated with the log group.</p>",
|
||||||
@ -17,12 +18,13 @@
|
|||||||
"DescribeExportTasks": "<p>Lists the specified export tasks. You can list all your export tasks or filter the results based on task ID or task status.</p>",
|
"DescribeExportTasks": "<p>Lists the specified export tasks. You can list all your export tasks or filter the results based on task ID or task status.</p>",
|
||||||
"DescribeLogGroups": "<p>Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name.</p>",
|
"DescribeLogGroups": "<p>Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name.</p>",
|
||||||
"DescribeLogStreams": "<p>Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered.</p> <p>This operation has a limit of five transactions per second, after which transactions are throttled.</p>",
|
"DescribeLogStreams": "<p>Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered.</p> <p>This operation has a limit of five transactions per second, after which transactions are throttled.</p>",
|
||||||
"DescribeMetricFilters": "<p>Lists the specified metric filters. You can list all the metric filters or filter the results by log name, prefix, metric name, and metric namespace. The results are ASCII-sorted by filter name.</p>",
|
"DescribeMetricFilters": "<p>Lists the specified metric filters. You can list all the metric filters or filter the results by log name, prefix, metric name, or metric namespace. The results are ASCII-sorted by filter name.</p>",
|
||||||
"DescribeResourcePolicies": "<p>Lists the resource policies in this account.</p>",
|
"DescribeResourcePolicies": "<p>Lists the resource policies in this account.</p>",
|
||||||
"DescribeSubscriptionFilters": "<p>Lists the subscription filters for the specified log group. You can list all the subscription filters or filter the results by prefix. The results are ASCII-sorted by filter name.</p>",
|
"DescribeSubscriptionFilters": "<p>Lists the subscription filters for the specified log group. You can list all the subscription filters or filter the results by prefix. The results are ASCII-sorted by filter name.</p>",
|
||||||
|
"DisassociateKmsKey": "<p>Disassociates the associated AWS Key Management Service (AWS KMS) customer master key (CMK) from the specified log group.</p> <p>After the AWS KMS CMK is disassociated from the log group, AWS CloudWatch Logs stops encrypting newly ingested data for the log group. All previously ingested data remains encrypted, and AWS CloudWatch Logs requires permissions for the CMK whenever the encrypted data is requested.</p> <p>Note that it can take up to 5 minutes for this operation to take effect.</p>",
|
||||||
"FilterLogEvents": "<p>Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream.</p> <p>By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events), or all the events found within the time range that you specify. If the results include a token, then there are more log events available, and you can get additional results by specifying the token in a subsequent call.</p>",
|
"FilterLogEvents": "<p>Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream.</p> <p>By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events), or all the events found within the time range that you specify. If the results include a token, then there are more log events available, and you can get additional results by specifying the token in a subsequent call.</p>",
|
||||||
"GetLogEvents": "<p>Lists log events from the specified log stream. You can list all the log events or filter using a time range.</p> <p>By default, this operation returns as many log events as can fit in a response size of 1 MB (up to 10,000 log events). You can get additional log events by specifying one of the tokens in a subsequent call.</p>",
|
"GetLogEvents": "<p>Lists log events from the specified log stream. You can list all the log events or filter using a time range.</p> <p>By default, this operation returns as many log events as can fit in a response size of 1MB (up to 10,000 log events). You can get additional log events by specifying one of the tokens in a subsequent call.</p>",
|
||||||
"ListTagsLogGroup": "<p>Lists the tags for the specified log group.</p> <p>To add tags, use <a>TagLogGroup</a>. To remove tags, use <a>UntagLogGroup</a>.</p>",
|
"ListTagsLogGroup": "<p>Lists the tags for the specified log group.</p>",
|
||||||
"PutDestination": "<p>Creates or updates a destination. A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using <a>PutLogEvents</a>. Currently, the only supported physical resource is a Kinesis stream belonging to the same account as the destination.</p> <p>Through an access policy, a destination controls what is written to its Kinesis stream. By default, <code>PutDestination</code> does not set any access policy with the destination, which means a cross-account user cannot call <a>PutSubscriptionFilter</a> against this destination. To enable this, the destination owner must call <a>PutDestinationPolicy</a> after <code>PutDestination</code>.</p>",
|
"PutDestination": "<p>Creates or updates a destination. A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using <a>PutLogEvents</a>. Currently, the only supported physical resource is a Kinesis stream belonging to the same account as the destination.</p> <p>Through an access policy, a destination controls what is written to its Kinesis stream. By default, <code>PutDestination</code> does not set any access policy with the destination, which means a cross-account user cannot call <a>PutSubscriptionFilter</a> against this destination. To enable this, the destination owner must call <a>PutDestinationPolicy</a> after <code>PutDestination</code>.</p>",
|
||||||
"PutDestinationPolicy": "<p>Creates or updates an access policy associated with an existing destination. An access policy is an <a href=\"http://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html\">IAM policy document</a> that is used to authorize claims to register a subscription filter against a given destination.</p>",
|
"PutDestinationPolicy": "<p>Creates or updates an access policy associated with an existing destination. An access policy is an <a href=\"http://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html\">IAM policy document</a> that is used to authorize claims to register a subscription filter against a given destination.</p>",
|
||||||
"PutLogEvents": "<p>Uploads a batch of log events to the specified log stream.</p> <p>You must include the sequence token obtained from the response of the previous call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token using <a>DescribeLogStreams</a>. If you call <code>PutLogEvents</code> twice within a narrow time period using the same value for <code>sequenceToken</code>, both calls may be successful, or one may be rejected.</p> <p>The batch of events must satisfy the following constraints:</p> <ul> <li> <p>The maximum batch size is 1,048,576 bytes, and this size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.</p> </li> <li> <p>None of the log events in the batch can be more than 2 hours in the future.</p> </li> <li> <p>None of the log events in the batch can be older than 14 days or the retention period of the log group.</p> </li> <li> <p>The log events in the batch must be in chronological ordered by their time stamp (the time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC).</p> </li> <li> <p>The maximum number of log events in a batch is 10,000.</p> </li> <li> <p>A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.</p> </li> </ul>",
|
"PutLogEvents": "<p>Uploads a batch of log events to the specified log stream.</p> <p>You must include the sequence token obtained from the response of the previous call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token using <a>DescribeLogStreams</a>. If you call <code>PutLogEvents</code> twice within a narrow time period using the same value for <code>sequenceToken</code>, both calls may be successful, or one may be rejected.</p> <p>The batch of events must satisfy the following constraints:</p> <ul> <li> <p>The maximum batch size is 1,048,576 bytes, and this size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.</p> </li> <li> <p>None of the log events in the batch can be more than 2 hours in the future.</p> </li> <li> <p>None of the log events in the batch can be older than 14 days or the retention period of the log group.</p> </li> <li> <p>The log events in the batch must be in chronological ordered by their time stamp (the time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC).</p> </li> <li> <p>The maximum number of log events in a batch is 10,000.</p> </li> <li> <p>A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.</p> </li> </ul>",
|
||||||
@ -50,6 +52,11 @@
|
|||||||
"LogStream$arn": "<p>The Amazon Resource Name (ARN) of the log stream.</p>"
|
"LogStream$arn": "<p>The Amazon Resource Name (ARN) of the log stream.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"AssociateKmsKeyRequest": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
"CancelExportTaskRequest": {
|
"CancelExportTaskRequest": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -246,11 +253,16 @@
|
|||||||
"DescribeDestinationsResponse$destinations": "<p>The destinations.</p>"
|
"DescribeDestinationsResponse$destinations": "<p>The destinations.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Distribution": {
|
"DisassociateKmsKeyRequest": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"PutSubscriptionFilterRequest$distribution": "<p>The method used to distribute log data to the destination, when the destination is an Amazon Kinesis stream. By default, log data is grouped by log stream. For a more even distribution, you can group log data randomly.</p>",
|
}
|
||||||
"SubscriptionFilter$distribution": "<p>The method used to distribute log data to the destination, when the destination is an Amazon Kinesis stream.</p>"
|
},
|
||||||
|
"Distribution": {
|
||||||
|
"base": "<p>The method used to distribute log data to the destination, which can be either random or grouped by log stream.</p>",
|
||||||
|
"refs": {
|
||||||
|
"PutSubscriptionFilterRequest$distribution": "<p>The method used to distribute log data to the destination. By default log data is grouped by log stream, but the grouping can be set to random for a more even distribution. This property is only applicable when the destination is an Amazon Kinesis stream. </p>",
|
||||||
|
"SubscriptionFilter$distribution": null
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"EventId": {
|
"EventId": {
|
||||||
@ -456,6 +468,14 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"KmsKeyId": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"AssociateKmsKeyRequest$kmsKeyId": "<p>The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. For more information, see <a href=\"http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms\">Amazon Resource Names - AWS Key Management Service (AWS KMS)</a>.</p>",
|
||||||
|
"CreateLogGroupRequest$kmsKeyId": "<p>The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. For more information, see <a href=\"http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms\">Amazon Resource Names - AWS Key Management Service (AWS KMS)</a>.</p>",
|
||||||
|
"LogGroup$kmsKeyId": "<p>The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
"LimitExceededException": {
|
"LimitExceededException": {
|
||||||
"base": "<p>You have reached the maximum number of resources that can be created.</p>",
|
"base": "<p>You have reached the maximum number of resources that can be created.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -488,6 +508,7 @@
|
|||||||
"LogGroupName": {
|
"LogGroupName": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
|
"AssociateKmsKeyRequest$logGroupName": "<p>The name of the log group.</p>",
|
||||||
"CreateExportTaskRequest$logGroupName": "<p>The name of the log group.</p>",
|
"CreateExportTaskRequest$logGroupName": "<p>The name of the log group.</p>",
|
||||||
"CreateLogGroupRequest$logGroupName": "<p>The name of the log group.</p>",
|
"CreateLogGroupRequest$logGroupName": "<p>The name of the log group.</p>",
|
||||||
"CreateLogStreamRequest$logGroupName": "<p>The name of the log group.</p>",
|
"CreateLogStreamRequest$logGroupName": "<p>The name of the log group.</p>",
|
||||||
@ -500,6 +521,7 @@
|
|||||||
"DescribeLogStreamsRequest$logGroupName": "<p>The name of the log group.</p>",
|
"DescribeLogStreamsRequest$logGroupName": "<p>The name of the log group.</p>",
|
||||||
"DescribeMetricFiltersRequest$logGroupName": "<p>The name of the log group.</p>",
|
"DescribeMetricFiltersRequest$logGroupName": "<p>The name of the log group.</p>",
|
||||||
"DescribeSubscriptionFiltersRequest$logGroupName": "<p>The name of the log group.</p>",
|
"DescribeSubscriptionFiltersRequest$logGroupName": "<p>The name of the log group.</p>",
|
||||||
|
"DisassociateKmsKeyRequest$logGroupName": "<p>The name of the log group.</p>",
|
||||||
"ExportTask$logGroupName": "<p>The name of the log group from which logs data was exported.</p>",
|
"ExportTask$logGroupName": "<p>The name of the log group from which logs data was exported.</p>",
|
||||||
"FilterLogEventsRequest$logGroupName": "<p>The name of the log group.</p>",
|
"FilterLogEventsRequest$logGroupName": "<p>The name of the log group.</p>",
|
||||||
"GetLogEventsRequest$logGroupName": "<p>The name of the log group.</p>",
|
"GetLogEventsRequest$logGroupName": "<p>The name of the log group.</p>",
|
||||||
@ -581,7 +603,7 @@
|
|||||||
"MetricName": {
|
"MetricName": {
|
||||||
"base": "<p>The name of the CloudWatch metric to which the monitored log information should be published. For example, you may publish to a metric called ErrorCount.</p>",
|
"base": "<p>The name of the CloudWatch metric to which the monitored log information should be published. For example, you may publish to a metric called ErrorCount.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeMetricFiltersRequest$metricName": "<p>The name of the CloudWatch metric.</p>",
|
"DescribeMetricFiltersRequest$metricName": null,
|
||||||
"MetricTransformation$metricName": "<p>The name of the CloudWatch metric.</p>"
|
"MetricTransformation$metricName": "<p>The name of the CloudWatch metric.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -841,7 +863,7 @@
|
|||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateLogGroupRequest$tags": "<p>The key-value pairs to use for the tags.</p>",
|
"CreateLogGroupRequest$tags": "<p>The key-value pairs to use for the tags.</p>",
|
||||||
"ListTagsLogGroupResponse$tags": "<p>The tags.</p>",
|
"ListTagsLogGroupResponse$tags": "<p>The tags for the log group.</p>",
|
||||||
"TagLogGroupRequest$tags": "<p>The key-value pairs to use for the tags.</p>"
|
"TagLogGroupRequest$tags": "<p>The key-value pairs to use for the tags.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
52
vendor/github.com/aws/aws-sdk-go/models/apis/mturk-requester/2017-01-17/api-2.json
generated
vendored
52
vendor/github.com/aws/aws-sdk-go/models/apis/mturk-requester/2017-01-17/api-2.json
generated
vendored
@ -629,7 +629,7 @@
|
|||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
"WorkerId":{"shape":"CustomerId"},
|
"WorkerId":{"shape":"CustomerId"},
|
||||||
"BonusAmount":{"shape":"NumericValue"},
|
"BonusAmount":{"shape":"CurrencyAmount"},
|
||||||
"AssignmentId":{"shape":"EntityId"},
|
"AssignmentId":{"shape":"EntityId"},
|
||||||
"Reason":{"shape":"String"},
|
"Reason":{"shape":"String"},
|
||||||
"GrantTime":{"shape":"Timestamp"}
|
"GrantTime":{"shape":"Timestamp"}
|
||||||
@ -662,7 +662,10 @@
|
|||||||
},
|
},
|
||||||
"CreateAdditionalAssignmentsForHITRequest":{
|
"CreateAdditionalAssignmentsForHITRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":["HITId"],
|
"required":[
|
||||||
|
"HITId",
|
||||||
|
"NumberOfAdditionalAssignments"
|
||||||
|
],
|
||||||
"members":{
|
"members":{
|
||||||
"HITId":{"shape":"EntityId"},
|
"HITId":{"shape":"EntityId"},
|
||||||
"NumberOfAdditionalAssignments":{"shape":"Integer"},
|
"NumberOfAdditionalAssignments":{"shape":"Integer"},
|
||||||
@ -688,7 +691,7 @@
|
|||||||
"AutoApprovalDelayInSeconds":{"shape":"Long"},
|
"AutoApprovalDelayInSeconds":{"shape":"Long"},
|
||||||
"LifetimeInSeconds":{"shape":"Long"},
|
"LifetimeInSeconds":{"shape":"Long"},
|
||||||
"AssignmentDurationInSeconds":{"shape":"Long"},
|
"AssignmentDurationInSeconds":{"shape":"Long"},
|
||||||
"Reward":{"shape":"NumericValue"},
|
"Reward":{"shape":"CurrencyAmount"},
|
||||||
"Title":{"shape":"String"},
|
"Title":{"shape":"String"},
|
||||||
"Keywords":{"shape":"String"},
|
"Keywords":{"shape":"String"},
|
||||||
"Description":{"shape":"String"},
|
"Description":{"shape":"String"},
|
||||||
@ -719,7 +722,7 @@
|
|||||||
"members":{
|
"members":{
|
||||||
"AutoApprovalDelayInSeconds":{"shape":"Long"},
|
"AutoApprovalDelayInSeconds":{"shape":"Long"},
|
||||||
"AssignmentDurationInSeconds":{"shape":"Long"},
|
"AssignmentDurationInSeconds":{"shape":"Long"},
|
||||||
"Reward":{"shape":"NumericValue"},
|
"Reward":{"shape":"CurrencyAmount"},
|
||||||
"Title":{"shape":"String"},
|
"Title":{"shape":"String"},
|
||||||
"Keywords":{"shape":"String"},
|
"Keywords":{"shape":"String"},
|
||||||
"Description":{"shape":"String"},
|
"Description":{"shape":"String"},
|
||||||
@ -799,6 +802,10 @@
|
|||||||
"members":{
|
"members":{
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"CurrencyAmount":{
|
||||||
|
"type":"string",
|
||||||
|
"pattern":"^[0-9]+(\\.)?[0-9]{0,2}$"
|
||||||
|
},
|
||||||
"CustomerId":{
|
"CustomerId":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"max":64,
|
"max":64,
|
||||||
@ -899,8 +906,8 @@
|
|||||||
"GetAccountBalanceResponse":{
|
"GetAccountBalanceResponse":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
"AvailableBalance":{"shape":"NumericValue"},
|
"AvailableBalance":{"shape":"CurrencyAmount"},
|
||||||
"OnHoldBalance":{"shape":"NumericValue"}
|
"OnHoldBalance":{"shape":"CurrencyAmount"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"GetAssignmentRequest":{
|
"GetAssignmentRequest":{
|
||||||
@ -991,7 +998,7 @@
|
|||||||
"Keywords":{"shape":"String"},
|
"Keywords":{"shape":"String"},
|
||||||
"HITStatus":{"shape":"HITStatus"},
|
"HITStatus":{"shape":"HITStatus"},
|
||||||
"MaxAssignments":{"shape":"Integer"},
|
"MaxAssignments":{"shape":"Integer"},
|
||||||
"Reward":{"shape":"NumericValue"},
|
"Reward":{"shape":"CurrencyAmount"},
|
||||||
"AutoApprovalDelayInSeconds":{"shape":"Long"},
|
"AutoApprovalDelayInSeconds":{"shape":"Long"},
|
||||||
"Expiration":{"shape":"Timestamp"},
|
"Expiration":{"shape":"Timestamp"},
|
||||||
"AssignmentDurationInSeconds":{"shape":"Long"},
|
"AssignmentDurationInSeconds":{"shape":"Long"},
|
||||||
@ -1005,6 +1012,10 @@
|
|||||||
},
|
},
|
||||||
"HITLayoutParameter":{
|
"HITLayoutParameter":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
|
"required":[
|
||||||
|
"Name",
|
||||||
|
"Value"
|
||||||
|
],
|
||||||
"members":{
|
"members":{
|
||||||
"Name":{"shape":"String"},
|
"Name":{"shape":"String"},
|
||||||
"Value":{"shape":"String"}
|
"Value":{"shape":"String"}
|
||||||
@ -1239,7 +1250,9 @@
|
|||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":[
|
"required":[
|
||||||
"Destination",
|
"Destination",
|
||||||
"Transport"
|
"Transport",
|
||||||
|
"Version",
|
||||||
|
"EventTypes"
|
||||||
],
|
],
|
||||||
"members":{
|
"members":{
|
||||||
"Destination":{"shape":"String"},
|
"Destination":{"shape":"String"},
|
||||||
@ -1252,7 +1265,8 @@
|
|||||||
"type":"string",
|
"type":"string",
|
||||||
"enum":[
|
"enum":[
|
||||||
"Email",
|
"Email",
|
||||||
"SQS"
|
"SQS",
|
||||||
|
"SNS"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"NotifyWorkersFailureCode":{
|
"NotifyWorkersFailureCode":{
|
||||||
@ -1293,10 +1307,6 @@
|
|||||||
"NotifyWorkersFailureStatuses":{"shape":"NotifyWorkersFailureStatusList"}
|
"NotifyWorkersFailureStatuses":{"shape":"NotifyWorkersFailureStatusList"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"NumericValue":{
|
|
||||||
"type":"string",
|
|
||||||
"pattern":"^[0-9]+(\\.)?[0-9]*$"
|
|
||||||
},
|
|
||||||
"PaginationToken":{
|
"PaginationToken":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"max":255,
|
"max":255,
|
||||||
@ -1411,7 +1421,10 @@
|
|||||||
},
|
},
|
||||||
"RejectAssignmentRequest":{
|
"RejectAssignmentRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":["AssignmentId"],
|
"required":[
|
||||||
|
"AssignmentId",
|
||||||
|
"RequesterFeedback"
|
||||||
|
],
|
||||||
"members":{
|
"members":{
|
||||||
"AssignmentId":{"shape":"EntityId"},
|
"AssignmentId":{"shape":"EntityId"},
|
||||||
"RequesterFeedback":{"shape":"String"}
|
"RequesterFeedback":{"shape":"String"}
|
||||||
@ -1476,6 +1489,7 @@
|
|||||||
},
|
},
|
||||||
"ReviewPolicy":{
|
"ReviewPolicy":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
|
"required":["PolicyName"],
|
||||||
"members":{
|
"members":{
|
||||||
"PolicyName":{"shape":"String"},
|
"PolicyName":{"shape":"String"},
|
||||||
"Parameters":{"shape":"PolicyParameterList"}
|
"Parameters":{"shape":"PolicyParameterList"}
|
||||||
@ -1526,11 +1540,12 @@
|
|||||||
"required":[
|
"required":[
|
||||||
"WorkerId",
|
"WorkerId",
|
||||||
"BonusAmount",
|
"BonusAmount",
|
||||||
"AssignmentId"
|
"AssignmentId",
|
||||||
|
"Reason"
|
||||||
],
|
],
|
||||||
"members":{
|
"members":{
|
||||||
"WorkerId":{"shape":"CustomerId"},
|
"WorkerId":{"shape":"CustomerId"},
|
||||||
"BonusAmount":{"shape":"NumericValue"},
|
"BonusAmount":{"shape":"CurrencyAmount"},
|
||||||
"AssignmentId":{"shape":"EntityId"},
|
"AssignmentId":{"shape":"EntityId"},
|
||||||
"Reason":{"shape":"String"},
|
"Reason":{"shape":"String"},
|
||||||
"UniqueRequestToken":{"shape":"IdempotencyToken"}
|
"UniqueRequestToken":{"shape":"IdempotencyToken"}
|
||||||
@ -1575,7 +1590,10 @@
|
|||||||
"TurkErrorCode":{"type":"string"},
|
"TurkErrorCode":{"type":"string"},
|
||||||
"UpdateExpirationForHITRequest":{
|
"UpdateExpirationForHITRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"required":["HITId"],
|
"required":[
|
||||||
|
"HITId",
|
||||||
|
"ExpireAt"
|
||||||
|
],
|
||||||
"members":{
|
"members":{
|
||||||
"HITId":{"shape":"EntityId"},
|
"HITId":{"shape":"EntityId"},
|
||||||
"ExpireAt":{"shape":"Timestamp"}
|
"ExpireAt":{"shape":"Timestamp"}
|
||||||
|
28
vendor/github.com/aws/aws-sdk-go/models/apis/mturk-requester/2017-01-17/docs-2.json
generated
vendored
28
vendor/github.com/aws/aws-sdk-go/models/apis/mturk-requester/2017-01-17/docs-2.json
generated
vendored
@ -202,6 +202,18 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"CurrencyAmount": {
|
||||||
|
"base": "<p>A string representing a currency amount.</p>",
|
||||||
|
"refs": {
|
||||||
|
"BonusPayment$BonusAmount": null,
|
||||||
|
"CreateHITRequest$Reward": "<p> The amount of money the Requester will pay a Worker for successfully completing the HIT. </p>",
|
||||||
|
"CreateHITTypeRequest$Reward": "<p> The amount of money the Requester will pay a Worker for successfully completing the HIT. </p>",
|
||||||
|
"GetAccountBalanceResponse$AvailableBalance": null,
|
||||||
|
"GetAccountBalanceResponse$OnHoldBalance": null,
|
||||||
|
"HIT$Reward": null,
|
||||||
|
"SendBonusRequest$BonusAmount": "<p> The Bonus amount is a US Dollar amount specified using a string (for example, \"5\" represents $5.00 USD and \"101.42\" represents $101.42 USD). Do not include currency symbols or currency codes. </p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
"CustomerId": {
|
"CustomerId": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -626,7 +638,7 @@
|
|||||||
"NotificationTransport": {
|
"NotificationTransport": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"NotificationSpecification$Transport": "<p> The method Amazon Mechanical Turk uses to send the notification. Valid Values: Email | SQS. </p>"
|
"NotificationSpecification$Transport": "<p> The method Amazon Mechanical Turk uses to send the notification. Valid Values: Email | SQS | SNS. </p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"NotifyWorkersFailureCode": {
|
"NotifyWorkersFailureCode": {
|
||||||
@ -657,18 +669,6 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"NumericValue": {
|
|
||||||
"base": "<p>A string representing a numeric value.</p>",
|
|
||||||
"refs": {
|
|
||||||
"BonusPayment$BonusAmount": null,
|
|
||||||
"CreateHITRequest$Reward": "<p> The amount of money the Requester will pay a Worker for successfully completing the HIT. </p>",
|
|
||||||
"CreateHITTypeRequest$Reward": "<p> The amount of money the Requester will pay a Worker for successfully completing the HIT. </p>",
|
|
||||||
"GetAccountBalanceResponse$AvailableBalance": null,
|
|
||||||
"GetAccountBalanceResponse$OnHoldBalance": null,
|
|
||||||
"HIT$Reward": null,
|
|
||||||
"SendBonusRequest$BonusAmount": "<p> The Bonus amount is a US Dollar amount specified using a string (for example, \"5\" represents $5.00 USD and \"101.42\" represents $101.42 USD). Do not include currency symbols or currency codes. </p>"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"PaginationToken": {
|
"PaginationToken": {
|
||||||
"base": "<p>If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results. </p>",
|
"base": "<p>If the previous response was incomplete (because there is more data to retrieve), Amazon Mechanical Turk returns a pagination token in the response. You can use this pagination token to retrieve the next set of results. </p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -954,7 +954,7 @@
|
|||||||
"HITLayoutParameter$Name": "<p> The name of the parameter in the HITLayout. </p>",
|
"HITLayoutParameter$Name": "<p> The name of the parameter in the HITLayout. </p>",
|
||||||
"HITLayoutParameter$Value": "<p>The value substituted for the parameter referenced in the HITLayout. </p>",
|
"HITLayoutParameter$Value": "<p>The value substituted for the parameter referenced in the HITLayout. </p>",
|
||||||
"ListQualificationTypesRequest$Query": "<p> A text query against all of the searchable attributes of Qualification types. </p>",
|
"ListQualificationTypesRequest$Query": "<p> A text query against all of the searchable attributes of Qualification types. </p>",
|
||||||
"NotificationSpecification$Destination": "<p> The destination for notification messages. or email notifications (if Transport is Email), this is an email address. For Amazon Simple Queue Service (Amazon SQS) notifications (if Transport is SQS), this is the URL for your Amazon SQS queue. </p>",
|
"NotificationSpecification$Destination": "<p> The target for notification messages. The Destination’s format is determined by the specified Transport: </p> <ul> <li> <p>When Transport is Email, the Destination is your email address.</p> </li> <li> <p>When Transport is SQS, the Destination is your queue URL.</p> </li> <li> <p>When Transport is SNS, the Destination is the ARN of your topic.</p> </li> </ul>",
|
||||||
"NotificationSpecification$Version": "<p>The version of the Notification API to use. Valid value is 2006-05-05.</p>",
|
"NotificationSpecification$Version": "<p>The version of the Notification API to use. Valid value is 2006-05-05.</p>",
|
||||||
"NotifyWorkersFailureStatus$NotifyWorkersFailureMessage": "<p> A message detailing the reason the Worker could not be notified. </p>",
|
"NotifyWorkersFailureStatus$NotifyWorkersFailureMessage": "<p> A message detailing the reason the Worker could not be notified. </p>",
|
||||||
"NotifyWorkersRequest$Subject": "<p>The subject line of the email message to send. Can include up to 200 characters.</p>",
|
"NotifyWorkersRequest$Subject": "<p>The subject line of the email message to send. Can include up to 200 characters.</p>",
|
||||||
|
4
vendor/github.com/aws/aws-sdk-go/models/apis/organizations/2016-11-28/api-2.json
generated
vendored
4
vendor/github.com/aws/aws-sdk-go/models/apis/organizations/2016-11-28/api-2.json
generated
vendored
@ -1310,6 +1310,10 @@
|
|||||||
},
|
},
|
||||||
"HandshakeParty":{
|
"HandshakeParty":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
|
"required":[
|
||||||
|
"Id",
|
||||||
|
"Type"
|
||||||
|
],
|
||||||
"members":{
|
"members":{
|
||||||
"Id":{"shape":"HandshakePartyId"},
|
"Id":{"shape":"HandshakePartyId"},
|
||||||
"Type":{"shape":"HandshakePartyType"}
|
"Type":{"shape":"HandshakePartyType"}
|
||||||
|
2
vendor/github.com/aws/aws-sdk-go/models/apis/organizations/2016-11-28/docs-2.json
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/models/apis/organizations/2016-11-28/docs-2.json
generated
vendored
@ -1119,7 +1119,7 @@
|
|||||||
"RootId": {
|
"RootId": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DisablePolicyTypeRequest$RootId": "<p>The unique identifier (ID) of the root in which you want to disable a policy type. You can get the ID from the <a>ListPolicies</a> operation.</p> <p>The <a href=\"http://wikipedia.org/wiki/regex\">regex pattern</a> for a root ID string requires \"r-\" followed by from 4 to 32 lower-case letters or digits.</p>",
|
"DisablePolicyTypeRequest$RootId": "<p>The unique identifier (ID) of the root in which you want to disable a policy type. You can get the ID from the <a>ListRoots</a> operation.</p> <p>The <a href=\"http://wikipedia.org/wiki/regex\">regex pattern</a> for a root ID string requires \"r-\" followed by from 4 to 32 lower-case letters or digits.</p>",
|
||||||
"EnablePolicyTypeRequest$RootId": "<p>The unique identifier (ID) of the root in which you want to enable a policy type. You can get the ID from the <a>ListRoots</a> operation.</p> <p>The <a href=\"http://wikipedia.org/wiki/regex\">regex pattern</a> for a root ID string requires \"r-\" followed by from 4 to 32 lower-case letters or digits.</p>",
|
"EnablePolicyTypeRequest$RootId": "<p>The unique identifier (ID) of the root in which you want to enable a policy type. You can get the ID from the <a>ListRoots</a> operation.</p> <p>The <a href=\"http://wikipedia.org/wiki/regex\">regex pattern</a> for a root ID string requires \"r-\" followed by from 4 to 32 lower-case letters or digits.</p>",
|
||||||
"Root$Id": "<p>The unique identifier (ID) for the root.</p> <p>The <a href=\"http://wikipedia.org/wiki/regex\">regex pattern</a> for a root ID string requires \"r-\" followed by from 4 to 32 lower-case letters or digits.</p>"
|
"Root$Id": "<p>The unique identifier (ID) for the root.</p> <p>The <a href=\"http://wikipedia.org/wiki/regex\">regex pattern</a> for a root ID string requires \"r-\" followed by from 4 to 32 lower-case letters or digits.</p>"
|
||||||
}
|
}
|
||||||
|
752
vendor/github.com/aws/aws-sdk-go/models/apis/pinpoint/2016-12-01/api-2.json
generated
vendored
752
vendor/github.com/aws/aws-sdk-go/models/apis/pinpoint/2016-12-01/api-2.json
generated
vendored
@ -6,7 +6,8 @@
|
|||||||
"serviceFullName" : "Amazon Pinpoint",
|
"serviceFullName" : "Amazon Pinpoint",
|
||||||
"signatureVersion":"v4",
|
"signatureVersion":"v4",
|
||||||
"protocol" : "rest-json",
|
"protocol" : "rest-json",
|
||||||
"jsonVersion" : "1.1"
|
"jsonVersion" : "1.1",
|
||||||
|
"uid":"pinpoint-2016-12-01"
|
||||||
},
|
},
|
||||||
"operations" : {
|
"operations" : {
|
||||||
"CreateApp" : {
|
"CreateApp" : {
|
||||||
@ -117,6 +118,33 @@
|
|||||||
"shape" : "TooManyRequestsException"
|
"shape" : "TooManyRequestsException"
|
||||||
} ]
|
} ]
|
||||||
},
|
},
|
||||||
|
"DeleteAdmChannel" : {
|
||||||
|
"name" : "DeleteAdmChannel",
|
||||||
|
"http" : {
|
||||||
|
"method" : "DELETE",
|
||||||
|
"requestUri" : "/v1/apps/{application-id}/channels/adm",
|
||||||
|
"responseCode" : 200
|
||||||
|
},
|
||||||
|
"input" : {
|
||||||
|
"shape" : "DeleteAdmChannelRequest"
|
||||||
|
},
|
||||||
|
"output" : {
|
||||||
|
"shape" : "DeleteAdmChannelResponse"
|
||||||
|
},
|
||||||
|
"errors" : [ {
|
||||||
|
"shape" : "BadRequestException"
|
||||||
|
}, {
|
||||||
|
"shape" : "InternalServerErrorException"
|
||||||
|
}, {
|
||||||
|
"shape" : "ForbiddenException"
|
||||||
|
}, {
|
||||||
|
"shape" : "NotFoundException"
|
||||||
|
}, {
|
||||||
|
"shape" : "MethodNotAllowedException"
|
||||||
|
}, {
|
||||||
|
"shape" : "TooManyRequestsException"
|
||||||
|
} ]
|
||||||
|
},
|
||||||
"DeleteApnsChannel" : {
|
"DeleteApnsChannel" : {
|
||||||
"name" : "DeleteApnsChannel",
|
"name" : "DeleteApnsChannel",
|
||||||
"http" : {
|
"http" : {
|
||||||
@ -198,6 +226,33 @@
|
|||||||
"shape" : "TooManyRequestsException"
|
"shape" : "TooManyRequestsException"
|
||||||
} ]
|
} ]
|
||||||
},
|
},
|
||||||
|
"DeleteBaiduChannel" : {
|
||||||
|
"name" : "DeleteBaiduChannel",
|
||||||
|
"http" : {
|
||||||
|
"method" : "DELETE",
|
||||||
|
"requestUri" : "/v1/apps/{application-id}/channels/baidu",
|
||||||
|
"responseCode" : 200
|
||||||
|
},
|
||||||
|
"input" : {
|
||||||
|
"shape" : "DeleteBaiduChannelRequest"
|
||||||
|
},
|
||||||
|
"output" : {
|
||||||
|
"shape" : "DeleteBaiduChannelResponse"
|
||||||
|
},
|
||||||
|
"errors" : [ {
|
||||||
|
"shape" : "BadRequestException"
|
||||||
|
}, {
|
||||||
|
"shape" : "InternalServerErrorException"
|
||||||
|
}, {
|
||||||
|
"shape" : "ForbiddenException"
|
||||||
|
}, {
|
||||||
|
"shape" : "NotFoundException"
|
||||||
|
}, {
|
||||||
|
"shape" : "MethodNotAllowedException"
|
||||||
|
}, {
|
||||||
|
"shape" : "TooManyRequestsException"
|
||||||
|
} ]
|
||||||
|
},
|
||||||
"DeleteCampaign" : {
|
"DeleteCampaign" : {
|
||||||
"name" : "DeleteCampaign",
|
"name" : "DeleteCampaign",
|
||||||
"http" : {
|
"http" : {
|
||||||
@ -360,6 +415,33 @@
|
|||||||
"shape" : "TooManyRequestsException"
|
"shape" : "TooManyRequestsException"
|
||||||
} ]
|
} ]
|
||||||
},
|
},
|
||||||
|
"GetAdmChannel" : {
|
||||||
|
"name" : "GetAdmChannel",
|
||||||
|
"http" : {
|
||||||
|
"method" : "GET",
|
||||||
|
"requestUri" : "/v1/apps/{application-id}/channels/adm",
|
||||||
|
"responseCode" : 200
|
||||||
|
},
|
||||||
|
"input" : {
|
||||||
|
"shape" : "GetAdmChannelRequest"
|
||||||
|
},
|
||||||
|
"output" : {
|
||||||
|
"shape" : "GetAdmChannelResponse"
|
||||||
|
},
|
||||||
|
"errors" : [ {
|
||||||
|
"shape" : "BadRequestException"
|
||||||
|
}, {
|
||||||
|
"shape" : "InternalServerErrorException"
|
||||||
|
}, {
|
||||||
|
"shape" : "ForbiddenException"
|
||||||
|
}, {
|
||||||
|
"shape" : "NotFoundException"
|
||||||
|
}, {
|
||||||
|
"shape" : "MethodNotAllowedException"
|
||||||
|
}, {
|
||||||
|
"shape" : "TooManyRequestsException"
|
||||||
|
} ]
|
||||||
|
},
|
||||||
"GetApnsChannel" : {
|
"GetApnsChannel" : {
|
||||||
"name" : "GetApnsChannel",
|
"name" : "GetApnsChannel",
|
||||||
"http" : {
|
"http" : {
|
||||||
@ -495,6 +577,33 @@
|
|||||||
"shape" : "TooManyRequestsException"
|
"shape" : "TooManyRequestsException"
|
||||||
} ]
|
} ]
|
||||||
},
|
},
|
||||||
|
"GetBaiduChannel" : {
|
||||||
|
"name" : "GetBaiduChannel",
|
||||||
|
"http" : {
|
||||||
|
"method" : "GET",
|
||||||
|
"requestUri" : "/v1/apps/{application-id}/channels/baidu",
|
||||||
|
"responseCode" : 200
|
||||||
|
},
|
||||||
|
"input" : {
|
||||||
|
"shape" : "GetBaiduChannelRequest"
|
||||||
|
},
|
||||||
|
"output" : {
|
||||||
|
"shape" : "GetBaiduChannelResponse"
|
||||||
|
},
|
||||||
|
"errors" : [ {
|
||||||
|
"shape" : "BadRequestException"
|
||||||
|
}, {
|
||||||
|
"shape" : "InternalServerErrorException"
|
||||||
|
}, {
|
||||||
|
"shape" : "ForbiddenException"
|
||||||
|
}, {
|
||||||
|
"shape" : "NotFoundException"
|
||||||
|
}, {
|
||||||
|
"shape" : "MethodNotAllowedException"
|
||||||
|
}, {
|
||||||
|
"shape" : "TooManyRequestsException"
|
||||||
|
} ]
|
||||||
|
},
|
||||||
"GetCampaign" : {
|
"GetCampaign" : {
|
||||||
"name" : "GetCampaign",
|
"name" : "GetCampaign",
|
||||||
"http" : {
|
"http" : {
|
||||||
@ -1008,6 +1117,60 @@
|
|||||||
"shape" : "TooManyRequestsException"
|
"shape" : "TooManyRequestsException"
|
||||||
} ]
|
} ]
|
||||||
},
|
},
|
||||||
|
"SendUsersMessages" : {
|
||||||
|
"name" : "SendUsersMessages",
|
||||||
|
"http" : {
|
||||||
|
"method" : "POST",
|
||||||
|
"requestUri" : "/v1/apps/{application-id}/users-messages",
|
||||||
|
"responseCode" : 200
|
||||||
|
},
|
||||||
|
"input" : {
|
||||||
|
"shape" : "SendUsersMessagesRequest"
|
||||||
|
},
|
||||||
|
"output" : {
|
||||||
|
"shape" : "SendUsersMessagesResponse"
|
||||||
|
},
|
||||||
|
"errors" : [ {
|
||||||
|
"shape" : "BadRequestException"
|
||||||
|
}, {
|
||||||
|
"shape" : "InternalServerErrorException"
|
||||||
|
}, {
|
||||||
|
"shape" : "ForbiddenException"
|
||||||
|
}, {
|
||||||
|
"shape" : "NotFoundException"
|
||||||
|
}, {
|
||||||
|
"shape" : "MethodNotAllowedException"
|
||||||
|
}, {
|
||||||
|
"shape" : "TooManyRequestsException"
|
||||||
|
} ]
|
||||||
|
},
|
||||||
|
"UpdateAdmChannel" : {
|
||||||
|
"name" : "UpdateAdmChannel",
|
||||||
|
"http" : {
|
||||||
|
"method" : "PUT",
|
||||||
|
"requestUri" : "/v1/apps/{application-id}/channels/adm",
|
||||||
|
"responseCode" : 200
|
||||||
|
},
|
||||||
|
"input" : {
|
||||||
|
"shape" : "UpdateAdmChannelRequest"
|
||||||
|
},
|
||||||
|
"output" : {
|
||||||
|
"shape" : "UpdateAdmChannelResponse"
|
||||||
|
},
|
||||||
|
"errors" : [ {
|
||||||
|
"shape" : "BadRequestException"
|
||||||
|
}, {
|
||||||
|
"shape" : "InternalServerErrorException"
|
||||||
|
}, {
|
||||||
|
"shape" : "ForbiddenException"
|
||||||
|
}, {
|
||||||
|
"shape" : "NotFoundException"
|
||||||
|
}, {
|
||||||
|
"shape" : "MethodNotAllowedException"
|
||||||
|
}, {
|
||||||
|
"shape" : "TooManyRequestsException"
|
||||||
|
} ]
|
||||||
|
},
|
||||||
"UpdateApnsChannel" : {
|
"UpdateApnsChannel" : {
|
||||||
"name" : "UpdateApnsChannel",
|
"name" : "UpdateApnsChannel",
|
||||||
"http" : {
|
"http" : {
|
||||||
@ -1089,6 +1252,33 @@
|
|||||||
"shape" : "TooManyRequestsException"
|
"shape" : "TooManyRequestsException"
|
||||||
} ]
|
} ]
|
||||||
},
|
},
|
||||||
|
"UpdateBaiduChannel" : {
|
||||||
|
"name" : "UpdateBaiduChannel",
|
||||||
|
"http" : {
|
||||||
|
"method" : "PUT",
|
||||||
|
"requestUri" : "/v1/apps/{application-id}/channels/baidu",
|
||||||
|
"responseCode" : 200
|
||||||
|
},
|
||||||
|
"input" : {
|
||||||
|
"shape" : "UpdateBaiduChannelRequest"
|
||||||
|
},
|
||||||
|
"output" : {
|
||||||
|
"shape" : "UpdateBaiduChannelResponse"
|
||||||
|
},
|
||||||
|
"errors" : [ {
|
||||||
|
"shape" : "BadRequestException"
|
||||||
|
}, {
|
||||||
|
"shape" : "InternalServerErrorException"
|
||||||
|
}, {
|
||||||
|
"shape" : "ForbiddenException"
|
||||||
|
}, {
|
||||||
|
"shape" : "NotFoundException"
|
||||||
|
}, {
|
||||||
|
"shape" : "MethodNotAllowedException"
|
||||||
|
}, {
|
||||||
|
"shape" : "TooManyRequestsException"
|
||||||
|
} ]
|
||||||
|
},
|
||||||
"UpdateCampaign" : {
|
"UpdateCampaign" : {
|
||||||
"name" : "UpdateCampaign",
|
"name" : "UpdateCampaign",
|
||||||
"http" : {
|
"http" : {
|
||||||
@ -1280,17 +1470,134 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"shapes" : {
|
"shapes" : {
|
||||||
|
"ADMChannelRequest" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ClientId" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"ClientSecret" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Enabled" : {
|
||||||
|
"shape" : "__boolean"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ADMChannelResponse" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ApplicationId" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"CreationDate" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Enabled" : {
|
||||||
|
"shape" : "__boolean"
|
||||||
|
},
|
||||||
|
"Id" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"IsArchived" : {
|
||||||
|
"shape" : "__boolean"
|
||||||
|
},
|
||||||
|
"LastModifiedBy" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"LastModifiedDate" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Platform" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Version" : {
|
||||||
|
"shape" : "__integer"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ADMMessage" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"Action" : {
|
||||||
|
"shape" : "Action"
|
||||||
|
},
|
||||||
|
"Body" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"ConsolidationKey" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Data" : {
|
||||||
|
"shape" : "MapOf__string"
|
||||||
|
},
|
||||||
|
"ExpiresAfter" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"IconReference" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"ImageIconUrl" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"ImageUrl" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"JsonData" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"MD5" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"RawContent" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"SilentPush" : {
|
||||||
|
"shape" : "__boolean"
|
||||||
|
},
|
||||||
|
"SmallImageIconUrl" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Sound" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Substitutions" : {
|
||||||
|
"shape" : "MapOfListOf__string"
|
||||||
|
},
|
||||||
|
"Title" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Url" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"APNSChannelRequest" : {
|
"APNSChannelRequest" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
|
"BundleId" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
"Certificate" : {
|
"Certificate" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
|
"DefaultAuthenticationMethod" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
"Enabled" : {
|
"Enabled" : {
|
||||||
"shape" : "__boolean"
|
"shape" : "__boolean"
|
||||||
},
|
},
|
||||||
"PrivateKey" : {
|
"PrivateKey" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"TeamId" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"TokenKey" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"TokenKeyId" : {
|
||||||
|
"shape" : "__string"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -1344,9 +1651,15 @@
|
|||||||
"Data" : {
|
"Data" : {
|
||||||
"shape" : "MapOf__string"
|
"shape" : "MapOf__string"
|
||||||
},
|
},
|
||||||
|
"JsonData" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
"MediaUrl" : {
|
"MediaUrl" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
|
"PreferredAuthenticationMethod" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
"RawContent" : {
|
"RawContent" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
@ -1373,14 +1686,29 @@
|
|||||||
"APNSSandboxChannelRequest" : {
|
"APNSSandboxChannelRequest" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
|
"BundleId" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
"Certificate" : {
|
"Certificate" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
|
"DefaultAuthenticationMethod" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
"Enabled" : {
|
"Enabled" : {
|
||||||
"shape" : "__boolean"
|
"shape" : "__boolean"
|
||||||
},
|
},
|
||||||
"PrivateKey" : {
|
"PrivateKey" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"TeamId" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"TokenKey" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"TokenKeyId" : {
|
||||||
|
"shape" : "__string"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -1564,6 +1892,102 @@
|
|||||||
"httpStatusCode" : 400
|
"httpStatusCode" : 400
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"BaiduChannelRequest" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ApiKey" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Enabled" : {
|
||||||
|
"shape" : "__boolean"
|
||||||
|
},
|
||||||
|
"SecretKey" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"BaiduChannelResponse" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ApplicationId" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"CreationDate" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Credential" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Enabled" : {
|
||||||
|
"shape" : "__boolean"
|
||||||
|
},
|
||||||
|
"Id" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"IsArchived" : {
|
||||||
|
"shape" : "__boolean"
|
||||||
|
},
|
||||||
|
"LastModifiedBy" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"LastModifiedDate" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Platform" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Version" : {
|
||||||
|
"shape" : "__integer"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"BaiduMessage" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"Action" : {
|
||||||
|
"shape" : "Action"
|
||||||
|
},
|
||||||
|
"Body" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Data" : {
|
||||||
|
"shape" : "MapOf__string"
|
||||||
|
},
|
||||||
|
"IconReference" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"ImageIconUrl" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"ImageUrl" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"JsonData" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"RawContent" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"SilentPush" : {
|
||||||
|
"shape" : "__boolean"
|
||||||
|
},
|
||||||
|
"SmallImageIconUrl" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Sound" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Substitutions" : {
|
||||||
|
"shape" : "MapOfListOf__string"
|
||||||
|
},
|
||||||
|
"Title" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Url" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"CampaignEmailMessage" : {
|
"CampaignEmailMessage" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
@ -1587,6 +2011,12 @@
|
|||||||
"Daily" : {
|
"Daily" : {
|
||||||
"shape" : "__integer"
|
"shape" : "__integer"
|
||||||
},
|
},
|
||||||
|
"MaximumDuration" : {
|
||||||
|
"shape" : "__integer"
|
||||||
|
},
|
||||||
|
"MessagesPerSecond" : {
|
||||||
|
"shape" : "__integer"
|
||||||
|
},
|
||||||
"Total" : {
|
"Total" : {
|
||||||
"shape" : "__integer"
|
"shape" : "__integer"
|
||||||
}
|
}
|
||||||
@ -1693,7 +2123,7 @@
|
|||||||
},
|
},
|
||||||
"ChannelType" : {
|
"ChannelType" : {
|
||||||
"type" : "string",
|
"type" : "string",
|
||||||
"enum" : [ "GCM", "APNS", "APNS_SANDBOX", "ADM", "SMS", "EMAIL" ]
|
"enum" : [ "GCM", "APNS", "APNS_SANDBOX", "ADM", "SMS", "EMAIL", "BAIDU" ]
|
||||||
},
|
},
|
||||||
"CreateAppRequest" : {
|
"CreateAppRequest" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
@ -1821,6 +2251,9 @@
|
|||||||
"Data" : {
|
"Data" : {
|
||||||
"shape" : "MapOf__string"
|
"shape" : "MapOf__string"
|
||||||
},
|
},
|
||||||
|
"JsonData" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
"SilentPush" : {
|
"SilentPush" : {
|
||||||
"shape" : "__boolean"
|
"shape" : "__boolean"
|
||||||
},
|
},
|
||||||
@ -1835,6 +2268,27 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DeleteAdmChannelRequest" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ApplicationId" : {
|
||||||
|
"shape" : "__string",
|
||||||
|
"location" : "uri",
|
||||||
|
"locationName" : "application-id"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "ApplicationId" ]
|
||||||
|
},
|
||||||
|
"DeleteAdmChannelResponse" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ADMChannelResponse" : {
|
||||||
|
"shape" : "ADMChannelResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "ADMChannelResponse" ],
|
||||||
|
"payload" : "ADMChannelResponse"
|
||||||
|
},
|
||||||
"DeleteApnsChannelRequest" : {
|
"DeleteApnsChannelRequest" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
@ -1898,6 +2352,27 @@
|
|||||||
"required" : [ "ApplicationResponse" ],
|
"required" : [ "ApplicationResponse" ],
|
||||||
"payload" : "ApplicationResponse"
|
"payload" : "ApplicationResponse"
|
||||||
},
|
},
|
||||||
|
"DeleteBaiduChannelRequest" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ApplicationId" : {
|
||||||
|
"shape" : "__string",
|
||||||
|
"location" : "uri",
|
||||||
|
"locationName" : "application-id"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "ApplicationId" ]
|
||||||
|
},
|
||||||
|
"DeleteBaiduChannelResponse" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"BaiduChannelResponse" : {
|
||||||
|
"shape" : "BaiduChannelResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "BaiduChannelResponse" ],
|
||||||
|
"payload" : "BaiduChannelResponse"
|
||||||
|
},
|
||||||
"DeleteCampaignRequest" : {
|
"DeleteCampaignRequest" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
@ -2036,7 +2511,7 @@
|
|||||||
},
|
},
|
||||||
"DeliveryStatus" : {
|
"DeliveryStatus" : {
|
||||||
"type" : "string",
|
"type" : "string",
|
||||||
"enum" : [ "SUCCESSFUL", "THROTTLED", "TEMPORARY_FAILURE", "PERMANENT_FAILURE" ]
|
"enum" : [ "SUCCESSFUL", "THROTTLED", "TEMPORARY_FAILURE", "PERMANENT_FAILURE", "UNKNOWN_FAILURE", "OPT_OUT", "DUPLICATE" ]
|
||||||
},
|
},
|
||||||
"DimensionType" : {
|
"DimensionType" : {
|
||||||
"type" : "string",
|
"type" : "string",
|
||||||
@ -2045,15 +2520,24 @@
|
|||||||
"DirectMessageConfiguration" : {
|
"DirectMessageConfiguration" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
|
"ADMMessage" : {
|
||||||
|
"shape" : "ADMMessage"
|
||||||
|
},
|
||||||
"APNSMessage" : {
|
"APNSMessage" : {
|
||||||
"shape" : "APNSMessage"
|
"shape" : "APNSMessage"
|
||||||
},
|
},
|
||||||
|
"BaiduMessage" : {
|
||||||
|
"shape" : "BaiduMessage"
|
||||||
|
},
|
||||||
"DefaultMessage" : {
|
"DefaultMessage" : {
|
||||||
"shape" : "DefaultMessage"
|
"shape" : "DefaultMessage"
|
||||||
},
|
},
|
||||||
"DefaultPushNotificationMessage" : {
|
"DefaultPushNotificationMessage" : {
|
||||||
"shape" : "DefaultPushNotificationMessage"
|
"shape" : "DefaultPushNotificationMessage"
|
||||||
},
|
},
|
||||||
|
"EmailMessage" : {
|
||||||
|
"shape" : "EmailMessage"
|
||||||
|
},
|
||||||
"GCMMessage" : {
|
"GCMMessage" : {
|
||||||
"shape" : "GCMMessage"
|
"shape" : "GCMMessage"
|
||||||
},
|
},
|
||||||
@ -2124,6 +2608,29 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"EmailMessage" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"Body" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"FromAddress" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"HtmlBody" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Substitutions" : {
|
||||||
|
"shape" : "MapOfListOf__string"
|
||||||
|
},
|
||||||
|
"TemplateArn" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Title" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"EndpointBatchItem" : {
|
"EndpointBatchItem" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
@ -2225,6 +2732,26 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"EndpointMessageResult" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"Address" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"DeliveryStatus" : {
|
||||||
|
"shape" : "DeliveryStatus"
|
||||||
|
},
|
||||||
|
"StatusCode" : {
|
||||||
|
"shape" : "__integer"
|
||||||
|
},
|
||||||
|
"StatusMessage" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"UpdatedToken" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"EndpointRequest" : {
|
"EndpointRequest" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
@ -2316,6 +2843,26 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"EndpointSendConfiguration" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"BodyOverride" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Context" : {
|
||||||
|
"shape" : "MapOf__string"
|
||||||
|
},
|
||||||
|
"RawContent" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Substitutions" : {
|
||||||
|
"shape" : "MapOfListOf__string"
|
||||||
|
},
|
||||||
|
"TitleOverride" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
"EndpointUser" : {
|
"EndpointUser" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
@ -2443,6 +2990,9 @@
|
|||||||
"ImageUrl" : {
|
"ImageUrl" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
|
"JsonData" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
"RawContent" : {
|
"RawContent" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
@ -2469,6 +3019,27 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"GetAdmChannelRequest" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ApplicationId" : {
|
||||||
|
"shape" : "__string",
|
||||||
|
"location" : "uri",
|
||||||
|
"locationName" : "application-id"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "ApplicationId" ]
|
||||||
|
},
|
||||||
|
"GetAdmChannelResponse" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ADMChannelResponse" : {
|
||||||
|
"shape" : "ADMChannelResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "ADMChannelResponse" ],
|
||||||
|
"payload" : "ADMChannelResponse"
|
||||||
|
},
|
||||||
"GetApnsChannelRequest" : {
|
"GetApnsChannelRequest" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
@ -2578,6 +3149,27 @@
|
|||||||
"required" : [ "ApplicationsResponse" ],
|
"required" : [ "ApplicationsResponse" ],
|
||||||
"payload" : "ApplicationsResponse"
|
"payload" : "ApplicationsResponse"
|
||||||
},
|
},
|
||||||
|
"GetBaiduChannelRequest" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ApplicationId" : {
|
||||||
|
"shape" : "__string",
|
||||||
|
"location" : "uri",
|
||||||
|
"locationName" : "application-id"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "ApplicationId" ]
|
||||||
|
},
|
||||||
|
"GetBaiduChannelResponse" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"BaiduChannelResponse" : {
|
||||||
|
"shape" : "BaiduChannelResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "BaiduChannelResponse" ],
|
||||||
|
"payload" : "BaiduChannelResponse"
|
||||||
|
},
|
||||||
"GetCampaignActivitiesRequest" : {
|
"GetCampaignActivitiesRequest" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
@ -3269,6 +3861,24 @@
|
|||||||
"shape" : "AttributeDimension"
|
"shape" : "AttributeDimension"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"MapOfEndpointMessageResult" : {
|
||||||
|
"type" : "map",
|
||||||
|
"key" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"value" : {
|
||||||
|
"shape" : "EndpointMessageResult"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"MapOfEndpointSendConfiguration" : {
|
||||||
|
"type" : "map",
|
||||||
|
"key" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"value" : {
|
||||||
|
"shape" : "EndpointSendConfiguration"
|
||||||
|
}
|
||||||
|
},
|
||||||
"MapOfListOf__string" : {
|
"MapOfListOf__string" : {
|
||||||
"type" : "map",
|
"type" : "map",
|
||||||
"key" : {
|
"key" : {
|
||||||
@ -3278,6 +3888,15 @@
|
|||||||
"shape" : "ListOf__string"
|
"shape" : "ListOf__string"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"MapOfMapOfEndpointMessageResult" : {
|
||||||
|
"type" : "map",
|
||||||
|
"key" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"value" : {
|
||||||
|
"shape" : "MapOfEndpointMessageResult"
|
||||||
|
}
|
||||||
|
},
|
||||||
"MapOfMessageResult" : {
|
"MapOfMessageResult" : {
|
||||||
"type" : "map",
|
"type" : "map",
|
||||||
"key" : {
|
"key" : {
|
||||||
@ -3389,11 +4008,20 @@
|
|||||||
"Addresses" : {
|
"Addresses" : {
|
||||||
"shape" : "MapOfAddressConfiguration"
|
"shape" : "MapOfAddressConfiguration"
|
||||||
},
|
},
|
||||||
|
"Campaign" : {
|
||||||
|
"shape" : "MapOf__string"
|
||||||
|
},
|
||||||
"Context" : {
|
"Context" : {
|
||||||
"shape" : "MapOf__string"
|
"shape" : "MapOf__string"
|
||||||
},
|
},
|
||||||
|
"Endpoints" : {
|
||||||
|
"shape" : "MapOfEndpointSendConfiguration"
|
||||||
|
},
|
||||||
"MessageConfiguration" : {
|
"MessageConfiguration" : {
|
||||||
"shape" : "DirectMessageConfiguration"
|
"shape" : "DirectMessageConfiguration"
|
||||||
|
},
|
||||||
|
"RequestId" : {
|
||||||
|
"shape" : "__string"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -3403,6 +4031,9 @@
|
|||||||
"ApplicationId" : {
|
"ApplicationId" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
|
"EndpointResult" : {
|
||||||
|
"shape" : "MapOfEndpointMessageResult"
|
||||||
|
},
|
||||||
"RequestId" : {
|
"RequestId" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
@ -3521,6 +4152,9 @@
|
|||||||
},
|
},
|
||||||
"SenderId" : {
|
"SenderId" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"ShortCode" : {
|
||||||
|
"shape" : "__string"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -3756,6 +4390,62 @@
|
|||||||
"required" : [ "MessageResponse" ],
|
"required" : [ "MessageResponse" ],
|
||||||
"payload" : "MessageResponse"
|
"payload" : "MessageResponse"
|
||||||
},
|
},
|
||||||
|
"SendUsersMessageRequest" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"Context" : {
|
||||||
|
"shape" : "MapOf__string"
|
||||||
|
},
|
||||||
|
"MessageConfiguration" : {
|
||||||
|
"shape" : "DirectMessageConfiguration"
|
||||||
|
},
|
||||||
|
"RequestId" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Users" : {
|
||||||
|
"shape" : "MapOfEndpointSendConfiguration"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"SendUsersMessageResponse" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ApplicationId" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"RequestId" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
|
"Result" : {
|
||||||
|
"shape" : "MapOfMapOfEndpointMessageResult"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"SendUsersMessagesRequest" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ApplicationId" : {
|
||||||
|
"shape" : "__string",
|
||||||
|
"location" : "uri",
|
||||||
|
"locationName" : "application-id"
|
||||||
|
},
|
||||||
|
"SendUsersMessageRequest" : {
|
||||||
|
"shape" : "SendUsersMessageRequest"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "ApplicationId", "SendUsersMessageRequest" ],
|
||||||
|
"payload" : "SendUsersMessageRequest"
|
||||||
|
},
|
||||||
|
"SendUsersMessagesResponse" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"SendUsersMessageResponse" : {
|
||||||
|
"shape" : "SendUsersMessageResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "SendUsersMessageResponse" ],
|
||||||
|
"payload" : "SendUsersMessageResponse"
|
||||||
|
},
|
||||||
"SetDimension" : {
|
"SetDimension" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
@ -3808,6 +4498,31 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"UpdateAdmChannelRequest" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ADMChannelRequest" : {
|
||||||
|
"shape" : "ADMChannelRequest"
|
||||||
|
},
|
||||||
|
"ApplicationId" : {
|
||||||
|
"shape" : "__string",
|
||||||
|
"location" : "uri",
|
||||||
|
"locationName" : "application-id"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "ApplicationId", "ADMChannelRequest" ],
|
||||||
|
"payload" : "ADMChannelRequest"
|
||||||
|
},
|
||||||
|
"UpdateAdmChannelResponse" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ADMChannelResponse" : {
|
||||||
|
"shape" : "ADMChannelResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "ADMChannelResponse" ],
|
||||||
|
"payload" : "ADMChannelResponse"
|
||||||
|
},
|
||||||
"UpdateApnsChannelRequest" : {
|
"UpdateApnsChannelRequest" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
@ -3883,6 +4598,31 @@
|
|||||||
"required" : [ "ApplicationSettingsResource" ],
|
"required" : [ "ApplicationSettingsResource" ],
|
||||||
"payload" : "ApplicationSettingsResource"
|
"payload" : "ApplicationSettingsResource"
|
||||||
},
|
},
|
||||||
|
"UpdateBaiduChannelRequest" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"ApplicationId" : {
|
||||||
|
"shape" : "__string",
|
||||||
|
"location" : "uri",
|
||||||
|
"locationName" : "application-id"
|
||||||
|
},
|
||||||
|
"BaiduChannelRequest" : {
|
||||||
|
"shape" : "BaiduChannelRequest"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "ApplicationId", "BaiduChannelRequest" ],
|
||||||
|
"payload" : "BaiduChannelRequest"
|
||||||
|
},
|
||||||
|
"UpdateBaiduChannelResponse" : {
|
||||||
|
"type" : "structure",
|
||||||
|
"members" : {
|
||||||
|
"BaiduChannelResponse" : {
|
||||||
|
"shape" : "BaiduChannelResponse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required" : [ "BaiduChannelResponse" ],
|
||||||
|
"payload" : "BaiduChannelResponse"
|
||||||
|
},
|
||||||
"UpdateCampaignRequest" : {
|
"UpdateCampaignRequest" : {
|
||||||
"type" : "structure",
|
"type" : "structure",
|
||||||
"members" : {
|
"members" : {
|
||||||
@ -4117,6 +4857,9 @@
|
|||||||
"SegmentVersion" : {
|
"SegmentVersion" : {
|
||||||
"shape" : "__integer"
|
"shape" : "__integer"
|
||||||
},
|
},
|
||||||
|
"Trace" : {
|
||||||
|
"shape" : "__boolean"
|
||||||
|
},
|
||||||
"TreatmentDescription" : {
|
"TreatmentDescription" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
@ -4131,6 +4874,9 @@
|
|||||||
"DestinationStreamArn" : {
|
"DestinationStreamArn" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
},
|
},
|
||||||
|
"ExternalId" : {
|
||||||
|
"shape" : "__string"
|
||||||
|
},
|
||||||
"RoleArn" : {
|
"RoleArn" : {
|
||||||
"shape" : "__string"
|
"shape" : "__string"
|
||||||
}
|
}
|
||||||
|
177
vendor/github.com/aws/aws-sdk-go/models/apis/pinpoint/2016-12-01/docs-2.json
generated
vendored
177
vendor/github.com/aws/aws-sdk-go/models/apis/pinpoint/2016-12-01/docs-2.json
generated
vendored
@ -2,24 +2,28 @@
|
|||||||
"version" : "2.0",
|
"version" : "2.0",
|
||||||
"service" : null,
|
"service" : null,
|
||||||
"operations" : {
|
"operations" : {
|
||||||
"CreateApp" : "Used to create an app.",
|
"CreateApp" : "Creates or updates an app.",
|
||||||
"CreateCampaign" : "Creates or updates a campaign.",
|
"CreateCampaign" : "Creates or updates a campaign.",
|
||||||
"CreateImportJob" : "Creates or updates an import job.",
|
"CreateImportJob" : "Creates or updates an import job.",
|
||||||
"CreateSegment" : "Used to create or update a segment.",
|
"CreateSegment" : "Used to create or update a segment.",
|
||||||
|
"DeleteAdmChannel" : "Delete an ADM channel",
|
||||||
"DeleteApnsChannel" : "Deletes the APNs channel for an app.",
|
"DeleteApnsChannel" : "Deletes the APNs channel for an app.",
|
||||||
"DeleteApnsSandboxChannel" : "Delete an APNS sandbox channel",
|
"DeleteApnsSandboxChannel" : "Delete an APNS sandbox channel",
|
||||||
"DeleteApp" : "Deletes an app.",
|
"DeleteApp" : "Deletes an app.",
|
||||||
|
"DeleteBaiduChannel" : "Delete a BAIDU GCM channel",
|
||||||
"DeleteCampaign" : "Deletes a campaign.",
|
"DeleteCampaign" : "Deletes a campaign.",
|
||||||
"DeleteEmailChannel" : "Delete an email channel",
|
"DeleteEmailChannel" : "Delete an email channel",
|
||||||
"DeleteEventStream" : "Deletes the event stream for an app.",
|
"DeleteEventStream" : "Deletes the event stream for an app.",
|
||||||
"DeleteGcmChannel" : "Deletes the GCM channel for an app.",
|
"DeleteGcmChannel" : "Deletes the GCM channel for an app.",
|
||||||
"DeleteSegment" : "Deletes a segment.",
|
"DeleteSegment" : "Deletes a segment.",
|
||||||
"DeleteSmsChannel" : "Delete an SMS channel",
|
"DeleteSmsChannel" : "Delete an SMS channel",
|
||||||
|
"GetAdmChannel" : "Get an ADM channel",
|
||||||
"GetApnsChannel" : "Returns information about the APNs channel for an app.",
|
"GetApnsChannel" : "Returns information about the APNs channel for an app.",
|
||||||
"GetApnsSandboxChannel" : "Get an APNS sandbox channel",
|
"GetApnsSandboxChannel" : "Get an APNS sandbox channel",
|
||||||
"GetApp" : "Returns information about an app.",
|
"GetApp" : "Returns information about an app.",
|
||||||
"GetApplicationSettings" : "Used to request the settings for an app.",
|
"GetApplicationSettings" : "Used to request the settings for an app.",
|
||||||
"GetApps" : "Returns information about your apps.",
|
"GetApps" : "Returns information about your apps.",
|
||||||
|
"GetBaiduChannel" : "Get a BAIDU GCM channel",
|
||||||
"GetCampaign" : "Returns information about a campaign.",
|
"GetCampaign" : "Returns information about a campaign.",
|
||||||
"GetCampaignActivities" : "Returns information about the activity performed by a campaign.",
|
"GetCampaignActivities" : "Returns information about the activity performed by a campaign.",
|
||||||
"GetCampaignVersion" : "Returns information about a specific version of a campaign.",
|
"GetCampaignVersion" : "Returns information about a specific version of a campaign.",
|
||||||
@ -39,9 +43,12 @@
|
|||||||
"GetSmsChannel" : "Get an SMS channel",
|
"GetSmsChannel" : "Get an SMS channel",
|
||||||
"PutEventStream" : "Use to create or update the event stream for an app.",
|
"PutEventStream" : "Use to create or update the event stream for an app.",
|
||||||
"SendMessages" : "Send a batch of messages",
|
"SendMessages" : "Send a batch of messages",
|
||||||
|
"SendUsersMessages" : "Send a batch of messages to users",
|
||||||
|
"UpdateAdmChannel" : "Update an ADM channel",
|
||||||
"UpdateApnsChannel" : "Use to update the APNs channel for an app.",
|
"UpdateApnsChannel" : "Use to update the APNs channel for an app.",
|
||||||
"UpdateApnsSandboxChannel" : "Update an APNS sandbox channel",
|
"UpdateApnsSandboxChannel" : "Update an APNS sandbox channel",
|
||||||
"UpdateApplicationSettings" : "Used to update the settings for an app.",
|
"UpdateApplicationSettings" : "Used to update the settings for an app.",
|
||||||
|
"UpdateBaiduChannel" : "Update a BAIDU GCM channel",
|
||||||
"UpdateCampaign" : "Use to update a campaign.",
|
"UpdateCampaign" : "Use to update a campaign.",
|
||||||
"UpdateEmailChannel" : "Update an email channel",
|
"UpdateEmailChannel" : "Update an email channel",
|
||||||
"UpdateEndpoint" : "Use to update an endpoint.",
|
"UpdateEndpoint" : "Use to update an endpoint.",
|
||||||
@ -51,6 +58,20 @@
|
|||||||
"UpdateSmsChannel" : "Update an SMS channel"
|
"UpdateSmsChannel" : "Update an SMS channel"
|
||||||
},
|
},
|
||||||
"shapes" : {
|
"shapes" : {
|
||||||
|
"ADMChannelRequest" : {
|
||||||
|
"base" : "Amazon Device Messaging channel definition.",
|
||||||
|
"refs" : { }
|
||||||
|
},
|
||||||
|
"ADMChannelResponse" : {
|
||||||
|
"base" : "Amazon Device Messaging channel definition.",
|
||||||
|
"refs" : { }
|
||||||
|
},
|
||||||
|
"ADMMessage" : {
|
||||||
|
"base" : "ADM Message.",
|
||||||
|
"refs" : {
|
||||||
|
"DirectMessageConfiguration$ADMMessage" : "The message to ADM channels. Overrides the default push notification message."
|
||||||
|
}
|
||||||
|
},
|
||||||
"APNSChannelRequest" : {
|
"APNSChannelRequest" : {
|
||||||
"base" : "Apple Push Notification Service channel definition.",
|
"base" : "Apple Push Notification Service channel definition.",
|
||||||
"refs" : { }
|
"refs" : { }
|
||||||
@ -76,7 +97,9 @@
|
|||||||
"Action" : {
|
"Action" : {
|
||||||
"base" : null,
|
"base" : null,
|
||||||
"refs" : {
|
"refs" : {
|
||||||
|
"ADMMessage$Action" : "The action that occurs if the user taps a push notification delivered by the campaign: OPEN_APP - Your app launches, or it becomes the foreground app if it has been sent to the background. This is the default action. DEEP_LINK - Uses deep linking features in iOS and Android to open your app and display a designated user interface within the app. URL - The default mobile browser on the user's device launches and opens a web page at the URL you specify. Possible values include: OPEN_APP | DEEP_LINK | URL",
|
||||||
"APNSMessage$Action" : "The action that occurs if the user taps a push notification delivered by the campaign: OPEN_APP - Your app launches, or it becomes the foreground app if it has been sent to the background. This is the default action. DEEP_LINK - Uses deep linking features in iOS and Android to open your app and display a designated user interface within the app. URL - The default mobile browser on the user's device launches and opens a web page at the URL you specify. Possible values include: OPEN_APP | DEEP_LINK | URL",
|
"APNSMessage$Action" : "The action that occurs if the user taps a push notification delivered by the campaign: OPEN_APP - Your app launches, or it becomes the foreground app if it has been sent to the background. This is the default action. DEEP_LINK - Uses deep linking features in iOS and Android to open your app and display a designated user interface within the app. URL - The default mobile browser on the user's device launches and opens a web page at the URL you specify. Possible values include: OPEN_APP | DEEP_LINK | URL",
|
||||||
|
"BaiduMessage$Action" : "The action that occurs if the user taps a push notification delivered by the campaign: OPEN_APP - Your app launches, or it becomes the foreground app if it has been sent to the background. This is the default action. DEEP_LINK - Uses deep linking features in iOS and Android to open your app and display a designated user interface within the app. URL - The default mobile browser on the user's device launches and opens a web page at the URL you specify. Possible values include: OPEN_APP | DEEP_LINK | URL",
|
||||||
"DefaultPushNotificationMessage$Action" : "The action that occurs if the user taps a push notification delivered by the campaign: OPEN_APP - Your app launches, or it becomes the foreground app if it has been sent to the background. This is the default action. DEEP_LINK - Uses deep linking features in iOS and Android to open your app and display a designated user interface within the app. URL - The default mobile browser on the user's device launches and opens a web page at the URL you specify. Possible values include: OPEN_APP | DEEP_LINK | URL",
|
"DefaultPushNotificationMessage$Action" : "The action that occurs if the user taps a push notification delivered by the campaign: OPEN_APP - Your app launches, or it becomes the foreground app if it has been sent to the background. This is the default action. DEEP_LINK - Uses deep linking features in iOS and Android to open your app and display a designated user interface within the app. URL - The default mobile browser on the user's device launches and opens a web page at the URL you specify. Possible values include: OPEN_APP | DEEP_LINK | URL",
|
||||||
"GCMMessage$Action" : "The action that occurs if the user taps a push notification delivered by the campaign: OPEN_APP - Your app launches, or it becomes the foreground app if it has been sent to the background. This is the default action. DEEP_LINK - Uses deep linking features in iOS and Android to open your app and display a designated user interface within the app. URL - The default mobile browser on the user's device launches and opens a web page at the URL you specify. Possible values include: OPEN_APP | DEEP_LINK | URL",
|
"GCMMessage$Action" : "The action that occurs if the user taps a push notification delivered by the campaign: OPEN_APP - Your app launches, or it becomes the foreground app if it has been sent to the background. This is the default action. DEEP_LINK - Uses deep linking features in iOS and Android to open your app and display a designated user interface within the app. URL - The default mobile browser on the user's device launches and opens a web page at the URL you specify. Possible values include: OPEN_APP | DEEP_LINK | URL",
|
||||||
"Message$Action" : "The action that occurs if the user taps a push notification delivered by the campaign:\nOPEN_APP - Your app launches, or it becomes the foreground app if it has been sent to the background. This is the default action.\n\nDEEP_LINK - Uses deep linking features in iOS and Android to open your app and display a designated user interface within the app.\n\nURL - The default mobile browser on the user's device launches and opens a web page at the URL you specify."
|
"Message$Action" : "The action that occurs if the user taps a push notification delivered by the campaign:\nOPEN_APP - Your app launches, or it becomes the foreground app if it has been sent to the background. This is the default action.\n\nDEEP_LINK - Uses deep linking features in iOS and Android to open your app and display a designated user interface within the app.\n\nURL - The default mobile browser on the user's device launches and opens a web page at the URL you specify."
|
||||||
@ -129,6 +152,20 @@
|
|||||||
"base" : null,
|
"base" : null,
|
||||||
"refs" : { }
|
"refs" : { }
|
||||||
},
|
},
|
||||||
|
"BaiduChannelRequest" : {
|
||||||
|
"base" : "Baidu Cloud Push credentials",
|
||||||
|
"refs" : { }
|
||||||
|
},
|
||||||
|
"BaiduChannelResponse" : {
|
||||||
|
"base" : "Baidu Cloud Messaging channel definition",
|
||||||
|
"refs" : { }
|
||||||
|
},
|
||||||
|
"BaiduMessage" : {
|
||||||
|
"base" : "Baidu Message.",
|
||||||
|
"refs" : {
|
||||||
|
"DirectMessageConfiguration$BaiduMessage" : "The message to Baidu GCM channels. Overrides the default push notification message."
|
||||||
|
}
|
||||||
|
},
|
||||||
"CampaignEmailMessage" : {
|
"CampaignEmailMessage" : {
|
||||||
"base" : "The email message configuration.",
|
"base" : "The email message configuration.",
|
||||||
"refs" : {
|
"refs" : {
|
||||||
@ -202,6 +239,7 @@
|
|||||||
"DeliveryStatus" : {
|
"DeliveryStatus" : {
|
||||||
"base" : null,
|
"base" : null,
|
||||||
"refs" : {
|
"refs" : {
|
||||||
|
"EndpointMessageResult$DeliveryStatus" : "Delivery status of message.",
|
||||||
"MessageResult$DeliveryStatus" : "Delivery status of message."
|
"MessageResult$DeliveryStatus" : "Delivery status of message."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -214,7 +252,8 @@
|
|||||||
"DirectMessageConfiguration" : {
|
"DirectMessageConfiguration" : {
|
||||||
"base" : "The message configuration.",
|
"base" : "The message configuration.",
|
||||||
"refs" : {
|
"refs" : {
|
||||||
"MessageRequest$MessageConfiguration" : "Message configuration."
|
"MessageRequest$MessageConfiguration" : "Message configuration.",
|
||||||
|
"SendUsersMessageRequest$MessageConfiguration" : "Message configuration."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Duration" : {
|
"Duration" : {
|
||||||
@ -231,6 +270,12 @@
|
|||||||
"base" : "Email Channel Response.",
|
"base" : "Email Channel Response.",
|
||||||
"refs" : { }
|
"refs" : { }
|
||||||
},
|
},
|
||||||
|
"EmailMessage" : {
|
||||||
|
"base" : "Email Message.",
|
||||||
|
"refs" : {
|
||||||
|
"DirectMessageConfiguration$EmailMessage" : "The message to Email channels. Overrides the default message."
|
||||||
|
}
|
||||||
|
},
|
||||||
"EndpointBatchItem" : {
|
"EndpointBatchItem" : {
|
||||||
"base" : "Endpoint update request",
|
"base" : "Endpoint update request",
|
||||||
"refs" : {
|
"refs" : {
|
||||||
@ -257,6 +302,13 @@
|
|||||||
"EndpointResponse$Location" : "The endpoint location attributes."
|
"EndpointResponse$Location" : "The endpoint location attributes."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"EndpointMessageResult" : {
|
||||||
|
"base" : "The result from sending a message to an endpoint.",
|
||||||
|
"refs" : {
|
||||||
|
"MessageResponse$EndpointResult" : "A map containing a multi part response for each address, with the endpointId as the key and the result as the value.",
|
||||||
|
"SendUsersMessageResponse$Result" : "A map containing of UserId to Map of EndpointId to Endpoint Message Result."
|
||||||
|
}
|
||||||
|
},
|
||||||
"EndpointRequest" : {
|
"EndpointRequest" : {
|
||||||
"base" : "Endpoint update request",
|
"base" : "Endpoint update request",
|
||||||
"refs" : { }
|
"refs" : { }
|
||||||
@ -265,6 +317,13 @@
|
|||||||
"base" : "Endpoint response",
|
"base" : "Endpoint response",
|
||||||
"refs" : { }
|
"refs" : { }
|
||||||
},
|
},
|
||||||
|
"EndpointSendConfiguration" : {
|
||||||
|
"base" : "Endpoint send configuration.",
|
||||||
|
"refs" : {
|
||||||
|
"MessageRequest$Endpoints" : "A map of destination addresses, with the address as the key(Email address, phone number or push token) and the Address Configuration as the value.",
|
||||||
|
"SendUsersMessageRequest$Users" : "A map of destination endpoints, with the EndpointId as the key Endpoint Message Configuration as the value."
|
||||||
|
}
|
||||||
|
},
|
||||||
"EndpointUser" : {
|
"EndpointUser" : {
|
||||||
"base" : "Endpoint user specific custom userAttributes",
|
"base" : "Endpoint user specific custom userAttributes",
|
||||||
"refs" : {
|
"refs" : {
|
||||||
@ -383,10 +442,22 @@
|
|||||||
"base" : null,
|
"base" : null,
|
||||||
"refs" : { }
|
"refs" : { }
|
||||||
},
|
},
|
||||||
|
"MapOfEndpointMessageResult" : {
|
||||||
|
"base" : null,
|
||||||
|
"refs" : { }
|
||||||
|
},
|
||||||
|
"MapOfEndpointSendConfiguration" : {
|
||||||
|
"base" : null,
|
||||||
|
"refs" : { }
|
||||||
|
},
|
||||||
"MapOfListOf__string" : {
|
"MapOfListOf__string" : {
|
||||||
"base" : null,
|
"base" : null,
|
||||||
"refs" : { }
|
"refs" : { }
|
||||||
},
|
},
|
||||||
|
"MapOfMapOfEndpointMessageResult" : {
|
||||||
|
"base" : null,
|
||||||
|
"refs" : { }
|
||||||
|
},
|
||||||
"MapOfMessageResult" : {
|
"MapOfMessageResult" : {
|
||||||
"base" : null,
|
"base" : null,
|
||||||
"refs" : { }
|
"refs" : { }
|
||||||
@ -543,6 +614,14 @@
|
|||||||
"base" : "Segments in your account.",
|
"base" : "Segments in your account.",
|
||||||
"refs" : { }
|
"refs" : { }
|
||||||
},
|
},
|
||||||
|
"SendUsersMessageRequest" : {
|
||||||
|
"base" : "Send message request.",
|
||||||
|
"refs" : { }
|
||||||
|
},
|
||||||
|
"SendUsersMessageResponse" : {
|
||||||
|
"base" : "User send message response.",
|
||||||
|
"refs" : { }
|
||||||
|
},
|
||||||
"SetDimension" : {
|
"SetDimension" : {
|
||||||
"base" : "Dimension specification of a segment.",
|
"base" : "Dimension specification of a segment.",
|
||||||
"refs" : {
|
"refs" : {
|
||||||
@ -590,6 +669,10 @@
|
|||||||
"__boolean" : {
|
"__boolean" : {
|
||||||
"base" : null,
|
"base" : null,
|
||||||
"refs" : {
|
"refs" : {
|
||||||
|
"ADMChannelRequest$Enabled" : "If the channel is enabled for sending messages.",
|
||||||
|
"ADMChannelResponse$Enabled" : "If the channel is enabled for sending messages.",
|
||||||
|
"ADMChannelResponse$IsArchived" : "Is this channel archived",
|
||||||
|
"ADMMessage$SilentPush" : "Indicates if the message should display on the users device. Silent pushes can be used for Remote Configuration and Phone Home use cases.",
|
||||||
"APNSChannelRequest$Enabled" : "If the channel is enabled for sending messages.",
|
"APNSChannelRequest$Enabled" : "If the channel is enabled for sending messages.",
|
||||||
"APNSChannelResponse$Enabled" : "If the channel is enabled for sending messages.",
|
"APNSChannelResponse$Enabled" : "If the channel is enabled for sending messages.",
|
||||||
"APNSChannelResponse$IsArchived" : "Is this channel archived",
|
"APNSChannelResponse$IsArchived" : "Is this channel archived",
|
||||||
@ -597,6 +680,10 @@
|
|||||||
"APNSSandboxChannelRequest$Enabled" : "If the channel is enabled for sending messages.",
|
"APNSSandboxChannelRequest$Enabled" : "If the channel is enabled for sending messages.",
|
||||||
"APNSSandboxChannelResponse$Enabled" : "If the channel is enabled for sending messages.",
|
"APNSSandboxChannelResponse$Enabled" : "If the channel is enabled for sending messages.",
|
||||||
"APNSSandboxChannelResponse$IsArchived" : "Is this channel archived",
|
"APNSSandboxChannelResponse$IsArchived" : "Is this channel archived",
|
||||||
|
"BaiduChannelRequest$Enabled" : "If the channel is enabled for sending messages.",
|
||||||
|
"BaiduChannelResponse$Enabled" : "If the channel is enabled for sending messages.",
|
||||||
|
"BaiduChannelResponse$IsArchived" : "Is this channel archived",
|
||||||
|
"BaiduMessage$SilentPush" : "Indicates if the message should display on the users device. Silent pushes can be used for Remote Configuration and Phone Home use cases.",
|
||||||
"CampaignResponse$IsPaused" : "Indicates whether the campaign is paused. A paused campaign does not send messages unless you resume it by setting IsPaused to false.",
|
"CampaignResponse$IsPaused" : "Indicates whether the campaign is paused. A paused campaign does not send messages unless you resume it by setting IsPaused to false.",
|
||||||
"DefaultPushNotificationMessage$SilentPush" : "Indicates if the message should display on the users device. Silent pushes can be used for Remote Configuration and Phone Home use cases.",
|
"DefaultPushNotificationMessage$SilentPush" : "Indicates if the message should display on the users device. Silent pushes can be used for Remote Configuration and Phone Home use cases.",
|
||||||
"EmailChannelRequest$Enabled" : "If the channel is enabled for sending messages.",
|
"EmailChannelRequest$Enabled" : "If the channel is enabled for sending messages.",
|
||||||
@ -615,7 +702,8 @@
|
|||||||
"SMSChannelResponse$Enabled" : "If the channel is enabled for sending messages.",
|
"SMSChannelResponse$Enabled" : "If the channel is enabled for sending messages.",
|
||||||
"SMSChannelResponse$IsArchived" : "Is this channel archived",
|
"SMSChannelResponse$IsArchived" : "Is this channel archived",
|
||||||
"Schedule$IsLocalTime" : "Indicates whether the campaign schedule takes effect according to each user's local time.",
|
"Schedule$IsLocalTime" : "Indicates whether the campaign schedule takes effect according to each user's local time.",
|
||||||
"WriteCampaignRequest$IsPaused" : "Indicates whether the campaign is paused. A paused campaign does not send messages unless you resume it by setting IsPaused to false."
|
"WriteCampaignRequest$IsPaused" : "Indicates whether the campaign is paused. A paused campaign does not send messages unless you resume it by setting IsPaused to false.",
|
||||||
|
"WriteCampaignRequest$Trace" : "Whether or not to enable trace logging for the campaign. Undocumented"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"__double" : {
|
"__double" : {
|
||||||
@ -628,6 +716,7 @@
|
|||||||
"__integer" : {
|
"__integer" : {
|
||||||
"base" : null,
|
"base" : null,
|
||||||
"refs" : {
|
"refs" : {
|
||||||
|
"ADMChannelResponse$Version" : "Version of channel",
|
||||||
"APNSChannelResponse$Version" : "Version of channel",
|
"APNSChannelResponse$Version" : "Version of channel",
|
||||||
"APNSMessage$Badge" : "Include this key when you want the system to modify the badge of your app icon. If this key is not included in the dictionary, the badge is not changed. To remove the badge, set the value of this key to 0.",
|
"APNSMessage$Badge" : "Include this key when you want the system to modify the badge of your app icon. If this key is not included in the dictionary, the badge is not changed. To remove the badge, set the value of this key to 0.",
|
||||||
"APNSSandboxChannelResponse$Version" : "Version of channel",
|
"APNSSandboxChannelResponse$Version" : "Version of channel",
|
||||||
@ -635,12 +724,16 @@
|
|||||||
"ActivityResponse$TimezonesCompletedCount" : "The total number of timezones completed.",
|
"ActivityResponse$TimezonesCompletedCount" : "The total number of timezones completed.",
|
||||||
"ActivityResponse$TimezonesTotalCount" : "The total number of unique timezones present in the segment.",
|
"ActivityResponse$TimezonesTotalCount" : "The total number of unique timezones present in the segment.",
|
||||||
"ActivityResponse$TotalEndpointCount" : "The total number of endpoints to which the campaign attempts to deliver messages.",
|
"ActivityResponse$TotalEndpointCount" : "The total number of endpoints to which the campaign attempts to deliver messages.",
|
||||||
|
"BaiduChannelResponse$Version" : "Version of channel",
|
||||||
"CampaignLimits$Daily" : "The maximum number of messages that the campaign can send daily.",
|
"CampaignLimits$Daily" : "The maximum number of messages that the campaign can send daily.",
|
||||||
|
"CampaignLimits$MaximumDuration" : "The maximum duration of a campaign from the scheduled start. Must be a minimum of 60 seconds.",
|
||||||
|
"CampaignLimits$MessagesPerSecond" : "The maximum number of messages per second that the campaign will send. This is a best effort maximum cap and can go as high as 20000 and as low as 50",
|
||||||
"CampaignLimits$Total" : "The maximum total number of messages that the campaign can send.",
|
"CampaignLimits$Total" : "The maximum total number of messages that the campaign can send.",
|
||||||
"CampaignResponse$HoldoutPercent" : "The allocated percentage of end users who will not receive messages from this campaign.",
|
"CampaignResponse$HoldoutPercent" : "The allocated percentage of end users who will not receive messages from this campaign.",
|
||||||
"CampaignResponse$SegmentVersion" : "The version of the segment to which the campaign sends messages.",
|
"CampaignResponse$SegmentVersion" : "The version of the segment to which the campaign sends messages.",
|
||||||
"CampaignResponse$Version" : "The campaign version number.",
|
"CampaignResponse$Version" : "The campaign version number.",
|
||||||
"EmailChannelResponse$Version" : "Version of channel",
|
"EmailChannelResponse$Version" : "Version of channel",
|
||||||
|
"EndpointMessageResult$StatusCode" : "Downstream service status code.",
|
||||||
"GCMChannelResponse$Version" : "Version of channel",
|
"GCMChannelResponse$Version" : "Version of channel",
|
||||||
"ImportJobResponse$CompletedPieces" : "The number of pieces that have successfully imported as of the time of the request.",
|
"ImportJobResponse$CompletedPieces" : "The number of pieces that have successfully imported as of the time of the request.",
|
||||||
"ImportJobResponse$FailedPieces" : "The number of pieces that have failed to import as of the time of the request.",
|
"ImportJobResponse$FailedPieces" : "The number of pieces that have failed to import as of the time of the request.",
|
||||||
@ -660,8 +753,34 @@
|
|||||||
"__string" : {
|
"__string" : {
|
||||||
"base" : null,
|
"base" : null,
|
||||||
"refs" : {
|
"refs" : {
|
||||||
|
"ADMChannelRequest$ClientId" : "Client ID as gotten from Amazon",
|
||||||
|
"ADMChannelRequest$ClientSecret" : "Client secret as gotten from Amazon",
|
||||||
|
"ADMChannelResponse$ApplicationId" : "Application id",
|
||||||
|
"ADMChannelResponse$CreationDate" : "When was this segment created",
|
||||||
|
"ADMChannelResponse$Id" : "Channel ID. Not used, only for backwards compatibility.",
|
||||||
|
"ADMChannelResponse$LastModifiedBy" : "Who last updated this entry",
|
||||||
|
"ADMChannelResponse$LastModifiedDate" : "Last date this was updated",
|
||||||
|
"ADMChannelResponse$Platform" : "Platform type. Will be \"ADM\"",
|
||||||
|
"ADMMessage$Body" : "The message body of the notification, the email body or the text message.",
|
||||||
|
"ADMMessage$ConsolidationKey" : "Optional. Arbitrary string used to indicate multiple messages are logically the same and that ADM is allowed to drop previously enqueued messages in favor of this one.",
|
||||||
|
"ADMMessage$ExpiresAfter" : "Optional. Number of seconds ADM should retain the message if the device is offline",
|
||||||
|
"ADMMessage$IconReference" : "The icon image name of the asset saved in your application.",
|
||||||
|
"ADMMessage$ImageIconUrl" : "The URL that points to an image used as the large icon to the notification content view.",
|
||||||
|
"ADMMessage$ImageUrl" : "The URL that points to an image used in the push notification.",
|
||||||
|
"ADMMessage$JsonData" : "The data payload used for a silent push. This payload is added to the notifications' data.pinpoint.jsonBody' object",
|
||||||
|
"ADMMessage$MD5" : "Optional. Base-64-encoded MD5 checksum of the data parameter. Used to verify data integrity",
|
||||||
|
"ADMMessage$RawContent" : "The Raw JSON formatted string to be used as the payload. This value overrides the message.",
|
||||||
|
"ADMMessage$SmallImageIconUrl" : "The URL that points to an image used as the small icon for the notification which will be used to represent the notification in the status bar and content view",
|
||||||
|
"ADMMessage$Sound" : "Indicates a sound to play when the device receives the notification. Supports default, or the filename of a sound resource bundled in the app. Android sound files must reside in /res/raw/",
|
||||||
|
"ADMMessage$Title" : "The message title that displays above the message on the user's device.",
|
||||||
|
"ADMMessage$Url" : "The URL to open in the user's mobile browser. Used if the value for Action is URL.",
|
||||||
|
"APNSChannelRequest$BundleId" : "The bundle id used for APNs Tokens.",
|
||||||
"APNSChannelRequest$Certificate" : "The distribution certificate from Apple.",
|
"APNSChannelRequest$Certificate" : "The distribution certificate from Apple.",
|
||||||
|
"APNSChannelRequest$DefaultAuthenticationMethod" : "The default authentication method used for APNs.",
|
||||||
"APNSChannelRequest$PrivateKey" : "The certificate private key.",
|
"APNSChannelRequest$PrivateKey" : "The certificate private key.",
|
||||||
|
"APNSChannelRequest$TeamId" : "The team id used for APNs Tokens.",
|
||||||
|
"APNSChannelRequest$TokenKey" : "The token key used for APNs Tokens.",
|
||||||
|
"APNSChannelRequest$TokenKeyId" : "The token key used for APNs Tokens.",
|
||||||
"APNSChannelResponse$ApplicationId" : "The ID of the application to which the channel applies.",
|
"APNSChannelResponse$ApplicationId" : "The ID of the application to which the channel applies.",
|
||||||
"APNSChannelResponse$CreationDate" : "When was this segment created",
|
"APNSChannelResponse$CreationDate" : "When was this segment created",
|
||||||
"APNSChannelResponse$Id" : "Channel ID. Not used. Present only for backwards compatibility.",
|
"APNSChannelResponse$Id" : "Channel ID. Not used. Present only for backwards compatibility.",
|
||||||
@ -670,14 +789,21 @@
|
|||||||
"APNSChannelResponse$Platform" : "The platform type. Will be APNS.",
|
"APNSChannelResponse$Platform" : "The platform type. Will be APNS.",
|
||||||
"APNSMessage$Body" : "The message body of the notification, the email body or the text message.",
|
"APNSMessage$Body" : "The message body of the notification, the email body or the text message.",
|
||||||
"APNSMessage$Category" : "Provide this key with a string value that represents the notification's type. This value corresponds to the value in the identifier property of one of your app's registered categories.",
|
"APNSMessage$Category" : "Provide this key with a string value that represents the notification's type. This value corresponds to the value in the identifier property of one of your app's registered categories.",
|
||||||
|
"APNSMessage$JsonData" : "The data payload used for a silent push. This payload is added to the notifications' data.pinpoint.jsonBody' object",
|
||||||
"APNSMessage$MediaUrl" : "The URL that points to a video used in the push notification.",
|
"APNSMessage$MediaUrl" : "The URL that points to a video used in the push notification.",
|
||||||
|
"APNSMessage$PreferredAuthenticationMethod" : "The preferred authentication method, either \"CERTIFICATE\" or \"TOKEN\"",
|
||||||
"APNSMessage$RawContent" : "The Raw JSON formatted string to be used as the payload. This value overrides the message.",
|
"APNSMessage$RawContent" : "The Raw JSON formatted string to be used as the payload. This value overrides the message.",
|
||||||
"APNSMessage$Sound" : "Include this key when you want the system to play a sound. The value of this key is the name of a sound file in your app's main bundle or in the Library/Sounds folder of your app's data container. If the sound file cannot be found, or if you specify defaultfor the value, the system plays the default alert sound.",
|
"APNSMessage$Sound" : "Include this key when you want the system to play a sound. The value of this key is the name of a sound file in your app's main bundle or in the Library/Sounds folder of your app's data container. If the sound file cannot be found, or if you specify defaultfor the value, the system plays the default alert sound.",
|
||||||
"APNSMessage$ThreadId" : "Provide this key with a string value that represents the app-specific identifier for grouping notifications. If you provide a Notification Content app extension, you can use this value to group your notifications together.",
|
"APNSMessage$ThreadId" : "Provide this key with a string value that represents the app-specific identifier for grouping notifications. If you provide a Notification Content app extension, you can use this value to group your notifications together.",
|
||||||
"APNSMessage$Title" : "The message title that displays above the message on the user's device.",
|
"APNSMessage$Title" : "The message title that displays above the message on the user's device.",
|
||||||
"APNSMessage$Url" : "The URL to open in the user's mobile browser. Used if the value for Action is URL.",
|
"APNSMessage$Url" : "The URL to open in the user's mobile browser. Used if the value for Action is URL.",
|
||||||
|
"APNSSandboxChannelRequest$BundleId" : "The bundle id used for APNs Tokens.",
|
||||||
"APNSSandboxChannelRequest$Certificate" : "The distribution certificate from Apple.",
|
"APNSSandboxChannelRequest$Certificate" : "The distribution certificate from Apple.",
|
||||||
|
"APNSSandboxChannelRequest$DefaultAuthenticationMethod" : "The default authentication method used for APNs.",
|
||||||
"APNSSandboxChannelRequest$PrivateKey" : "The certificate private key.",
|
"APNSSandboxChannelRequest$PrivateKey" : "The certificate private key.",
|
||||||
|
"APNSSandboxChannelRequest$TeamId" : "The team id used for APNs Tokens.",
|
||||||
|
"APNSSandboxChannelRequest$TokenKey" : "The token key used for APNs Tokens.",
|
||||||
|
"APNSSandboxChannelRequest$TokenKeyId" : "The token key used for APNs Tokens.",
|
||||||
"APNSSandboxChannelResponse$ApplicationId" : "Application id",
|
"APNSSandboxChannelResponse$ApplicationId" : "Application id",
|
||||||
"APNSSandboxChannelResponse$CreationDate" : "When was this segment created",
|
"APNSSandboxChannelResponse$CreationDate" : "When was this segment created",
|
||||||
"APNSSandboxChannelResponse$Id" : "Channel ID. Not used, only for backwards compatibility.",
|
"APNSSandboxChannelResponse$Id" : "Channel ID. Not used, only for backwards compatibility.",
|
||||||
@ -701,6 +827,25 @@
|
|||||||
"ApplicationSettingsResource$ApplicationId" : "The unique ID for the application.",
|
"ApplicationSettingsResource$ApplicationId" : "The unique ID for the application.",
|
||||||
"ApplicationSettingsResource$LastModifiedDate" : "The date that the settings were last updated in ISO 8601 format.",
|
"ApplicationSettingsResource$LastModifiedDate" : "The date that the settings were last updated in ISO 8601 format.",
|
||||||
"ApplicationsResponse$NextToken" : "The string that you use in a subsequent request to get the next page of results in a paginated response.",
|
"ApplicationsResponse$NextToken" : "The string that you use in a subsequent request to get the next page of results in a paginated response.",
|
||||||
|
"BaiduChannelRequest$ApiKey" : "Platform credential API key from Baidu.",
|
||||||
|
"BaiduChannelRequest$SecretKey" : "Platform credential Secret key from Baidu.",
|
||||||
|
"BaiduChannelResponse$ApplicationId" : "Application id",
|
||||||
|
"BaiduChannelResponse$CreationDate" : "When was this segment created",
|
||||||
|
"BaiduChannelResponse$Credential" : "The Baidu API key from Baidu.",
|
||||||
|
"BaiduChannelResponse$Id" : "Channel ID. Not used, only for backwards compatibility.",
|
||||||
|
"BaiduChannelResponse$LastModifiedBy" : "Who made the last change",
|
||||||
|
"BaiduChannelResponse$LastModifiedDate" : "Last date this was updated",
|
||||||
|
"BaiduChannelResponse$Platform" : "The platform type. Will be BAIDU",
|
||||||
|
"BaiduMessage$Body" : "The message body of the notification, the email body or the text message.",
|
||||||
|
"BaiduMessage$IconReference" : "The icon image name of the asset saved in your application.",
|
||||||
|
"BaiduMessage$ImageIconUrl" : "The URL that points to an image used as the large icon to the notification content view.",
|
||||||
|
"BaiduMessage$ImageUrl" : "The URL that points to an image used in the push notification.",
|
||||||
|
"BaiduMessage$JsonData" : "The data payload used for a silent push. This payload is added to the notifications' data.pinpoint.jsonBody' object",
|
||||||
|
"BaiduMessage$RawContent" : "The Raw JSON formatted string to be used as the payload. This value overrides the message.",
|
||||||
|
"BaiduMessage$SmallImageIconUrl" : "The URL that points to an image used as the small icon for the notification which will be used to represent the notification in the status bar and content view",
|
||||||
|
"BaiduMessage$Sound" : "Indicates a sound to play when the device receives the notification. Supports default, or the filename of a sound resource bundled in the app. Android sound files must reside in /res/raw/",
|
||||||
|
"BaiduMessage$Title" : "The message title that displays above the message on the user's device.",
|
||||||
|
"BaiduMessage$Url" : "The URL to open in the user's mobile browser. Used if the value for Action is URL.",
|
||||||
"CampaignEmailMessage$Body" : "The email text body.",
|
"CampaignEmailMessage$Body" : "The email text body.",
|
||||||
"CampaignEmailMessage$FromAddress" : "The email address used to send the email from. Defaults to use FromAddress specified in the Email Channel.",
|
"CampaignEmailMessage$FromAddress" : "The email address used to send the email from. Defaults to use FromAddress specified in the Email Channel.",
|
||||||
"CampaignEmailMessage$HtmlBody" : "The email html body.",
|
"CampaignEmailMessage$HtmlBody" : "The email html body.",
|
||||||
@ -720,6 +865,7 @@
|
|||||||
"CreateApplicationRequest$Name" : "The display name of the application. Used in the Amazon Pinpoint console.",
|
"CreateApplicationRequest$Name" : "The display name of the application. Used in the Amazon Pinpoint console.",
|
||||||
"DefaultMessage$Body" : "The message body of the notification, the email body or the text message.",
|
"DefaultMessage$Body" : "The message body of the notification, the email body or the text message.",
|
||||||
"DefaultPushNotificationMessage$Body" : "The message body of the notification, the email body or the text message.",
|
"DefaultPushNotificationMessage$Body" : "The message body of the notification, the email body or the text message.",
|
||||||
|
"DefaultPushNotificationMessage$JsonData" : "The data payload used for a silent push. This payload is added to the notifications' data.pinpoint.jsonBody' object",
|
||||||
"DefaultPushNotificationMessage$Title" : "The message title that displays above the message on the user's device.",
|
"DefaultPushNotificationMessage$Title" : "The message title that displays above the message on the user's device.",
|
||||||
"DefaultPushNotificationMessage$Url" : "The URL to open in the user's mobile browser. Used if the value for Action is URL.",
|
"DefaultPushNotificationMessage$Url" : "The URL to open in the user's mobile browser. Used if the value for Action is URL.",
|
||||||
"EmailChannelRequest$FromAddress" : "The email address used to send emails from.",
|
"EmailChannelRequest$FromAddress" : "The email address used to send emails from.",
|
||||||
@ -734,6 +880,11 @@
|
|||||||
"EmailChannelResponse$LastModifiedDate" : "Last date this was updated",
|
"EmailChannelResponse$LastModifiedDate" : "Last date this was updated",
|
||||||
"EmailChannelResponse$Platform" : "Platform type. Will be \"EMAIL\"",
|
"EmailChannelResponse$Platform" : "Platform type. Will be \"EMAIL\"",
|
||||||
"EmailChannelResponse$RoleArn" : "The ARN of an IAM Role used to submit events to Mobile Analytics' event ingestion service",
|
"EmailChannelResponse$RoleArn" : "The ARN of an IAM Role used to submit events to Mobile Analytics' event ingestion service",
|
||||||
|
"EmailMessage$Body" : "The message body of the notification, the email body or the text message.",
|
||||||
|
"EmailMessage$FromAddress" : "The email address used to send the email from. Defaults to use FromAddress specified in the Email Channel.",
|
||||||
|
"EmailMessage$HtmlBody" : "The HTML part of the email.",
|
||||||
|
"EmailMessage$TemplateArn" : "The ARN of the template to use for the email.",
|
||||||
|
"EmailMessage$Title" : "The subject of the email.",
|
||||||
"EndpointBatchItem$Address" : "The address or token of the endpoint as provided by your push provider (e.g. DeviceToken or RegistrationId).",
|
"EndpointBatchItem$Address" : "The address or token of the endpoint as provided by your push provider (e.g. DeviceToken or RegistrationId).",
|
||||||
"EndpointBatchItem$EffectiveDate" : "The last time the endpoint was updated. Provided in ISO 8601 format.",
|
"EndpointBatchItem$EffectiveDate" : "The last time the endpoint was updated. Provided in ISO 8601 format.",
|
||||||
"EndpointBatchItem$EndpointStatus" : "The endpoint status. Can be either ACTIVE or INACTIVE. Will be set to INACTIVE if a delivery fails. Will be set to ACTIVE if the address is updated.",
|
"EndpointBatchItem$EndpointStatus" : "The endpoint status. Can be either ACTIVE or INACTIVE. Will be set to INACTIVE if a delivery fails. Will be set to ACTIVE if the address is updated.",
|
||||||
@ -752,6 +903,9 @@
|
|||||||
"EndpointLocation$Country" : "Country according to ISO 3166-1 Alpha-2 codes. For example, US.",
|
"EndpointLocation$Country" : "Country according to ISO 3166-1 Alpha-2 codes. For example, US.",
|
||||||
"EndpointLocation$PostalCode" : "The postal code or zip code of the endpoint.",
|
"EndpointLocation$PostalCode" : "The postal code or zip code of the endpoint.",
|
||||||
"EndpointLocation$Region" : "The region of the endpoint location. For example, corresponds to a state in US.",
|
"EndpointLocation$Region" : "The region of the endpoint location. For example, corresponds to a state in US.",
|
||||||
|
"EndpointMessageResult$Address" : "Address that endpoint message was delivered to.",
|
||||||
|
"EndpointMessageResult$StatusMessage" : "Status message for message delivery.",
|
||||||
|
"EndpointMessageResult$UpdatedToken" : "If token was updated as part of delivery. (This is GCM Specific)",
|
||||||
"EndpointRequest$Address" : "The address or token of the endpoint as provided by your push provider (e.g. DeviceToken or RegistrationId).",
|
"EndpointRequest$Address" : "The address or token of the endpoint as provided by your push provider (e.g. DeviceToken or RegistrationId).",
|
||||||
"EndpointRequest$EffectiveDate" : "The last time the endpoint was updated. Provided in ISO 8601 format.",
|
"EndpointRequest$EffectiveDate" : "The last time the endpoint was updated. Provided in ISO 8601 format.",
|
||||||
"EndpointRequest$EndpointStatus" : "The endpoint status. Can be either ACTIVE or INACTIVE. Will be set to INACTIVE if a delivery fails. Will be set to ACTIVE if the address is updated.",
|
"EndpointRequest$EndpointStatus" : "The endpoint status. Can be either ACTIVE or INACTIVE. Will be set to INACTIVE if a delivery fails. Will be set to ACTIVE if the address is updated.",
|
||||||
@ -766,6 +920,10 @@
|
|||||||
"EndpointResponse$Id" : "The unique ID that you assigned to the endpoint. The ID should be a globally unique identifier (GUID) to ensure that it is unique compared to all other endpoints for the application.",
|
"EndpointResponse$Id" : "The unique ID that you assigned to the endpoint. The ID should be a globally unique identifier (GUID) to ensure that it is unique compared to all other endpoints for the application.",
|
||||||
"EndpointResponse$OptOut" : "Indicates whether a user has opted out of receiving messages with one of the following values:\n\nALL - User has opted out of all messages.\n\nNONE - Users has not opted out and receives all messages.",
|
"EndpointResponse$OptOut" : "Indicates whether a user has opted out of receiving messages with one of the following values:\n\nALL - User has opted out of all messages.\n\nNONE - Users has not opted out and receives all messages.",
|
||||||
"EndpointResponse$RequestId" : "The unique ID for the most recent request to update the endpoint.",
|
"EndpointResponse$RequestId" : "The unique ID for the most recent request to update the endpoint.",
|
||||||
|
"EndpointResponse$ShardId" : "The ShardId of endpoint.",
|
||||||
|
"EndpointSendConfiguration$BodyOverride" : "Body override. If specified will override default body.",
|
||||||
|
"EndpointSendConfiguration$RawContent" : "The Raw JSON formatted string to be used as the payload. This value overrides the message.",
|
||||||
|
"EndpointSendConfiguration$TitleOverride" : "Title override. If specified will override default title if applicable.",
|
||||||
"EndpointUser$UserId" : "The unique ID of the user.",
|
"EndpointUser$UserId" : "The unique ID of the user.",
|
||||||
"EventStream$ApplicationId" : "The ID of the application from which events should be published.",
|
"EventStream$ApplicationId" : "The ID of the application from which events should be published.",
|
||||||
"EventStream$DestinationStreamArn" : "The Amazon Resource Name (ARN) of the Amazon Kinesis stream or Firehose delivery stream to which you want to publish events.\n Firehose ARN: arn:aws:firehose:REGION:ACCOUNT_ID:deliverystream/STREAM_NAME\n Kinesis ARN: arn:aws:kinesis:REGION:ACCOUNT_ID:stream/STREAM_NAME",
|
"EventStream$DestinationStreamArn" : "The Amazon Resource Name (ARN) of the Amazon Kinesis stream or Firehose delivery stream to which you want to publish events.\n Firehose ARN: arn:aws:firehose:REGION:ACCOUNT_ID:deliverystream/STREAM_NAME\n Kinesis ARN: arn:aws:kinesis:REGION:ACCOUNT_ID:stream/STREAM_NAME",
|
||||||
@ -786,6 +944,7 @@
|
|||||||
"GCMMessage$IconReference" : "The icon image name of the asset saved in your application.",
|
"GCMMessage$IconReference" : "The icon image name of the asset saved in your application.",
|
||||||
"GCMMessage$ImageIconUrl" : "The URL that points to an image used as the large icon to the notification content view.",
|
"GCMMessage$ImageIconUrl" : "The URL that points to an image used as the large icon to the notification content view.",
|
||||||
"GCMMessage$ImageUrl" : "The URL that points to an image used in the push notification.",
|
"GCMMessage$ImageUrl" : "The URL that points to an image used in the push notification.",
|
||||||
|
"GCMMessage$JsonData" : "The data payload used for a silent push. This payload is added to the notifications' data.pinpoint.jsonBody' object",
|
||||||
"GCMMessage$RawContent" : "The Raw JSON formatted string to be used as the payload. This value overrides the message.",
|
"GCMMessage$RawContent" : "The Raw JSON formatted string to be used as the payload. This value overrides the message.",
|
||||||
"GCMMessage$RestrictedPackageName" : "This parameter specifies the package name of the application where the registration tokens must match in order to receive the message.",
|
"GCMMessage$RestrictedPackageName" : "This parameter specifies the package name of the application where the registration tokens must match in order to receive the message.",
|
||||||
"GCMMessage$SmallImageIconUrl" : "The URL that points to an image used as the small icon for the notification which will be used to represent the notification in the status bar and content view",
|
"GCMMessage$SmallImageIconUrl" : "The URL that points to an image used as the small icon for the notification which will be used to represent the notification in the status bar and content view",
|
||||||
@ -819,6 +978,7 @@
|
|||||||
"Message$Url" : "The URL to open in the user's mobile browser. Used if the value for Action is URL.",
|
"Message$Url" : "The URL to open in the user's mobile browser. Used if the value for Action is URL.",
|
||||||
"MessageBody$Message" : "The error message returned from the API.",
|
"MessageBody$Message" : "The error message returned from the API.",
|
||||||
"MessageBody$RequestID" : "The unique message body ID.",
|
"MessageBody$RequestID" : "The unique message body ID.",
|
||||||
|
"MessageRequest$RequestId" : "Original request Id for which this message is delivered.",
|
||||||
"MessageResponse$ApplicationId" : "Application id of the message.",
|
"MessageResponse$ApplicationId" : "Application id of the message.",
|
||||||
"MessageResponse$RequestId" : "Original request Id for which this message was delivered.",
|
"MessageResponse$RequestId" : "Original request Id for which this message was delivered.",
|
||||||
"MessageResult$StatusMessage" : "Status message for message delivery.",
|
"MessageResult$StatusMessage" : "Status message for message delivery.",
|
||||||
@ -826,6 +986,7 @@
|
|||||||
"QuietTime$End" : "The default end time for quiet time in ISO 8601 format.",
|
"QuietTime$End" : "The default end time for quiet time in ISO 8601 format.",
|
||||||
"QuietTime$Start" : "The default start time for quiet time in ISO 8601 format.",
|
"QuietTime$Start" : "The default start time for quiet time in ISO 8601 format.",
|
||||||
"SMSChannelRequest$SenderId" : "Sender identifier of your messages.",
|
"SMSChannelRequest$SenderId" : "Sender identifier of your messages.",
|
||||||
|
"SMSChannelRequest$ShortCode" : "ShortCode registered with phone provider.",
|
||||||
"SMSChannelResponse$ApplicationId" : "The unique ID of the application to which the SMS channel belongs.",
|
"SMSChannelResponse$ApplicationId" : "The unique ID of the application to which the SMS channel belongs.",
|
||||||
"SMSChannelResponse$CreationDate" : "The date that the settings were last updated in ISO 8601 format.",
|
"SMSChannelResponse$CreationDate" : "The date that the settings were last updated in ISO 8601 format.",
|
||||||
"SMSChannelResponse$Id" : "Channel ID. Not used, only for backwards compatibility.",
|
"SMSChannelResponse$Id" : "Channel ID. Not used, only for backwards compatibility.",
|
||||||
@ -848,6 +1009,9 @@
|
|||||||
"SegmentResponse$LastModifiedDate" : "The date the segment was last updated in ISO 8601 format.",
|
"SegmentResponse$LastModifiedDate" : "The date the segment was last updated in ISO 8601 format.",
|
||||||
"SegmentResponse$Name" : "The name of segment",
|
"SegmentResponse$Name" : "The name of segment",
|
||||||
"SegmentsResponse$NextToken" : "An identifier used to retrieve the next page of results. The token is null if no additional pages exist.",
|
"SegmentsResponse$NextToken" : "An identifier used to retrieve the next page of results. The token is null if no additional pages exist.",
|
||||||
|
"SendUsersMessageRequest$RequestId" : "Original request Id for which this message is delivered.",
|
||||||
|
"SendUsersMessageResponse$ApplicationId" : "Application id of the message.",
|
||||||
|
"SendUsersMessageResponse$RequestId" : "Original request Id for which this message was delivered.",
|
||||||
"TreatmentResource$Id" : "The unique treatment ID.",
|
"TreatmentResource$Id" : "The unique treatment ID.",
|
||||||
"TreatmentResource$TreatmentDescription" : "A custom description for the treatment.",
|
"TreatmentResource$TreatmentDescription" : "A custom description for the treatment.",
|
||||||
"TreatmentResource$TreatmentName" : "The custom name of a variation of the campaign used for A/B testing.",
|
"TreatmentResource$TreatmentName" : "The custom name of a variation of the campaign used for A/B testing.",
|
||||||
@ -857,14 +1021,11 @@
|
|||||||
"WriteCampaignRequest$TreatmentDescription" : "A custom description for the treatment.",
|
"WriteCampaignRequest$TreatmentDescription" : "A custom description for the treatment.",
|
||||||
"WriteCampaignRequest$TreatmentName" : "The custom name of a variation of the campaign used for A/B testing.",
|
"WriteCampaignRequest$TreatmentName" : "The custom name of a variation of the campaign used for A/B testing.",
|
||||||
"WriteEventStream$DestinationStreamArn" : "The Amazon Resource Name (ARN) of the Amazon Kinesis stream or Firehose delivery stream to which you want to publish events.\n Firehose ARN: arn:aws:firehose:REGION:ACCOUNT_ID:deliverystream/STREAM_NAME\n Kinesis ARN: arn:aws:kinesis:REGION:ACCOUNT_ID:stream/STREAM_NAME",
|
"WriteEventStream$DestinationStreamArn" : "The Amazon Resource Name (ARN) of the Amazon Kinesis stream or Firehose delivery stream to which you want to publish events.\n Firehose ARN: arn:aws:firehose:REGION:ACCOUNT_ID:deliverystream/STREAM_NAME\n Kinesis ARN: arn:aws:kinesis:REGION:ACCOUNT_ID:stream/STREAM_NAME",
|
||||||
|
"WriteEventStream$ExternalId" : "The external ID assigned the IAM role that authorizes Amazon Pinpoint to publish to the stream.",
|
||||||
"WriteEventStream$RoleArn" : "The IAM role that authorizes Amazon Pinpoint to publish events to the stream in your account.",
|
"WriteEventStream$RoleArn" : "The IAM role that authorizes Amazon Pinpoint to publish events to the stream in your account.",
|
||||||
"WriteSegmentRequest$Name" : "The name of segment",
|
"WriteSegmentRequest$Name" : "The name of segment",
|
||||||
"WriteTreatmentResource$TreatmentDescription" : "A custom description for the treatment.",
|
"WriteTreatmentResource$TreatmentDescription" : "A custom description for the treatment.",
|
||||||
"WriteTreatmentResource$TreatmentName" : "The custom name of a variation of the campaign used for A/B testing.",
|
"WriteTreatmentResource$TreatmentName" : "The custom name of a variation of the campaign used for A/B testing."
|
||||||
"PutEventStreamRequest$ApplicationId": "Application Id.",
|
|
||||||
"PutEventStreamRequest$WriteEventStream": "Write event stream wrapper.",
|
|
||||||
"GetEventStreamRequest$ApplicationId": "Application Id.",
|
|
||||||
"DeleteEventStreamRequest$ApplicationId": "Application Id."
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
22
vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/api-2.json
generated
vendored
22
vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/api-2.json
generated
vendored
@ -1983,7 +1983,9 @@
|
|||||||
"DomainIAMRoleName":{"shape":"String"},
|
"DomainIAMRoleName":{"shape":"String"},
|
||||||
"PromotionTier":{"shape":"IntegerOptional"},
|
"PromotionTier":{"shape":"IntegerOptional"},
|
||||||
"Timezone":{"shape":"String"},
|
"Timezone":{"shape":"String"},
|
||||||
"EnableIAMDatabaseAuthentication":{"shape":"BooleanOptional"}
|
"EnableIAMDatabaseAuthentication":{"shape":"BooleanOptional"},
|
||||||
|
"EnablePerformanceInsights":{"shape":"BooleanOptional"},
|
||||||
|
"PerformanceInsightsKMSKeyId":{"shape":"String"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"CreateDBInstanceReadReplicaMessage":{
|
"CreateDBInstanceReadReplicaMessage":{
|
||||||
@ -2010,7 +2012,9 @@
|
|||||||
"MonitoringRoleArn":{"shape":"String"},
|
"MonitoringRoleArn":{"shape":"String"},
|
||||||
"KmsKeyId":{"shape":"String"},
|
"KmsKeyId":{"shape":"String"},
|
||||||
"PreSignedUrl":{"shape":"String"},
|
"PreSignedUrl":{"shape":"String"},
|
||||||
"EnableIAMDatabaseAuthentication":{"shape":"BooleanOptional"}
|
"EnableIAMDatabaseAuthentication":{"shape":"BooleanOptional"},
|
||||||
|
"EnablePerformanceInsights":{"shape":"BooleanOptional"},
|
||||||
|
"PerformanceInsightsKMSKeyId":{"shape":"String"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"CreateDBInstanceReadReplicaResult":{
|
"CreateDBInstanceReadReplicaResult":{
|
||||||
@ -2522,7 +2526,9 @@
|
|||||||
"PromotionTier":{"shape":"IntegerOptional"},
|
"PromotionTier":{"shape":"IntegerOptional"},
|
||||||
"DBInstanceArn":{"shape":"String"},
|
"DBInstanceArn":{"shape":"String"},
|
||||||
"Timezone":{"shape":"String"},
|
"Timezone":{"shape":"String"},
|
||||||
"IAMDatabaseAuthenticationEnabled":{"shape":"Boolean"}
|
"IAMDatabaseAuthenticationEnabled":{"shape":"Boolean"},
|
||||||
|
"PerformanceInsightsEnabled":{"shape":"BooleanOptional"},
|
||||||
|
"PerformanceInsightsKMSKeyId":{"shape":"String"}
|
||||||
},
|
},
|
||||||
"wrapper":true
|
"wrapper":true
|
||||||
},
|
},
|
||||||
@ -3952,7 +3958,9 @@
|
|||||||
"MonitoringRoleArn":{"shape":"String"},
|
"MonitoringRoleArn":{"shape":"String"},
|
||||||
"DomainIAMRoleName":{"shape":"String"},
|
"DomainIAMRoleName":{"shape":"String"},
|
||||||
"PromotionTier":{"shape":"IntegerOptional"},
|
"PromotionTier":{"shape":"IntegerOptional"},
|
||||||
"EnableIAMDatabaseAuthentication":{"shape":"BooleanOptional"}
|
"EnableIAMDatabaseAuthentication":{"shape":"BooleanOptional"},
|
||||||
|
"EnablePerformanceInsights":{"shape":"BooleanOptional"},
|
||||||
|
"PerformanceInsightsKMSKeyId":{"shape":"String"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ModifyDBInstanceResult":{
|
"ModifyDBInstanceResult":{
|
||||||
@ -3996,7 +4004,8 @@
|
|||||||
"required":["DBSnapshotIdentifier"],
|
"required":["DBSnapshotIdentifier"],
|
||||||
"members":{
|
"members":{
|
||||||
"DBSnapshotIdentifier":{"shape":"String"},
|
"DBSnapshotIdentifier":{"shape":"String"},
|
||||||
"EngineVersion":{"shape":"String"}
|
"EngineVersion":{"shape":"String"},
|
||||||
|
"OptionGroupName":{"shape":"String"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ModifyDBSnapshotResult":{
|
"ModifyDBSnapshotResult":{
|
||||||
@ -4299,7 +4308,8 @@
|
|||||||
"StorageType":{"shape":"String"},
|
"StorageType":{"shape":"String"},
|
||||||
"SupportsIops":{"shape":"Boolean"},
|
"SupportsIops":{"shape":"Boolean"},
|
||||||
"SupportsEnhancedMonitoring":{"shape":"Boolean"},
|
"SupportsEnhancedMonitoring":{"shape":"Boolean"},
|
||||||
"SupportsIAMDatabaseAuthentication":{"shape":"Boolean"}
|
"SupportsIAMDatabaseAuthentication":{"shape":"Boolean"},
|
||||||
|
"SupportsPerformanceInsights":{"shape":"Boolean"}
|
||||||
},
|
},
|
||||||
"wrapper":true
|
"wrapper":true
|
||||||
},
|
},
|
||||||
|
28
vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/docs-2.json
generated
vendored
28
vendor/github.com/aws/aws-sdk-go/models/apis/rds/2014-10-31/docs-2.json
generated
vendored
File diff suppressed because one or more lines are too long
7
vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/api-2.json
generated
vendored
7
vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/api-2.json
generated
vendored
@ -545,7 +545,8 @@
|
|||||||
"resultWrapper":"DescribeEventSubscriptionsResult"
|
"resultWrapper":"DescribeEventSubscriptionsResult"
|
||||||
},
|
},
|
||||||
"errors":[
|
"errors":[
|
||||||
{"shape":"SubscriptionNotFoundFault"}
|
{"shape":"SubscriptionNotFoundFault"},
|
||||||
|
{"shape":"InvalidTagFault"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"DescribeEvents":{
|
"DescribeEvents":{
|
||||||
@ -2097,7 +2098,9 @@
|
|||||||
"members":{
|
"members":{
|
||||||
"SubscriptionName":{"shape":"String"},
|
"SubscriptionName":{"shape":"String"},
|
||||||
"MaxRecords":{"shape":"IntegerOptional"},
|
"MaxRecords":{"shape":"IntegerOptional"},
|
||||||
"Marker":{"shape":"String"}
|
"Marker":{"shape":"String"},
|
||||||
|
"TagKeys":{"shape":"TagKeyList"},
|
||||||
|
"TagValues":{"shape":"TagValueList"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DescribeEventsMessage":{
|
"DescribeEventsMessage":{
|
||||||
|
26
vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/docs-2.json
generated
vendored
26
vendor/github.com/aws/aws-sdk-go/models/apis/redshift/2012-12-01/docs-2.json
generated
vendored
@ -34,7 +34,7 @@
|
|||||||
"DescribeClusters": "<p>Returns properties of provisioned clusters including general cluster properties, cluster database properties, maintenance and backup properties, and security and access properties. This operation supports pagination. For more information about managing clusters, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html\">Amazon Redshift Clusters</a> in the <i>Amazon Redshift Cluster Management Guide</i>.</p> <p>If you specify both tag keys and tag values in the same request, Amazon Redshift returns all clusters that match any combination of the specified keys and values. For example, if you have <code>owner</code> and <code>environment</code> for tag keys, and <code>admin</code> and <code>test</code> for tag values, all clusters that have any combination of those values are returned.</p> <p>If both tag keys and values are omitted from the request, clusters are returned regardless of whether they have tag keys or values associated with them.</p>",
|
"DescribeClusters": "<p>Returns properties of provisioned clusters including general cluster properties, cluster database properties, maintenance and backup properties, and security and access properties. This operation supports pagination. For more information about managing clusters, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html\">Amazon Redshift Clusters</a> in the <i>Amazon Redshift Cluster Management Guide</i>.</p> <p>If you specify both tag keys and tag values in the same request, Amazon Redshift returns all clusters that match any combination of the specified keys and values. For example, if you have <code>owner</code> and <code>environment</code> for tag keys, and <code>admin</code> and <code>test</code> for tag values, all clusters that have any combination of those values are returned.</p> <p>If both tag keys and values are omitted from the request, clusters are returned regardless of whether they have tag keys or values associated with them.</p>",
|
||||||
"DescribeDefaultClusterParameters": "<p>Returns a list of parameter settings for the specified parameter group family.</p> <p> For more information about parameters and parameter groups, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html\">Amazon Redshift Parameter Groups</a> in the <i>Amazon Redshift Cluster Management Guide</i>.</p>",
|
"DescribeDefaultClusterParameters": "<p>Returns a list of parameter settings for the specified parameter group family.</p> <p> For more information about parameters and parameter groups, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html\">Amazon Redshift Parameter Groups</a> in the <i>Amazon Redshift Cluster Management Guide</i>.</p>",
|
||||||
"DescribeEventCategories": "<p>Displays a list of event categories for all event source types, or for a specified source type. For a list of the event categories and source types, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-event-notifications.html\">Amazon Redshift Event Notifications</a>.</p>",
|
"DescribeEventCategories": "<p>Displays a list of event categories for all event source types, or for a specified source type. For a list of the event categories and source types, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-event-notifications.html\">Amazon Redshift Event Notifications</a>.</p>",
|
||||||
"DescribeEventSubscriptions": "<p>Lists descriptions of all the Amazon Redshift event notifications subscription for a customer account. If you specify a subscription name, lists the description for that subscription.</p>",
|
"DescribeEventSubscriptions": "<p>Lists descriptions of all the Amazon Redshift event notification subscriptions for a customer account. If you specify a subscription name, lists the description for that subscription.</p> <p>If you specify both tag keys and tag values in the same request, Amazon Redshift returns all event notification subscriptions that match any combination of the specified keys and values. For example, if you have <code>owner</code> and <code>environment</code> for tag keys, and <code>admin</code> and <code>test</code> for tag values, all subscriptions that have any combination of those values are returned.</p> <p>If both tag keys and values are omitted from the request, subscriptions are returned regardless of whether they have tag keys or values associated with them.</p>",
|
||||||
"DescribeEvents": "<p>Returns events related to clusters, security groups, snapshots, and parameter groups for the past 14 days. Events specific to a particular cluster, security group, snapshot or parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.</p>",
|
"DescribeEvents": "<p>Returns events related to clusters, security groups, snapshots, and parameter groups for the past 14 days. Events specific to a particular cluster, security group, snapshot or parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.</p>",
|
||||||
"DescribeHsmClientCertificates": "<p>Returns information about the specified HSM client certificate. If no certificate ID is specified, returns information about all the HSM certificates owned by your AWS customer account.</p> <p>If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM client certificates that match any combination of the specified keys and values. For example, if you have <code>owner</code> and <code>environment</code> for tag keys, and <code>admin</code> and <code>test</code> for tag values, all HSM client certificates that have any combination of those values are returned.</p> <p>If both tag keys and values are omitted from the request, HSM client certificates are returned regardless of whether they have tag keys or values associated with them.</p>",
|
"DescribeHsmClientCertificates": "<p>Returns information about the specified HSM client certificate. If no certificate ID is specified, returns information about all the HSM certificates owned by your AWS customer account.</p> <p>If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM client certificates that match any combination of the specified keys and values. For example, if you have <code>owner</code> and <code>environment</code> for tag keys, and <code>admin</code> and <code>test</code> for tag values, all HSM client certificates that have any combination of those values are returned.</p> <p>If both tag keys and values are omitted from the request, HSM client certificates are returned regardless of whether they have tag keys or values associated with them.</p>",
|
||||||
"DescribeHsmConfigurations": "<p>Returns information about the specified Amazon Redshift HSM configuration. If no configuration ID is specified, returns information about all the HSM configurations owned by your AWS customer account.</p> <p>If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM connections that match any combination of the specified keys and values. For example, if you have <code>owner</code> and <code>environment</code> for tag keys, and <code>admin</code> and <code>test</code> for tag values, all HSM connections that have any combination of those values are returned.</p> <p>If both tag keys and values are omitted from the request, HSM connections are returned regardless of whether they have tag keys or values associated with them.</p>",
|
"DescribeHsmConfigurations": "<p>Returns information about the specified Amazon Redshift HSM configuration. If no configuration ID is specified, returns information about all the HSM configurations owned by your AWS customer account.</p> <p>If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM connections that match any combination of the specified keys and values. For example, if you have <code>owner</code> and <code>environment</code> for tag keys, and <code>admin</code> and <code>test</code> for tag values, all HSM connections that have any combination of those values are returned.</p> <p>If both tag keys and values are omitted from the request, HSM connections are returned regardless of whether they have tag keys or values associated with them.</p>",
|
||||||
@ -50,7 +50,7 @@
|
|||||||
"DisableSnapshotCopy": "<p>Disables the automatic copying of snapshots from one region to another region for a specified cluster.</p> <p>If your cluster and its snapshots are encrypted using a customer master key (CMK) from AWS KMS, use <a>DeleteSnapshotCopyGrant</a> to delete the grant that grants Amazon Redshift permission to the CMK in the destination region. </p>",
|
"DisableSnapshotCopy": "<p>Disables the automatic copying of snapshots from one region to another region for a specified cluster.</p> <p>If your cluster and its snapshots are encrypted using a customer master key (CMK) from AWS KMS, use <a>DeleteSnapshotCopyGrant</a> to delete the grant that grants Amazon Redshift permission to the CMK in the destination region. </p>",
|
||||||
"EnableLogging": "<p>Starts logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.</p>",
|
"EnableLogging": "<p>Starts logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.</p>",
|
||||||
"EnableSnapshotCopy": "<p>Enables the automatic copy of snapshots from one region to another region for a specified cluster.</p>",
|
"EnableSnapshotCopy": "<p>Enables the automatic copy of snapshots from one region to another region for a specified cluster.</p>",
|
||||||
"GetClusterCredentials": "<p>Returns a database user name and temporary password with temporary authorization to log in to an Amazon Redshift database. The action returns the database user name prefixed with <code>IAM:</code> if <code>AutoCreate</code> is <code>False</code> or <code>IAMA:</code> if <code>AutoCreate</code> is <code>True</code>. You can optionally specify one or more database user groups that the user will join at log in. By default, the temporary credentials expire in 900 seconds. You can optionally specify a duration between 900 seconds (15 minutes) and 3600 seconds (60 minutes). For more information, see Generating IAM Database User Credentials in the Amazon Redshift Cluster Management Guide.</p> <p>The IAM user or role that executes GetClusterCredentials must have an IAM policy attached that allows the <code>redshift:GetClusterCredentials</code> action with access to the <code>dbuser</code> resource on the cluster. The user name specified for <code>dbuser</code> in the IAM policy and the user name specified for the <code>DbUser</code> parameter must match.</p> <p>If the <code>DbGroups</code> parameter is specified, the IAM policy must allow the <code>redshift:JoinGroup</code> action with access to the listed <code>dbgroups</code>. </p> <p>In addition, if the <code>AutoCreate</code> parameter is set to <code>True</code>, then the policy must include the <code>redshift:CreateClusterUser</code> privilege.</p> <p>If the <code>DbName</code> parameter is specified, the IAM policy must allow access to the resource <code>dbname</code> for the specified database name. </p>",
|
"GetClusterCredentials": "<p>Returns a database user name and temporary password with temporary authorization to log on to an Amazon Redshift database. The action returns the database user name prefixed with <code>IAM:</code> if <code>AutoCreate</code> is <code>False</code> or <code>IAMA:</code> if <code>AutoCreate</code> is <code>True</code>. You can optionally specify one or more database user groups that the user will join at log on. By default, the temporary credentials expire in 900 seconds. You can optionally specify a duration between 900 seconds (15 minutes) and 3600 seconds (60 minutes). For more information, see <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/generating-user-credentials.html\">Using IAM Authentication to Generate Database User Credentials</a> in the Amazon Redshift Cluster Management Guide.</p> <p>The AWS Identity and Access Management (IAM) user or role that executes GetClusterCredentials must have an IAM policy attached that allows access to all necessary actions and resources. For more information about permissions, see <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html#redshift-policy-resources.getclustercredentials-resources\">Resource Policies for GetClusterCredentials</a> in the Amazon Redshift Cluster Management Guide.</p> <p>If the <code>DbGroups</code> parameter is specified, the IAM policy must allow the <code>redshift:JoinGroup</code> action with access to the listed <code>dbgroups</code>. </p> <p>In addition, if the <code>AutoCreate</code> parameter is set to <code>True</code>, then the policy must include the <code>redshift:CreateClusterUser</code> privilege.</p> <p>If the <code>DbName</code> parameter is specified, the IAM policy must allow access to the resource <code>dbname</code> for the specified database name. </p>",
|
||||||
"ModifyCluster": "<p>Modifies the settings for a cluster. For example, you can add another security or parameter group, update the preferred maintenance window, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html\">Amazon Redshift Clusters</a> in the <i>Amazon Redshift Cluster Management Guide</i>.</p> <p>You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.</p>",
|
"ModifyCluster": "<p>Modifies the settings for a cluster. For example, you can add another security or parameter group, update the preferred maintenance window, or change the master user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html\">Amazon Redshift Clusters</a> in the <i>Amazon Redshift Cluster Management Guide</i>.</p> <p>You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.</p>",
|
||||||
"ModifyClusterIamRoles": "<p>Modifies the list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.</p> <p>A cluster can have up to 10 IAM roles associated at any time.</p>",
|
"ModifyClusterIamRoles": "<p>Modifies the list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.</p> <p>A cluster can have up to 10 IAM roles associated at any time.</p>",
|
||||||
"ModifyClusterParameterGroup": "<p>Modifies the parameters of a parameter group.</p> <p> For more information about parameters and parameter groups, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html\">Amazon Redshift Parameter Groups</a> in the <i>Amazon Redshift Cluster Management Guide</i>.</p>",
|
"ModifyClusterParameterGroup": "<p>Modifies the parameters of a parameter group.</p> <p> For more information about parameters and parameter groups, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html\">Amazon Redshift Parameter Groups</a> in the <i>Amazon Redshift Cluster Management Guide</i>.</p>",
|
||||||
@ -157,7 +157,7 @@
|
|||||||
"CreateClusterMessage$Encrypted": "<p>If <code>true</code>, the data in the cluster is encrypted at rest. </p> <p>Default: false</p>",
|
"CreateClusterMessage$Encrypted": "<p>If <code>true</code>, the data in the cluster is encrypted at rest. </p> <p>Default: false</p>",
|
||||||
"CreateClusterMessage$EnhancedVpcRouting": "<p>An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html\">Enhanced VPC Routing</a> in the Amazon Redshift Cluster Management Guide.</p> <p>If this option is <code>true</code>, enhanced VPC routing is enabled. </p> <p>Default: false</p>",
|
"CreateClusterMessage$EnhancedVpcRouting": "<p>An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html\">Enhanced VPC Routing</a> in the Amazon Redshift Cluster Management Guide.</p> <p>If this option is <code>true</code>, enhanced VPC routing is enabled. </p> <p>Default: false</p>",
|
||||||
"CreateEventSubscriptionMessage$Enabled": "<p>A Boolean value; set to <code>true</code> to activate the subscription, set to <code>false</code> to create the subscription but not active it. </p>",
|
"CreateEventSubscriptionMessage$Enabled": "<p>A Boolean value; set to <code>true</code> to activate the subscription, set to <code>false</code> to create the subscription but not active it. </p>",
|
||||||
"GetClusterCredentialsMessage$AutoCreate": "<p>Create a database user with the name specified for <code>DbUser</code> if one does not exist.</p>",
|
"GetClusterCredentialsMessage$AutoCreate": "<p>Create a database user with the name specified for the user named in <code>DbUser</code> if one does not exist.</p>",
|
||||||
"ModifyClusterMessage$AllowVersionUpgrade": "<p>If <code>true</code>, major version upgrades will be applied automatically to the cluster during the maintenance window. </p> <p>Default: <code>false</code> </p>",
|
"ModifyClusterMessage$AllowVersionUpgrade": "<p>If <code>true</code>, major version upgrades will be applied automatically to the cluster during the maintenance window. </p> <p>Default: <code>false</code> </p>",
|
||||||
"ModifyClusterMessage$PubliclyAccessible": "<p>If <code>true</code>, the cluster can be accessed from a public network. Only clusters in VPCs can be set to be publicly available.</p>",
|
"ModifyClusterMessage$PubliclyAccessible": "<p>If <code>true</code>, the cluster can be accessed from a public network. Only clusters in VPCs can be set to be publicly available.</p>",
|
||||||
"ModifyClusterMessage$EnhancedVpcRouting": "<p>An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html\">Enhanced VPC Routing</a> in the Amazon Redshift Cluster Management Guide.</p> <p>If this option is <code>true</code>, enhanced VPC routing is enabled. </p> <p>Default: false</p>",
|
"ModifyClusterMessage$EnhancedVpcRouting": "<p>An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html\">Enhanced VPC Routing</a> in the Amazon Redshift Cluster Management Guide.</p> <p>If this option is <code>true</code>, enhanced VPC routing is enabled. </p> <p>Default: false</p>",
|
||||||
@ -196,7 +196,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ClusterCredentials": {
|
"ClusterCredentials": {
|
||||||
"base": "<p>Temporary credentials with authorization to log in to an Amazon Redshift database. </p>",
|
"base": "<p>Temporary credentials with authorization to log on to an Amazon Redshift database. </p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -551,7 +551,7 @@
|
|||||||
"DbGroupList": {
|
"DbGroupList": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"GetClusterCredentialsMessage$DbGroups": "<p>A list of the names of existing database groups that <code>DbUser</code> will join for the current session. If not specified, the new user is added only to PUBLIC.</p>"
|
"GetClusterCredentialsMessage$DbGroups": "<p>A list of the names of existing database groups that the user named in <code>DbUser</code> will join for the current session, in addition to any group memberships for an existing user. If not specified, a new user is added only to PUBLIC.</p> <p>Database group name constraints</p> <ul> <li> <p>Must be 1 to 64 alphanumeric characters or hyphens</p> </li> <li> <p>Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.</p> </li> <li> <p>First character must be a letter.</p> </li> <li> <p>Must not contain a colon ( : ) or slash ( / ). </p> </li> <li> <p>Cannot be a reserved word. A list of reserved words can be found in <a href=\"http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html\">Reserved Words</a> in the Amazon Redshift Database Developer Guide.</p> </li> </ul>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DefaultClusterParameters": {
|
"DefaultClusterParameters": {
|
||||||
@ -1628,7 +1628,7 @@
|
|||||||
"Cluster$ClusterPublicKey": "<p>The public key for the cluster.</p>",
|
"Cluster$ClusterPublicKey": "<p>The public key for the cluster.</p>",
|
||||||
"Cluster$ClusterRevisionNumber": "<p>The specific revision number of the database in the cluster.</p>",
|
"Cluster$ClusterRevisionNumber": "<p>The specific revision number of the database in the cluster.</p>",
|
||||||
"Cluster$KmsKeyId": "<p>The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.</p>",
|
"Cluster$KmsKeyId": "<p>The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.</p>",
|
||||||
"ClusterCredentials$DbUser": "<p>A database user name that is authorized to log on to the database <code>DbName</code> using the password <code>DbPassword</code>. If the <code>DbGroups</code> parameter is specified, <code>DbUser</code> is added to the listed groups for the current session. The user name is prefixed with <code>IAM:</code> for an existing user name or <code>IAMA:</code> if the user was auto-created. </p>",
|
"ClusterCredentials$DbUser": "<p>A database user name that is authorized to log on to the database <code>DbName</code> using the password <code>DbPassword</code>. If the specified DbUser exists in the database, the new user name has the same database privileges as the user named in DbUser. By default, the user is added to PUBLIC. If the <code>DbGroups</code> parameter is specified, <code>DbUser</code> is added to the listed groups for any sessions created using these credentials.</p>",
|
||||||
"ClusterIamRole$IamRoleArn": "<p>The Amazon Resource Name (ARN) of the IAM role, for example, <code>arn:aws:iam::123456789012:role/RedshiftCopyUnload</code>. </p>",
|
"ClusterIamRole$IamRoleArn": "<p>The Amazon Resource Name (ARN) of the IAM role, for example, <code>arn:aws:iam::123456789012:role/RedshiftCopyUnload</code>. </p>",
|
||||||
"ClusterIamRole$ApplyStatus": "<p>A value that describes the status of the IAM role's association with an Amazon Redshift cluster.</p> <p>The following are possible statuses and descriptions.</p> <ul> <li> <p> <code>in-sync</code>: The role is available for use by the cluster.</p> </li> <li> <p> <code>adding</code>: The role is in the process of being associated with the cluster.</p> </li> <li> <p> <code>removing</code>: The role is in the process of being disassociated with the cluster.</p> </li> </ul>",
|
"ClusterIamRole$ApplyStatus": "<p>A value that describes the status of the IAM role's association with an Amazon Redshift cluster.</p> <p>The following are possible statuses and descriptions.</p> <ul> <li> <p> <code>in-sync</code>: The role is available for use by the cluster.</p> </li> <li> <p> <code>adding</code>: The role is in the process of being associated with the cluster.</p> </li> <li> <p> <code>removing</code>: The role is in the process of being disassociated with the cluster.</p> </li> </ul>",
|
||||||
"ClusterNode$NodeRole": "<p>Whether the node is a leader node or a compute node.</p>",
|
"ClusterNode$NodeRole": "<p>Whether the node is a leader node or a compute node.</p>",
|
||||||
@ -1744,7 +1744,7 @@
|
|||||||
"DescribeDefaultClusterParametersMessage$Marker": "<p>An optional parameter that specifies the starting point to return a set of response records. When the results of a <a>DescribeDefaultClusterParameters</a> request exceed the value specified in <code>MaxRecords</code>, AWS returns a value in the <code>Marker</code> field of the response. You can retrieve the next set of response records by providing the returned marker value in the <code>Marker</code> parameter and retrying the request. </p>",
|
"DescribeDefaultClusterParametersMessage$Marker": "<p>An optional parameter that specifies the starting point to return a set of response records. When the results of a <a>DescribeDefaultClusterParameters</a> request exceed the value specified in <code>MaxRecords</code>, AWS returns a value in the <code>Marker</code> field of the response. You can retrieve the next set of response records by providing the returned marker value in the <code>Marker</code> parameter and retrying the request. </p>",
|
||||||
"DescribeEventCategoriesMessage$SourceType": "<p>The source type, such as cluster or parameter group, to which the described event categories apply.</p> <p>Valid values: cluster, cluster-snapshot, cluster-parameter-group, and cluster-security-group.</p>",
|
"DescribeEventCategoriesMessage$SourceType": "<p>The source type, such as cluster or parameter group, to which the described event categories apply.</p> <p>Valid values: cluster, cluster-snapshot, cluster-parameter-group, and cluster-security-group.</p>",
|
||||||
"DescribeEventSubscriptionsMessage$SubscriptionName": "<p>The name of the Amazon Redshift event notification subscription to be described.</p>",
|
"DescribeEventSubscriptionsMessage$SubscriptionName": "<p>The name of the Amazon Redshift event notification subscription to be described.</p>",
|
||||||
"DescribeEventSubscriptionsMessage$Marker": "<p>An optional parameter that specifies the starting point to return a set of response records. When the results of a <a>DescribeEventSubscriptions</a> request exceed the value specified in <code>MaxRecords</code>, AWS returns a value in the <code>Marker</code> field of the response. You can retrieve the next set of response records by providing the returned marker value in the <code>Marker</code> parameter and retrying the request. </p>",
|
"DescribeEventSubscriptionsMessage$Marker": "<p>An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEventSubscriptions request exceed the value specified in <code>MaxRecords</code>, AWS returns a value in the <code>Marker</code> field of the response. You can retrieve the next set of response records by providing the returned marker value in the <code>Marker</code> parameter and retrying the request. </p>",
|
||||||
"DescribeEventsMessage$SourceIdentifier": "<p>The identifier of the event source for which events will be returned. If this parameter is not specified, then all sources are included in the response.</p> <p>Constraints:</p> <p>If <i>SourceIdentifier</i> is supplied, <i>SourceType</i> must also be provided.</p> <ul> <li> <p>Specify a cluster identifier when <i>SourceType</i> is <code>cluster</code>.</p> </li> <li> <p>Specify a cluster security group name when <i>SourceType</i> is <code>cluster-security-group</code>.</p> </li> <li> <p>Specify a cluster parameter group name when <i>SourceType</i> is <code>cluster-parameter-group</code>.</p> </li> <li> <p>Specify a cluster snapshot identifier when <i>SourceType</i> is <code>cluster-snapshot</code>.</p> </li> </ul>",
|
"DescribeEventsMessage$SourceIdentifier": "<p>The identifier of the event source for which events will be returned. If this parameter is not specified, then all sources are included in the response.</p> <p>Constraints:</p> <p>If <i>SourceIdentifier</i> is supplied, <i>SourceType</i> must also be provided.</p> <ul> <li> <p>Specify a cluster identifier when <i>SourceType</i> is <code>cluster</code>.</p> </li> <li> <p>Specify a cluster security group name when <i>SourceType</i> is <code>cluster-security-group</code>.</p> </li> <li> <p>Specify a cluster parameter group name when <i>SourceType</i> is <code>cluster-parameter-group</code>.</p> </li> <li> <p>Specify a cluster snapshot identifier when <i>SourceType</i> is <code>cluster-snapshot</code>.</p> </li> </ul>",
|
||||||
"DescribeEventsMessage$Marker": "<p>An optional parameter that specifies the starting point to return a set of response records. When the results of a <a>DescribeEvents</a> request exceed the value specified in <code>MaxRecords</code>, AWS returns a value in the <code>Marker</code> field of the response. You can retrieve the next set of response records by providing the returned marker value in the <code>Marker</code> parameter and retrying the request. </p>",
|
"DescribeEventsMessage$Marker": "<p>An optional parameter that specifies the starting point to return a set of response records. When the results of a <a>DescribeEvents</a> request exceed the value specified in <code>MaxRecords</code>, AWS returns a value in the <code>Marker</code> field of the response. You can retrieve the next set of response records by providing the returned marker value in the <code>Marker</code> parameter and retrying the request. </p>",
|
||||||
"DescribeHsmClientCertificatesMessage$HsmClientCertificateIdentifier": "<p>The identifier of a specific HSM client certificate for which you want information. If no identifier is specified, information is returned for all HSM client certificates owned by your AWS customer account.</p>",
|
"DescribeHsmClientCertificatesMessage$HsmClientCertificateIdentifier": "<p>The identifier of a specific HSM client certificate for which you want information. If no identifier is specified, information is returned for all HSM client certificates owned by your AWS customer account.</p>",
|
||||||
@ -1766,7 +1766,7 @@
|
|||||||
"DescribeTableRestoreStatusMessage$TableRestoreRequestId": "<p>The identifier of the table restore request to return status for. If you don't specify a <code>TableRestoreRequestId</code> value, then <code>DescribeTableRestoreStatus</code> returns the status of all in-progress table restore requests.</p>",
|
"DescribeTableRestoreStatusMessage$TableRestoreRequestId": "<p>The identifier of the table restore request to return status for. If you don't specify a <code>TableRestoreRequestId</code> value, then <code>DescribeTableRestoreStatus</code> returns the status of all in-progress table restore requests.</p>",
|
||||||
"DescribeTableRestoreStatusMessage$Marker": "<p>An optional pagination token provided by a previous <code>DescribeTableRestoreStatus</code> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by the <code>MaxRecords</code> parameter.</p>",
|
"DescribeTableRestoreStatusMessage$Marker": "<p>An optional pagination token provided by a previous <code>DescribeTableRestoreStatus</code> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by the <code>MaxRecords</code> parameter.</p>",
|
||||||
"DescribeTagsMessage$ResourceName": "<p>The Amazon Resource Name (ARN) for which you want to describe the tag or tags. For example, <code>arn:aws:redshift:us-east-1:123456789:cluster:t1</code>. </p>",
|
"DescribeTagsMessage$ResourceName": "<p>The Amazon Resource Name (ARN) for which you want to describe the tag or tags. For example, <code>arn:aws:redshift:us-east-1:123456789:cluster:t1</code>. </p>",
|
||||||
"DescribeTagsMessage$ResourceType": "<p>The type of resource with which you want to view tags. Valid resource types are: </p> <ul> <li> <p>Cluster</p> </li> <li> <p>CIDR/IP</p> </li> <li> <p>EC2 security group</p> </li> <li> <p>Snapshot</p> </li> <li> <p>Cluster security group</p> </li> <li> <p>Subnet group</p> </li> <li> <p>HSM connection</p> </li> <li> <p>HSM certificate</p> </li> <li> <p>Parameter group</p> </li> <li> <p>Snapshot copy grant</p> </li> </ul> <p>For more information about Amazon Redshift resource types and constructing ARNs, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/constructing-redshift-arn.html\">Constructing an Amazon Redshift Amazon Resource Name (ARN)</a> in the Amazon Redshift Cluster Management Guide. </p>",
|
"DescribeTagsMessage$ResourceType": "<p>The type of resource with which you want to view tags. Valid resource types are: </p> <ul> <li> <p>Cluster</p> </li> <li> <p>CIDR/IP</p> </li> <li> <p>EC2 security group</p> </li> <li> <p>Snapshot</p> </li> <li> <p>Cluster security group</p> </li> <li> <p>Subnet group</p> </li> <li> <p>HSM connection</p> </li> <li> <p>HSM certificate</p> </li> <li> <p>Parameter group</p> </li> <li> <p>Snapshot copy grant</p> </li> </ul> <p>For more information about Amazon Redshift resource types and constructing ARNs, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-overview.html#redshift-iam-access-control-specify-actions\">Specifying Policy Elements: Actions, Effects, Resources, and Principals</a> in the Amazon Redshift Cluster Management Guide. </p>",
|
||||||
"DescribeTagsMessage$Marker": "<p>A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the <code>marker</code> parameter and retrying the command. If the <code>marker</code> field is empty, all response records have been retrieved for the request. </p>",
|
"DescribeTagsMessage$Marker": "<p>A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the <code>marker</code> parameter and retrying the command. If the <code>marker</code> field is empty, all response records have been retrieved for the request. </p>",
|
||||||
"DisableLoggingMessage$ClusterIdentifier": "<p>The identifier of the cluster on which logging is to be stopped.</p> <p>Example: <code>examplecluster</code> </p>",
|
"DisableLoggingMessage$ClusterIdentifier": "<p>The identifier of the cluster on which logging is to be stopped.</p> <p>Example: <code>examplecluster</code> </p>",
|
||||||
"DisableSnapshotCopyMessage$ClusterIdentifier": "<p>The unique identifier of the source cluster that you want to disable copying of snapshots to a destination region.</p> <p>Constraints: Must be the valid name of an existing cluster that has cross-region snapshot copy enabled.</p>",
|
"DisableSnapshotCopyMessage$ClusterIdentifier": "<p>The unique identifier of the source cluster that you want to disable copying of snapshots to a destination region.</p> <p>Constraints: Must be the valid name of an existing cluster that has cross-region snapshot copy enabled.</p>",
|
||||||
@ -1799,8 +1799,8 @@
|
|||||||
"EventSubscription$Severity": "<p>The event severity specified in the Amazon Redshift event notification subscription.</p> <p>Values: ERROR, INFO</p>",
|
"EventSubscription$Severity": "<p>The event severity specified in the Amazon Redshift event notification subscription.</p> <p>Values: ERROR, INFO</p>",
|
||||||
"EventSubscriptionsMessage$Marker": "<p>A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the <code>Marker</code> parameter and retrying the command. If the <code>Marker</code> field is empty, all response records have been retrieved for the request. </p>",
|
"EventSubscriptionsMessage$Marker": "<p>A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the <code>Marker</code> parameter and retrying the command. If the <code>Marker</code> field is empty, all response records have been retrieved for the request. </p>",
|
||||||
"EventsMessage$Marker": "<p>A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the <code>Marker</code> parameter and retrying the command. If the <code>Marker</code> field is empty, all response records have been retrieved for the request. </p>",
|
"EventsMessage$Marker": "<p>A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the <code>Marker</code> parameter and retrying the command. If the <code>Marker</code> field is empty, all response records have been retrieved for the request. </p>",
|
||||||
"GetClusterCredentialsMessage$DbUser": "<p>The name of a database user. If a user name matching <code>DbUser</code> exists in the database, the temporary user credentials have the same permissions as the existing user. If <code>DbUser</code> doesn't exist in the database and <code>Autocreate</code> is <code>True</code>, a new user is created using the value for <code>DbUser</code> with PUBLIC permissions. If a database user matching the value for <code>DbUser</code> doesn't exist and <code>Autocreate</code> is <code>False</code>, then the command succeeds but the connection attempt will fail because the user doesn't exist in the database.</p> <p>For more information, see <a href=\"http://docs.aws.amazon.com/http:/docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html\">CREATE USER</a> in the Amazon Redshift Database Developer Guide. </p> <p>Constraints:</p> <ul> <li> <p>Must be 1 to 128 alphanumeric characters or hyphens</p> </li> <li> <p>Must contain only lowercase letters.</p> </li> <li> <p>First character must be a letter.</p> </li> <li> <p>Must not contain a colon ( : ) or slash ( / ). </p> </li> <li> <p>Cannot be a reserved word. A list of reserved words can be found in <a href=\"http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html\">Reserved Words</a> in the Amazon Redshift Database Developer Guide.</p> </li> </ul>",
|
"GetClusterCredentialsMessage$DbUser": "<p>The name of a database user. If a user name matching <code>DbUser</code> exists in the database, the temporary user credentials have the same permissions as the existing user. If <code>DbUser</code> doesn't exist in the database and <code>Autocreate</code> is <code>True</code>, a new user is created using the value for <code>DbUser</code> with PUBLIC permissions. If a database user matching the value for <code>DbUser</code> doesn't exist and <code>Autocreate</code> is <code>False</code>, then the command succeeds but the connection attempt will fail because the user doesn't exist in the database.</p> <p>For more information, see <a href=\"http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html\">CREATE USER</a> in the Amazon Redshift Database Developer Guide. </p> <p>Constraints:</p> <ul> <li> <p>Must be 1 to 64 alphanumeric characters or hyphens</p> </li> <li> <p>Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.</p> </li> <li> <p>First character must be a letter.</p> </li> <li> <p>Must not contain a colon ( : ) or slash ( / ). </p> </li> <li> <p>Cannot be a reserved word. A list of reserved words can be found in <a href=\"http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html\">Reserved Words</a> in the Amazon Redshift Database Developer Guide.</p> </li> </ul>",
|
||||||
"GetClusterCredentialsMessage$DbName": "<p>The name of a database that <code>DbUser</code> is authorized to log on to. If <code>DbName</code> is not specified, <code>DbUser</code> can log in to any existing database.</p> <p>Constraints:</p> <ul> <li> <p>Must be 1 to 64 alphanumeric characters or hyphens</p> </li> <li> <p>Must contain only lowercase letters.</p> </li> <li> <p>Cannot be a reserved word. A list of reserved words can be found in <a href=\"http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html\">Reserved Words</a> in the Amazon Redshift Database Developer Guide.</p> </li> </ul>",
|
"GetClusterCredentialsMessage$DbName": "<p>The name of a database that <code>DbUser</code> is authorized to log on to. If <code>DbName</code> is not specified, <code>DbUser</code> can log on to any existing database.</p> <p>Constraints:</p> <ul> <li> <p>Must be 1 to 64 alphanumeric characters or hyphens</p> </li> <li> <p>Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.</p> </li> <li> <p>First character must be a letter.</p> </li> <li> <p>Must not contain a colon ( : ) or slash ( / ). </p> </li> <li> <p>Cannot be a reserved word. A list of reserved words can be found in <a href=\"http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html\">Reserved Words</a> in the Amazon Redshift Database Developer Guide.</p> </li> </ul>",
|
||||||
"GetClusterCredentialsMessage$ClusterIdentifier": "<p>The unique identifier of the cluster that contains the database for which your are requesting credentials. This parameter is case sensitive.</p>",
|
"GetClusterCredentialsMessage$ClusterIdentifier": "<p>The unique identifier of the cluster that contains the database for which your are requesting credentials. This parameter is case sensitive.</p>",
|
||||||
"HsmClientCertificate$HsmClientCertificateIdentifier": "<p>The identifier of the HSM client certificate.</p>",
|
"HsmClientCertificate$HsmClientCertificateIdentifier": "<p>The identifier of the HSM client certificate.</p>",
|
||||||
"HsmClientCertificate$HsmClientCertificatePublicKey": "<p>The public key that the Amazon Redshift cluster will use to connect to the HSM. You must register the public key in the HSM.</p>",
|
"HsmClientCertificate$HsmClientCertificatePublicKey": "<p>The public key that the Amazon Redshift cluster will use to connect to the HSM. You must register the public key in the HSM.</p>",
|
||||||
@ -1946,7 +1946,7 @@
|
|||||||
"TagKeyList$member": null,
|
"TagKeyList$member": null,
|
||||||
"TagValueList$member": null,
|
"TagValueList$member": null,
|
||||||
"TaggedResource$ResourceName": "<p>The Amazon Resource Name (ARN) with which the tag is associated. For example, <code>arn:aws:redshift:us-east-1:123456789:cluster:t1</code>.</p>",
|
"TaggedResource$ResourceName": "<p>The Amazon Resource Name (ARN) with which the tag is associated. For example, <code>arn:aws:redshift:us-east-1:123456789:cluster:t1</code>.</p>",
|
||||||
"TaggedResource$ResourceType": "<p>The type of resource with which the tag is associated. Valid resource types are: </p> <ul> <li> <p>Cluster</p> </li> <li> <p>CIDR/IP</p> </li> <li> <p>EC2 security group</p> </li> <li> <p>Snapshot</p> </li> <li> <p>Cluster security group</p> </li> <li> <p>Subnet group</p> </li> <li> <p>HSM connection</p> </li> <li> <p>HSM certificate</p> </li> <li> <p>Parameter group</p> </li> </ul> <p>For more information about Amazon Redshift resource types and constructing ARNs, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/constructing-redshift-arn.html\">Constructing an Amazon Redshift Amazon Resource Name (ARN)</a> in the Amazon Redshift Cluster Management Guide. </p>",
|
"TaggedResource$ResourceType": "<p>The type of resource with which the tag is associated. Valid resource types are: </p> <ul> <li> <p>Cluster</p> </li> <li> <p>CIDR/IP</p> </li> <li> <p>EC2 security group</p> </li> <li> <p>Snapshot</p> </li> <li> <p>Cluster security group</p> </li> <li> <p>Subnet group</p> </li> <li> <p>HSM connection</p> </li> <li> <p>HSM certificate</p> </li> <li> <p>Parameter group</p> </li> </ul> <p>For more information about Amazon Redshift resource types and constructing ARNs, go to <a href=\"http://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-overview.html#redshift-iam-access-control-specify-actions\">Constructing an Amazon Redshift Amazon Resource Name (ARN)</a> in the Amazon Redshift Cluster Management Guide. </p>",
|
||||||
"TaggedResourceListMessage$Marker": "<p>A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the <code>Marker</code> parameter and retrying the command. If the <code>Marker</code> field is empty, all response records have been retrieved for the request. </p>",
|
"TaggedResourceListMessage$Marker": "<p>A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the <code>Marker</code> parameter and retrying the command. If the <code>Marker</code> field is empty, all response records have been retrieved for the request. </p>",
|
||||||
"VpcSecurityGroupIdList$member": null,
|
"VpcSecurityGroupIdList$member": null,
|
||||||
"VpcSecurityGroupMembership$VpcSecurityGroupId": "<p>The identifier of the VPC security group.</p>",
|
"VpcSecurityGroupMembership$VpcSecurityGroupId": "<p>The identifier of the VPC security group.</p>",
|
||||||
@ -2006,7 +2006,7 @@
|
|||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"Cluster$ClusterCreateTime": "<p>The date and time that the cluster was created.</p>",
|
"Cluster$ClusterCreateTime": "<p>The date and time that the cluster was created.</p>",
|
||||||
"ClusterCredentials$Expiration": "<p>The date and time <code>DbPassword</code> expires.</p>",
|
"ClusterCredentials$Expiration": "<p>The date and time the password in <code>DbPassword</code> expires.</p>",
|
||||||
"DescribeClusterSnapshotsMessage$StartTime": "<p>A value that requests only snapshots created at or after the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the <a href=\"http://en.wikipedia.org/wiki/ISO_8601\">ISO8601 Wikipedia page.</a> </p> <p>Example: <code>2012-07-16T18:00:00Z</code> </p>",
|
"DescribeClusterSnapshotsMessage$StartTime": "<p>A value that requests only snapshots created at or after the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the <a href=\"http://en.wikipedia.org/wiki/ISO_8601\">ISO8601 Wikipedia page.</a> </p> <p>Example: <code>2012-07-16T18:00:00Z</code> </p>",
|
||||||
"DescribeClusterSnapshotsMessage$EndTime": "<p>A time value that requests only snapshots created at or before the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the <a href=\"http://en.wikipedia.org/wiki/ISO_8601\">ISO8601 Wikipedia page.</a> </p> <p>Example: <code>2012-07-16T18:00:00Z</code> </p>",
|
"DescribeClusterSnapshotsMessage$EndTime": "<p>A time value that requests only snapshots created at or before the specified time. The time value is specified in ISO 8601 format. For more information about ISO 8601, go to the <a href=\"http://en.wikipedia.org/wiki/ISO_8601\">ISO8601 Wikipedia page.</a> </p> <p>Example: <code>2012-07-16T18:00:00Z</code> </p>",
|
||||||
"DescribeEventsMessage$StartTime": "<p>The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the <a href=\"http://en.wikipedia.org/wiki/ISO_8601\">ISO8601 Wikipedia page.</a> </p> <p>Example: <code>2009-07-08T18:00Z</code> </p>",
|
"DescribeEventsMessage$StartTime": "<p>The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the <a href=\"http://en.wikipedia.org/wiki/ISO_8601\">ISO8601 Wikipedia page.</a> </p> <p>Example: <code>2009-07-08T18:00Z</code> </p>",
|
||||||
@ -2066,6 +2066,7 @@
|
|||||||
"DescribeClusterSnapshotsMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching cluster snapshots that are associated with the specified key or keys. For example, suppose that you have snapshots that are tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag keys associated with them.</p>",
|
"DescribeClusterSnapshotsMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching cluster snapshots that are associated with the specified key or keys. For example, suppose that you have snapshots that are tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag keys associated with them.</p>",
|
||||||
"DescribeClusterSubnetGroupsMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching cluster subnet groups that are associated with the specified key or keys. For example, suppose that you have subnet groups that are tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag keys associated with them.</p>",
|
"DescribeClusterSubnetGroupsMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching cluster subnet groups that are associated with the specified key or keys. For example, suppose that you have subnet groups that are tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag keys associated with them.</p>",
|
||||||
"DescribeClustersMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching clusters that are associated with the specified key or keys. For example, suppose that you have clusters that are tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the clusters that have either or both of these tag keys associated with them.</p>",
|
"DescribeClustersMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching clusters that are associated with the specified key or keys. For example, suppose that you have clusters that are tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the clusters that have either or both of these tag keys associated with them.</p>",
|
||||||
|
"DescribeEventSubscriptionsMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching event notification subscriptions that are associated with the specified key or keys. For example, suppose that you have subscriptions that are tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the subscriptions that have either or both of these tag keys associated with them.</p>",
|
||||||
"DescribeHsmClientCertificatesMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching HSM client certificates that are associated with the specified key or keys. For example, suppose that you have HSM client certificates that are tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag keys associated with them.</p>",
|
"DescribeHsmClientCertificatesMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching HSM client certificates that are associated with the specified key or keys. For example, suppose that you have HSM client certificates that are tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag keys associated with them.</p>",
|
||||||
"DescribeHsmConfigurationsMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching HSM configurations that are associated with the specified key or keys. For example, suppose that you have HSM configurations that are tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag keys associated with them.</p>",
|
"DescribeHsmConfigurationsMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching HSM configurations that are associated with the specified key or keys. For example, suppose that you have HSM configurations that are tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag keys associated with them.</p>",
|
||||||
"DescribeSnapshotCopyGrantsMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them.</p>",
|
"DescribeSnapshotCopyGrantsMessage$TagKeys": "<p>A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called <code>owner</code> and <code>environment</code>. If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them.</p>",
|
||||||
@ -2111,6 +2112,7 @@
|
|||||||
"DescribeClusterSnapshotsMessage$TagValues": "<p>A tag value or values for which you want to return all matching cluster snapshots that are associated with the specified tag value or values. For example, suppose that you have snapshots that are tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag values associated with them.</p>",
|
"DescribeClusterSnapshotsMessage$TagValues": "<p>A tag value or values for which you want to return all matching cluster snapshots that are associated with the specified tag value or values. For example, suppose that you have snapshots that are tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag values associated with them.</p>",
|
||||||
"DescribeClusterSubnetGroupsMessage$TagValues": "<p>A tag value or values for which you want to return all matching cluster subnet groups that are associated with the specified tag value or values. For example, suppose that you have subnet groups that are tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag values associated with them.</p>",
|
"DescribeClusterSubnetGroupsMessage$TagValues": "<p>A tag value or values for which you want to return all matching cluster subnet groups that are associated with the specified tag value or values. For example, suppose that you have subnet groups that are tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag values associated with them.</p>",
|
||||||
"DescribeClustersMessage$TagValues": "<p>A tag value or values for which you want to return all matching clusters that are associated with the specified tag value or values. For example, suppose that you have clusters that are tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with the clusters that have either or both of these tag values associated with them.</p>",
|
"DescribeClustersMessage$TagValues": "<p>A tag value or values for which you want to return all matching clusters that are associated with the specified tag value or values. For example, suppose that you have clusters that are tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with the clusters that have either or both of these tag values associated with them.</p>",
|
||||||
|
"DescribeEventSubscriptionsMessage$TagValues": "<p>A tag value or values for which you want to return all matching event notification subscriptions that are associated with the specified tag value or values. For example, suppose that you have subscriptions that are tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with the subscriptions that have either or both of these tag values associated with them.</p>",
|
||||||
"DescribeHsmClientCertificatesMessage$TagValues": "<p>A tag value or values for which you want to return all matching HSM client certificates that are associated with the specified tag value or values. For example, suppose that you have HSM client certificates that are tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag values associated with them.</p>",
|
"DescribeHsmClientCertificatesMessage$TagValues": "<p>A tag value or values for which you want to return all matching HSM client certificates that are associated with the specified tag value or values. For example, suppose that you have HSM client certificates that are tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag values associated with them.</p>",
|
||||||
"DescribeHsmConfigurationsMessage$TagValues": "<p>A tag value or values for which you want to return all matching HSM configurations that are associated with the specified tag value or values. For example, suppose that you have HSM configurations that are tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag values associated with them.</p>",
|
"DescribeHsmConfigurationsMessage$TagValues": "<p>A tag value or values for which you want to return all matching HSM configurations that are associated with the specified tag value or values. For example, suppose that you have HSM configurations that are tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM configurations that have either or both of these tag values associated with them.</p>",
|
||||||
"DescribeSnapshotCopyGrantsMessage$TagValues": "<p>A tag value or values for which you want to return all matching resources that are associated with the specified value or values. For example, suppose that you have resources tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with all resources that have either or both of these tag values associated with them.</p>",
|
"DescribeSnapshotCopyGrantsMessage$TagValues": "<p>A tag value or values for which you want to return all matching resources that are associated with the specified value or values. For example, suppose that you have resources tagged with values called <code>admin</code> and <code>test</code>. If you specify both of these tag values in the request, Amazon Redshift returns a response with all resources that have either or both of these tag values associated with them.</p>",
|
||||||
|
22
vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/api-2.json
generated
vendored
22
vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/api-2.json
generated
vendored
@ -2913,6 +2913,25 @@
|
|||||||
"max":30,
|
"max":30,
|
||||||
"min":10
|
"min":10
|
||||||
},
|
},
|
||||||
|
"ResettableElementName":{
|
||||||
|
"type":"string",
|
||||||
|
"enum":[
|
||||||
|
"FullyQualifiedDomainName",
|
||||||
|
"Regions",
|
||||||
|
"ResourcePath",
|
||||||
|
"ChildHealthChecks"
|
||||||
|
],
|
||||||
|
"max":64,
|
||||||
|
"min":1
|
||||||
|
},
|
||||||
|
"ResettableElementNameList":{
|
||||||
|
"type":"list",
|
||||||
|
"member":{
|
||||||
|
"shape":"ResettableElementName",
|
||||||
|
"locationName":"ResettableElementName"
|
||||||
|
},
|
||||||
|
"max":64
|
||||||
|
},
|
||||||
"ResourceDescription":{
|
"ResourceDescription":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"max":256
|
"max":256
|
||||||
@ -3382,7 +3401,8 @@
|
|||||||
"EnableSNI":{"shape":"EnableSNI"},
|
"EnableSNI":{"shape":"EnableSNI"},
|
||||||
"Regions":{"shape":"HealthCheckRegionList"},
|
"Regions":{"shape":"HealthCheckRegionList"},
|
||||||
"AlarmIdentifier":{"shape":"AlarmIdentifier"},
|
"AlarmIdentifier":{"shape":"AlarmIdentifier"},
|
||||||
"InsufficientDataHealthStatus":{"shape":"InsufficientDataHealthStatus"}
|
"InsufficientDataHealthStatus":{"shape":"InsufficientDataHealthStatus"},
|
||||||
|
"ResetElements":{"shape":"ResettableElementNameList"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"UpdateHealthCheckResponse":{
|
"UpdateHealthCheckResponse":{
|
||||||
|
16
vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/docs-2.json
generated
vendored
16
vendor/github.com/aws/aws-sdk-go/models/apis/route53/2013-04-01/docs-2.json
generated
vendored
@ -295,7 +295,7 @@
|
|||||||
"DNSName": {
|
"DNSName": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"AliasTarget$DNSName": "<p> <i>Alias resource record sets only:</i> The value that you specify depends on where you want to route queries:</p> <dl> <dt>CloudFront distribution</dt> <dd> <p>Specify the domain name that CloudFront assigned when you created your distribution.</p> <p>Your CloudFront distribution must include an alternate domain name that matches the name of the resource record set. For example, if the name of the resource record set is <i>acme.example.com</i>, your CloudFront distribution must include <i>acme.example.com</i> as one of the alternate domain names. For more information, see <a href=\"http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/CNAMEs.html\">Using Alternate Domain Names (CNAMEs)</a> in the <i>Amazon CloudFront Developer Guide</i>.</p> </dd> <dt>Elastic Beanstalk environment</dt> <dd> <p>Specify the <code>CNAME</code> attribute for the environment. (The environment must have a regionalized domain name.) You can use the following methods to get the value of the CNAME attribute:</p> <ul> <li> <p> <i>AWS Management Console</i>: For information about how to get the value by using the console, see <a href=\"http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/customdomains.html\">Using Custom Domains with AWS Elastic Beanstalk</a> in the <i>AWS Elastic Beanstalk Developer Guide</i>.</p> </li> <li> <p> <i>Elastic Beanstalk API</i>: Use the <code>DescribeEnvironments</code> action to get the value of the <code>CNAME</code> attribute. For more information, see <a href=\"http://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeEnvironments.html\">DescribeEnvironments</a> in the <i>AWS Elastic Beanstalk API Reference</i>.</p> </li> <li> <p> <i>AWS CLI</i>: Use the <code>describe-environments</code> command to get the value of the <code>CNAME</code> attribute. 
For more information, see <a href=\"http://docs.aws.amazon.com/cli/latest/reference/elasticbeanstalk/describe-environments.html\">describe-environments</a> in the <i>AWS Command Line Interface Reference</i>.</p> </li> </ul> </dd> <dt>ELB load balancer</dt> <dd> <p>Specify the DNS name that is associated with the load balancer. Get the DNS name by using the AWS Management Console, the ELB API, or the AWS CLI. </p> <ul> <li> <p> <b>AWS Management Console</b>: Go to the EC2 page, choose <b>Load Balancers</b> in the navigation pane, choose the load balancer, choose the <b>Description</b> tab, and get the value of the <b>DNS name</b> field. (If you're routing traffic to a Classic Load Balancer, get the value that begins with <b>dualstack</b>.) </p> </li> <li> <p> <b>Elastic Load Balancing API</b>: Use <code>DescribeLoadBalancers</code> to get the value of <code>DNSName</code>. For more information, see the applicable guide:</p> <ul> <li> <p>Classic Load Balancer: <a href=\"http://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html\">DescribeLoadBalancers</a> </p> </li> <li> <p>Application Load Balancer: <a href=\"http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html\">DescribeLoadBalancers</a> </p> </li> </ul> </li> <li> <p> <b>AWS CLI</b>: Use <code> <a href=\"http://docs.aws.amazon.com/cli/latest/reference/elb/describe-load-balancers.html\">describe-load-balancers</a> </code> to get the value of <code>DNSName</code>.</p> </li> </ul> </dd> <dt>Amazon S3 bucket that is configured as a static website</dt> <dd> <p>Specify the domain name of the Amazon S3 website endpoint in which you created the bucket, for example, <code>s3-website-us-east-2.amazonaws.com</code>. 
For more information about valid values, see the table <a href=\"http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region\">Amazon Simple Storage Service (S3) Website Endpoints</a> in the <i>Amazon Web Services General Reference</i>. For more information about using S3 buckets for websites, see <a href=\"http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/getting-started.html\">Getting Started with Amazon Route 53</a> in the <i>Amazon Route 53 Developer Guide.</i> </p> </dd> <dt>Another Amazon Route 53 resource record set</dt> <dd> <p>Specify the value of the <code>Name</code> element for a resource record set in the current hosted zone.</p> </dd> </dl>",
|
"AliasTarget$DNSName": "<p> <i>Alias resource record sets only:</i> The value that you specify depends on where you want to route queries:</p> <dl> <dt>CloudFront distribution</dt> <dd> <p>Specify the domain name that CloudFront assigned when you created your distribution.</p> <p>Your CloudFront distribution must include an alternate domain name that matches the name of the resource record set. For example, if the name of the resource record set is <i>acme.example.com</i>, your CloudFront distribution must include <i>acme.example.com</i> as one of the alternate domain names. For more information, see <a href=\"http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/CNAMEs.html\">Using Alternate Domain Names (CNAMEs)</a> in the <i>Amazon CloudFront Developer Guide</i>.</p> </dd> <dt>Elastic Beanstalk environment</dt> <dd> <p>Specify the <code>CNAME</code> attribute for the environment. (The environment must have a regionalized domain name.) You can use the following methods to get the value of the CNAME attribute:</p> <ul> <li> <p> <i>AWS Management Console</i>: For information about how to get the value by using the console, see <a href=\"http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/customdomains.html\">Using Custom Domains with AWS Elastic Beanstalk</a> in the <i>AWS Elastic Beanstalk Developer Guide</i>.</p> </li> <li> <p> <i>Elastic Beanstalk API</i>: Use the <code>DescribeEnvironments</code> action to get the value of the <code>CNAME</code> attribute. For more information, see <a href=\"http://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeEnvironments.html\">DescribeEnvironments</a> in the <i>AWS Elastic Beanstalk API Reference</i>.</p> </li> <li> <p> <i>AWS CLI</i>: Use the <code>describe-environments</code> command to get the value of the <code>CNAME</code> attribute. 
For more information, see <a href=\"http://docs.aws.amazon.com/cli/latest/reference/elasticbeanstalk/describe-environments.html\">describe-environments</a> in the <i>AWS Command Line Interface Reference</i>.</p> </li> </ul> </dd> <dt>ELB load balancer</dt> <dd> <p>Specify the DNS name that is associated with the load balancer. Get the DNS name by using the AWS Management Console, the ELB API, or the AWS CLI. </p> <ul> <li> <p> <b>AWS Management Console</b>: Go to the EC2 page, choose <b>Load Balancers</b> in the navigation pane, choose the load balancer, choose the <b>Description</b> tab, and get the value of the <b>DNS name</b> field. (If you're routing traffic to a Classic Load Balancer, get the value that begins with <b>dualstack</b>.) </p> </li> <li> <p> <b>Elastic Load Balancing API</b>: Use <code>DescribeLoadBalancers</code> to get the value of <code>DNSName</code>. For more information, see the applicable guide:</p> <ul> <li> <p>Classic Load Balancers: <a href=\"http://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html\">DescribeLoadBalancers</a> </p> </li> <li> <p>Application and Network Load Balancers: <a href=\"http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html\">DescribeLoadBalancers</a> </p> </li> </ul> </li> <li> <p> <b>AWS CLI</b>: Use <code>describe-load-balancers</code> to get the value of <code>DNSName</code>. 
For more information, see the applicable guide:</p> <ul> <li> <p>Classic Load Balancers: <a href=\"http://docs.aws.amazon.com/cli/latest/reference/elb/describe-load-balancers.html\">describe-load-balancers</a> </p> </li> <li> <p>Application and Network Load Balancers: <a href=\"http://docs.aws.amazon.com/cli/latest/reference/elbv2/describe-load-balancers.html\">describe-load-balancers</a> </p> </li> </ul> </li> </ul> </dd> <dt>Amazon S3 bucket that is configured as a static website</dt> <dd> <p>Specify the domain name of the Amazon S3 website endpoint in which you created the bucket, for example, <code>s3-website-us-east-2.amazonaws.com</code>. For more information about valid values, see the table <a href=\"http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region\">Amazon Simple Storage Service (S3) Website Endpoints</a> in the <i>Amazon Web Services General Reference</i>. For more information about using S3 buckets for websites, see <a href=\"http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/getting-started.html\">Getting Started with Amazon Route 53</a> in the <i>Amazon Route 53 Developer Guide.</i> </p> </dd> <dt>Another Amazon Route 53 resource record set</dt> <dd> <p>Specify the value of the <code>Name</code> element for a resource record set in the current hosted zone.</p> </dd> </dl>",
|
||||||
"CreateHostedZoneRequest$Name": "<p>The name of the domain. For resource record types that include a domain name, specify a fully qualified domain name, for example, <i>www.example.com</i>. The trailing dot is optional; Amazon Route 53 assumes that the domain name is fully qualified. This means that Amazon Route 53 treats <i>www.example.com</i> (without a trailing dot) and <i>www.example.com.</i> (with a trailing dot) as identical.</p> <p>If you're creating a public hosted zone, this is the name you have registered with your DNS registrar. If your domain name is registered with a registrar other than Amazon Route 53, change the name servers for your domain to the set of <code>NameServers</code> that <code>CreateHostedZone</code> returns in <code>DelegationSet</code>.</p>",
|
"CreateHostedZoneRequest$Name": "<p>The name of the domain. For resource record types that include a domain name, specify a fully qualified domain name, for example, <i>www.example.com</i>. The trailing dot is optional; Amazon Route 53 assumes that the domain name is fully qualified. This means that Amazon Route 53 treats <i>www.example.com</i> (without a trailing dot) and <i>www.example.com.</i> (with a trailing dot) as identical.</p> <p>If you're creating a public hosted zone, this is the name you have registered with your DNS registrar. If your domain name is registered with a registrar other than Amazon Route 53, change the name servers for your domain to the set of <code>NameServers</code> that <code>CreateHostedZone</code> returns in <code>DelegationSet</code>.</p>",
|
||||||
"CreateTrafficPolicyInstanceRequest$Name": "<p>The domain name (such as example.com) or subdomain name (such as www.example.com) for which Amazon Route 53 responds to DNS queries by using the resource record sets that Amazon Route 53 creates for this traffic policy instance.</p>",
|
"CreateTrafficPolicyInstanceRequest$Name": "<p>The domain name (such as example.com) or subdomain name (such as www.example.com) for which Amazon Route 53 responds to DNS queries by using the resource record sets that Amazon Route 53 creates for this traffic policy instance.</p>",
|
||||||
"DelegationSetNameServers$member": null,
|
"DelegationSetNameServers$member": null,
|
||||||
@ -1426,6 +1426,18 @@
|
|||||||
"HealthCheckConfig$RequestInterval": "<p>The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health check request. Each Amazon Route 53 health checker makes requests at this interval.</p> <important> <p>You can't change the value of <code>RequestInterval</code> after you create a health check.</p> </important> <p>If you don't specify a value for <code>RequestInterval</code>, the default value is <code>30</code> seconds.</p>"
|
"HealthCheckConfig$RequestInterval": "<p>The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health check request. Each Amazon Route 53 health checker makes requests at this interval.</p> <important> <p>You can't change the value of <code>RequestInterval</code> after you create a health check.</p> </important> <p>If you don't specify a value for <code>RequestInterval</code>, the default value is <code>30</code> seconds.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"ResettableElementName": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"ResettableElementNameList$member": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ResettableElementNameList": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"UpdateHealthCheckRequest$ResetElements": "<p>A complex type that contains one <code>ResetElement</code> element for each element that you want to reset to the default value. Valid values for <code>ResetElement</code> include the following:</p> <ul> <li> <p> <code>ChildHealthChecks</code>: Amazon Route 53 resets <a>HealthCheckConfig$ChildHealthChecks</a> to null.</p> </li> <li> <p> <code>FullyQualifiedDomainName</code>: Amazon Route 53 resets <a>HealthCheckConfig$FullyQualifiedDomainName</a> to null.</p> </li> <li> <p> <code>Regions</code>: Amazon Route 53 resets the <a>HealthCheckConfig$Regions</a> list to the default set of regions. </p> </li> <li> <p> <code>ResourcePath</code>: Amazon Route 53 resets <a>HealthCheckConfig$ResourcePath</a> to null.</p> </li> </ul>"
|
||||||
|
}
|
||||||
|
},
|
||||||
"ResourceDescription": {
|
"ResourceDescription": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -1438,7 +1450,7 @@
|
|||||||
"ResourceId": {
|
"ResourceId": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"AliasTarget$HostedZoneId": "<p> <i>Alias resource records sets only</i>: The value used depends on where you want to route traffic:</p> <dl> <dt>CloudFront distribution</dt> <dd> <p>Specify <code>Z2FDTNDATAQYW2</code>.</p> <note> <p>Alias resource record sets for CloudFront can't be created in a private zone.</p> </note> </dd> <dt>Elastic Beanstalk environment</dt> <dd> <p>Specify the hosted zone ID for the region in which you created the environment. The environment must have a regionalized subdomain. For a list of regions and the corresponding hosted zone IDs, see <a href=\"http://docs.aws.amazon.com/general/latest/gr/rande.html#elasticbeanstalk_region\">AWS Elastic Beanstalk</a> in the \"AWS Regions and Endpoints\" chapter of the <i>Amazon Web Services General Reference</i>.</p> </dd> <dt>ELB load balancer</dt> <dd> <p>Specify the value of the hosted zone ID for the load balancer. Use the following methods to get the hosted zone ID:</p> <ul> <li> <p> <a href=\"http://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region\">Elastic Load Balancing</a> table in the \"AWS Regions and Endpoints\" chapter of the <i>Amazon Web Services General Reference</i>: Use the value in the \"Amazon Route 53 Hosted Zone ID\" column that corresponds with the region that you created your load balancer in.</p> </li> <li> <p> <b>AWS Management Console</b>: Go to the Amazon EC2 page, click <b>Load Balancers</b> in the navigation pane, select the load balancer, and get the value of the <b>Hosted zone</b> field on the <b>Description</b> tab.</p> </li> <li> <p> <b>Elastic Load Balancing API</b>: Use <code>DescribeLoadBalancers</code> to get the value of <code>CanonicalHostedZoneNameId</code>. 
For more information, see the applicable guide:</p> <ul> <li> <p>Classic Load Balancer: <a href=\"http://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html\">DescribeLoadBalancers</a> </p> </li> <li> <p>Application Load Balancer: <a href=\"http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html\">DescribeLoadBalancers</a> </p> </li> </ul> </li> <li> <p> <b>AWS CLI</b>: Use <code> <a href=\"http://docs.aws.amazon.com/cli/latest/reference/elb/describe-load-balancers.html\">describe-load-balancers</a> </code> to get the value of <code>CanonicalHostedZoneNameID</code>.</p> </li> </ul> </dd> <dt>An Amazon S3 bucket configured as a static website</dt> <dd> <p>Specify the hosted zone ID for the region that you created the bucket in. For more information about valid values, see the <a href=\"http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region\">Amazon Simple Storage Service Website Endpoints</a> table in the \"AWS Regions and Endpoints\" chapter of the <i>Amazon Web Services General Reference</i>.</p> </dd> <dt>Another Amazon Route 53 resource record set in your hosted zone</dt> <dd> <p>Specify the hosted zone ID of your hosted zone. (An alias resource record set can't reference a resource record set in a different hosted zone.)</p> </dd> </dl>",
|
"AliasTarget$HostedZoneId": "<p> <i>Alias resource records sets only</i>: The value used depends on where you want to route traffic:</p> <dl> <dt>CloudFront distribution</dt> <dd> <p>Specify <code>Z2FDTNDATAQYW2</code>.</p> <note> <p>Alias resource record sets for CloudFront can't be created in a private zone.</p> </note> </dd> <dt>Elastic Beanstalk environment</dt> <dd> <p>Specify the hosted zone ID for the region in which you created the environment. The environment must have a regionalized subdomain. For a list of regions and the corresponding hosted zone IDs, see <a href=\"http://docs.aws.amazon.com/general/latest/gr/rande.html#elasticbeanstalk_region\">AWS Elastic Beanstalk</a> in the \"AWS Regions and Endpoints\" chapter of the <i>Amazon Web Services General Reference</i>.</p> </dd> <dt>ELB load balancer</dt> <dd> <p>Specify the value of the hosted zone ID for the load balancer. Use the following methods to get the hosted zone ID:</p> <ul> <li> <p> <a href=\"http://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region\">Elastic Load Balancing</a> table in the \"AWS Regions and Endpoints\" chapter of the <i>Amazon Web Services General Reference</i>: Use the value that corresponds with the region that you created your load balancer in. Note that there are separate columns for Application and Classic Load Balancers and for Network Load Balancers.</p> </li> <li> <p> <b>AWS Management Console</b>: Go to the Amazon EC2 page, choose <b>Load Balancers</b> in the navigation pane, select the load balancer, and get the value of the <b>Hosted zone</b> field on the <b>Description</b> tab.</p> </li> <li> <p> <b>Elastic Load Balancing API</b>: Use <code>DescribeLoadBalancers</code> to get the value of <code>CanonicalHostedZoneNameId</code>. 
For more information, see the applicable guide:</p> <ul> <li> <p>Classic Load Balancers: <a href=\"http://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html\">DescribeLoadBalancers</a> </p> </li> <li> <p>Application and Network Load Balancers: <a href=\"http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html\">DescribeLoadBalancers</a> </p> </li> </ul> </li> <li> <p> <b>AWS CLI</b>: Use <code>describe-load-balancers</code> to get the value of <code>CanonicalHostedZoneNameID</code> (for Classic Load Balancers) or <code>CanonicalHostedZoneNameID</code> (for Application and Network Load Balancers). For more information, see the applicable guide:</p> <ul> <li> <p>Classic Load Balancers: <a href=\"http://docs.aws.amazon.com/cli/latest/reference/elb/describe-load-balancers.html\">describe-load-balancers</a> </p> </li> <li> <p>Application and Network Load Balancers: <a href=\"http://docs.aws.amazon.com/cli/latest/reference/elbv2/describe-load-balancers.html\">describe-load-balancers</a> </p> </li> </ul> </li> </ul> </dd> <dt>An Amazon S3 bucket configured as a static website</dt> <dd> <p>Specify the hosted zone ID for the region that you created the bucket in. For more information about valid values, see the <a href=\"http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region\">Amazon Simple Storage Service Website Endpoints</a> table in the \"AWS Regions and Endpoints\" chapter of the <i>Amazon Web Services General Reference</i>.</p> </dd> <dt>Another Amazon Route 53 resource record set in your hosted zone</dt> <dd> <p>Specify the hosted zone ID of your hosted zone. (An alias resource record set can't reference a resource record set in a different hosted zone.)</p> </dd> </dl>",
|
||||||
"AssociateVPCWithHostedZoneRequest$HostedZoneId": "<p>The ID of the private hosted zone that you want to associate an Amazon VPC with.</p> <p>Note that you can't associate a VPC with a hosted zone that doesn't have an existing VPC association.</p>",
|
"AssociateVPCWithHostedZoneRequest$HostedZoneId": "<p>The ID of the private hosted zone that you want to associate an Amazon VPC with.</p> <p>Note that you can't associate a VPC with a hosted zone that doesn't have an existing VPC association.</p>",
|
||||||
"ChangeInfo$Id": "<p>The ID of the request.</p>",
|
"ChangeInfo$Id": "<p>The ID of the request.</p>",
|
||||||
"ChangeResourceRecordSetsRequest$HostedZoneId": "<p>The ID of the hosted zone that contains the resource record sets that you want to change.</p>",
|
"ChangeResourceRecordSetsRequest$HostedZoneId": "<p>The ID of the hosted zone that contains the resource record sets that you want to change.</p>",
|
||||||
|
68
vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/api-2.json
generated
vendored
68
vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/api-2.json
generated
vendored
@ -24,6 +24,19 @@
|
|||||||
{"shape":"UnsupportedTLD"}
|
{"shape":"UnsupportedTLD"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"CheckDomainTransferability":{
|
||||||
|
"name":"CheckDomainTransferability",
|
||||||
|
"http":{
|
||||||
|
"method":"POST",
|
||||||
|
"requestUri":"/"
|
||||||
|
},
|
||||||
|
"input":{"shape":"CheckDomainTransferabilityRequest"},
|
||||||
|
"output":{"shape":"CheckDomainTransferabilityResponse"},
|
||||||
|
"errors":[
|
||||||
|
{"shape":"InvalidInput"},
|
||||||
|
{"shape":"UnsupportedTLD"}
|
||||||
|
]
|
||||||
|
},
|
||||||
"DeleteTagsForDomain":{
|
"DeleteTagsForDomain":{
|
||||||
"name":"DeleteTagsForDomain",
|
"name":"DeleteTagsForDomain",
|
||||||
"http":{
|
"http":{
|
||||||
@ -374,6 +387,21 @@
|
|||||||
"Availability":{"shape":"DomainAvailability"}
|
"Availability":{"shape":"DomainAvailability"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"CheckDomainTransferabilityRequest":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["DomainName"],
|
||||||
|
"members":{
|
||||||
|
"DomainName":{"shape":"DomainName"},
|
||||||
|
"AuthCode":{"shape":"DomainAuthCode"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"CheckDomainTransferabilityResponse":{
|
||||||
|
"type":"structure",
|
||||||
|
"required":["Transferability"],
|
||||||
|
"members":{
|
||||||
|
"Transferability":{"shape":"DomainTransferability"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"City":{
|
"City":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"max":255
|
"max":255
|
||||||
@ -721,8 +749,7 @@
|
|||||||
},
|
},
|
||||||
"DomainName":{
|
"DomainName":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"max":255,
|
"max":255
|
||||||
"pattern":"[a-zA-Z0-9_\\-.]*"
|
|
||||||
},
|
},
|
||||||
"DomainStatus":{"type":"string"},
|
"DomainStatus":{"type":"string"},
|
||||||
"DomainStatusList":{
|
"DomainStatusList":{
|
||||||
@ -754,6 +781,12 @@
|
|||||||
"type":"list",
|
"type":"list",
|
||||||
"member":{"shape":"DomainSummary"}
|
"member":{"shape":"DomainSummary"}
|
||||||
},
|
},
|
||||||
|
"DomainTransferability":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"Transferable":{"shape":"Transferable"}
|
||||||
|
}
|
||||||
|
},
|
||||||
"DuplicateRequest":{
|
"DuplicateRequest":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -831,11 +864,16 @@
|
|||||||
"ES_LEGAL_FORM",
|
"ES_LEGAL_FORM",
|
||||||
"FI_BUSINESS_NUMBER",
|
"FI_BUSINESS_NUMBER",
|
||||||
"FI_ID_NUMBER",
|
"FI_ID_NUMBER",
|
||||||
|
"FI_NATIONALITY",
|
||||||
|
"FI_ORGANIZATION_TYPE",
|
||||||
"IT_PIN",
|
"IT_PIN",
|
||||||
|
"IT_REGISTRANT_ENTITY_TYPE",
|
||||||
"RU_PASSPORT_DATA",
|
"RU_PASSPORT_DATA",
|
||||||
"SE_ID_NUMBER",
|
"SE_ID_NUMBER",
|
||||||
"SG_ID_NUMBER",
|
"SG_ID_NUMBER",
|
||||||
"VAT_NUMBER"
|
"VAT_NUMBER",
|
||||||
|
"UK_CONTACT_TYPE",
|
||||||
|
"UK_COMPANY_NUMBER"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"ExtraParamValue":{
|
"ExtraParamValue":{
|
||||||
@ -1064,7 +1102,16 @@
|
|||||||
"UPDATE_DOMAIN_CONTACT",
|
"UPDATE_DOMAIN_CONTACT",
|
||||||
"UPDATE_NAMESERVER",
|
"UPDATE_NAMESERVER",
|
||||||
"CHANGE_PRIVACY_PROTECTION",
|
"CHANGE_PRIVACY_PROTECTION",
|
||||||
"DOMAIN_LOCK"
|
"DOMAIN_LOCK",
|
||||||
|
"ENABLE_AUTORENEW",
|
||||||
|
"DISABLE_AUTORENEW",
|
||||||
|
"ADD_DNSSEC",
|
||||||
|
"REMOVE_DNSSEC",
|
||||||
|
"EXPIRE_DOMAIN",
|
||||||
|
"TRANSFER_OUT_DOMAIN",
|
||||||
|
"CHANGE_DOMAIN_OWNER",
|
||||||
|
"RENEW_DOMAIN",
|
||||||
|
"PUSH_DOMAIN"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"PageMarker":{
|
"PageMarker":{
|
||||||
@ -1226,6 +1273,14 @@
|
|||||||
"OperationId":{"shape":"OperationId"}
|
"OperationId":{"shape":"OperationId"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"Transferable":{
|
||||||
|
"type":"string",
|
||||||
|
"enum":[
|
||||||
|
"TRANSFERABLE",
|
||||||
|
"UNTRANSFERABLE",
|
||||||
|
"DONT_KNOW"
|
||||||
|
]
|
||||||
|
},
|
||||||
"UnsupportedTLD":{
|
"UnsupportedTLD":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -1275,7 +1330,10 @@
|
|||||||
],
|
],
|
||||||
"members":{
|
"members":{
|
||||||
"DomainName":{"shape":"DomainName"},
|
"DomainName":{"shape":"DomainName"},
|
||||||
"FIAuthKey":{"shape":"FIAuthKey"},
|
"FIAuthKey":{
|
||||||
|
"shape":"FIAuthKey",
|
||||||
|
"deprecated":true
|
||||||
|
},
|
||||||
"Nameservers":{"shape":"NameserverList"}
|
"Nameservers":{"shape":"NameserverList"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
41
vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/docs-2.json
generated
vendored
41
vendor/github.com/aws/aws-sdk-go/models/apis/route53domains/2014-05-15/docs-2.json
generated
vendored
@ -3,7 +3,8 @@
|
|||||||
"service": "<p>Amazon Route 53 API actions let you register domain names and perform related operations.</p>",
|
"service": "<p>Amazon Route 53 API actions let you register domain names and perform related operations.</p>",
|
||||||
"operations": {
|
"operations": {
|
||||||
"CheckDomainAvailability": "<p>This operation checks the availability of one domain name. Note that if the availability status of a domain is pending, you must submit another request to determine the availability of the domain name.</p>",
|
"CheckDomainAvailability": "<p>This operation checks the availability of one domain name. Note that if the availability status of a domain is pending, you must submit another request to determine the availability of the domain name.</p>",
|
||||||
"DeleteTagsForDomain": "<p>This operation deletes the specified tags for a domain.</p> <p>All tag operations are eventually consistent; subsequent operations may not immediately represent all issued operations.</p>",
|
"CheckDomainTransferability": "<p>Checks whether a domain name can be transferred to Amazon Route 53. </p>",
|
||||||
|
"DeleteTagsForDomain": "<p>This operation deletes the specified tags for a domain.</p> <p>All tag operations are eventually consistent; subsequent operations might not immediately represent all issued operations.</p>",
|
||||||
"DisableDomainAutoRenew": "<p>This operation disables automatic renewal of domain registration for the specified domain.</p>",
|
"DisableDomainAutoRenew": "<p>This operation disables automatic renewal of domain registration for the specified domain.</p>",
|
||||||
"DisableDomainTransferLock": "<p>This operation removes the transfer lock on the domain (specifically the <code>clientTransferProhibited</code> status) to allow domain transfers. We recommend you refrain from performing this action unless you intend to transfer the domain to a different registrar. Successful submission returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.</p>",
|
"DisableDomainTransferLock": "<p>This operation removes the transfer lock on the domain (specifically the <code>clientTransferProhibited</code> status) to allow domain transfers. We recommend you refrain from performing this action unless you intend to transfer the domain to a different registrar. Successful submission returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.</p>",
|
||||||
"EnableDomainAutoRenew": "<p>This operation configures Amazon Route 53 to automatically renew the specified domain before the domain registration expires. The cost of renewing your domain registration is billed to your AWS account.</p> <p>The period during which you can renew a domain name varies by TLD. For a list of TLDs and their renewal policies, see <a href=\"http://wiki.gandi.net/en/domains/renew#renewal_restoration_and_deletion_times\">\"Renewal, restoration, and deletion times\"</a> on the website for our registrar partner, Gandi. Route 53 requires that you renew before the end of the renewal period that is listed on the Gandi website so we can complete processing before the deadline.</p>",
|
"EnableDomainAutoRenew": "<p>This operation configures Amazon Route 53 to automatically renew the specified domain before the domain registration expires. The cost of renewing your domain registration is billed to your AWS account.</p> <p>The period during which you can renew a domain name varies by TLD. For a list of TLDs and their renewal policies, see <a href=\"http://wiki.gandi.net/en/domains/renew#renewal_restoration_and_deletion_times\">\"Renewal, restoration, and deletion times\"</a> on the website for our registrar partner, Gandi. Route 53 requires that you renew before the end of the renewal period that is listed on the Gandi website so we can complete processing before the deadline.</p>",
|
||||||
@ -14,7 +15,7 @@
|
|||||||
"GetOperationDetail": "<p>This operation returns the current status of an operation that is not completed.</p>",
|
"GetOperationDetail": "<p>This operation returns the current status of an operation that is not completed.</p>",
|
||||||
"ListDomains": "<p>This operation returns all the domain names registered with Amazon Route 53 for the current AWS account.</p>",
|
"ListDomains": "<p>This operation returns all the domain names registered with Amazon Route 53 for the current AWS account.</p>",
|
||||||
"ListOperations": "<p>This operation returns the operation IDs of operations that are not yet complete.</p>",
|
"ListOperations": "<p>This operation returns the operation IDs of operations that are not yet complete.</p>",
|
||||||
"ListTagsForDomain": "<p>This operation returns all of the tags that are associated with the specified domain.</p> <p>All tag operations are eventually consistent; subsequent operations may not immediately represent all issued operations.</p>",
|
"ListTagsForDomain": "<p>This operation returns all of the tags that are associated with the specified domain.</p> <p>All tag operations are eventually consistent; subsequent operations might not immediately represent all issued operations.</p>",
|
||||||
"RegisterDomain": "<p>This operation registers a domain. Domains are registered by the AWS registrar partner, Gandi. For some top-level domains (TLDs), this operation requires extra parameters.</p> <p>When you register a domain, Amazon Route 53 does the following:</p> <ul> <li> <p>Creates a Amazon Route 53 hosted zone that has the same name as the domain. Amazon Route 53 assigns four name servers to your hosted zone and automatically updates your domain registration with the names of these name servers.</p> </li> <li> <p>Enables autorenew, so your domain registration will renew automatically each year. We'll notify you in advance of the renewal date so you can choose whether to renew the registration.</p> </li> <li> <p>Optionally enables privacy protection, so WHOIS queries return contact information for our registrar partner, Gandi, instead of the information you entered for registrant, admin, and tech contacts.</p> </li> <li> <p>If registration is successful, returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant is notified by email.</p> </li> <li> <p>Charges your AWS account an amount based on the top-level domain. For more information, see <a href=\"http://aws.amazon.com/route53/pricing/\">Amazon Route 53 Pricing</a>.</p> </li> </ul>",
|
"RegisterDomain": "<p>This operation registers a domain. Domains are registered by the AWS registrar partner, Gandi. For some top-level domains (TLDs), this operation requires extra parameters.</p> <p>When you register a domain, Amazon Route 53 does the following:</p> <ul> <li> <p>Creates a Amazon Route 53 hosted zone that has the same name as the domain. Amazon Route 53 assigns four name servers to your hosted zone and automatically updates your domain registration with the names of these name servers.</p> </li> <li> <p>Enables autorenew, so your domain registration will renew automatically each year. We'll notify you in advance of the renewal date so you can choose whether to renew the registration.</p> </li> <li> <p>Optionally enables privacy protection, so WHOIS queries return contact information for our registrar partner, Gandi, instead of the information you entered for registrant, admin, and tech contacts.</p> </li> <li> <p>If registration is successful, returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant is notified by email.</p> </li> <li> <p>Charges your AWS account an amount based on the top-level domain. For more information, see <a href=\"http://aws.amazon.com/route53/pricing/\">Amazon Route 53 Pricing</a>.</p> </li> </ul>",
|
||||||
"RenewDomain": "<p>This operation renews a domain for the specified number of years. The cost of renewing your domain is billed to your AWS account.</p> <p>We recommend that you renew your domain several weeks before the expiration date. Some TLD registries delete domains before the expiration date if you haven't renewed far enough in advance. For more information about renewing domain registration, see <a href=\"http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-renew.html\">Renewing Registration for a Domain</a> in the Amazon Route 53 Developer Guide.</p>",
|
"RenewDomain": "<p>This operation renews a domain for the specified number of years. The cost of renewing your domain is billed to your AWS account.</p> <p>We recommend that you renew your domain several weeks before the expiration date. Some TLD registries delete domains before the expiration date if you haven't renewed far enough in advance. For more information about renewing domain registration, see <a href=\"http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-renew.html\">Renewing Registration for a Domain</a> in the Amazon Route 53 Developer Guide.</p>",
|
||||||
"ResendContactReachabilityEmail": "<p>For operations that require confirmation that the email address for the registrant contact is valid, such as registering a new domain, this operation resends the confirmation email to the current email address for the registrant contact.</p>",
|
"ResendContactReachabilityEmail": "<p>For operations that require confirmation that the email address for the registrant contact is valid, such as registering a new domain, this operation resends the confirmation email to the current email address for the registrant contact.</p>",
|
||||||
@ -23,7 +24,7 @@
|
|||||||
"UpdateDomainContact": "<p>This operation updates the contact information for a particular domain. Information for at least one contact (registrant, administrator, or technical) must be supplied for update.</p> <p>If the update is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.</p>",
|
"UpdateDomainContact": "<p>This operation updates the contact information for a particular domain. Information for at least one contact (registrant, administrator, or technical) must be supplied for update.</p> <p>If the update is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.</p>",
|
||||||
"UpdateDomainContactPrivacy": "<p>This operation updates the specified domain contact's privacy setting. When the privacy option is enabled, personal information such as postal or email address is hidden from the results of a public WHOIS query. The privacy services are provided by the AWS registrar, Gandi. For more information, see the <a href=\"http://www.gandi.net/domain/whois/?currency=USD&amp;lang=en\">Gandi privacy features</a>.</p> <p>This operation only affects the privacy of the specified contact type (registrant, administrator, or tech). Successful acceptance returns an operation ID that you can use with <a>GetOperationDetail</a> to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.</p>",
|
"UpdateDomainContactPrivacy": "<p>This operation updates the specified domain contact's privacy setting. When the privacy option is enabled, personal information such as postal or email address is hidden from the results of a public WHOIS query. The privacy services are provided by the AWS registrar, Gandi. For more information, see the <a href=\"http://www.gandi.net/domain/whois/?currency=USD&amp;lang=en\">Gandi privacy features</a>.</p> <p>This operation only affects the privacy of the specified contact type (registrant, administrator, or tech). Successful acceptance returns an operation ID that you can use with <a>GetOperationDetail</a> to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.</p>",
|
||||||
"UpdateDomainNameservers": "<p>This operation replaces the current set of name servers for the domain with the specified set of name servers. If you use Amazon Route 53 as your DNS service, specify the four name servers in the delegation set for the hosted zone for the domain.</p> <p>If successful, this operation returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.</p>",
|
"UpdateDomainNameservers": "<p>This operation replaces the current set of name servers for the domain with the specified set of name servers. If you use Amazon Route 53 as your DNS service, specify the four name servers in the delegation set for the hosted zone for the domain.</p> <p>If successful, this operation returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant will be notified by email.</p>",
|
||||||
"UpdateTagsForDomain": "<p>This operation adds or updates tags for a specified domain.</p> <p>All tag operations are eventually consistent; subsequent operations may not immediately represent all issued operations.</p>",
|
"UpdateTagsForDomain": "<p>This operation adds or updates tags for a specified domain.</p> <p>All tag operations are eventually consistent; subsequent operations might not immediately represent all issued operations.</p>",
|
||||||
"ViewBilling": "<p>Returns all the domain-related billing records for the current AWS account for a specified period</p>"
|
"ViewBilling": "<p>Returns all the domain-related billing records for the current AWS account for a specified period</p>"
|
||||||
},
|
},
|
||||||
"shapes": {
|
"shapes": {
|
||||||
@ -80,6 +81,16 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"CheckDomainTransferabilityRequest": {
|
||||||
|
"base": "<p>The CheckDomainTransferability request contains the following elements.</p>",
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"CheckDomainTransferabilityResponse": {
|
||||||
|
"base": "<p>The CheckDomainTransferability response includes the following elements.</p>",
|
||||||
|
"refs": {
|
||||||
|
}
|
||||||
|
},
|
||||||
"City": {
|
"City": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -176,6 +187,7 @@
|
|||||||
"DomainAuthCode": {
|
"DomainAuthCode": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
|
"CheckDomainTransferabilityRequest$AuthCode": "<p>If the registrar for the top-level domain (TLD) requires an authorization code to transfer the domain, the code that you got from the current registrar for the domain.</p>",
|
||||||
"RetrieveDomainAuthCodeResponse$AuthCode": "<p>The authorization code for the domain.</p>",
|
"RetrieveDomainAuthCodeResponse$AuthCode": "<p>The authorization code for the domain.</p>",
|
||||||
"TransferDomainRequest$AuthCode": "<p>The authorization code for the domain. You get this value from the current registrar.</p>"
|
"TransferDomainRequest$AuthCode": "<p>The authorization code for the domain. You get this value from the current registrar.</p>"
|
||||||
}
|
}
|
||||||
@ -183,7 +195,7 @@
|
|||||||
"DomainAvailability": {
|
"DomainAvailability": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CheckDomainAvailabilityResponse$Availability": "<p>Whether the domain name is available for registering.</p> <note> <p>You can only register domains designated as <code>AVAILABLE</code>.</p> </note> <p>Valid values:</p> <dl> <dt>AVAILABLE</dt> <dd> <p>The domain name is available.</p> </dd> <dt>AVAILABLE_RESERVED</dt> <dd> <p>The domain name is reserved under specific conditions.</p> </dd> <dt>AVAILABLE_PREORDER</dt> <dd> <p>The domain name is available and can be preordered.</p> </dd> <dt>DONT_KNOW</dt> <dd> <p>The TLD registry didn't reply with a definitive answer about whether the domain name is available. Amazon Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.</p> </dd> <dt>PENDING</dt> <dd> <p>The TLD registry didn't return a response in the expected amount of time. When the response is delayed, it usually takes just a few extra seconds. You can resubmit the request immediately.</p> </dd> <dt>RESERVED</dt> <dd> <p>The domain name has been reserved for another person or organization.</p> </dd> <dt>UNAVAILABLE</dt> <dd> <p>The domain name is not available.</p> </dd> <dt>UNAVAILABLE_PREMIUM</dt> <dd> <p>The domain name is not available.</p> </dd> <dt>UNAVAILABLE_RESTRICTED</dt> <dd> <p>The domain name is forbidden.</p> </dd> </dl>"
|
"CheckDomainAvailabilityResponse$Availability": "<p>Whether the domain name is available for registering.</p> <note> <p>You can register only domains designated as <code>AVAILABLE</code>.</p> </note> <p>Valid values:</p> <dl> <dt>AVAILABLE</dt> <dd> <p>The domain name is available.</p> </dd> <dt>AVAILABLE_RESERVED</dt> <dd> <p>The domain name is reserved under specific conditions.</p> </dd> <dt>AVAILABLE_PREORDER</dt> <dd> <p>The domain name is available and can be preordered.</p> </dd> <dt>DONT_KNOW</dt> <dd> <p>The TLD registry didn't reply with a definitive answer about whether the domain name is available. Amazon Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.</p> </dd> <dt>PENDING</dt> <dd> <p>The TLD registry didn't return a response in the expected amount of time. When the response is delayed, it usually takes just a few extra seconds. You can resubmit the request immediately.</p> </dd> <dt>RESERVED</dt> <dd> <p>The domain name has been reserved for another person or organization.</p> </dd> <dt>UNAVAILABLE</dt> <dd> <p>The domain name is not available.</p> </dd> <dt>UNAVAILABLE_PREMIUM</dt> <dd> <p>The domain name is not available.</p> </dd> <dt>UNAVAILABLE_RESTRICTED</dt> <dd> <p>The domain name is forbidden.</p> </dd> </dl>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DomainLimitExceeded": {
|
"DomainLimitExceeded": {
|
||||||
@ -196,6 +208,7 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
"BillingRecord$DomainName": "<p>The name of the domain that the billing record applies to. If the domain name contains characters other than a-z, 0-9, and - (hyphen), such as an internationalized domain name, then this value is in Punycode. For more information, see <a href=\"http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html\">DNS Domain Name Format</a> in the <i>Amazon Route 53 Developer Guidezzz</i>.</p>",
|
"BillingRecord$DomainName": "<p>The name of the domain that the billing record applies to. If the domain name contains characters other than a-z, 0-9, and - (hyphen), such as an internationalized domain name, then this value is in Punycode. For more information, see <a href=\"http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html\">DNS Domain Name Format</a> in the <i>Amazon Route 53 Developer Guidezzz</i>.</p>",
|
||||||
"CheckDomainAvailabilityRequest$DomainName": "<p>The name of the domain that you want to get availability for.</p> <p>Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.</p>",
|
"CheckDomainAvailabilityRequest$DomainName": "<p>The name of the domain that you want to get availability for.</p> <p>Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.</p>",
|
||||||
|
"CheckDomainTransferabilityRequest$DomainName": "<p>The name of the domain that you want to transfer to Amazon Route 53.</p> <p>Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.</p>",
|
||||||
"DeleteTagsForDomainRequest$DomainName": "<p>The domain for which you want to delete one or more tags.</p>",
|
"DeleteTagsForDomainRequest$DomainName": "<p>The domain for which you want to delete one or more tags.</p>",
|
||||||
"DisableDomainAutoRenewRequest$DomainName": "<p>The name of the domain that you want to disable automatic renewal for.</p>",
|
"DisableDomainAutoRenewRequest$DomainName": "<p>The name of the domain that you want to disable automatic renewal for.</p>",
|
||||||
"DisableDomainTransferLockRequest$DomainName": "<p>The name of the domain that you want to remove the transfer lock for.</p>",
|
"DisableDomainTransferLockRequest$DomainName": "<p>The name of the domain that you want to remove the transfer lock for.</p>",
|
||||||
@ -258,6 +271,12 @@
|
|||||||
"ListDomainsResponse$Domains": "<p>A summary of domains.</p>"
|
"ListDomainsResponse$Domains": "<p>A summary of domains.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DomainTransferability": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"CheckDomainTransferabilityResponse$Transferability": "<p>A complex type that contains information about whether the specified domain can be transferred to Amazon Route 53.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
"DuplicateRequest": {
|
"DuplicateRequest": {
|
||||||
"base": "<p>The request is already in progress for the domain.</p>",
|
"base": "<p>The request is already in progress for the domain.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
@ -305,10 +324,10 @@
|
|||||||
"DomainLimitExceeded$message": "<p>The number of domains has exceeded the allowed threshold for the account.</p>",
|
"DomainLimitExceeded$message": "<p>The number of domains has exceeded the allowed threshold for the account.</p>",
|
||||||
"DuplicateRequest$message": "<p>The request is already in progress for the domain.</p>",
|
"DuplicateRequest$message": "<p>The request is already in progress for the domain.</p>",
|
||||||
"GetOperationDetailResponse$Message": "<p>Detailed information on the status including possible errors.</p>",
|
"GetOperationDetailResponse$Message": "<p>Detailed information on the status including possible errors.</p>",
|
||||||
"InvalidInput$message": "<p>The requested item is not acceptable. For example, for an OperationId it may refer to the ID of an operation that is already completed. For a domain name, it may not be a valid domain name or belong to the requester account.</p>",
|
"InvalidInput$message": "<p>The requested item is not acceptable. For example, for an OperationId it might refer to the ID of an operation that is already completed. For a domain name, it might not be a valid domain name or belong to the requester account.</p>",
|
||||||
"OperationLimitExceeded$message": "<p>The number of operations or jobs running exceeded the allowed threshold for the account.</p>",
|
"OperationLimitExceeded$message": "<p>The number of operations or jobs running exceeded the allowed threshold for the account.</p>",
|
||||||
"TLDRulesViolation$message": "<p>The top-level domain does not support this operation.</p>",
|
"TLDRulesViolation$message": "<p>The top-level domain does not support this operation.</p>",
|
||||||
"UnsupportedTLD$message": "<p>Amazon Route 53 does not support this top-level domain.</p>"
|
"UnsupportedTLD$message": "<p>Amazon Route 53 does not support this top-level domain (TLD).</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ExtraParam": {
|
"ExtraParam": {
|
||||||
@ -406,7 +425,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"InvalidInput": {
|
"InvalidInput": {
|
||||||
"base": "<p>The requested item is not acceptable. For example, for an OperationId it may refer to the ID of an operation that is already completed. For a domain name, it may not be a valid domain name or belong to the requester account.</p>",
|
"base": "<p>The requested item is not acceptable. For example, for an OperationId it might refer to the ID of an operation that is already completed. For a domain name, it might not be a valid domain name or belong to the requester account.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -690,8 +709,14 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"Transferable": {
|
||||||
|
"base": "<p>Whether the domain name can be transferred to Amazon Route 53.</p> <note> <p>You can transfer only domains that have a value of <code>TRANSFERABLE</code> for <code>Transferable</code>.</p> </note> <p>Valid values:</p> <dl> <dt>TRANSFERABLE</dt> <dd> <p>The domain name can be transferred to Amazon Route 53.</p> </dd> <dt>UNTRANSFERRABLE</dt> <dd> <p>The domain name can't be transferred to Amazon Route 53.</p> </dd> <dt>DONT_KNOW</dt> <dd> <p>Reserved for future use.</p> </dd> </dl>",
|
||||||
|
"refs": {
|
||||||
|
"DomainTransferability$Transferable": null
|
||||||
|
}
|
||||||
|
},
|
||||||
"UnsupportedTLD": {
|
"UnsupportedTLD": {
|
||||||
"base": "<p>Amazon Route 53 does not support this top-level domain.</p>",
|
"base": "<p>Amazon Route 53 does not support this top-level domain (TLD).</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
19
vendor/github.com/aws/aws-sdk-go/models/apis/runtime.lex/2016-11-28/api-2.json
generated
vendored
19
vendor/github.com/aws/aws-sdk-go/models/apis/runtime.lex/2016-11-28/api-2.json
generated
vendored
@ -56,6 +56,10 @@
|
|||||||
},
|
},
|
||||||
"shapes":{
|
"shapes":{
|
||||||
"Accept":{"type":"string"},
|
"Accept":{"type":"string"},
|
||||||
|
"AttributesString":{
|
||||||
|
"type":"string",
|
||||||
|
"sensitive":true
|
||||||
|
},
|
||||||
"BadGatewayException":{
|
"BadGatewayException":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -215,11 +219,17 @@
|
|||||||
"locationName":"userId"
|
"locationName":"userId"
|
||||||
},
|
},
|
||||||
"sessionAttributes":{
|
"sessionAttributes":{
|
||||||
"shape":"String",
|
"shape":"AttributesString",
|
||||||
"jsonvalue":true,
|
"jsonvalue":true,
|
||||||
"location":"header",
|
"location":"header",
|
||||||
"locationName":"x-amz-lex-session-attributes"
|
"locationName":"x-amz-lex-session-attributes"
|
||||||
},
|
},
|
||||||
|
"requestAttributes":{
|
||||||
|
"shape":"AttributesString",
|
||||||
|
"jsonvalue":true,
|
||||||
|
"location":"header",
|
||||||
|
"locationName":"x-amz-lex-request-attributes"
|
||||||
|
},
|
||||||
"contentType":{
|
"contentType":{
|
||||||
"shape":"HttpContentType",
|
"shape":"HttpContentType",
|
||||||
"location":"header",
|
"location":"header",
|
||||||
@ -308,6 +318,7 @@
|
|||||||
"locationName":"userId"
|
"locationName":"userId"
|
||||||
},
|
},
|
||||||
"sessionAttributes":{"shape":"StringMap"},
|
"sessionAttributes":{"shape":"StringMap"},
|
||||||
|
"requestAttributes":{"shape":"StringMap"},
|
||||||
"inputText":{"shape":"Text"}
|
"inputText":{"shape":"Text"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -343,7 +354,8 @@
|
|||||||
"StringMap":{
|
"StringMap":{
|
||||||
"type":"map",
|
"type":"map",
|
||||||
"key":{"shape":"String"},
|
"key":{"shape":"String"},
|
||||||
"value":{"shape":"String"}
|
"value":{"shape":"String"},
|
||||||
|
"sensitive":true
|
||||||
},
|
},
|
||||||
"StringUrlWithLength":{
|
"StringUrlWithLength":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
@ -358,7 +370,8 @@
|
|||||||
"Text":{
|
"Text":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"max":1024,
|
"max":1024,
|
||||||
"min":1
|
"min":1,
|
||||||
|
"sensitive":true
|
||||||
},
|
},
|
||||||
"UnsupportedMediaTypeException":{
|
"UnsupportedMediaTypeException":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
|
37
vendor/github.com/aws/aws-sdk-go/models/apis/runtime.lex/2016-11-28/docs-2.json
generated
vendored
37
vendor/github.com/aws/aws-sdk-go/models/apis/runtime.lex/2016-11-28/docs-2.json
generated
vendored
@ -2,7 +2,7 @@
|
|||||||
"version": "2.0",
|
"version": "2.0",
|
||||||
"service": "<p>Amazon Lex provides both build and runtime endpoints. Each endpoint provides a set of operations (API). Your conversational bot uses the runtime API to understand user utterances (user input text or voice). For example, suppose a user says \"I want pizza\", your bot sends this input to Amazon Lex using the runtime API. Amazon Lex recognizes that the user request is for the OrderPizza intent (one of the intents defined in the bot). Then Amazon Lex engages in user conversation on behalf of the bot to elicit required information (slot values, such as pizza size and crust type), and then performs fulfillment activity (that you configured when you created the bot). You use the build-time API to create and manage your Amazon Lex bot. For a list of build-time operations, see the build-time API, . </p>",
|
"service": "<p>Amazon Lex provides both build and runtime endpoints. Each endpoint provides a set of operations (API). Your conversational bot uses the runtime API to understand user utterances (user input text or voice). For example, suppose a user says \"I want pizza\", your bot sends this input to Amazon Lex using the runtime API. Amazon Lex recognizes that the user request is for the OrderPizza intent (one of the intents defined in the bot). Then Amazon Lex engages in user conversation on behalf of the bot to elicit required information (slot values, such as pizza size and crust type), and then performs fulfillment activity (that you configured when you created the bot). You use the build-time API to create and manage your Amazon Lex bot. For a list of build-time operations, see the build-time API, . </p>",
|
||||||
"operations": {
|
"operations": {
|
||||||
"PostContent": "<p> Sends user input (text or speech) to Amazon Lex. Clients use this API to send requests to Amazon Lex at runtime. Amazon Lex interprets the user input using the machine learning model that it built for the bot. </p> <p> In response, Amazon Lex returns the next message to convey to the user. Consider the following example messages: </p> <ul> <li> <p> For a user input \"I would like a pizza,\" Amazon Lex might return a response with a message eliciting slot data (for example, <code>PizzaSize</code>): \"What size pizza would you like?\". </p> </li> <li> <p> After the user provides all of the pizza order information, Amazon Lex might return a response with a message to get user confirmation: \"Order the pizza?\". </p> </li> <li> <p> After the user replies \"Yes\" to the confirmation prompt, Amazon Lex might return a conclusion statement: \"Thank you, your cheese pizza has been ordered.\". </p> </li> </ul> <p> Not all Amazon Lex messages require a response from the user. For example, conclusion statements do not require a response. Some messages require only a yes or no response. In addition to the <code>message</code>, Amazon Lex provides additional context about the message in the response that you can use to enhance client behavior, such as displaying the appropriate client user interface. 
Consider the following examples: </p> <ul> <li> <p> If the message is to elicit slot data, Amazon Lex returns the following context information: </p> <ul> <li> <p> <code>x-amz-lex-dialog-state</code> header set to <code>ElicitSlot</code> </p> </li> <li> <p> <code>x-amz-lex-intent-name</code> header set to the intent name in the current context </p> </li> <li> <p> <code>x-amz-lex-slot-to-elicit</code> header set to the slot name for which the <code>message</code> is eliciting information </p> </li> <li> <p> <code>x-amz-lex-slots</code> header set to a map of slots configured for the intent with their current values </p> </li> </ul> </li> <li> <p> If the message is a confirmation prompt, the <code>x-amz-lex-dialog-state</code> header is set to <code>Confirmation</code> and the <code>x-amz-lex-slot-to-elicit</code> header is omitted. </p> </li> <li> <p> If the message is a clarification prompt configured for the intent, indicating that the user intent is not understood, the <code>x-amz-dialog-state</code> header is set to <code>ElicitIntent</code> and the <code>x-amz-slot-to-elicit</code> header is omitted. </p> </li> </ul> <p> In addition, Amazon Lex also returns your application-specific <code>sessionAttributes</code>. For more information, see <a href=\"http://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html\">Managing Conversation Context</a>. </p>",
|
"PostContent": "<p> Sends user input (text or speech) to Amazon Lex. Clients use this API to send text and audio requests to Amazon Lex at runtime. Amazon Lex interprets the user input using the machine learning model that it built for the bot. </p> <p>The <code>PostContent</code> operation supports audio input at 8kHz and 16kHz. You can use 8kHz audio to achieve higher speech recognition accuracy in telephone audio applications. </p> <p> In response, Amazon Lex returns the next message to convey to the user. Consider the following example messages: </p> <ul> <li> <p> For a user input \"I would like a pizza,\" Amazon Lex might return a response with a message eliciting slot data (for example, <code>PizzaSize</code>): \"What size pizza would you like?\". </p> </li> <li> <p> After the user provides all of the pizza order information, Amazon Lex might return a response with a message to get user confirmation: \"Order the pizza?\". </p> </li> <li> <p> After the user replies \"Yes\" to the confirmation prompt, Amazon Lex might return a conclusion statement: \"Thank you, your cheese pizza has been ordered.\". </p> </li> </ul> <p> Not all Amazon Lex messages require a response from the user. For example, conclusion statements do not require a response. Some messages require only a yes or no response. In addition to the <code>message</code>, Amazon Lex provides additional context about the message in the response that you can use to enhance client behavior, such as displaying the appropriate client user interface. 
Consider the following examples: </p> <ul> <li> <p> If the message is to elicit slot data, Amazon Lex returns the following context information: </p> <ul> <li> <p> <code>x-amz-lex-dialog-state</code> header set to <code>ElicitSlot</code> </p> </li> <li> <p> <code>x-amz-lex-intent-name</code> header set to the intent name in the current context </p> </li> <li> <p> <code>x-amz-lex-slot-to-elicit</code> header set to the slot name for which the <code>message</code> is eliciting information </p> </li> <li> <p> <code>x-amz-lex-slots</code> header set to a map of slots configured for the intent with their current values </p> </li> </ul> </li> <li> <p> If the message is a confirmation prompt, the <code>x-amz-lex-dialog-state</code> header is set to <code>Confirmation</code> and the <code>x-amz-lex-slot-to-elicit</code> header is omitted. </p> </li> <li> <p> If the message is a clarification prompt configured for the intent, indicating that the user intent is not understood, the <code>x-amz-dialog-state</code> header is set to <code>ElicitIntent</code> and the <code>x-amz-slot-to-elicit</code> header is omitted. </p> </li> </ul> <p> In addition, Amazon Lex also returns your application-specific <code>sessionAttributes</code>. For more information, see <a href=\"http://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html\">Managing Conversation Context</a>. </p>",
|
||||||
"PostText": "<p>Sends user input (text-only) to Amazon Lex. Client applications can use this API to send requests to Amazon Lex at runtime. Amazon Lex then interprets the user input using the machine learning model it built for the bot. </p> <p> In response, Amazon Lex returns the next <code>message</code> to convey to the user an optional <code>responseCard</code> to display. Consider the following example messages: </p> <ul> <li> <p> For a user input \"I would like a pizza\", Amazon Lex might return a response with a message eliciting slot data (for example, PizzaSize): \"What size pizza would you like?\" </p> </li> <li> <p> After the user provides all of the pizza order information, Amazon Lex might return a response with a message to obtain user confirmation \"Proceed with the pizza order?\". </p> </li> <li> <p> After the user replies to a confirmation prompt with a \"yes\", Amazon Lex might return a conclusion statement: \"Thank you, your cheese pizza has been ordered.\". </p> </li> </ul> <p> Not all Amazon Lex messages require a user response. For example, a conclusion statement does not require a response. Some messages require only a \"yes\" or \"no\" user response. In addition to the <code>message</code>, Amazon Lex provides additional context about the message in the response that you might use to enhance client behavior, for example, to display the appropriate client user interface. These are the <code>slotToElicit</code>, <code>dialogState</code>, <code>intentName</code>, and <code>slots</code> fields in the response. 
Consider the following examples: </p> <ul> <li> <p>If the message is to elicit slot data, Amazon Lex returns the following context information:</p> <ul> <li> <p> <code>dialogState</code> set to ElicitSlot </p> </li> <li> <p> <code>intentName</code> set to the intent name in the current context </p> </li> <li> <p> <code>slotToElicit</code> set to the slot name for which the <code>message</code> is eliciting information </p> </li> <li> <p> <code>slots</code> set to a map of slots, configured for the intent, with currently known values </p> </li> </ul> </li> <li> <p> If the message is a confirmation prompt, the <code>dialogState</code> is set to ConfirmIntent and <code>SlotToElicit</code> is set to null. </p> </li> <li> <p>If the message is a clarification prompt (configured for the intent) that indicates that user intent is not understood, the <code>dialogState</code> is set to ElicitIntent and <code>slotToElicit</code> is set to null. </p> </li> </ul> <p> In addition, Amazon Lex also returns your application-specific <code>sessionAttributes</code>. For more information, see <a href=\"http://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html\">Managing Conversation Context</a>. </p>"
|
"PostText": "<p>Sends user input (text-only) to Amazon Lex. Client applications can use this API to send requests to Amazon Lex at runtime. Amazon Lex then interprets the user input using the machine learning model it built for the bot. </p> <p> In response, Amazon Lex returns the next <code>message</code> to convey to the user an optional <code>responseCard</code> to display. Consider the following example messages: </p> <ul> <li> <p> For a user input \"I would like a pizza\", Amazon Lex might return a response with a message eliciting slot data (for example, PizzaSize): \"What size pizza would you like?\" </p> </li> <li> <p> After the user provides all of the pizza order information, Amazon Lex might return a response with a message to obtain user confirmation \"Proceed with the pizza order?\". </p> </li> <li> <p> After the user replies to a confirmation prompt with a \"yes\", Amazon Lex might return a conclusion statement: \"Thank you, your cheese pizza has been ordered.\". </p> </li> </ul> <p> Not all Amazon Lex messages require a user response. For example, a conclusion statement does not require a response. Some messages require only a \"yes\" or \"no\" user response. In addition to the <code>message</code>, Amazon Lex provides additional context about the message in the response that you might use to enhance client behavior, for example, to display the appropriate client user interface. These are the <code>slotToElicit</code>, <code>dialogState</code>, <code>intentName</code>, and <code>slots</code> fields in the response. 
Consider the following examples: </p> <ul> <li> <p>If the message is to elicit slot data, Amazon Lex returns the following context information:</p> <ul> <li> <p> <code>dialogState</code> set to ElicitSlot </p> </li> <li> <p> <code>intentName</code> set to the intent name in the current context </p> </li> <li> <p> <code>slotToElicit</code> set to the slot name for which the <code>message</code> is eliciting information </p> </li> <li> <p> <code>slots</code> set to a map of slots, configured for the intent, with currently known values </p> </li> </ul> </li> <li> <p> If the message is a confirmation prompt, the <code>dialogState</code> is set to ConfirmIntent and <code>SlotToElicit</code> is set to null. </p> </li> <li> <p>If the message is a clarification prompt (configured for the intent) that indicates that user intent is not understood, the <code>dialogState</code> is set to ElicitIntent and <code>slotToElicit</code> is set to null. </p> </li> </ul> <p> In addition, Amazon Lex also returns your application-specific <code>sessionAttributes</code>. For more information, see <a href=\"http://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html\">Managing Conversation Context</a>. </p>"
|
||||||
},
|
},
|
||||||
"shapes": {
|
"shapes": {
|
||||||
@ -12,20 +12,27 @@
|
|||||||
"PostContentRequest$accept": "<p> You pass this value as the <code>Accept</code> HTTP header. </p> <p> The message Amazon Lex returns in the response can be either text or speech based on the <code>Accept</code> HTTP header value in the request. </p> <ul> <li> <p> If the value is <code>text/plain; charset=utf-8</code>, Amazon Lex returns text in the response. </p> </li> <li> <p> If the value begins with <code>audio/</code>, Amazon Lex returns speech in the response. Amazon Lex uses Amazon Polly to generate the speech (using the configuration you specified in the <code>Accept</code> header). For example, if you specify <code>audio/mpeg</code> as the value, Amazon Lex returns speech in the MPEG format.</p> <p>The following are the accepted values:</p> <ul> <li> <p>audio/mpeg</p> </li> <li> <p>audio/ogg</p> </li> <li> <p>audio/pcm</p> </li> <li> <p>text/plain; charset=utf-8</p> </li> <li> <p>audio/* (defaults to mpeg)</p> </li> </ul> </li> </ul>"
|
"PostContentRequest$accept": "<p> You pass this value as the <code>Accept</code> HTTP header. </p> <p> The message Amazon Lex returns in the response can be either text or speech based on the <code>Accept</code> HTTP header value in the request. </p> <ul> <li> <p> If the value is <code>text/plain; charset=utf-8</code>, Amazon Lex returns text in the response. </p> </li> <li> <p> If the value begins with <code>audio/</code>, Amazon Lex returns speech in the response. Amazon Lex uses Amazon Polly to generate the speech (using the configuration you specified in the <code>Accept</code> header). For example, if you specify <code>audio/mpeg</code> as the value, Amazon Lex returns speech in the MPEG format.</p> <p>The following are the accepted values:</p> <ul> <li> <p>audio/mpeg</p> </li> <li> <p>audio/ogg</p> </li> <li> <p>audio/pcm</p> </li> <li> <p>text/plain; charset=utf-8</p> </li> <li> <p>audio/* (defaults to mpeg)</p> </li> </ul> </li> </ul>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"AttributesString": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"PostContentRequest$sessionAttributes": "<p>You pass this value as the <code>x-amz-lex-session-attributes</code> HTTP header.</p> <p>Application-specific information passed between Amazon Lex and a client application. The value must be a JSON serialized and base64 encoded map with string keys and values. The total size of the <code>sessionAttributes</code> and <code>requestAttributes</code> headers is limited to 12 KB.</p> <p>For more information, see <a href=\"http://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-session-attribs\">Setting Session Attributes</a>.</p>",
|
||||||
|
"PostContentRequest$requestAttributes": "<p>You pass this value as the <code>x-amz-lex-request-attributes</code> HTTP header.</p> <p>Request-specific information passed between Amazon Lex and a client application. The value must be a JSON serialized and base64 encoded map with string keys and values. The total size of the <code>requestAttributes</code> and <code>sessionAttributes</code> headers is limited to 12 KB.</p> <p>The namespace <code>x-amz-lex:</code> is reserved for special attributes. Don't create any request attributes with the prefix <code>x-amz-lex:</code>.</p> <p>For more information, see <a href=\"http://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-request-attribs\">Setting Request Attributes</a>.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
"BadGatewayException": {
|
"BadGatewayException": {
|
||||||
"base": "<p>Either the Amazon Lex bot is still building, or one of the dependent services (Amazon Polly, AWS Lambda) failed with an internal service error.</p>",
|
"base": "<p>Either the Amazon Lex bot is still building, or one of the dependent services (Amazon Polly, AWS Lambda) failed with an internal service error.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"BadRequestException": {
|
"BadRequestException": {
|
||||||
"base": "<p> Request validation failed, there is no usable message in the context, or the bot build failed. </p>",
|
"base": "<p> Request validation failed, there is no usable message in the context, or the bot build failed, is still in progress, or contains unbuilt changes. </p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"BlobStream": {
|
"BlobStream": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"PostContentRequest$inputStream": "<p> User input in PCM or Opus audio format or text format as described in the <code>Content-Type</code> HTTP header. </p>",
|
"PostContentRequest$inputStream": "<p> User input in PCM or Opus audio format or text format as described in the <code>Content-Type</code> HTTP header. </p> <p>You can stream audio data to Amazon Lex or you can create a local buffer that captures all of the audio data before sending. In general, you get better performance if you stream audio data rather than buffering the data locally.</p>",
|
||||||
"PostContentResponse$audioStream": "<p>The prompt (or statement) to convey to the user. This is based on the bot configuration and context. For example, if Amazon Lex did not understand the user intent, it sends the <code>clarificationPrompt</code> configured for the bot. If the intent requires confirmation before taking the fulfillment action, it sends the <code>confirmationPrompt</code>. Another example: Suppose that the Lambda function successfully fulfilled the intent, and sent a message to convey to the user. Then Amazon Lex sends that message in the response. </p>"
|
"PostContentResponse$audioStream": "<p>The prompt (or statement) to convey to the user. This is based on the bot configuration and context. For example, if Amazon Lex did not understand the user intent, it sends the <code>clarificationPrompt</code> configured for the bot. If the intent requires confirmation before taking the fulfillment action, it sends the <code>confirmationPrompt</code>. Another example: Suppose that the Lambda function successfully fulfilled the intent, and sent a message to convey to the user. Then Amazon Lex sends that message in the response. </p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -73,15 +80,15 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DependencyFailedException": {
|
"DependencyFailedException": {
|
||||||
"base": "<p> One of the downstream dependencies, such as AWS Lambda or Amazon Polly, threw an exception. For example, if Amazon Lex does not have sufficient permissions to call a Lambda function, it results in Lambda throwing an exception. </p>",
|
"base": "<p> One of the dependencies, such as AWS Lambda or Amazon Polly, threw an exception. For example, </p> <ul> <li> <p>If Amazon Lex does not have sufficient permissions to call a Lambda function.</p> </li> <li> <p>If a Lambda function takes longer than 30 seconds to execute.</p> </li> <li> <p>If a fulfillment Lambda function returns a <code>Delegate</code> dialog action without removing any slot values.</p> </li> </ul>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DialogState": {
|
"DialogState": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"PostContentResponse$dialogState": "<p>Identifies the current state of the user interaction. Amazon Lex returns one of the following values as <code>dialogState</code>. The client can optionally use this information to customize the user interface. </p> <ul> <li> <p> <code>ElicitIntent</code> – Amazon Lex wants to elicit the user's intent. Consider the following examples: </p> <p> For example, a user might utter an intent (\"I want to order a pizza\"). If Amazon Lex cannot infer the user intent from this utterance, it will return this dialog state. </p> </li> <li> <p> <code>ConfirmIntent</code> – Amazon Lex is expecting a \"yes\" or \"no\" response. </p> <p>For example, Amazon Lex wants user confirmation before fulfilling an intent. Instead of a simple \"yes\" or \"no\" response, a user might respond with additional information. For example, \"yes, but make it a thick crust pizza\" or \"no, I want to order a drink.\" Amazon Lex can process such additional information (in these examples, update the crust type slot or change the intent from OrderPizza to OrderDrink). </p> </li> <li> <p> <code>ElicitSlot</code> – Amazon Lex is expecting the value of a slot for the current intent. </p> <p> For example, suppose that in the response Amazon Lex sends this message: \"What size pizza would you like?\". A user might reply with the slot value (e.g., \"medium\"). The user might also provide additional information in the response (e.g., \"medium thick crust pizza\"). Amazon Lex can process such additional information appropriately. </p> </li> <li> <p> <code>Fulfilled</code> – Conveys that the Lambda function has successfully fulfilled the intent. </p> </li> <li> <p> <code>ReadyForFulfillment</code> – Conveys that the client has to fullfill the request. </p> </li> <li> <p> <code>Failed</code> – Conveys that the conversation with the user failed. 
</p> <p> This can happen for various reasons, including that the user does not provide an appropriate response to prompts from the service (you can configure how many times Amazon Lex can prompt a user for specific information), or if the Lambda function fails to fulfill the intent. </p> </li> </ul>",
|
"PostContentResponse$dialogState": "<p>Identifies the current state of the user interaction. Amazon Lex returns one of the following values as <code>dialogState</code>. The client can optionally use this information to customize the user interface. </p> <ul> <li> <p> <code>ElicitIntent</code> - Amazon Lex wants to elicit the user's intent. Consider the following examples: </p> <p> For example, a user might utter an intent (\"I want to order a pizza\"). If Amazon Lex cannot infer the user intent from this utterance, it will return this dialog state. </p> </li> <li> <p> <code>ConfirmIntent</code> - Amazon Lex is expecting a \"yes\" or \"no\" response. </p> <p>For example, Amazon Lex wants user confirmation before fulfilling an intent. Instead of a simple \"yes\" or \"no\" response, a user might respond with additional information. For example, \"yes, but make it a thick crust pizza\" or \"no, I want to order a drink.\" Amazon Lex can process such additional information (in these examples, update the crust type slot or change the intent from OrderPizza to OrderDrink). </p> </li> <li> <p> <code>ElicitSlot</code> - Amazon Lex is expecting the value of a slot for the current intent. </p> <p> For example, suppose that in the response Amazon Lex sends this message: \"What size pizza would you like?\". A user might reply with the slot value (e.g., \"medium\"). The user might also provide additional information in the response (e.g., \"medium thick crust pizza\"). Amazon Lex can process such additional information appropriately. </p> </li> <li> <p> <code>Fulfilled</code> - Conveys that the Lambda function has successfully fulfilled the intent. </p> </li> <li> <p> <code>ReadyForFulfillment</code> - Conveys that the client has to fulfill the request. </p> </li> <li> <p> <code>Failed</code> - Conveys that the conversation with the user failed. 
</p> <p> This can happen for various reasons, including that the user does not provide an appropriate response to prompts from the service (you can configure how many times Amazon Lex can prompt a user for specific information), or if the Lambda function fails to fulfill the intent. </p> </li> </ul>",
|
||||||
"PostTextResponse$dialogState": "<p> Identifies the current state of the user interaction. Amazon Lex returns one of the following values as <code>dialogState</code>. The client can optionally use this information to customize the user interface. </p> <ul> <li> <p> <code>ElicitIntent</code> – Amazon Lex wants to elicit user intent. </p> <p>For example, a user might utter an intent (\"I want to order a pizza\"). If Amazon Lex cannot infer the user intent from this utterance, it will return this dialogState.</p> </li> <li> <p> <code>ConfirmIntent</code> – Amazon Lex is expecting a \"yes\" or \"no\" response. </p> <p> For example, Amazon Lex wants user confirmation before fulfilling an intent. </p> <p>Instead of a simple \"yes\" or \"no,\" a user might respond with additional information. For example, \"yes, but make it thick crust pizza\" or \"no, I want to order a drink\". Amazon Lex can process such additional information (in these examples, update the crust type slot value, or change intent from OrderPizza to OrderDrink).</p> </li> <li> <p> <code>ElicitSlot</code> – Amazon Lex is expecting a slot value for the current intent. </p> <p>For example, suppose that in the response Amazon Lex sends this message: \"What size pizza would you like?\". A user might reply with the slot value (e.g., \"medium\"). The user might also provide additional information in the response (e.g., \"medium thick crust pizza\"). Amazon Lex can process such additional information appropriately. </p> </li> <li> <p> <code>Fulfilled</code> – Conveys that the Lambda function configured for the intent has successfully fulfilled the intent. </p> </li> <li> <p> <code>ReadyForFulfillment</code> – Conveys that the client has to fulfill the intent. </p> </li> <li> <p> <code>Failed</code> – Conveys that the conversation with the user failed. 
</p> <p> This can happen for various reasons including that the user did not provide an appropriate response to prompts from the service (you can configure how many times Amazon Lex can prompt a user for specific information), or the Lambda function failed to fulfill the intent. </p> </li> </ul>"
|
"PostTextResponse$dialogState": "<p> Identifies the current state of the user interaction. Amazon Lex returns one of the following values as <code>dialogState</code>. The client can optionally use this information to customize the user interface. </p> <ul> <li> <p> <code>ElicitIntent</code> - Amazon Lex wants to elicit user intent. </p> <p>For example, a user might utter an intent (\"I want to order a pizza\"). If Amazon Lex cannot infer the user intent from this utterance, it will return this dialogState.</p> </li> <li> <p> <code>ConfirmIntent</code> - Amazon Lex is expecting a \"yes\" or \"no\" response. </p> <p> For example, Amazon Lex wants user confirmation before fulfilling an intent. </p> <p>Instead of a simple \"yes\" or \"no,\" a user might respond with additional information. For example, \"yes, but make it thick crust pizza\" or \"no, I want to order a drink\". Amazon Lex can process such additional information (in these examples, update the crust type slot value, or change intent from OrderPizza to OrderDrink).</p> </li> <li> <p> <code>ElicitSlot</code> - Amazon Lex is expecting a slot value for the current intent. </p> <p>For example, suppose that in the response Amazon Lex sends this message: \"What size pizza would you like?\". A user might reply with the slot value (e.g., \"medium\"). The user might also provide additional information in the response (e.g., \"medium thick crust pizza\"). Amazon Lex can process such additional information appropriately. </p> </li> <li> <p> <code>Fulfilled</code> - Conveys that the Lambda function configured for the intent has successfully fulfilled the intent. </p> </li> <li> <p> <code>ReadyForFulfillment</code> - Conveys that the client has to fulfill the intent. </p> </li> <li> <p> <code>Failed</code> - Conveys that the conversation with the user failed. 
</p> <p> This can happen for various reasons including that the user did not provide an appropriate response to prompts from the service (you can configure how many times Amazon Lex can prompt a user for specific information), or the Lambda function failed to fulfill the intent. </p> </li> </ul>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"ErrorMessage": {
|
"ErrorMessage": {
|
||||||
@ -101,7 +108,7 @@
|
|||||||
"HttpContentType": {
|
"HttpContentType": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"PostContentRequest$contentType": "<p> You pass this values as the <code>Content-Type</code> HTTP header. </p> <p> Indicates the audio format or text. The header value must start with one of the following prefixes: </p> <ul> <li> <p>PCM format</p> <ul> <li> <p>audio/l16; rate=16000; channels=1</p> </li> <li> <p>audio/x-l16; sample-rate=16000; channel-count=1</p> </li> </ul> </li> <li> <p>Opus format</p> <ul> <li> <p>audio/x-cbr-opus-with-preamble; preamble-size=0; bit-rate=1; frame-size-milliseconds=1.1</p> </li> </ul> </li> <li> <p>Text format</p> <ul> <li> <p>text/plain; charset=utf-8</p> </li> </ul> </li> </ul>",
|
"PostContentRequest$contentType": "<p> You pass this value as the <code>Content-Type</code> HTTP header. </p> <p> Indicates the audio format or text. The header value must start with one of the following prefixes: </p> <ul> <li> <p>PCM format, audio data must be in little-endian byte order.</p> <ul> <li> <p>audio/l16; rate=16000; channels=1</p> </li> <li> <p>audio/x-l16; sample-rate=16000; channel-count=1</p> </li> <li> <p>audio/lpcm; sample-rate=8000; sample-size-bits=16; channel-count=1; is-big-endian=false </p> </li> </ul> </li> <li> <p>Opus format</p> <ul> <li> <p>audio/x-cbr-opus-with-preamble; preamble-size=0; bit-rate=256000; frame-size-milliseconds=4</p> </li> </ul> </li> <li> <p>Text format</p> <ul> <li> <p>text/plain; charset=utf-8</p> </li> </ul> </li> </ul>",
|
||||||
"PostContentResponse$contentType": "<p>Content type as specified in the <code>Accept</code> HTTP header in the request.</p>"
|
"PostContentResponse$contentType": "<p>Content type as specified in the <code>Accept</code> HTTP header in the request.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -123,7 +130,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"LoopDetectedException": {
|
"LoopDetectedException": {
|
||||||
"base": "<p>Lambda fulfilment function returned <code>DelegateDialogAction</code> to Amazon Lex without changing any slot values. </p>",
|
"base": "<p>This exception is not used.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -178,11 +185,10 @@
|
|||||||
"LimitExceededException$message": null,
|
"LimitExceededException$message": null,
|
||||||
"NotAcceptableException$message": null,
|
"NotAcceptableException$message": null,
|
||||||
"NotFoundException$message": null,
|
"NotFoundException$message": null,
|
||||||
"PostContentRequest$sessionAttributes": "<p>You pass this value in the <code>x-amz-lex-session-attributes</code> HTTP header. The value must be map (keys and values must be strings) that is JSON serialized and then base64 encoded.</p> <p> A session represents dialog between a user and Amazon Lex. At runtime, a client application can pass contextual information, in the request to Amazon Lex. For example, </p> <ul> <li> <p>You might use session attributes to track the requestID of user requests.</p> </li> <li> <p>In Getting Started Exercise 1, the example bot uses the price session attribute to maintain the price of flowers ordered (for example, \"price\":25). The code hook (Lambda function) sets this attribute based on the type of flowers ordered. For more information, see <a href=\"http://docs.aws.amazon.com/lex/latest/dg/gs-bp-details-after-lambda.html\">Review the Details of Information Flow</a>. </p> </li> <li> <p>In the BookTrip bot exercise, the bot uses the <code>currentReservation</code> session attribute to maintains the slot data during the in-progress conversation to book a hotel or book a car. For more information, see <a href=\"http://docs.aws.amazon.com/lex/latest/dg/book-trip-detail-flow.html\">Details of Information Flow</a>. </p> </li> </ul> <p> Amazon Lex passes these session attributes to the Lambda functions configured for the intent In the your Lambda function, you can use the session attributes for initialization and customization (prompts). Some examples are: </p> <ul> <li> <p> Initialization - In a pizza ordering bot, if you pass user location (for example, <code>\"Location : 111 Maple Street\"</code>), then your Lambda function might use this information to determine the closest pizzeria to place the order (and perhaps set the storeAddress slot value as well). </p> <p> Personalized prompts - For example, you can configure prompts to refer to the user by name (for example, \"Hey [firstName], what toppings would you like?\"). 
You can pass the user's name as a session attribute (\"firstName\": \"Joe\") so that Amazon Lex can substitute the placeholder to provide a personalized prompt to the user (\"Hey Joe, what toppings would you like?\"). </p> </li> </ul> <note> <p> Amazon Lex does not persist session attributes. </p> <p> If you configured a code hook for the intent, Amazon Lex passes the incoming session attributes to the Lambda function. The Lambda function must return these session attributes if you want Amazon Lex to return them to the client. </p> <p> If there is no code hook configured for the intent Amazon Lex simply returns the session attributes to the client application. </p> </note>",
|
"PostContentResponse$slots": "<p>Map of zero or more intent slots (name/value pairs) Amazon Lex detected from the user input during the conversation.</p> <p>Amazon Lex creates a resolution list containing likely values for a slot. The value that it returns is determined by the <code>valueSelectionStrategy</code> selected when the slot type was created or updated. If <code>valueSelectionStrategy</code> is set to <code>ORIGINAL_VALUE</code>, the value provided by the user is returned, if the user value is similar to the slot values. If <code>valueSelectionStrategy</code> is set to <code>TOP_RESOLUTION</code> Amazon Lex returns the first value in the resolution list or, if there is no resolution list, null. If you don't specify a <code>valueSelectionStrategy</code>, the default is <code>ORIGINAL_VALUE</code>.</p>",
|
||||||
"PostContentResponse$slots": "<p>Map of zero or more intent slots (name/value pairs) Amazon Lex detected from the user input during the conversation.</p>",
|
|
||||||
"PostContentResponse$sessionAttributes": "<p> Map of key/value pairs representing the session-specific context information. </p>",
|
"PostContentResponse$sessionAttributes": "<p> Map of key/value pairs representing the session-specific context information. </p>",
|
||||||
"PostContentResponse$slotToElicit": "<p> If the <code>dialogState</code> value is <code>ElicitSlot</code>, returns the name of the slot for which Amazon Lex is eliciting a value. </p>",
|
"PostContentResponse$slotToElicit": "<p> If the <code>dialogState</code> value is <code>ElicitSlot</code>, returns the name of the slot for which Amazon Lex is eliciting a value. </p>",
|
||||||
"PostContentResponse$inputTranscript": "<p>Transcript of the voice input to the operation.</p>",
|
"PostContentResponse$inputTranscript": "<p>The text used to process the request.</p> <p>If the input was an audio stream, the <code>inputTranscript</code> field contains the text extracted from the audio stream. This is the text that is actually processed to recognize intents and slot values. You can use this information to determine if Amazon Lex is correctly processing the audio that you send.</p>",
|
||||||
"PostTextResponse$slotToElicit": "<p>If the <code>dialogState</code> value is <code>ElicitSlot</code>, returns the name of the slot for which Amazon Lex is eliciting a value. </p>",
|
"PostTextResponse$slotToElicit": "<p>If the <code>dialogState</code> value is <code>ElicitSlot</code>, returns the name of the slot for which Amazon Lex is eliciting a value. </p>",
|
||||||
"RequestTimeoutException$message": null,
|
"RequestTimeoutException$message": null,
|
||||||
"ResponseCard$version": "<p>The version of the response card format.</p>",
|
"ResponseCard$version": "<p>The version of the response card format.</p>",
|
||||||
@ -194,8 +200,9 @@
|
|||||||
"StringMap": {
|
"StringMap": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"PostTextRequest$sessionAttributes": "<p> By using session attributes, a client application can pass contextual information in the request to Amazon Lex For example, </p> <ul> <li> <p>In Getting Started Exercise 1, the example bot uses the <code>price</code> session attribute to maintain the price of the flowers ordered (for example, \"Price\":25). The code hook (the Lambda function) sets this attribute based on the type of flowers ordered. For more information, see <a href=\"http://docs.aws.amazon.com/lex/latest/dg/gs-bp-details-after-lambda.html\">Review the Details of Information Flow</a>. </p> </li> <li> <p>In the BookTrip bot exercise, the bot uses the <code>currentReservation</code> session attribute to maintain slot data during the in-progress conversation to book a hotel or book a car. For more information, see <a href=\"http://docs.aws.amazon.com/lex/latest/dg/book-trip-detail-flow.html\">Details of Information Flow</a>. </p> </li> <li> <p>You might use the session attributes (key, value pairs) to track the requestID of user requests.</p> </li> </ul> <p> Amazon Lex simply passes these session attributes to the Lambda functions configured for the intent.</p> <p>In your Lambda function, you can also use the session attributes for initialization and customization (prompts and response cards). Some examples are:</p> <ul> <li> <p> Initialization - In a pizza ordering bot, if you can pass the user location as a session attribute (for example, <code>\"Location\" : \"111 Maple street\"</code>), then your Lambda function might use this information to determine the closest pizzeria to place the order (perhaps to set the storeAddress slot value). </p> </li> <li> <p> Personalize prompts - For example, you can configure prompts to refer to the user name. (For example, \"Hey [FirstName], what toppings would you like?\"). 
You can pass the user name as a session attribute (<code>\"FirstName\" : \"Joe\"</code>) so that Amazon Lex can substitute the placeholder to provide a personalize prompt to the user (\"Hey Joe, what toppings would you like?\"). </p> </li> </ul> <note> <p> Amazon Lex does not persist session attributes. </p> <p> If you configure a code hook for the intent, Amazon Lex passes the incoming session attributes to the Lambda function. If you want Amazon Lex to return these session attributes back to the client, the Lambda function must return them. </p> <p> If there is no code hook configured for the intent, Amazon Lex simply returns the session attributes back to the client application. </p> </note>",
|
"PostTextRequest$sessionAttributes": "<p>Application-specific information passed between Amazon Lex and a client application.</p> <p>For more information, see <a href=\"http://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-session-attribs\">Setting Session Attributes</a>.</p>",
|
||||||
"PostTextResponse$slots": "<p> The intent slots (name/value pairs) that Amazon Lex detected so far from the user input in the conversation. </p>",
|
"PostTextRequest$requestAttributes": "<p>Request-specific information passed between Amazon Lex and a client application.</p> <p>The namespace <code>x-amz-lex:</code> is reserved for special attributes. Don't create any request attributes with the prefix <code>x-amz-lex:</code>.</p> <p>For more information, see <a href=\"http://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-request-attribs\">Setting Request Attributes</a>.</p>",
|
||||||
|
"PostTextResponse$slots": "<p> The intent slots that Amazon Lex detected from the user input in the conversation. </p> <p>Amazon Lex creates a resolution list containing likely values for a slot. The value that it returns is determined by the <code>valueSelectionStrategy</code> selected when the slot type was created or updated. If <code>valueSelectionStrategy</code> is set to <code>ORIGINAL_VALUE</code>, the value provided by the user is returned, if the user value is similar to the slot values. If <code>valueSelectionStrategy</code> is set to <code>TOP_RESOLUTION</code> Amazon Lex returns the first value in the resolution list or, if there is no resolution list, null. If you don't specify a <code>valueSelectionStrategy</code>, the default is <code>ORIGINAL_VALUE</code>.</p>",
|
||||||
"PostTextResponse$sessionAttributes": "<p>A map of key-value pairs representing the session-specific context information.</p>"
|
"PostTextResponse$sessionAttributes": "<p>A map of key-value pairs representing the session-specific context information.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -229,8 +236,8 @@
|
|||||||
"UserId": {
|
"UserId": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"PostContentRequest$userId": "<p>ID of the client application user. Typically, each of your application users should have a unique ID. The application developer decides the user IDs. At runtime, each request must include the user ID. Note the following considerations:</p> <ul> <li> <p> If you want a user to start conversation on one device and continue the conversation on another device, you might choose a user-specific identifier, such as the user's login, or Amazon Cognito user ID (assuming your application is using Amazon Cognito). </p> </li> <li> <p> If you want the same user to be able to have two independent conversations on two different devices, you might choose device-specific identifier, such as device ID, or some globally unique identifier. </p> </li> </ul>",
|
"PostContentRequest$userId": "<p>The ID of the client application user. Amazon Lex uses this to identify a user's conversation with your bot. At runtime, each request must contain the <code>userID</code> field.</p> <p>To decide the user ID to use for your application, consider the following factors.</p> <ul> <li> <p>The <code>userID</code> field must not contain any personally identifiable information of the user, for example, name, personal identification numbers, or other end user personal information.</p> </li> <li> <p>If you want a user to start a conversation on one device and continue on another device, use a user-specific identifier.</p> </li> <li> <p>If you want the same user to be able to have two independent conversations on two different devices, choose a device-specific identifier.</p> </li> <li> <p>A user can't have two independent conversations with two different versions of the same bot. For example, a user can't have a conversation with the PROD and BETA versions of the same bot. If you anticipate that a user will need to have conversation with two different versions, for example, while testing, include the bot alias in the user ID to separate the two conversations.</p> </li> </ul>",
|
||||||
"PostTextRequest$userId": "<p>The ID of the client application user. The application developer decides the user IDs. At runtime, each request must include the user ID. Typically, each of your application users should have a unique ID. Note the following considerations: </p> <ul> <li> <p> If you want a user to start a conversation on one device and continue the conversation on another device, you might choose a user-specific identifier, such as a login or Amazon Cognito user ID (assuming your application is using Amazon Cognito). </p> </li> <li> <p> If you want the same user to be able to have two independent conversations on two different devices, you might choose a device-specific identifier, such as device ID, or some globally unique identifier. </p> </li> </ul>"
|
"PostTextRequest$userId": "<p>The ID of the client application user. Amazon Lex uses this to identify a user's conversation with your bot. At runtime, each request must contain the <code>userID</code> field.</p> <p>To decide the user ID to use for your application, consider the following factors.</p> <ul> <li> <p>The <code>userID</code> field must not contain any personally identifiable information of the user, for example, name, personal identification numbers, or other end user personal information.</p> </li> <li> <p>If you want a user to start a conversation on one device and continue on another device, use a user-specific identifier.</p> </li> <li> <p>If you want the same user to be able to have two independent conversations on two different devices, choose a device-specific identifier.</p> </li> <li> <p>A user can't have two independent conversations with two different versions of the same bot. For example, a user can't have a conversation with the PROD and BETA versions of the same bot. If you anticipate that a user will need to have conversation with two different versions, for example, while testing, include the bot alias in the user ID to separate the two conversations.</p> </li> </ul>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"genericAttachmentList": {
|
"genericAttachmentList": {
|
||||||
|
38
vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json
generated
vendored
38
vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/api-2.json
generated
vendored
@ -3064,7 +3064,8 @@
|
|||||||
"DocumentType":{"shape":"DocumentType"},
|
"DocumentType":{"shape":"DocumentType"},
|
||||||
"SchemaVersion":{"shape":"DocumentSchemaVersion"},
|
"SchemaVersion":{"shape":"DocumentSchemaVersion"},
|
||||||
"LatestVersion":{"shape":"DocumentVersion"},
|
"LatestVersion":{"shape":"DocumentVersion"},
|
||||||
"DefaultVersion":{"shape":"DocumentVersion"}
|
"DefaultVersion":{"shape":"DocumentVersion"},
|
||||||
|
"Tags":{"shape":"TagList"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DocumentFilter":{
|
"DocumentFilter":{
|
||||||
@ -3118,7 +3119,8 @@
|
|||||||
"PlatformTypes":{"shape":"PlatformTypeList"},
|
"PlatformTypes":{"shape":"PlatformTypeList"},
|
||||||
"DocumentVersion":{"shape":"DocumentVersion"},
|
"DocumentVersion":{"shape":"DocumentVersion"},
|
||||||
"DocumentType":{"shape":"DocumentType"},
|
"DocumentType":{"shape":"DocumentType"},
|
||||||
"SchemaVersion":{"shape":"DocumentSchemaVersion"}
|
"SchemaVersion":{"shape":"DocumentSchemaVersion"},
|
||||||
|
"Tags":{"shape":"TagList"}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DocumentIdentifierList":{
|
"DocumentIdentifierList":{
|
||||||
@ -3128,6 +3130,33 @@
|
|||||||
"locationName":"DocumentIdentifier"
|
"locationName":"DocumentIdentifier"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"DocumentKeyValuesFilter":{
|
||||||
|
"type":"structure",
|
||||||
|
"members":{
|
||||||
|
"Key":{"shape":"DocumentKeyValuesFilterKey"},
|
||||||
|
"Values":{"shape":"DocumentKeyValuesFilterValues"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DocumentKeyValuesFilterKey":{
|
||||||
|
"type":"string",
|
||||||
|
"max":128,
|
||||||
|
"min":1
|
||||||
|
},
|
||||||
|
"DocumentKeyValuesFilterList":{
|
||||||
|
"type":"list",
|
||||||
|
"member":{"shape":"DocumentKeyValuesFilter"},
|
||||||
|
"max":5,
|
||||||
|
"min":0
|
||||||
|
},
|
||||||
|
"DocumentKeyValuesFilterValue":{
|
||||||
|
"type":"string",
|
||||||
|
"max":256,
|
||||||
|
"min":1
|
||||||
|
},
|
||||||
|
"DocumentKeyValuesFilterValues":{
|
||||||
|
"type":"list",
|
||||||
|
"member":{"shape":"DocumentKeyValuesFilterValue"}
|
||||||
|
},
|
||||||
"DocumentLimitExceeded":{
|
"DocumentLimitExceeded":{
|
||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
@ -4631,6 +4660,7 @@
|
|||||||
"type":"structure",
|
"type":"structure",
|
||||||
"members":{
|
"members":{
|
||||||
"DocumentFilterList":{"shape":"DocumentFilterList"},
|
"DocumentFilterList":{"shape":"DocumentFilterList"},
|
||||||
|
"Filters":{"shape":"DocumentKeyValuesFilterList"},
|
||||||
"MaxResults":{
|
"MaxResults":{
|
||||||
"shape":"MaxResults",
|
"shape":"MaxResults",
|
||||||
"box":true
|
"box":true
|
||||||
@ -6010,9 +6040,11 @@
|
|||||||
"ResourceTypeForTagging":{
|
"ResourceTypeForTagging":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"enum":[
|
"enum":[
|
||||||
|
"Document",
|
||||||
"ManagedInstance",
|
"ManagedInstance",
|
||||||
"MaintenanceWindow",
|
"MaintenanceWindow",
|
||||||
"Parameter"
|
"Parameter",
|
||||||
|
"PatchBaseline"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"ResponseCode":{"type":"integer"},
|
"ResponseCode":{"type":"integer"},
|
||||||
|
94
vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json
generated
vendored
94
vendor/github.com/aws/aws-sdk-go/models/apis/ssm/2014-11-06/docs-2.json
generated
vendored
@ -1,8 +1,8 @@
|
|||||||
{
|
{
|
||||||
"version": "2.0",
|
"version": "2.0",
|
||||||
"service": "<fullname>Amazon EC2 Systems Manager</fullname> <p>Amazon EC2 Systems Manager is a collection of capabilities that helps you automate management tasks such as collecting system inventory, applying operating system (OS) patches, automating the creation of Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale. Systems Manager lets you remotely and securely manage the configuration of your managed instances. A <i>managed instance</i> is any Amazon EC2 instance or on-premises machine in your hybrid environment that has been configured for Systems Manager.</p> <p>This reference is intended to be used with the <a href=\"http://docs.aws.amazon.com/systems-manager/latest/userguide/\">Amazon EC2 Systems Manager User Guide</a>.</p> <p>To get started, verify prerequisites and configure managed instances. For more information, see <a href=\"http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html\">Systems Manager Prerequisites</a>.</p>",
|
"service": "<fullname>Amazon EC2 Systems Manager</fullname> <p>Amazon EC2 Systems Manager is a collection of capabilities that helps you automate management tasks such as collecting system inventory, applying operating system (OS) patches, automating the creation of Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale. Systems Manager lets you remotely and securely manage the configuration of your managed instances. A <i>managed instance</i> is any Amazon EC2 instance or on-premises machine in your hybrid environment that has been configured for Systems Manager.</p> <p>This reference is intended to be used with the <a href=\"http://docs.aws.amazon.com/systems-manager/latest/userguide/\">Amazon EC2 Systems Manager User Guide</a>.</p> <p>To get started, verify prerequisites and configure managed instances. For more information, see <a href=\"http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html\">Systems Manager Prerequisites</a>.</p> <p>For information about other API actions you can perform on Amazon EC2 instances, see the <a href=\"http://docs.aws.amazon.com/AWSEC2/latest/APIReference/\">Amazon EC2 API Reference</a>. For information about how to use a Query API, see <a href=\"http://docs.aws.amazon.com/AWSEC2/latest/APIReference/making-api-requests.html\">Making API Requests</a>. </p>",
|
||||||
"operations": {
|
"operations": {
|
||||||
"AddTagsToResource": "<p>Adds or overwrites one or more tags for the specified resource. Tags are metadata that you assign to your managed instances, Maintenance Windows, or Parameter Store parameters. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define. For example, you could define a set of tags for your account's managed instances that helps you track each instance's owner and stack level. For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. Or Key=Stack and Value=Production, Pre-Production, or Test.</p> <p>Each resource can have a maximum of 10 tags. </p> <p>We recommend that you devise a set of tag keys that meets your needs for each resource type. Using a consistent set of tag keys makes it easier for you to manage your resources. You can search and filter the resources based on the tags you add. Tags don't have any semantic meaning to Amazon EC2 and are interpreted strictly as a string of characters. </p> <p>For more information about tags, see <a href=\"http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html\">Tagging Your Amazon EC2 Resources</a> in the <i>Amazon EC2 User Guide</i>.</p>",
|
"AddTagsToResource": "<p>Adds or overwrites one or more tags for the specified resource. Tags are metadata that you can assign to your documents, managed instances, Maintenance Windows, Parameter Store parameters, and patch baselines. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define. For example, you could define a set of tags for your account's managed instances that helps you track each instance's owner and stack level. For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. Or Key=Stack and Value=Production, Pre-Production, or Test.</p> <p>Each resource can have a maximum of 10 tags. </p> <p>We recommend that you devise a set of tag keys that meets your needs for each resource type. Using a consistent set of tag keys makes it easier for you to manage your resources. You can search and filter the resources based on the tags you add. Tags don't have any semantic meaning to Amazon EC2 and are interpreted strictly as a string of characters. </p> <p>For more information about tags, see <a href=\"http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html\">Tagging Your Amazon EC2 Resources</a> in the <i>Amazon EC2 User Guide</i>.</p>",
|
||||||
"CancelCommand": "<p>Attempts to cancel the command specified by the Command ID. There is no guarantee that the command will be terminated and the underlying process stopped.</p>",
|
"CancelCommand": "<p>Attempts to cancel the command specified by the Command ID. There is no guarantee that the command will be terminated and the underlying process stopped.</p>",
|
||||||
"CreateActivation": "<p>Registers your on-premises server or virtual machine with Amazon EC2 so that you can manage these resources using Run Command. An on-premises server or virtual machine that has been registered with EC2 is called a managed instance. For more information about activations, see <a href=\"http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html\">Setting Up Systems Manager in Hybrid Environments</a>.</p>",
|
"CreateActivation": "<p>Registers your on-premises server or virtual machine with Amazon EC2 so that you can manage these resources using Run Command. An on-premises server or virtual machine that has been registered with EC2 is called a managed instance. For more information about activations, see <a href=\"http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html\">Setting Up Systems Manager in Hybrid Environments</a>.</p>",
|
||||||
"CreateAssociation": "<p>Associates the specified Systems Manager document with the specified instances or targets.</p> <p>When you associate a document with one or more instances using instance IDs or tags, the SSM Agent running on the instance processes the document and configures the instance as specified.</p> <p>If you associate a document with an instance that already has an associated document, the system throws the AssociationAlreadyExists exception.</p>",
|
"CreateAssociation": "<p>Associates the specified Systems Manager document with the specified instances or targets.</p> <p>When you associate a document with one or more instances using instance IDs or tags, the SSM Agent running on the instance processes the document and configures the instance as specified.</p> <p>If you associate a document with an instance that already has an associated document, the system throws the AssociationAlreadyExists exception.</p>",
|
||||||
@ -27,7 +27,7 @@
|
|||||||
"DescribeAssociation": "<p>Describes the associations for the specified Systems Manager document or instance.</p>",
|
"DescribeAssociation": "<p>Describes the associations for the specified Systems Manager document or instance.</p>",
|
||||||
"DescribeAutomationExecutions": "<p>Provides details about all active and terminated Automation executions.</p>",
|
"DescribeAutomationExecutions": "<p>Provides details about all active and terminated Automation executions.</p>",
|
||||||
"DescribeAvailablePatches": "<p>Lists all patches that could possibly be included in a patch baseline.</p>",
|
"DescribeAvailablePatches": "<p>Lists all patches that could possibly be included in a patch baseline.</p>",
|
||||||
"DescribeDocument": "<p>Describes the specified SSM document.</p>",
|
"DescribeDocument": "<p>Describes the specified Systems Manager document.</p>",
|
||||||
"DescribeDocumentPermission": "<p>Describes the permissions for a Systems Manager document. If you created the document, you are the owner. If a document is shared, it can either be shared privately (by specifying a user's AWS account ID) or publicly (<i>All</i>). </p>",
|
"DescribeDocumentPermission": "<p>Describes the permissions for a Systems Manager document. If you created the document, you are the owner. If a document is shared, it can either be shared privately (by specifying a user's AWS account ID) or publicly (<i>All</i>). </p>",
|
||||||
"DescribeEffectiveInstanceAssociations": "<p>All associations for the instance(s).</p>",
|
"DescribeEffectiveInstanceAssociations": "<p>All associations for the instance(s).</p>",
|
||||||
"DescribeEffectivePatchesForPatchBaseline": "<p>Retrieves the current effective patches (the patch and the approval state) for the specified patch baseline. Note that this API applies only to Windows patch baselines.</p>",
|
"DescribeEffectivePatchesForPatchBaseline": "<p>Retrieves the current effective patches (the patch and the approval state) for the specified patch baseline. Note that this API applies only to Windows patch baselines.</p>",
|
||||||
@ -50,7 +50,7 @@
|
|||||||
"GetCommandInvocation": "<p>Returns detailed information about command execution for an invocation or plugin. </p>",
|
"GetCommandInvocation": "<p>Returns detailed information about command execution for an invocation or plugin. </p>",
|
||||||
"GetDefaultPatchBaseline": "<p>Retrieves the default patch baseline. Note that Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.</p>",
|
"GetDefaultPatchBaseline": "<p>Retrieves the default patch baseline. Note that Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.</p>",
|
||||||
"GetDeployablePatchSnapshotForInstance": "<p>Retrieves the current snapshot for the patch baseline the instance uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document. </p>",
|
"GetDeployablePatchSnapshotForInstance": "<p>Retrieves the current snapshot for the patch baseline the instance uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document. </p>",
|
||||||
"GetDocument": "<p>Gets the contents of the specified SSM document.</p>",
|
"GetDocument": "<p>Gets the contents of the specified Systems Manager document.</p>",
|
||||||
"GetInventory": "<p>Query inventory information.</p>",
|
"GetInventory": "<p>Query inventory information.</p>",
|
||||||
"GetInventorySchema": "<p>Return a list of inventory type names for the account, or return a list of attribute names for a specific Inventory item type. </p>",
|
"GetInventorySchema": "<p>Return a list of inventory type names for the account, or return a list of attribute names for a specific Inventory item type. </p>",
|
||||||
"GetMaintenanceWindow": "<p>Retrieves a Maintenance Window.</p>",
|
"GetMaintenanceWindow": "<p>Retrieves a Maintenance Window.</p>",
|
||||||
@ -71,7 +71,7 @@
|
|||||||
"ListComplianceItems": "<p>For a specified resource ID, this API action returns a list of compliance statuses for different resource types. Currently, you can only specify one resource ID per call. List results depend on the criteria specified in the filter. </p>",
|
"ListComplianceItems": "<p>For a specified resource ID, this API action returns a list of compliance statuses for different resource types. Currently, you can only specify one resource ID per call. List results depend on the criteria specified in the filter. </p>",
|
||||||
"ListComplianceSummaries": "<p>Returns a summary count of compliant and non-compliant resources for a compliance type. For example, this call can return State Manager associations, patches, or custom compliance types according to the filter criteria that you specify. </p>",
|
"ListComplianceSummaries": "<p>Returns a summary count of compliant and non-compliant resources for a compliance type. For example, this call can return State Manager associations, patches, or custom compliance types according to the filter criteria that you specify. </p>",
|
||||||
"ListDocumentVersions": "<p>List all versions for a document.</p>",
|
"ListDocumentVersions": "<p>List all versions for a document.</p>",
|
||||||
"ListDocuments": "<p>Describes one or more of your SSM documents.</p>",
|
"ListDocuments": "<p>Describes one or more of your Systems Manager documents.</p>",
|
||||||
"ListInventoryEntries": "<p>A list of inventory items returned by the request.</p>",
|
"ListInventoryEntries": "<p>A list of inventory items returned by the request.</p>",
|
||||||
"ListResourceComplianceSummaries": "<p>Returns a resource-level summary count. The summary includes information about compliant and non-compliant statuses and detailed compliance-item severity counts, according to the filter criteria you specify.</p>",
|
"ListResourceComplianceSummaries": "<p>Returns a resource-level summary count. The summary includes information about compliant and non-compliant statuses and detailed compliance-item severity counts, according to the filter criteria you specify.</p>",
|
||||||
"ListResourceDataSync": "<p>Lists your resource data sync configurations. Includes information about the last time a sync attempted to start, the last sync status, and the last time a sync successfully completed.</p> <p>The number of sync configurations might be too large to return using a single call to <code>ListResourceDataSync</code>. You can limit the number of sync configurations returned by using the <code>MaxResults</code> parameter. To determine whether there are more sync configurations to list, check the value of <code>NextToken</code> in the output. If there are more sync configurations to list, you can request them by specifying the <code>NextToken</code> returned in the call to the parameter of a subsequent call. </p>",
|
"ListResourceDataSync": "<p>Lists your resource data sync configurations. Includes information about the last time a sync attempted to start, the last sync status, and the last time a sync successfully completed.</p> <p>The number of sync configurations might be too large to return using a single call to <code>ListResourceDataSync</code>. You can limit the number of sync configurations returned by using the <code>MaxResults</code> parameter. To determine whether there are more sync configurations to list, check the value of <code>NextToken</code> in the output. If there are more sync configurations to list, you can request them by specifying the <code>NextToken</code> returned in the call to the parameter of a subsequent call. </p>",
|
||||||
@ -1440,11 +1440,11 @@
|
|||||||
"DocumentARN": {
|
"DocumentARN": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DescribeDocumentRequest$Name": "<p>The name of the SSM document.</p>",
|
"DescribeDocumentRequest$Name": "<p>The name of the Systems Manager document.</p>",
|
||||||
"DocumentDescription$Name": "<p>The name of the SSM document.</p>",
|
"DocumentDescription$Name": "<p>The name of the Systems Manager document.</p>",
|
||||||
"DocumentIdentifier$Name": "<p>The name of the SSM document.</p>",
|
"DocumentIdentifier$Name": "<p>The name of the Systems Manager document.</p>",
|
||||||
"GetDocumentRequest$Name": "<p>The name of the SSM document.</p>",
|
"GetDocumentRequest$Name": "<p>The name of the Systems Manager document.</p>",
|
||||||
"GetDocumentResult$Name": "<p>The name of the SSM document.</p>",
|
"GetDocumentResult$Name": "<p>The name of the Systems Manager document.</p>",
|
||||||
"SendCommandRequest$DocumentName": "<p>Required. The name of the Systems Manager document to execute. This can be a public document or a custom document.</p>",
|
"SendCommandRequest$DocumentName": "<p>Required. The name of the Systems Manager document to execute. This can be a public document or a custom document.</p>",
|
||||||
"StartAutomationExecutionRequest$DocumentName": "<p>The name of the Automation document to use for this execution.</p>"
|
"StartAutomationExecutionRequest$DocumentName": "<p>The name of the Automation document to use for this execution.</p>"
|
||||||
}
|
}
|
||||||
@ -1458,7 +1458,7 @@
|
|||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateDocumentRequest$Content": "<p>A valid JSON string.</p>",
|
"CreateDocumentRequest$Content": "<p>A valid JSON string.</p>",
|
||||||
"GetDocumentResult$Content": "<p>The contents of the SSM document.</p>",
|
"GetDocumentResult$Content": "<p>The contents of the Systems Manager document.</p>",
|
||||||
"InstanceAssociation$Content": "<p>The content of the association document for the instance(s).</p>",
|
"InstanceAssociation$Content": "<p>The content of the association document for the instance(s).</p>",
|
||||||
"UpdateDocumentRequest$Content": "<p>The content in a document that you want to update.</p>"
|
"UpdateDocumentRequest$Content": "<p>The content in a document that you want to update.</p>"
|
||||||
}
|
}
|
||||||
@ -1470,10 +1470,10 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DocumentDescription": {
|
"DocumentDescription": {
|
||||||
"base": "<p>Describes an SSM document. </p>",
|
"base": "<p>Describes a Systems Manager document. </p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"CreateDocumentResult$DocumentDescription": "<p>Information about the Systems Manager document.</p>",
|
"CreateDocumentResult$DocumentDescription": "<p>Information about the Systems Manager document.</p>",
|
||||||
"DescribeDocumentResult$Document": "<p>Information about the SSM document.</p>",
|
"DescribeDocumentResult$Document": "<p>Information about the Systems Manager document.</p>",
|
||||||
"UpdateDocumentResult$DocumentDescription": "<p>A description of the document that was updated.</p>"
|
"UpdateDocumentResult$DocumentDescription": "<p>A description of the document that was updated.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -1518,7 +1518,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DocumentIdentifier": {
|
"DocumentIdentifier": {
|
||||||
"base": "<p>Describes the name of an SSM document.</p>",
|
"base": "<p>Describes the name of a Systems Manager document.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"DocumentIdentifierList$member": null
|
"DocumentIdentifierList$member": null
|
||||||
}
|
}
|
||||||
@ -1526,19 +1526,49 @@
|
|||||||
"DocumentIdentifierList": {
|
"DocumentIdentifierList": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"ListDocumentsResult$DocumentIdentifiers": "<p>The names of the SSM documents.</p>"
|
"ListDocumentsResult$DocumentIdentifiers": "<p>The names of the Systems Manager documents.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DocumentKeyValuesFilter": {
|
||||||
|
"base": "<p>One or more filters. Use a filter to return a more specific list of documents.</p> <p>For keys, you can specify one or more tags that have been applied to a document. </p> <p>Other valid values include Owner, Name, PlatformTypes, and DocumentType.</p> <p>Note that only one Owner can be specified in a request. For example: <code>Key=Owner,Values=Self</code>.</p> <p>If you use Name as a key, you can use a name prefix to return a list of documents. For example, in the AWS CLI, to return a list of all documents that begin with <code>Te</code>, run the following command:</p> <p> <code>aws ssm list-documents --filters Key=Name,Values=Te</code> </p> <p>If you specify more than two keys, only documents that are identified by all the tags are returned in the results. If you specify more than two values for a key, documents that are identified by any of the values are returned in the results.</p> <p>To specify a custom key and value pair, use the format <code>Key=tag:[tagName],Values=[valueName]</code>.</p> <p>For example, if you created a Key called region and are using the AWS CLI to call the <code>list-documents</code> command: </p> <p> <code>aws ssm list-documents --filters Key=tag:region,Values=east,west Key=Owner,Values=Self</code> </p>",
|
||||||
|
"refs": {
|
||||||
|
"DocumentKeyValuesFilterList$member": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DocumentKeyValuesFilterKey": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"DocumentKeyValuesFilter$Key": "<p>The name of the filter key.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DocumentKeyValuesFilterList": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"ListDocumentsRequest$Filters": "<p>One or more filters. Use a filter to return a more specific list of results.</p>"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DocumentKeyValuesFilterValue": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"DocumentKeyValuesFilterValues$member": null
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DocumentKeyValuesFilterValues": {
|
||||||
|
"base": null,
|
||||||
|
"refs": {
|
||||||
|
"DocumentKeyValuesFilter$Values": "<p>The value for the filter key.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DocumentLimitExceeded": {
|
"DocumentLimitExceeded": {
|
||||||
"base": "<p>You can have at most 200 active SSM documents.</p>",
|
"base": "<p>You can have at most 200 active Systems Manager documents.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DocumentName": {
|
"DocumentName": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"Association$Name": "<p>The name of the SSM document.</p>",
|
"Association$Name": "<p>The name of the Systems Manager document.</p>",
|
||||||
"AssociationDescription$Name": "<p>The name of the SSM document.</p>",
|
"AssociationDescription$Name": "<p>The name of the Systems Manager document.</p>",
|
||||||
"AssociationVersionInfo$Name": "<p>The name specified when the association was created.</p>",
|
"AssociationVersionInfo$Name": "<p>The name specified when the association was created.</p>",
|
||||||
"AutomationExecution$DocumentName": "<p>The name of the Automation document used during the execution.</p>",
|
"AutomationExecution$DocumentName": "<p>The name of the Automation document used during the execution.</p>",
|
||||||
"AutomationExecutionMetadata$DocumentName": "<p>The name of the Automation document used during execution.</p>",
|
"AutomationExecutionMetadata$DocumentName": "<p>The name of the Automation document used during execution.</p>",
|
||||||
@ -1549,7 +1579,7 @@
|
|||||||
"CreateDocumentRequest$Name": "<p>A name for the Systems Manager document.</p>",
|
"CreateDocumentRequest$Name": "<p>A name for the Systems Manager document.</p>",
|
||||||
"DeleteAssociationRequest$Name": "<p>The name of the Systems Manager document.</p>",
|
"DeleteAssociationRequest$Name": "<p>The name of the Systems Manager document.</p>",
|
||||||
"DeleteDocumentRequest$Name": "<p>The name of the document.</p>",
|
"DeleteDocumentRequest$Name": "<p>The name of the document.</p>",
|
||||||
"DescribeAssociationRequest$Name": "<p>The name of the SSM document.</p>",
|
"DescribeAssociationRequest$Name": "<p>The name of the Systems Manager document.</p>",
|
||||||
"DescribeDocumentPermissionRequest$Name": "<p>The name of the document for which you are the owner.</p>",
|
"DescribeDocumentPermissionRequest$Name": "<p>The name of the document for which you are the owner.</p>",
|
||||||
"DocumentDefaultVersionDescription$Name": "<p>The name of the document.</p>",
|
"DocumentDefaultVersionDescription$Name": "<p>The name of the document.</p>",
|
||||||
"DocumentVersionInfo$Name": "<p>The document name.</p>",
|
"DocumentVersionInfo$Name": "<p>The document name.</p>",
|
||||||
@ -1558,7 +1588,7 @@
|
|||||||
"ListDocumentVersionsRequest$Name": "<p>The name of the document about which you want version information.</p>",
|
"ListDocumentVersionsRequest$Name": "<p>The name of the document about which you want version information.</p>",
|
||||||
"ModifyDocumentPermissionRequest$Name": "<p>The name of the document that you want to share.</p>",
|
"ModifyDocumentPermissionRequest$Name": "<p>The name of the document that you want to share.</p>",
|
||||||
"UpdateAssociationRequest$Name": "<p>The name of the association document.</p>",
|
"UpdateAssociationRequest$Name": "<p>The name of the association document.</p>",
|
||||||
"UpdateAssociationStatusRequest$Name": "<p>The name of the SSM document.</p>",
|
"UpdateAssociationStatusRequest$Name": "<p>The name of the Systems Manager document.</p>",
|
||||||
"UpdateDocumentDefaultVersionRequest$Name": "<p>The name of a custom document that you want to set as the default version.</p>",
|
"UpdateDocumentDefaultVersionRequest$Name": "<p>The name of a custom document that you want to set as the default version.</p>",
|
||||||
"UpdateDocumentRequest$Name": "<p>The name of the document that you want to update.</p>"
|
"UpdateDocumentRequest$Name": "<p>The name of the document that you want to update.</p>"
|
||||||
}
|
}
|
||||||
@ -1566,8 +1596,8 @@
|
|||||||
"DocumentOwner": {
|
"DocumentOwner": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DocumentDescription$Owner": "<p>The AWS user account of the person who created the document.</p>",
|
"DocumentDescription$Owner": "<p>The AWS user account that created the document.</p>",
|
||||||
"DocumentIdentifier$Owner": "<p>The AWS user account of the person who created the document.</p>"
|
"DocumentIdentifier$Owner": "<p>The AWS user account that created the document.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DocumentParameter": {
|
"DocumentParameter": {
|
||||||
@ -1628,13 +1658,13 @@
|
|||||||
"DocumentSha1": {
|
"DocumentSha1": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DocumentDescription$Sha1": "<p>The SHA1 hash of the document, which you can use for verification purposes.</p>"
|
"DocumentDescription$Sha1": "<p>The SHA1 hash of the document, which you can use for verification.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DocumentStatus": {
|
"DocumentStatus": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DocumentDescription$Status": "<p>The status of the SSM document.</p>"
|
"DocumentDescription$Status": "<p>The status of the Systems Manager document.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"DocumentType": {
|
"DocumentType": {
|
||||||
@ -1651,7 +1681,7 @@
|
|||||||
"refs": {
|
"refs": {
|
||||||
"Association$DocumentVersion": "<p>The version of the document used in the association.</p>",
|
"Association$DocumentVersion": "<p>The version of the document used in the association.</p>",
|
||||||
"AssociationDescription$DocumentVersion": "<p>The document version.</p>",
|
"AssociationDescription$DocumentVersion": "<p>The document version.</p>",
|
||||||
"AssociationVersionInfo$DocumentVersion": "<p>The version of an SSM document used when the association version was created.</p>",
|
"AssociationVersionInfo$DocumentVersion": "<p>The version of a Systems Manager document used when the association version was created.</p>",
|
||||||
"AutomationExecution$DocumentVersion": "<p>The version of the document to use during execution.</p>",
|
"AutomationExecution$DocumentVersion": "<p>The version of the document to use during execution.</p>",
|
||||||
"AutomationExecutionMetadata$DocumentVersion": "<p>The document version used during the execution.</p>",
|
"AutomationExecutionMetadata$DocumentVersion": "<p>The document version used during the execution.</p>",
|
||||||
"CreateAssociationBatchRequestEntry$DocumentVersion": "<p>The document version.</p>",
|
"CreateAssociationBatchRequestEntry$DocumentVersion": "<p>The document version.</p>",
|
||||||
@ -2366,7 +2396,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"InvalidParameters": {
|
"InvalidParameters": {
|
||||||
"base": "<p>You must specify values for all required parameters in the SSM document. You can only supply values to parameters defined in the SSM document.</p>",
|
"base": "<p>You must specify values for all required parameters in the Systems Manager document. You can only supply values to parameters defined in the Systems Manager document.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -2386,7 +2416,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"InvalidResourceType": {
|
"InvalidResourceType": {
|
||||||
"base": "<p>The resource type is not valid. If you are attempting to tag an instance, the instance must be a registered, managed instance.</p>",
|
"base": "<p>The resource type is not valid. For example, if you are attempting to tag an instance, the instance must be a registered, managed instance.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -3170,9 +3200,9 @@
|
|||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"GetMaintenanceWindowExecutionTaskResult$TaskArn": "<p>The ARN of the executed task.</p>",
|
"GetMaintenanceWindowExecutionTaskResult$TaskArn": "<p>The ARN of the executed task.</p>",
|
||||||
"GetMaintenanceWindowTaskResult$TaskArn": "<p>The resource that the task used during execution. For RUN_COMMAND and AUTOMATION task types, the TaskArn is the SSM Document name/ARN. For LAMBDA tasks, the value is the function name/ARN. For STEP_FUNCTION tasks, the value is the state machine ARN.</p>",
|
"GetMaintenanceWindowTaskResult$TaskArn": "<p>The resource that the task used during execution. For RUN_COMMAND and AUTOMATION task types, the TaskArn is the Systems Manager Document name/ARN. For LAMBDA tasks, the value is the function name/ARN. For STEP_FUNCTION tasks, the value is the state machine ARN.</p>",
|
||||||
"MaintenanceWindowExecutionTaskIdentity$TaskArn": "<p>The ARN of the executed task.</p>",
|
"MaintenanceWindowExecutionTaskIdentity$TaskArn": "<p>The ARN of the executed task.</p>",
|
||||||
"MaintenanceWindowTask$TaskArn": "<p>The resource that the task uses during execution. For RUN_COMMAND and AUTOMATION task types, <code>TaskArn</code> is the SSM document name or ARN. For LAMBDA tasks, it's the function name or ARN. For STEP_FUNCTION tasks, it's the state machine ARN.</p>",
|
"MaintenanceWindowTask$TaskArn": "<p>The resource that the task uses during execution. For RUN_COMMAND and AUTOMATION task types, <code>TaskArn</code> is the Systems Manager document name or ARN. For LAMBDA tasks, it's the function name or ARN. For STEP_FUNCTION tasks, it's the state machine ARN.</p>",
|
||||||
"RegisterTaskWithMaintenanceWindowRequest$TaskArn": "<p>The ARN of the task to execute </p>",
|
"RegisterTaskWithMaintenanceWindowRequest$TaskArn": "<p>The ARN of the task to execute </p>",
|
||||||
"UpdateMaintenanceWindowTaskRequest$TaskArn": "<p>The task ARN to modify.</p>",
|
"UpdateMaintenanceWindowTaskRequest$TaskArn": "<p>The task ARN to modify.</p>",
|
||||||
"UpdateMaintenanceWindowTaskResult$TaskArn": "<p>The updated task ARN value.</p>"
|
"UpdateMaintenanceWindowTaskResult$TaskArn": "<p>The updated task ARN value.</p>"
|
||||||
@ -4067,7 +4097,7 @@
|
|||||||
"PlatformTypeList": {
|
"PlatformTypeList": {
|
||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"DocumentDescription$PlatformTypes": "<p>The list of OS platforms compatible with this SSM document. </p>",
|
"DocumentDescription$PlatformTypes": "<p>The list of OS platforms compatible with this Systems Manager document. </p>",
|
||||||
"DocumentIdentifier$PlatformTypes": "<p>The operating system platform. </p>"
|
"DocumentIdentifier$PlatformTypes": "<p>The operating system platform. </p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -4629,7 +4659,7 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Tag": {
|
"Tag": {
|
||||||
"base": "<p>Metadata that you assign to your managed instances. Tags enable you to categorize your managed instances in different ways, for example, by purpose, owner, or environment.</p>",
|
"base": "<p>Metadata that you assign to your AWS resources. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. In Systems Manager, you can apply tags to documents, managed instances, Maintenance Windows, Parameter Store parameters, and patch baselines.</p>",
|
||||||
"refs": {
|
"refs": {
|
||||||
"TagList$member": null
|
"TagList$member": null
|
||||||
}
|
}
|
||||||
@ -4645,6 +4675,8 @@
|
|||||||
"base": null,
|
"base": null,
|
||||||
"refs": {
|
"refs": {
|
||||||
"AddTagsToResourceRequest$Tags": "<p> One or more tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. </p>",
|
"AddTagsToResourceRequest$Tags": "<p> One or more tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. </p>",
|
||||||
|
"DocumentDescription$Tags": "<p>The tags, or metadata, that have been applied to the document.</p>",
|
||||||
|
"DocumentIdentifier$Tags": "<p>The tags, or metadata, that have been applied to the document.</p>",
|
||||||
"ListTagsForResourceResult$TagList": "<p>A list of tags.</p>"
|
"ListTagsForResourceResult$TagList": "<p>A list of tags.</p>"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
8
vendor/github.com/aws/aws-sdk-go/models/endpoints/endpoints.json
generated
vendored
8
vendor/github.com/aws/aws-sdk-go/models/endpoints/endpoints.json
generated
vendored
@ -793,6 +793,7 @@
|
|||||||
"ap-northeast-1" : { },
|
"ap-northeast-1" : { },
|
||||||
"ap-northeast-2" : { },
|
"ap-northeast-2" : { },
|
||||||
"ap-south-1" : { },
|
"ap-south-1" : { },
|
||||||
|
"ap-southeast-1" : { },
|
||||||
"ap-southeast-2" : { },
|
"ap-southeast-2" : { },
|
||||||
"ca-central-1" : { },
|
"ca-central-1" : { },
|
||||||
"eu-central-1" : { },
|
"eu-central-1" : { },
|
||||||
@ -806,7 +807,9 @@
|
|||||||
},
|
},
|
||||||
"glue" : {
|
"glue" : {
|
||||||
"endpoints" : {
|
"endpoints" : {
|
||||||
"us-east-1" : { }
|
"us-east-1" : { },
|
||||||
|
"us-east-2" : { },
|
||||||
|
"us-west-2" : { }
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"greengrass" : {
|
"greengrass" : {
|
||||||
@ -814,6 +817,7 @@
|
|||||||
"protocols" : [ "https" ]
|
"protocols" : [ "https" ]
|
||||||
},
|
},
|
||||||
"endpoints" : {
|
"endpoints" : {
|
||||||
|
"ap-northeast-1" : { },
|
||||||
"ap-southeast-2" : { },
|
"ap-southeast-2" : { },
|
||||||
"eu-central-1" : { },
|
"eu-central-1" : { },
|
||||||
"us-east-1" : { },
|
"us-east-1" : { },
|
||||||
@ -1299,6 +1303,7 @@
|
|||||||
},
|
},
|
||||||
"snowball" : {
|
"snowball" : {
|
||||||
"endpoints" : {
|
"endpoints" : {
|
||||||
|
"ap-northeast-1" : { },
|
||||||
"ap-south-1" : { },
|
"ap-south-1" : { },
|
||||||
"ap-southeast-2" : { },
|
"ap-southeast-2" : { },
|
||||||
"eu-central-1" : { },
|
"eu-central-1" : { },
|
||||||
@ -1569,6 +1574,7 @@
|
|||||||
"ap-southeast-2" : { },
|
"ap-southeast-2" : { },
|
||||||
"eu-central-1" : { },
|
"eu-central-1" : { },
|
||||||
"eu-west-1" : { },
|
"eu-west-1" : { },
|
||||||
|
"eu-west-2" : { },
|
||||||
"us-east-1" : { },
|
"us-east-1" : { },
|
||||||
"us-west-2" : { }
|
"us-west-2" : { }
|
||||||
}
|
}
|
||||||
|
73
vendor/github.com/aws/aws-sdk-go/models/protocol_tests/generate.go
generated
vendored
73
vendor/github.com/aws/aws-sdk-go/models/protocol_tests/generate.go
generated
vendored
@ -98,7 +98,6 @@ var extraImports = []string{
|
|||||||
"github.com/aws/aws-sdk-go/private/protocol",
|
"github.com/aws/aws-sdk-go/private/protocol",
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
|
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
|
||||||
"github.com/aws/aws-sdk-go/private/util",
|
"github.com/aws/aws-sdk-go/private/util",
|
||||||
"github.com/stretchr/testify/assert",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func addImports(code string) string {
|
func addImports(code string) string {
|
||||||
@ -140,18 +139,25 @@ func Test{{ .OpName }}(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
{{ .TestCase.TestSuite.API.ProtocolPackage }}.Build(req)
|
{{ .TestCase.TestSuite.API.ProtocolPackage }}.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
{{ if ne .TestCase.InputTest.Body "" }}// assert body
|
{{ if ne .TestCase.InputTest.Body "" }}// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
{{ .BodyAssertions }}{{ end }}
|
{{ .BodyAssertions }}{{ end }}
|
||||||
|
|
||||||
{{ if ne .TestCase.InputTest.URI "" }}// assert URL
|
{{ if ne .TestCase.InputTest.URI "" }}// assert URL
|
||||||
awstesting.AssertURL(t, "https://test{{ .TestCase.InputTest.URI }}", r.URL.String()){{ end }}
|
awstesting.AssertURL(t, "https://test{{ .TestCase.InputTest.URI }}", r.URL.String()){{ end }}
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
{{ range $k, $v := .TestCase.InputTest.Headers }}assert.Equal(t, "{{ $v }}", r.Header.Get("{{ $k }}"))
|
{{ range $k, $v := .TestCase.InputTest.Headers -}}
|
||||||
{{ end }}
|
if e, a := "{{ $v }}", r.Header.Get("{{ $k }}"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
{{ end }}
|
||||||
}
|
}
|
||||||
`))
|
`))
|
||||||
|
|
||||||
@ -184,25 +190,40 @@ func (t tplInputTestCaseData) BodyAssertions() string {
|
|||||||
fmt.Fprintf(code, "awstesting.AssertXML(t, `%s`, util.Trim(string(body)), %s{})",
|
fmt.Fprintf(code, "awstesting.AssertXML(t, `%s`, util.Trim(string(body)), %s{})",
|
||||||
expectedBody, t.TestCase.Given.InputRef.ShapeName)
|
expectedBody, t.TestCase.Given.InputRef.ShapeName)
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(code, "assert.Equal(t, `%s`, util.Trim(string(body)))",
|
code.WriteString(fmtAssertEqual(fmt.Sprintf("%q", expectedBody), "util.Trim(string(body))"))
|
||||||
expectedBody)
|
|
||||||
}
|
}
|
||||||
case "json", "jsonrpc", "rest-json":
|
case "json", "jsonrpc", "rest-json":
|
||||||
if strings.HasPrefix(expectedBody, "{") {
|
if strings.HasPrefix(expectedBody, "{") {
|
||||||
fmt.Fprintf(code, "awstesting.AssertJSON(t, `%s`, util.Trim(string(body)))",
|
fmt.Fprintf(code, "awstesting.AssertJSON(t, `%s`, util.Trim(string(body)))",
|
||||||
expectedBody)
|
expectedBody)
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(code, "assert.Equal(t, `%s`, util.Trim(string(body)))",
|
code.WriteString(fmtAssertEqual(fmt.Sprintf("%q", expectedBody), "util.Trim(string(body))"))
|
||||||
expectedBody)
|
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
fmt.Fprintf(code, "assert.Equal(t, `%s`, util.Trim(string(body)))",
|
code.WriteString(fmtAssertEqual(expectedBody, "util.Trim(string(body))"))
|
||||||
expectedBody)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return code.String()
|
return code.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func fmtAssertEqual(e, a string) string {
|
||||||
|
const format = `if e, a := %s, %s; e != a {
|
||||||
|
t.Errorf("expect %%v, got %%v", e, a)
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
return fmt.Sprintf(format, e, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
func fmtAssertNil(v string) string {
|
||||||
|
const format = `if e := %s; e != nil {
|
||||||
|
t.Errorf("expect nil, got %%v", e)
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
return fmt.Sprintf(format, v)
|
||||||
|
}
|
||||||
|
|
||||||
var tplOutputTestCase = template.Must(template.New("outputcase").Parse(`
|
var tplOutputTestCase = template.Must(template.New("outputcase").Parse(`
|
||||||
func Test{{ .OpName }}(t *testing.T) {
|
func Test{{ .OpName }}(t *testing.T) {
|
||||||
svc := New{{ .TestCase.TestSuite.API.StructName }}(unit.Session, &aws.Config{Endpoint: aws.String("https://test")})
|
svc := New{{ .TestCase.TestSuite.API.StructName }}(unit.Session, &aws.Config{Endpoint: aws.String("https://test")})
|
||||||
@ -218,10 +239,14 @@ func Test{{ .OpName }}(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
{{ .TestCase.TestSuite.API.ProtocolPackage }}.UnmarshalMeta(req)
|
{{ .TestCase.TestSuite.API.ProtocolPackage }}.UnmarshalMeta(req)
|
||||||
{{ .TestCase.TestSuite.API.ProtocolPackage }}.Unmarshal(req)
|
{{ .TestCase.TestSuite.API.ProtocolPackage }}.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
{{ .Assertions }}
|
{{ .Assertions }}
|
||||||
}
|
}
|
||||||
`))
|
`))
|
||||||
@ -454,16 +479,28 @@ func GenerateAssertions(out interface{}, shape *api.Shape, prefix string) string
|
|||||||
default:
|
default:
|
||||||
switch shape.Type {
|
switch shape.Type {
|
||||||
case "timestamp":
|
case "timestamp":
|
||||||
return fmt.Sprintf("assert.Equal(t, time.Unix(%#v, 0).UTC().String(), %s.String())\n", out, prefix)
|
return fmtAssertEqual(
|
||||||
|
fmt.Sprintf("time.Unix(%#v, 0).UTC().String()", out),
|
||||||
|
fmt.Sprintf("%s.String()", prefix),
|
||||||
|
)
|
||||||
case "blob":
|
case "blob":
|
||||||
return fmt.Sprintf("assert.Equal(t, %#v, string(%s))\n", out, prefix)
|
return fmtAssertEqual(
|
||||||
|
fmt.Sprintf("%#v", out),
|
||||||
|
fmt.Sprintf("string(%s)", prefix),
|
||||||
|
)
|
||||||
case "integer", "long":
|
case "integer", "long":
|
||||||
return fmt.Sprintf("assert.Equal(t, int64(%#v), *%s)\n", out, prefix)
|
return fmtAssertEqual(
|
||||||
|
fmt.Sprintf("int64(%#v)", out),
|
||||||
|
fmt.Sprintf("*%s", prefix),
|
||||||
|
)
|
||||||
default:
|
default:
|
||||||
if !reflect.ValueOf(out).IsValid() {
|
if !reflect.ValueOf(out).IsValid() {
|
||||||
return fmt.Sprintf("assert.Nil(t, %s)\n", prefix)
|
return fmtAssertNil(prefix)
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("assert.Equal(t, %#v, *%s)\n", out, prefix)
|
return fmtAssertEqual(
|
||||||
|
fmt.Sprintf("%#v", out),
|
||||||
|
fmt.Sprintf("*%s", prefix),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
44
vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/query.json
generated
vendored
44
vendor/github.com/aws/aws-sdk-go/models/protocol_tests/input/query.json
generated
vendored
@ -553,6 +553,50 @@
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"description": "Base64 encoded Blobs nested",
|
||||||
|
"metadata": {
|
||||||
|
"protocol": "query",
|
||||||
|
"apiVersion": "2014-01-01"
|
||||||
|
},
|
||||||
|
"shapes": {
|
||||||
|
"InputShape": {
|
||||||
|
"type": "structure",
|
||||||
|
"members": {
|
||||||
|
"BlobArgs": {
|
||||||
|
"shape": "BlobsType"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"BlobsType": {
|
||||||
|
"type": "list",
|
||||||
|
"member": {
|
||||||
|
"shape": "BlobType"
|
||||||
|
},
|
||||||
|
"flattened": true
|
||||||
|
},
|
||||||
|
"BlobType": {
|
||||||
|
"type": "blob"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"cases": [
|
||||||
|
{
|
||||||
|
"given": {
|
||||||
|
"input": {
|
||||||
|
"shape": "InputShape"
|
||||||
|
},
|
||||||
|
"name": "OperationName"
|
||||||
|
},
|
||||||
|
"params": {
|
||||||
|
"BlobArgs": ["foo"]
|
||||||
|
},
|
||||||
|
"serialized": {
|
||||||
|
"uri": "/",
|
||||||
|
"body": "Action=OperationName&Version=2014-01-01&BlobArgs.1=Zm9v"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"description": "Timestamp values",
|
"description": "Timestamp values",
|
||||||
"metadata": {
|
"metadata": {
|
||||||
|
14
vendor/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go
generated
vendored
14
vendor/github.com/aws/aws-sdk-go/private/model/api/customization_passes.go
generated
vendored
@ -53,12 +53,20 @@ func (a *API) customizationPasses() {
|
|||||||
func s3Customizations(a *API) {
|
func s3Customizations(a *API) {
|
||||||
var strExpires *Shape
|
var strExpires *Shape
|
||||||
|
|
||||||
|
var keepContentMD5Ref = map[string]struct{}{
|
||||||
|
"PutObjectInput": struct{}{},
|
||||||
|
"UploadPartInput": struct{}{},
|
||||||
|
}
|
||||||
|
|
||||||
for name, s := range a.Shapes {
|
for name, s := range a.Shapes {
|
||||||
// Remove ContentMD5 members
|
// Remove ContentMD5 members unless specified otherwise.
|
||||||
if _, ok := s.MemberRefs["ContentMD5"]; ok {
|
if _, keep := keepContentMD5Ref[name]; !keep {
|
||||||
delete(s.MemberRefs, "ContentMD5")
|
if _, have := s.MemberRefs["ContentMD5"]; have {
|
||||||
|
delete(s.MemberRefs, "ContentMD5")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Generate getter methods for API operation fields used by customizations.
|
||||||
for _, refName := range []string{"Bucket", "SSECustomerKey", "CopySourceSSECustomerKey"} {
|
for _, refName := range []string{"Bucket", "SSECustomerKey", "CopySourceSSECustomerKey"} {
|
||||||
if ref, ok := s.MemberRefs[refName]; ok {
|
if ref, ok := s.MemberRefs[refName]; ok {
|
||||||
ref.GenerateGetter = true
|
ref.GenerateGetter = true
|
||||||
|
81
vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go
generated
vendored
81
vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go
generated
vendored
@ -24,7 +24,6 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/private/protocol/ec2query"
|
"github.com/aws/aws-sdk-go/private/protocol/ec2query"
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
||||||
"github.com/aws/aws-sdk-go/private/util"
|
"github.com/aws/aws-sdk-go/private/util"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ bytes.Buffer // always import bytes
|
var _ bytes.Buffer // always import bytes
|
||||||
@ -1476,10 +1475,14 @@ func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
ec2query.Build(req)
|
ec2query.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertQuery(t, `Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body)))
|
awstesting.AssertQuery(t, `Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1502,10 +1505,14 @@ func TestInputService2ProtocolTestStructureWithLocationNameAndQueryNameAppliedTo
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
ec2query.Build(req)
|
ec2query.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertQuery(t, `Action=OperationName&BarLocationName=val2&Foo=val1&Version=2014-01-01&yuckQueryName=val3`, util.Trim(string(body)))
|
awstesting.AssertQuery(t, `Action=OperationName&BarLocationName=val2&Foo=val1&Version=2014-01-01&yuckQueryName=val3`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1528,10 +1535,14 @@ func TestInputService3ProtocolTestNestedStructureMembersCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
ec2query.Build(req)
|
ec2query.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertQuery(t, `Action=OperationName&Struct.Scalar=foo&Version=2014-01-01`, util.Trim(string(body)))
|
awstesting.AssertQuery(t, `Action=OperationName&Struct.Scalar=foo&Version=2014-01-01`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1556,10 +1567,14 @@ func TestInputService4ProtocolTestListTypesCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
ec2query.Build(req)
|
ec2query.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=foo&ListArg.2=bar&ListArg.3=baz&Version=2014-01-01`, util.Trim(string(body)))
|
awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=foo&ListArg.2=bar&ListArg.3=baz&Version=2014-01-01`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1584,10 +1599,14 @@ func TestInputService5ProtocolTestListWithLocationNameAppliedToMemberCase1(t *te
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
ec2query.Build(req)
|
ec2query.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertQuery(t, `Action=OperationName&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c&Version=2014-01-01`, util.Trim(string(body)))
|
awstesting.AssertQuery(t, `Action=OperationName&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c&Version=2014-01-01`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1612,10 +1631,14 @@ func TestInputService6ProtocolTestListWithLocationNameAndQueryNameCase1(t *testi
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
ec2query.Build(req)
|
ec2query.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertQuery(t, `Action=OperationName&ListQueryName.1=a&ListQueryName.2=b&ListQueryName.3=c&Version=2014-01-01`, util.Trim(string(body)))
|
awstesting.AssertQuery(t, `Action=OperationName&ListQueryName.1=a&ListQueryName.2=b&ListQueryName.3=c&Version=2014-01-01`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1636,10 +1659,14 @@ func TestInputService7ProtocolTestBase64EncodedBlobsCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
ec2query.Build(req)
|
ec2query.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body)))
|
awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1660,10 +1687,14 @@ func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
ec2query.Build(req)
|
ec2query.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body)))
|
awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1684,10 +1715,14 @@ func TestInputService9ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
ec2query.Build(req)
|
ec2query.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertQuery(t, `Action=OperationName&Token=abc123&Version=2014-01-01`, util.Trim(string(body)))
|
awstesting.AssertQuery(t, `Action=OperationName&Token=abc123&Version=2014-01-01`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1706,10 +1741,14 @@ func TestInputService9ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
ec2query.Build(req)
|
ec2query.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertQuery(t, `Action=OperationName&Token=00000000-0000-4000-8000-000000000000&Version=2014-01-01`, util.Trim(string(body)))
|
awstesting.AssertQuery(t, `Action=OperationName&Token=00000000-0000-4000-8000-000000000000&Version=2014-01-01`, util.Trim(string(body)))
|
||||||
|
|
||||||
|
161
vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go
generated
vendored
161
vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go
generated
vendored
@ -24,7 +24,6 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/private/protocol/ec2query"
|
"github.com/aws/aws-sdk-go/private/protocol/ec2query"
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
||||||
"github.com/aws/aws-sdk-go/private/util"
|
"github.com/aws/aws-sdk-go/private/util"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ bytes.Buffer // always import bytes
|
var _ bytes.Buffer // always import bytes
|
||||||
@ -1417,18 +1416,38 @@ func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
ec2query.UnmarshalMeta(req)
|
ec2query.UnmarshalMeta(req)
|
||||||
ec2query.Unmarshal(req)
|
ec2query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "a", *out.Char)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, 1.3, *out.Double)
|
}
|
||||||
assert.Equal(t, false, *out.FalseBool)
|
if e, a := "a", *out.Char; e != a {
|
||||||
assert.Equal(t, 1.2, *out.Float)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, int64(200), *out.Long)
|
}
|
||||||
assert.Equal(t, int64(123), *out.Num)
|
if e, a := 1.3, *out.Double; e != a {
|
||||||
assert.Equal(t, "myname", *out.Str)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, true, *out.TrueBool)
|
}
|
||||||
|
if e, a := false, *out.FalseBool; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := 1.2, *out.Float; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(200), *out.Long; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(123), *out.Num; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "myname", *out.Str; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := true, *out.TrueBool; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1444,11 +1463,17 @@ func TestOutputService2ProtocolTestBlobCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
ec2query.UnmarshalMeta(req)
|
ec2query.UnmarshalMeta(req)
|
||||||
ec2query.Unmarshal(req)
|
ec2query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "value", string(out.Blob))
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
if e, a := "value", string(out.Blob); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1464,12 +1489,20 @@ func TestOutputService3ProtocolTestListsCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
ec2query.UnmarshalMeta(req)
|
ec2query.UnmarshalMeta(req)
|
||||||
ec2query.Unmarshal(req)
|
ec2query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "123", *out.ListMember[1])
|
}
|
||||||
|
if e, a := "abc", *out.ListMember[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "123", *out.ListMember[1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1485,12 +1518,20 @@ func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
ec2query.UnmarshalMeta(req)
|
ec2query.UnmarshalMeta(req)
|
||||||
ec2query.Unmarshal(req)
|
ec2query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "123", *out.ListMember[1])
|
}
|
||||||
|
if e, a := "abc", *out.ListMember[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "123", *out.ListMember[1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1506,12 +1547,20 @@ func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
ec2query.UnmarshalMeta(req)
|
ec2query.UnmarshalMeta(req)
|
||||||
ec2query.Unmarshal(req)
|
ec2query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "123", *out.ListMember[1])
|
}
|
||||||
|
if e, a := "abc", *out.ListMember[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "123", *out.ListMember[1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1527,12 +1576,20 @@ func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
ec2query.UnmarshalMeta(req)
|
ec2query.UnmarshalMeta(req)
|
||||||
ec2query.Unmarshal(req)
|
ec2query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "bam", *out.Map["baz"].Foo)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "bar", *out.Map["qux"].Foo)
|
}
|
||||||
|
if e, a := "bam", *out.Map["baz"].Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "bar", *out.Map["qux"].Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1548,12 +1605,20 @@ func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
ec2query.UnmarshalMeta(req)
|
ec2query.UnmarshalMeta(req)
|
||||||
ec2query.Unmarshal(req)
|
ec2query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "bam", *out.Map["baz"])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "bar", *out.Map["qux"])
|
}
|
||||||
|
if e, a := "bam", *out.Map["baz"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "bar", *out.Map["qux"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1569,12 +1634,20 @@ func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
ec2query.UnmarshalMeta(req)
|
ec2query.UnmarshalMeta(req)
|
||||||
ec2query.Unmarshal(req)
|
ec2query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "bam", *out.Map["baz"])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "bar", *out.Map["qux"])
|
}
|
||||||
|
if e, a := "bam", *out.Map["baz"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "bar", *out.Map["qux"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1590,10 +1663,16 @@ func TestOutputService9ProtocolTestEmptyStringCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
ec2query.UnmarshalMeta(req)
|
ec2query.UnmarshalMeta(req)
|
||||||
ec2query.Unmarshal(req)
|
ec2query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "", *out.Foo)
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
if e, a := "", *out.Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
209
vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go
generated
vendored
209
vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go
generated
vendored
@ -24,7 +24,6 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
|
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
||||||
"github.com/aws/aws-sdk-go/private/util"
|
"github.com/aws/aws-sdk-go/private/util"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ bytes.Buffer // always import bytes
|
var _ bytes.Buffer // always import bytes
|
||||||
@ -1654,10 +1653,14 @@ func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"Name":"myname"}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"Name":"myname"}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1665,8 +1668,12 @@ func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) {
|
|||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type"))
|
if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a {
|
||||||
assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1680,10 +1687,14 @@ func TestInputService2ProtocolTestTimestampValuesCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"TimeArg":1422172800}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"TimeArg":1422172800}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1691,8 +1702,12 @@ func TestInputService2ProtocolTestTimestampValuesCase1(t *testing.T) {
|
|||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type"))
|
if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a {
|
||||||
assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1706,10 +1721,14 @@ func TestInputService3ProtocolTestBase64EncodedBlobsCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"BlobArg":"Zm9v"}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"BlobArg":"Zm9v"}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1717,8 +1736,12 @@ func TestInputService3ProtocolTestBase64EncodedBlobsCase1(t *testing.T) {
|
|||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type"))
|
if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a {
|
||||||
assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1735,10 +1758,14 @@ func TestInputService3ProtocolTestBase64EncodedBlobsCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"BlobMap":{"key1":"Zm9v","key2":"YmFy"}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"BlobMap":{"key1":"Zm9v","key2":"YmFy"}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1746,8 +1773,12 @@ func TestInputService3ProtocolTestBase64EncodedBlobsCase2(t *testing.T) {
|
|||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type"))
|
if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a {
|
||||||
assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1764,10 +1795,14 @@ func TestInputService4ProtocolTestNestedBlobsCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"ListParam":["Zm9v","YmFy"]}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"ListParam":["Zm9v","YmFy"]}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1775,8 +1810,12 @@ func TestInputService4ProtocolTestNestedBlobsCase1(t *testing.T) {
|
|||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type"))
|
if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a {
|
||||||
assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1792,10 +1831,14 @@ func TestInputService5ProtocolTestRecursiveShapesCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"RecursiveStruct":{"NoRecurse":"foo"}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"RecursiveStruct":{"NoRecurse":"foo"}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1803,8 +1846,12 @@ func TestInputService5ProtocolTestRecursiveShapesCase1(t *testing.T) {
|
|||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type"))
|
if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a {
|
||||||
assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1822,10 +1869,14 @@ func TestInputService5ProtocolTestRecursiveShapesCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1833,8 +1884,12 @@ func TestInputService5ProtocolTestRecursiveShapesCase2(t *testing.T) {
|
|||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type"))
|
if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a {
|
||||||
assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1856,10 +1911,14 @@ func TestInputService5ProtocolTestRecursiveShapesCase3(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1867,8 +1926,12 @@ func TestInputService5ProtocolTestRecursiveShapesCase3(t *testing.T) {
|
|||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type"))
|
if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a {
|
||||||
assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1891,10 +1954,14 @@ func TestInputService5ProtocolTestRecursiveShapesCase4(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1902,8 +1969,12 @@ func TestInputService5ProtocolTestRecursiveShapesCase4(t *testing.T) {
|
|||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type"))
|
if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a {
|
||||||
assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1928,10 +1999,14 @@ func TestInputService5ProtocolTestRecursiveShapesCase5(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"RecursiveStruct":{"NoRecurse":"bar"}}]}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"RecursiveStruct":{"NoRecurse":"bar"}}]}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1939,8 +2014,12 @@ func TestInputService5ProtocolTestRecursiveShapesCase5(t *testing.T) {
|
|||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type"))
|
if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a {
|
||||||
assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1963,10 +2042,14 @@ func TestInputService5ProtocolTestRecursiveShapesCase6(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -1974,8 +2057,12 @@ func TestInputService5ProtocolTestRecursiveShapesCase6(t *testing.T) {
|
|||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type"))
|
if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a {
|
||||||
assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1989,10 +2076,14 @@ func TestInputService6ProtocolTestEmptyMapsCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"Map":{}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"Map":{}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -2000,8 +2091,12 @@ func TestInputService6ProtocolTestEmptyMapsCase1(t *testing.T) {
|
|||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type"))
|
if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a {
|
||||||
assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2015,10 +2110,14 @@ func TestInputService7ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"Token":"abc123"}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"Token":"abc123"}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -2037,10 +2136,14 @@ func TestInputService7ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
jsonrpc.Build(req)
|
jsonrpc.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"Token":"00000000-0000-4000-8000-000000000000"}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"Token":"00000000-0000-4000-8000-000000000000"}`, util.Trim(string(body)))
|
||||||
|
|
||||||
|
153
vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go
generated
vendored
153
vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go
generated
vendored
@ -24,7 +24,6 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
|
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
||||||
"github.com/aws/aws-sdk-go/private/util"
|
"github.com/aws/aws-sdk-go/private/util"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ bytes.Buffer // always import bytes
|
var _ bytes.Buffer // always import bytes
|
||||||
@ -1109,18 +1108,38 @@ func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
jsonrpc.UnmarshalMeta(req)
|
jsonrpc.UnmarshalMeta(req)
|
||||||
jsonrpc.Unmarshal(req)
|
jsonrpc.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "a", *out.Char)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, 1.3, *out.Double)
|
}
|
||||||
assert.Equal(t, false, *out.FalseBool)
|
if e, a := "a", *out.Char; e != a {
|
||||||
assert.Equal(t, 1.2, *out.Float)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, int64(200), *out.Long)
|
}
|
||||||
assert.Equal(t, int64(123), *out.Num)
|
if e, a := 1.3, *out.Double; e != a {
|
||||||
assert.Equal(t, "myname", *out.Str)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, true, *out.TrueBool)
|
}
|
||||||
|
if e, a := false, *out.FalseBool; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := 1.2, *out.Float; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(200), *out.Long; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(123), *out.Num; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "myname", *out.Str; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := true, *out.TrueBool; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1136,12 +1155,20 @@ func TestOutputService2ProtocolTestBlobMembersCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
jsonrpc.UnmarshalMeta(req)
|
jsonrpc.UnmarshalMeta(req)
|
||||||
jsonrpc.Unmarshal(req)
|
jsonrpc.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "hi!", string(out.BlobMember))
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "there!", string(out.StructMember.Foo))
|
}
|
||||||
|
if e, a := "hi!", string(out.BlobMember); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "there!", string(out.StructMember.Foo); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1157,12 +1184,20 @@ func TestOutputService3ProtocolTestTimestampMembersCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
jsonrpc.UnmarshalMeta(req)
|
jsonrpc.UnmarshalMeta(req)
|
||||||
jsonrpc.Unmarshal(req)
|
jsonrpc.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String())
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String())
|
}
|
||||||
|
if e, a := time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1178,12 +1213,20 @@ func TestOutputService4ProtocolTestListsCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
jsonrpc.UnmarshalMeta(req)
|
jsonrpc.UnmarshalMeta(req)
|
||||||
jsonrpc.Unmarshal(req)
|
jsonrpc.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "a", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "b", *out.ListMember[1])
|
}
|
||||||
|
if e, a := "a", *out.ListMember[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "b", *out.ListMember[1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1199,16 +1242,32 @@ func TestOutputService4ProtocolTestListsCase2(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
jsonrpc.UnmarshalMeta(req)
|
jsonrpc.UnmarshalMeta(req)
|
||||||
jsonrpc.Unmarshal(req)
|
jsonrpc.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "a", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Nil(t, out.ListMember[1])
|
}
|
||||||
assert.Nil(t, out.ListMemberMap[1])
|
if e, a := "a", *out.ListMember[0]; e != a {
|
||||||
assert.Nil(t, out.ListMemberMap[2])
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Nil(t, out.ListMemberStruct[1])
|
}
|
||||||
assert.Nil(t, out.ListMemberStruct[2])
|
if e := out.ListMember[1]; e != nil {
|
||||||
|
t.Errorf("expect nil, got %v", e)
|
||||||
|
}
|
||||||
|
if e := out.ListMemberMap[1]; e != nil {
|
||||||
|
t.Errorf("expect nil, got %v", e)
|
||||||
|
}
|
||||||
|
if e := out.ListMemberMap[2]; e != nil {
|
||||||
|
t.Errorf("expect nil, got %v", e)
|
||||||
|
}
|
||||||
|
if e := out.ListMemberStruct[1]; e != nil {
|
||||||
|
t.Errorf("expect nil, got %v", e)
|
||||||
|
}
|
||||||
|
if e := out.ListMemberStruct[2]; e != nil {
|
||||||
|
t.Errorf("expect nil, got %v", e)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1224,14 +1283,26 @@ func TestOutputService5ProtocolTestMapsCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
jsonrpc.UnmarshalMeta(req)
|
jsonrpc.UnmarshalMeta(req)
|
||||||
jsonrpc.Unmarshal(req)
|
jsonrpc.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, int64(1), *out.MapMember["a"][0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, int64(2), *out.MapMember["a"][1])
|
}
|
||||||
assert.Equal(t, int64(3), *out.MapMember["b"][0])
|
if e, a := int64(1), *out.MapMember["a"][0]; e != a {
|
||||||
assert.Equal(t, int64(4), *out.MapMember["b"][1])
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(2), *out.MapMember["a"][1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(3), *out.MapMember["b"][0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(4), *out.MapMember["b"][1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1247,9 +1318,13 @@ func TestOutputService6ProtocolTestIgnoresExtraDataCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
jsonrpc.UnmarshalMeta(req)
|
jsonrpc.UnmarshalMeta(req)
|
||||||
jsonrpc.Unmarshal(req)
|
jsonrpc.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
1332
vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go
generated
vendored
1332
vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go
generated
vendored
File diff suppressed because it is too large
Load Diff
4
vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
generated
vendored
4
vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
generated
vendored
@ -121,6 +121,10 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if _, ok := value.Interface().([]byte); ok {
|
||||||
|
return q.parseScalar(v, value, prefix, tag)
|
||||||
|
}
|
||||||
|
|
||||||
// check for unflattened list member
|
// check for unflattened list member
|
||||||
if !q.isEC2 && tag.Get("flattened") == "" {
|
if !q.isEC2 && tag.Get("flattened") == "" {
|
||||||
if listName := tag.Get("locationNameList"); listName == "" {
|
if listName := tag.Get("locationNameList"); listName == "" {
|
||||||
|
281
vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go
generated
vendored
281
vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go
generated
vendored
@ -24,7 +24,6 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/private/protocol/query"
|
"github.com/aws/aws-sdk-go/private/protocol/query"
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
||||||
"github.com/aws/aws-sdk-go/private/util"
|
"github.com/aws/aws-sdk-go/private/util"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ bytes.Buffer // always import bytes
|
var _ bytes.Buffer // always import bytes
|
||||||
@ -2347,19 +2346,41 @@ func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "a", *out.Char)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, 1.3, *out.Double)
|
}
|
||||||
assert.Equal(t, false, *out.FalseBool)
|
if e, a := "a", *out.Char; e != a {
|
||||||
assert.Equal(t, 1.2, *out.Float)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, int64(200), *out.Long)
|
}
|
||||||
assert.Equal(t, int64(123), *out.Num)
|
if e, a := 1.3, *out.Double; e != a {
|
||||||
assert.Equal(t, "myname", *out.Str)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
|
}
|
||||||
assert.Equal(t, true, *out.TrueBool)
|
if e, a := false, *out.FalseBool; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := 1.2, *out.Float; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(200), *out.Long; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(123), *out.Num; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "myname", *out.Str; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := true, *out.TrueBool; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2375,11 +2396,17 @@ func TestOutputService2ProtocolTestNotAllMembersInResponseCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "myname", *out.Str)
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
if e, a := "myname", *out.Str; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2395,11 +2422,17 @@ func TestOutputService3ProtocolTestBlobCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "value", string(out.Blob))
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
if e, a := "value", string(out.Blob); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2415,12 +2448,20 @@ func TestOutputService4ProtocolTestListsCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "123", *out.ListMember[1])
|
}
|
||||||
|
if e, a := "abc", *out.ListMember[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "123", *out.ListMember[1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2436,12 +2477,20 @@ func TestOutputService5ProtocolTestListWithCustomMemberNameCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "123", *out.ListMember[1])
|
}
|
||||||
|
if e, a := "abc", *out.ListMember[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "123", *out.ListMember[1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2457,12 +2506,20 @@ func TestOutputService6ProtocolTestFlattenedListCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "123", *out.ListMember[1])
|
}
|
||||||
|
if e, a := "abc", *out.ListMember[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "123", *out.ListMember[1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2478,11 +2535,17 @@ func TestOutputService7ProtocolTestFlattenedSingleElementListCase1(t *testing.T)
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
if e, a := "abc", *out.ListMember[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2498,16 +2561,32 @@ func TestOutputService8ProtocolTestListOfStructuresCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "firstbar", *out.List[0].Bar)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "firstbaz", *out.List[0].Baz)
|
}
|
||||||
assert.Equal(t, "firstfoo", *out.List[0].Foo)
|
if e, a := "firstbar", *out.List[0].Bar; e != a {
|
||||||
assert.Equal(t, "secondbar", *out.List[1].Bar)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, "secondbaz", *out.List[1].Baz)
|
}
|
||||||
assert.Equal(t, "secondfoo", *out.List[1].Foo)
|
if e, a := "firstbaz", *out.List[0].Baz; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "firstfoo", *out.List[0].Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "secondbar", *out.List[1].Bar; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "secondbaz", *out.List[1].Baz; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "secondfoo", *out.List[1].Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2523,16 +2602,32 @@ func TestOutputService9ProtocolTestFlattenedListOfStructuresCase1(t *testing.T)
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "firstbar", *out.List[0].Bar)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "firstbaz", *out.List[0].Baz)
|
}
|
||||||
assert.Equal(t, "firstfoo", *out.List[0].Foo)
|
if e, a := "firstbar", *out.List[0].Bar; e != a {
|
||||||
assert.Equal(t, "secondbar", *out.List[1].Bar)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, "secondbaz", *out.List[1].Baz)
|
}
|
||||||
assert.Equal(t, "secondfoo", *out.List[1].Foo)
|
if e, a := "firstbaz", *out.List[0].Baz; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "firstfoo", *out.List[0].Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "secondbar", *out.List[1].Bar; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "secondbaz", *out.List[1].Baz; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "secondfoo", *out.List[1].Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2548,12 +2643,20 @@ func TestOutputService10ProtocolTestFlattenedListWithLocationNameCase1(t *testin
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "a", *out.List[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "b", *out.List[1])
|
}
|
||||||
|
if e, a := "a", *out.List[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "b", *out.List[1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2569,12 +2672,20 @@ func TestOutputService11ProtocolTestNormalMapCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "bam", *out.Map["baz"].Foo)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "bar", *out.Map["qux"].Foo)
|
}
|
||||||
|
if e, a := "bam", *out.Map["baz"].Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "bar", *out.Map["qux"].Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2590,12 +2701,20 @@ func TestOutputService12ProtocolTestFlattenedMapCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "bam", *out.Map["baz"])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "bar", *out.Map["qux"])
|
}
|
||||||
|
if e, a := "bam", *out.Map["baz"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "bar", *out.Map["qux"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2611,11 +2730,17 @@ func TestOutputService13ProtocolTestFlattenedMapInShapeDefinitionCase1(t *testin
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "bar", *out.Map["qux"])
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
if e, a := "bar", *out.Map["qux"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2631,12 +2756,20 @@ func TestOutputService14ProtocolTestNamedMapCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "bam", *out.Map["baz"])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "bar", *out.Map["qux"])
|
}
|
||||||
|
if e, a := "bam", *out.Map["baz"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "bar", *out.Map["qux"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2652,10 +2785,16 @@ func TestOutputService15ProtocolTestEmptyStringCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
query.UnmarshalMeta(req)
|
query.UnmarshalMeta(req)
|
||||||
query.Unmarshal(req)
|
query.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "", *out.Foo)
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
if e, a := "", *out.Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
452
vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go
generated
vendored
452
vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go
generated
vendored
@ -3,232 +3,131 @@
|
|||||||
package restjson_test
|
package restjson_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"net/http"
|
||||||
"encoding/json"
|
"net/http/httptest"
|
||||||
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/endpoints"
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
"github.com/aws/aws-sdk-go/awstesting"
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/restjson"
|
"github.com/aws/aws-sdk-go/private/protocol/restjson"
|
||||||
"github.com/aws/aws-sdk-go/service/elastictranscoder"
|
"github.com/aws/aws-sdk-go/service/elastictranscoder"
|
||||||
)
|
)
|
||||||
|
|
||||||
func BenchmarkRESTJSONBuild_Complex_elastictranscoderCreateJobInput(b *testing.B) {
|
var (
|
||||||
svc := awstesting.NewClient()
|
elastictranscoderSvc *elastictranscoder.ElasticTranscoder
|
||||||
svc.ServiceName = "elastictranscoder"
|
)
|
||||||
svc.APIVersion = "2012-09-25"
|
|
||||||
|
func TestMain(m *testing.M) {
|
||||||
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}))
|
||||||
|
|
||||||
|
sess := session.Must(session.NewSession(&aws.Config{
|
||||||
|
Credentials: credentials.NewStaticCredentials("Key", "Secret", "Token"),
|
||||||
|
Endpoint: aws.String(server.URL),
|
||||||
|
S3ForcePathStyle: aws.Bool(true),
|
||||||
|
DisableSSL: aws.Bool(true),
|
||||||
|
Region: aws.String(endpoints.UsWest2RegionID),
|
||||||
|
}))
|
||||||
|
elastictranscoderSvc = elastictranscoder.New(sess)
|
||||||
|
|
||||||
|
c := m.Run()
|
||||||
|
server.Close()
|
||||||
|
os.Exit(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkRESTJSONBuild_Complex_ETCCreateJob(b *testing.B) {
|
||||||
|
params := elastictranscoderCreateJobInput()
|
||||||
|
|
||||||
|
benchRESTJSONBuild(b, func() *request.Request {
|
||||||
|
req, _ := elastictranscoderSvc.CreateJobRequest(params)
|
||||||
|
return req
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkRESTJSONBuild_Simple_ETCListJobsByPipeline(b *testing.B) {
|
||||||
|
params := elastictranscoderListJobsByPipeline()
|
||||||
|
|
||||||
|
benchRESTJSONBuild(b, func() *request.Request {
|
||||||
|
req, _ := elastictranscoderSvc.ListJobsByPipelineRequest(params)
|
||||||
|
return req
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkRESTJSONRequest_Complex_CFCreateJob(b *testing.B) {
|
||||||
|
benchRESTJSONRequest(b, func() *request.Request {
|
||||||
|
req, _ := elastictranscoderSvc.CreateJobRequest(elastictranscoderCreateJobInput())
|
||||||
|
return req
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkRESTJSONRequest_Simple_ETCListJobsByPipeline(b *testing.B) {
|
||||||
|
benchRESTJSONRequest(b, func() *request.Request {
|
||||||
|
req, _ := elastictranscoderSvc.ListJobsByPipelineRequest(elastictranscoderListJobsByPipeline())
|
||||||
|
return req
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func benchRESTJSONBuild(b *testing.B, reqFn func() *request.Request) {
|
||||||
|
b.ResetTimer()
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
r := svc.NewRequest(&request.Operation{Name: "CreateJobInput"}, restjsonBuildParms, nil)
|
req := reqFn()
|
||||||
restjson.Build(r)
|
restjson.Build(req)
|
||||||
if r.Error != nil {
|
if req.Error != nil {
|
||||||
b.Fatal("Unexpected error", r.Error)
|
b.Fatal("Unexpected error", req.Error)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkRESTBuild_Complex_elastictranscoderCreateJobInput(b *testing.B) {
|
func benchRESTJSONRequest(b *testing.B, reqFn func() *request.Request) {
|
||||||
svc := awstesting.NewClient()
|
b.ResetTimer()
|
||||||
svc.ServiceName = "elastictranscoder"
|
|
||||||
svc.APIVersion = "2012-09-25"
|
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
r := svc.NewRequest(&request.Operation{Name: "CreateJobInput"}, restjsonBuildParms, nil)
|
err := reqFn().Send()
|
||||||
rest.Build(r)
|
if err != nil {
|
||||||
if r.Error != nil {
|
|
||||||
b.Fatal("Unexpected error", r.Error)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkEncodingJSONMarshal_Complex_elastictranscoderCreateJobInput(b *testing.B) {
|
|
||||||
params := restjsonBuildParms
|
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
buf := &bytes.Buffer{}
|
|
||||||
encoder := json.NewEncoder(buf)
|
|
||||||
if err := encoder.Encode(params); err != nil {
|
|
||||||
b.Fatal("Unexpected error", err)
|
b.Fatal("Unexpected error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkRESTJSONBuild_Simple_elastictranscoderListJobsByPipeline(b *testing.B) {
|
func elastictranscoderListJobsByPipeline() *elastictranscoder.ListJobsByPipelineInput {
|
||||||
svc := awstesting.NewClient()
|
return &elastictranscoder.ListJobsByPipelineInput{
|
||||||
svc.ServiceName = "elastictranscoder"
|
|
||||||
svc.APIVersion = "2012-09-25"
|
|
||||||
|
|
||||||
params := &elastictranscoder.ListJobsByPipelineInput{
|
|
||||||
PipelineId: aws.String("Id"), // Required
|
PipelineId: aws.String("Id"), // Required
|
||||||
Ascending: aws.String("Ascending"),
|
Ascending: aws.String("Ascending"),
|
||||||
PageToken: aws.String("Id"),
|
PageToken: aws.String("Id"),
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
r := svc.NewRequest(&request.Operation{Name: "ListJobsByPipeline"}, params, nil)
|
|
||||||
restjson.Build(r)
|
|
||||||
if r.Error != nil {
|
|
||||||
b.Fatal("Unexpected error", r.Error)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkRESTBuild_Simple_elastictranscoderListJobsByPipeline(b *testing.B) {
|
func elastictranscoderCreateJobInput() *elastictranscoder.CreateJobInput {
|
||||||
svc := awstesting.NewClient()
|
return &elastictranscoder.CreateJobInput{
|
||||||
svc.ServiceName = "elastictranscoder"
|
Input: &elastictranscoder.JobInput{ // Required
|
||||||
svc.APIVersion = "2012-09-25"
|
AspectRatio: aws.String("AspectRatio"),
|
||||||
|
Container: aws.String("JobContainer"),
|
||||||
params := &elastictranscoder.ListJobsByPipelineInput{
|
DetectedProperties: &elastictranscoder.DetectedProperties{
|
||||||
|
DurationMillis: aws.Int64(1),
|
||||||
|
FileSize: aws.Int64(1),
|
||||||
|
FrameRate: aws.String("FloatString"),
|
||||||
|
Height: aws.Int64(1),
|
||||||
|
Width: aws.Int64(1),
|
||||||
|
},
|
||||||
|
Encryption: &elastictranscoder.Encryption{
|
||||||
|
InitializationVector: aws.String("ZeroTo255String"),
|
||||||
|
Key: aws.String("Base64EncodedString"),
|
||||||
|
KeyMd5: aws.String("Base64EncodedString"),
|
||||||
|
Mode: aws.String("EncryptionMode"),
|
||||||
|
},
|
||||||
|
FrameRate: aws.String("FrameRate"),
|
||||||
|
Interlaced: aws.String("Interlaced"),
|
||||||
|
Key: aws.String("Key"),
|
||||||
|
Resolution: aws.String("Resolution"),
|
||||||
|
},
|
||||||
PipelineId: aws.String("Id"), // Required
|
PipelineId: aws.String("Id"), // Required
|
||||||
Ascending: aws.String("Ascending"),
|
Output: &elastictranscoder.CreateJobOutput{
|
||||||
PageToken: aws.String("Id"),
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
r := svc.NewRequest(&request.Operation{Name: "ListJobsByPipeline"}, params, nil)
|
|
||||||
rest.Build(r)
|
|
||||||
if r.Error != nil {
|
|
||||||
b.Fatal("Unexpected error", r.Error)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkEncodingJSONMarshal_Simple_elastictranscoderListJobsByPipeline(b *testing.B) {
|
|
||||||
params := &elastictranscoder.ListJobsByPipelineInput{
|
|
||||||
PipelineId: aws.String("Id"), // Required
|
|
||||||
Ascending: aws.String("Ascending"),
|
|
||||||
PageToken: aws.String("Id"),
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
buf := &bytes.Buffer{}
|
|
||||||
encoder := json.NewEncoder(buf)
|
|
||||||
if err := encoder.Encode(params); err != nil {
|
|
||||||
b.Fatal("Unexpected error", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var restjsonBuildParms = &elastictranscoder.CreateJobInput{
|
|
||||||
Input: &elastictranscoder.JobInput{ // Required
|
|
||||||
AspectRatio: aws.String("AspectRatio"),
|
|
||||||
Container: aws.String("JobContainer"),
|
|
||||||
DetectedProperties: &elastictranscoder.DetectedProperties{
|
|
||||||
DurationMillis: aws.Int64(1),
|
|
||||||
FileSize: aws.Int64(1),
|
|
||||||
FrameRate: aws.String("FloatString"),
|
|
||||||
Height: aws.Int64(1),
|
|
||||||
Width: aws.Int64(1),
|
|
||||||
},
|
|
||||||
Encryption: &elastictranscoder.Encryption{
|
|
||||||
InitializationVector: aws.String("ZeroTo255String"),
|
|
||||||
Key: aws.String("Base64EncodedString"),
|
|
||||||
KeyMd5: aws.String("Base64EncodedString"),
|
|
||||||
Mode: aws.String("EncryptionMode"),
|
|
||||||
},
|
|
||||||
FrameRate: aws.String("FrameRate"),
|
|
||||||
Interlaced: aws.String("Interlaced"),
|
|
||||||
Key: aws.String("Key"),
|
|
||||||
Resolution: aws.String("Resolution"),
|
|
||||||
},
|
|
||||||
PipelineId: aws.String("Id"), // Required
|
|
||||||
Output: &elastictranscoder.CreateJobOutput{
|
|
||||||
AlbumArt: &elastictranscoder.JobAlbumArt{
|
|
||||||
Artwork: []*elastictranscoder.Artwork{
|
|
||||||
{ // Required
|
|
||||||
AlbumArtFormat: aws.String("JpgOrPng"),
|
|
||||||
Encryption: &elastictranscoder.Encryption{
|
|
||||||
InitializationVector: aws.String("ZeroTo255String"),
|
|
||||||
Key: aws.String("Base64EncodedString"),
|
|
||||||
KeyMd5: aws.String("Base64EncodedString"),
|
|
||||||
Mode: aws.String("EncryptionMode"),
|
|
||||||
},
|
|
||||||
InputKey: aws.String("WatermarkKey"),
|
|
||||||
MaxHeight: aws.String("DigitsOrAuto"),
|
|
||||||
MaxWidth: aws.String("DigitsOrAuto"),
|
|
||||||
PaddingPolicy: aws.String("PaddingPolicy"),
|
|
||||||
SizingPolicy: aws.String("SizingPolicy"),
|
|
||||||
},
|
|
||||||
// More values...
|
|
||||||
},
|
|
||||||
MergePolicy: aws.String("MergePolicy"),
|
|
||||||
},
|
|
||||||
Captions: &elastictranscoder.Captions{
|
|
||||||
CaptionFormats: []*elastictranscoder.CaptionFormat{
|
|
||||||
{ // Required
|
|
||||||
Encryption: &elastictranscoder.Encryption{
|
|
||||||
InitializationVector: aws.String("ZeroTo255String"),
|
|
||||||
Key: aws.String("Base64EncodedString"),
|
|
||||||
KeyMd5: aws.String("Base64EncodedString"),
|
|
||||||
Mode: aws.String("EncryptionMode"),
|
|
||||||
},
|
|
||||||
Format: aws.String("CaptionFormatFormat"),
|
|
||||||
Pattern: aws.String("CaptionFormatPattern"),
|
|
||||||
},
|
|
||||||
// More values...
|
|
||||||
},
|
|
||||||
CaptionSources: []*elastictranscoder.CaptionSource{
|
|
||||||
{ // Required
|
|
||||||
Encryption: &elastictranscoder.Encryption{
|
|
||||||
InitializationVector: aws.String("ZeroTo255String"),
|
|
||||||
Key: aws.String("Base64EncodedString"),
|
|
||||||
KeyMd5: aws.String("Base64EncodedString"),
|
|
||||||
Mode: aws.String("EncryptionMode"),
|
|
||||||
},
|
|
||||||
Key: aws.String("Key"),
|
|
||||||
Label: aws.String("Name"),
|
|
||||||
Language: aws.String("Key"),
|
|
||||||
TimeOffset: aws.String("TimeOffset"),
|
|
||||||
},
|
|
||||||
// More values...
|
|
||||||
},
|
|
||||||
MergePolicy: aws.String("CaptionMergePolicy"),
|
|
||||||
},
|
|
||||||
Composition: []*elastictranscoder.Clip{
|
|
||||||
{ // Required
|
|
||||||
TimeSpan: &elastictranscoder.TimeSpan{
|
|
||||||
Duration: aws.String("Time"),
|
|
||||||
StartTime: aws.String("Time"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
// More values...
|
|
||||||
},
|
|
||||||
Encryption: &elastictranscoder.Encryption{
|
|
||||||
InitializationVector: aws.String("ZeroTo255String"),
|
|
||||||
Key: aws.String("Base64EncodedString"),
|
|
||||||
KeyMd5: aws.String("Base64EncodedString"),
|
|
||||||
Mode: aws.String("EncryptionMode"),
|
|
||||||
},
|
|
||||||
Key: aws.String("Key"),
|
|
||||||
PresetId: aws.String("Id"),
|
|
||||||
Rotate: aws.String("Rotate"),
|
|
||||||
SegmentDuration: aws.String("FloatString"),
|
|
||||||
ThumbnailEncryption: &elastictranscoder.Encryption{
|
|
||||||
InitializationVector: aws.String("ZeroTo255String"),
|
|
||||||
Key: aws.String("Base64EncodedString"),
|
|
||||||
KeyMd5: aws.String("Base64EncodedString"),
|
|
||||||
Mode: aws.String("EncryptionMode"),
|
|
||||||
},
|
|
||||||
ThumbnailPattern: aws.String("ThumbnailPattern"),
|
|
||||||
Watermarks: []*elastictranscoder.JobWatermark{
|
|
||||||
{ // Required
|
|
||||||
Encryption: &elastictranscoder.Encryption{
|
|
||||||
InitializationVector: aws.String("ZeroTo255String"),
|
|
||||||
Key: aws.String("Base64EncodedString"),
|
|
||||||
KeyMd5: aws.String("Base64EncodedString"),
|
|
||||||
Mode: aws.String("EncryptionMode"),
|
|
||||||
},
|
|
||||||
InputKey: aws.String("WatermarkKey"),
|
|
||||||
PresetWatermarkId: aws.String("PresetWatermarkId"),
|
|
||||||
},
|
|
||||||
// More values...
|
|
||||||
},
|
|
||||||
},
|
|
||||||
OutputKeyPrefix: aws.String("Key"),
|
|
||||||
Outputs: []*elastictranscoder.CreateJobOutput{
|
|
||||||
{ // Required
|
|
||||||
AlbumArt: &elastictranscoder.JobAlbumArt{
|
AlbumArt: &elastictranscoder.JobAlbumArt{
|
||||||
Artwork: []*elastictranscoder.Artwork{
|
Artwork: []*elastictranscoder.Artwork{
|
||||||
{ // Required
|
{ // Required
|
||||||
@ -320,37 +219,132 @@ var restjsonBuildParms = &elastictranscoder.CreateJobInput{
|
|||||||
// More values...
|
// More values...
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
// More values...
|
OutputKeyPrefix: aws.String("Key"),
|
||||||
},
|
Outputs: []*elastictranscoder.CreateJobOutput{
|
||||||
Playlists: []*elastictranscoder.CreateJobPlaylist{
|
{ // Required
|
||||||
{ // Required
|
AlbumArt: &elastictranscoder.JobAlbumArt{
|
||||||
Format: aws.String("PlaylistFormat"),
|
Artwork: []*elastictranscoder.Artwork{
|
||||||
HlsContentProtection: &elastictranscoder.HlsContentProtection{
|
{ // Required
|
||||||
InitializationVector: aws.String("ZeroTo255String"),
|
AlbumArtFormat: aws.String("JpgOrPng"),
|
||||||
Key: aws.String("Base64EncodedString"),
|
Encryption: &elastictranscoder.Encryption{
|
||||||
KeyMd5: aws.String("Base64EncodedString"),
|
InitializationVector: aws.String("ZeroTo255String"),
|
||||||
KeyStoragePolicy: aws.String("KeyStoragePolicy"),
|
Key: aws.String("Base64EncodedString"),
|
||||||
LicenseAcquisitionUrl: aws.String("ZeroTo512String"),
|
KeyMd5: aws.String("Base64EncodedString"),
|
||||||
Method: aws.String("HlsContentProtectionMethod"),
|
Mode: aws.String("EncryptionMode"),
|
||||||
},
|
},
|
||||||
Name: aws.String("Filename"),
|
InputKey: aws.String("WatermarkKey"),
|
||||||
OutputKeys: []*string{
|
MaxHeight: aws.String("DigitsOrAuto"),
|
||||||
aws.String("Key"), // Required
|
MaxWidth: aws.String("DigitsOrAuto"),
|
||||||
// More values...
|
PaddingPolicy: aws.String("PaddingPolicy"),
|
||||||
},
|
SizingPolicy: aws.String("SizingPolicy"),
|
||||||
PlayReadyDrm: &elastictranscoder.PlayReadyDrm{
|
},
|
||||||
Format: aws.String("PlayReadyDrmFormatString"),
|
// More values...
|
||||||
InitializationVector: aws.String("ZeroTo255String"),
|
},
|
||||||
Key: aws.String("NonEmptyBase64EncodedString"),
|
MergePolicy: aws.String("MergePolicy"),
|
||||||
KeyId: aws.String("KeyIdGuid"),
|
},
|
||||||
KeyMd5: aws.String("NonEmptyBase64EncodedString"),
|
Captions: &elastictranscoder.Captions{
|
||||||
LicenseAcquisitionUrl: aws.String("OneTo512String"),
|
CaptionFormats: []*elastictranscoder.CaptionFormat{
|
||||||
|
{ // Required
|
||||||
|
Encryption: &elastictranscoder.Encryption{
|
||||||
|
InitializationVector: aws.String("ZeroTo255String"),
|
||||||
|
Key: aws.String("Base64EncodedString"),
|
||||||
|
KeyMd5: aws.String("Base64EncodedString"),
|
||||||
|
Mode: aws.String("EncryptionMode"),
|
||||||
|
},
|
||||||
|
Format: aws.String("CaptionFormatFormat"),
|
||||||
|
Pattern: aws.String("CaptionFormatPattern"),
|
||||||
|
},
|
||||||
|
// More values...
|
||||||
|
},
|
||||||
|
CaptionSources: []*elastictranscoder.CaptionSource{
|
||||||
|
{ // Required
|
||||||
|
Encryption: &elastictranscoder.Encryption{
|
||||||
|
InitializationVector: aws.String("ZeroTo255String"),
|
||||||
|
Key: aws.String("Base64EncodedString"),
|
||||||
|
KeyMd5: aws.String("Base64EncodedString"),
|
||||||
|
Mode: aws.String("EncryptionMode"),
|
||||||
|
},
|
||||||
|
Key: aws.String("Key"),
|
||||||
|
Label: aws.String("Name"),
|
||||||
|
Language: aws.String("Key"),
|
||||||
|
TimeOffset: aws.String("TimeOffset"),
|
||||||
|
},
|
||||||
|
// More values...
|
||||||
|
},
|
||||||
|
MergePolicy: aws.String("CaptionMergePolicy"),
|
||||||
|
},
|
||||||
|
Composition: []*elastictranscoder.Clip{
|
||||||
|
{ // Required
|
||||||
|
TimeSpan: &elastictranscoder.TimeSpan{
|
||||||
|
Duration: aws.String("Time"),
|
||||||
|
StartTime: aws.String("Time"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// More values...
|
||||||
|
},
|
||||||
|
Encryption: &elastictranscoder.Encryption{
|
||||||
|
InitializationVector: aws.String("ZeroTo255String"),
|
||||||
|
Key: aws.String("Base64EncodedString"),
|
||||||
|
KeyMd5: aws.String("Base64EncodedString"),
|
||||||
|
Mode: aws.String("EncryptionMode"),
|
||||||
|
},
|
||||||
|
Key: aws.String("Key"),
|
||||||
|
PresetId: aws.String("Id"),
|
||||||
|
Rotate: aws.String("Rotate"),
|
||||||
|
SegmentDuration: aws.String("FloatString"),
|
||||||
|
ThumbnailEncryption: &elastictranscoder.Encryption{
|
||||||
|
InitializationVector: aws.String("ZeroTo255String"),
|
||||||
|
Key: aws.String("Base64EncodedString"),
|
||||||
|
KeyMd5: aws.String("Base64EncodedString"),
|
||||||
|
Mode: aws.String("EncryptionMode"),
|
||||||
|
},
|
||||||
|
ThumbnailPattern: aws.String("ThumbnailPattern"),
|
||||||
|
Watermarks: []*elastictranscoder.JobWatermark{
|
||||||
|
{ // Required
|
||||||
|
Encryption: &elastictranscoder.Encryption{
|
||||||
|
InitializationVector: aws.String("ZeroTo255String"),
|
||||||
|
Key: aws.String("Base64EncodedString"),
|
||||||
|
KeyMd5: aws.String("Base64EncodedString"),
|
||||||
|
Mode: aws.String("EncryptionMode"),
|
||||||
|
},
|
||||||
|
InputKey: aws.String("WatermarkKey"),
|
||||||
|
PresetWatermarkId: aws.String("PresetWatermarkId"),
|
||||||
|
},
|
||||||
|
// More values...
|
||||||
|
},
|
||||||
},
|
},
|
||||||
|
// More values...
|
||||||
},
|
},
|
||||||
// More values...
|
Playlists: []*elastictranscoder.CreateJobPlaylist{
|
||||||
},
|
{ // Required
|
||||||
UserMetadata: map[string]*string{
|
Format: aws.String("PlaylistFormat"),
|
||||||
"Key": aws.String("String"), // Required
|
HlsContentProtection: &elastictranscoder.HlsContentProtection{
|
||||||
// More values...
|
InitializationVector: aws.String("ZeroTo255String"),
|
||||||
},
|
Key: aws.String("Base64EncodedString"),
|
||||||
|
KeyMd5: aws.String("Base64EncodedString"),
|
||||||
|
KeyStoragePolicy: aws.String("KeyStoragePolicy"),
|
||||||
|
LicenseAcquisitionUrl: aws.String("ZeroTo512String"),
|
||||||
|
Method: aws.String("HlsContentProtectionMethod"),
|
||||||
|
},
|
||||||
|
Name: aws.String("Filename"),
|
||||||
|
OutputKeys: []*string{
|
||||||
|
aws.String("Key"), // Required
|
||||||
|
// More values...
|
||||||
|
},
|
||||||
|
PlayReadyDrm: &elastictranscoder.PlayReadyDrm{
|
||||||
|
Format: aws.String("PlayReadyDrmFormatString"),
|
||||||
|
InitializationVector: aws.String("ZeroTo255String"),
|
||||||
|
Key: aws.String("NonEmptyBase64EncodedString"),
|
||||||
|
KeyId: aws.String("KeyIdGuid"),
|
||||||
|
KeyMd5: aws.String("NonEmptyBase64EncodedString"),
|
||||||
|
LicenseAcquisitionUrl: aws.String("OneTo512String"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// More values...
|
||||||
|
},
|
||||||
|
UserMetadata: map[string]*string{
|
||||||
|
"Key": aws.String("String"), // Required
|
||||||
|
// More values...
|
||||||
|
},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
229
vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go
generated
vendored
229
vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go
generated
vendored
@ -24,7 +24,6 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/private/protocol/restjson"
|
"github.com/aws/aws-sdk-go/private/protocol/restjson"
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
||||||
"github.com/aws/aws-sdk-go/private/util"
|
"github.com/aws/aws-sdk-go/private/util"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ bytes.Buffer // always import bytes
|
var _ bytes.Buffer // always import bytes
|
||||||
@ -4251,7 +4250,9 @@ func TestInputService1ProtocolTestNoParametersCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/2014-01-01/jobs", r.URL.String())
|
awstesting.AssertURL(t, "https://test/2014-01-01/jobs", r.URL.String())
|
||||||
@ -4270,7 +4271,9 @@ func TestInputService2ProtocolTestURIParameterOnlyWithNoLocationNameCase1(t *tes
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo", r.URL.String())
|
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo", r.URL.String())
|
||||||
@ -4289,7 +4292,9 @@ func TestInputService3ProtocolTestURIParameterOnlyWithLocationNameCase1(t *testi
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/bar", r.URL.String())
|
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/bar", r.URL.String())
|
||||||
@ -4311,7 +4316,9 @@ func TestInputService4ProtocolTestQuerystringListOfStringsCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/path?item=value1&item=value2", r.URL.String())
|
awstesting.AssertURL(t, "https://test/path?item=value1&item=value2", r.URL.String())
|
||||||
@ -4334,7 +4341,9 @@ func TestInputService5ProtocolTestStringToStringMapsInQuerystringCase1(t *testin
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", r.URL.String())
|
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", r.URL.String())
|
||||||
@ -4363,7 +4372,9 @@ func TestInputService6ProtocolTestStringToStringListMapsInQuerystringCase1(t *te
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String())
|
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String())
|
||||||
@ -4382,7 +4393,9 @@ func TestInputService7ProtocolTestBooleanInQuerystringCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/path?bool-query=true", r.URL.String())
|
awstesting.AssertURL(t, "https://test/path?bool-query=true", r.URL.String())
|
||||||
@ -4401,7 +4414,9 @@ func TestInputService7ProtocolTestBooleanInQuerystringCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/path?bool-query=false", r.URL.String())
|
awstesting.AssertURL(t, "https://test/path?bool-query=false", r.URL.String())
|
||||||
@ -4422,7 +4437,9 @@ func TestInputService8ProtocolTestURIParameterAndQuerystringParamsCase1(t *testi
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String())
|
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String())
|
||||||
@ -4447,10 +4464,14 @@ func TestInputService9ProtocolTestURIParameterQuerystringParamsAndJSONBodyCase1(
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4478,10 +4499,14 @@ func TestInputService10ProtocolTestURIParameterQuerystringParamsHeadersAndJSONBo
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4489,7 +4514,9 @@ func TestInputService10ProtocolTestURIParameterQuerystringParamsHeadersAndJSONBo
|
|||||||
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String())
|
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "12345", r.Header.Get("x-amz-checksum"))
|
if e, a := "12345", r.Header.Get("x-amz-checksum"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4505,18 +4532,26 @@ func TestInputService11ProtocolTestStreamingPayloadCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
assert.Equal(t, `contents`, util.Trim(string(body)))
|
if e, a := "contents", util.Trim(string(body)); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/2014-01-01/vaults/name/archives", r.URL.String())
|
awstesting.AssertURL(t, "https://test/2014-01-01/vaults/name/archives", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "foo", r.Header.Get("x-amz-sha256-tree-hash"))
|
if e, a := "foo", r.Header.Get("x-amz-sha256-tree-hash"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4531,10 +4566,14 @@ func TestInputService12ProtocolTestSerializeBlobsInBodyCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"Bar":"QmxvYiBwYXJhbQ=="}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"Bar":"QmxvYiBwYXJhbQ=="}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4555,12 +4594,18 @@ func TestInputService13ProtocolTestBlobPayloadCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
assert.Equal(t, `bar`, util.Trim(string(body)))
|
if e, a := "bar", util.Trim(string(body)); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
@ -4577,7 +4622,9 @@ func TestInputService13ProtocolTestBlobPayloadCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
@ -4598,10 +4645,14 @@ func TestInputService14ProtocolTestStructurePayloadCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"baz":"bar"}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"baz":"bar"}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4620,7 +4671,9 @@ func TestInputService14ProtocolTestStructurePayloadCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
@ -4637,7 +4690,9 @@ func TestInputService15ProtocolTestOmitsNullQueryParamsButSerializesEmptyStrings
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/path", r.URL.String())
|
awstesting.AssertURL(t, "https://test/path", r.URL.String())
|
||||||
@ -4656,7 +4711,9 @@ func TestInputService15ProtocolTestOmitsNullQueryParamsButSerializesEmptyStrings
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/path?abc=mno¶m-name=", r.URL.String())
|
awstesting.AssertURL(t, "https://test/path?abc=mno¶m-name=", r.URL.String())
|
||||||
@ -4677,10 +4734,14 @@ func TestInputService16ProtocolTestRecursiveShapesCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"RecursiveStruct":{"NoRecurse":"foo"}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"RecursiveStruct":{"NoRecurse":"foo"}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4705,10 +4766,14 @@ func TestInputService16ProtocolTestRecursiveShapesCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4737,10 +4802,14 @@ func TestInputService16ProtocolTestRecursiveShapesCase3(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4770,10 +4839,14 @@ func TestInputService16ProtocolTestRecursiveShapesCase4(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4805,10 +4878,14 @@ func TestInputService16ProtocolTestRecursiveShapesCase5(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"RecursiveStruct":{"NoRecurse":"bar"}}]}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"RecursiveStruct":{"NoRecurse":"bar"}}]}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4838,10 +4915,14 @@ func TestInputService16ProtocolTestRecursiveShapesCase6(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4862,10 +4943,14 @@ func TestInputService17ProtocolTestTimestampValuesCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"TimeArg":1422172800}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"TimeArg":1422172800}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4886,13 +4971,17 @@ func TestInputService17ProtocolTestTimestampValuesCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/path", r.URL.String())
|
awstesting.AssertURL(t, "https://test/path", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg"))
|
if e, a := "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4906,10 +4995,14 @@ func TestInputService18ProtocolTestNamedLocationsInJSONBodyCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"timestamp_location":1422172800}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"timestamp_location":1422172800}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4930,12 +5023,18 @@ func TestInputService19ProtocolTestStringPayloadCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
assert.Equal(t, `bar`, util.Trim(string(body)))
|
if e, a := "bar", util.Trim(string(body)); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
@ -4954,10 +5053,14 @@ func TestInputService20ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"Token":"abc123"}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"Token":"abc123"}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4976,10 +5079,14 @@ func TestInputService20ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
awstesting.AssertJSON(t, `{"Token":"00000000-0000-4000-8000-000000000000"}`, util.Trim(string(body)))
|
awstesting.AssertJSON(t, `{"Token":"00000000-0000-4000-8000-000000000000"}`, util.Trim(string(body)))
|
||||||
|
|
||||||
@ -4999,13 +5106,17 @@ func TestInputService21ProtocolTestJSONValueTraitCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "eyJGb28iOiJCYXIifQ==", r.Header.Get("X-Amz-Foo"))
|
if e, a := "eyJGb28iOiJCYXIifQ==", r.Header.Get("X-Amz-Foo"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -5017,7 +5128,9 @@ func TestInputService21ProtocolTestJSONValueTraitCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restjson.Build(req)
|
restjson.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
229
vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go
generated
vendored
229
vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go
generated
vendored
@ -24,7 +24,6 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/private/protocol/restjson"
|
"github.com/aws/aws-sdk-go/private/protocol/restjson"
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
||||||
"github.com/aws/aws-sdk-go/private/util"
|
"github.com/aws/aws-sdk-go/private/util"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ bytes.Buffer // always import bytes
|
var _ bytes.Buffer // always import bytes
|
||||||
@ -1941,21 +1940,47 @@ func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restjson.UnmarshalMeta(req)
|
restjson.UnmarshalMeta(req)
|
||||||
restjson.Unmarshal(req)
|
restjson.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "a", *out.Char)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, 1.3, *out.Double)
|
}
|
||||||
assert.Equal(t, false, *out.FalseBool)
|
if e, a := "a", *out.Char; e != a {
|
||||||
assert.Equal(t, 1.2, *out.Float)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, "test", *out.ImaHeader)
|
}
|
||||||
assert.Equal(t, "abc", *out.ImaHeaderLocation)
|
if e, a := 1.3, *out.Double; e != a {
|
||||||
assert.Equal(t, int64(200), *out.Long)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, int64(123), *out.Num)
|
}
|
||||||
assert.Equal(t, int64(200), *out.Status)
|
if e, a := false, *out.FalseBool; e != a {
|
||||||
assert.Equal(t, "myname", *out.Str)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, true, *out.TrueBool)
|
}
|
||||||
|
if e, a := 1.2, *out.Float; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "test", *out.ImaHeader; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "abc", *out.ImaHeaderLocation; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(200), *out.Long; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(123), *out.Num; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(200), *out.Status; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "myname", *out.Str; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := true, *out.TrueBool; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1971,12 +1996,20 @@ func TestOutputService2ProtocolTestBlobMembersCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restjson.UnmarshalMeta(req)
|
restjson.UnmarshalMeta(req)
|
||||||
restjson.Unmarshal(req)
|
restjson.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "hi!", string(out.BlobMember))
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "there!", string(out.StructMember.Foo))
|
}
|
||||||
|
if e, a := "hi!", string(out.BlobMember); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "there!", string(out.StructMember.Foo); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1992,12 +2025,20 @@ func TestOutputService3ProtocolTestTimestampMembersCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restjson.UnmarshalMeta(req)
|
restjson.UnmarshalMeta(req)
|
||||||
restjson.Unmarshal(req)
|
restjson.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String())
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String())
|
}
|
||||||
|
if e, a := time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2013,12 +2054,20 @@ func TestOutputService4ProtocolTestListsCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restjson.UnmarshalMeta(req)
|
restjson.UnmarshalMeta(req)
|
||||||
restjson.Unmarshal(req)
|
restjson.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "a", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "b", *out.ListMember[1])
|
}
|
||||||
|
if e, a := "a", *out.ListMember[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "b", *out.ListMember[1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2034,12 +2083,20 @@ func TestOutputService5ProtocolTestListsWithStructureMemberCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restjson.UnmarshalMeta(req)
|
restjson.UnmarshalMeta(req)
|
||||||
restjson.Unmarshal(req)
|
restjson.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "a", *out.ListMember[0].Foo)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "b", *out.ListMember[1].Foo)
|
}
|
||||||
|
if e, a := "a", *out.ListMember[0].Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "b", *out.ListMember[1].Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2055,14 +2112,26 @@ func TestOutputService6ProtocolTestMapsCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restjson.UnmarshalMeta(req)
|
restjson.UnmarshalMeta(req)
|
||||||
restjson.Unmarshal(req)
|
restjson.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, int64(1), *out.MapMember["a"][0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, int64(2), *out.MapMember["a"][1])
|
}
|
||||||
assert.Equal(t, int64(3), *out.MapMember["b"][0])
|
if e, a := int64(1), *out.MapMember["a"][0]; e != a {
|
||||||
assert.Equal(t, int64(4), *out.MapMember["b"][1])
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(2), *out.MapMember["a"][1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(3), *out.MapMember["b"][0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(4), *out.MapMember["b"][1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2078,12 +2147,20 @@ func TestOutputService7ProtocolTestComplexMapValuesCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restjson.UnmarshalMeta(req)
|
restjson.UnmarshalMeta(req)
|
||||||
restjson.Unmarshal(req)
|
restjson.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["a"].String())
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["b"].String())
|
}
|
||||||
|
if e, a := time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["a"].String(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["b"].String(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2099,10 +2176,14 @@ func TestOutputService8ProtocolTestIgnoresExtraDataCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restjson.UnmarshalMeta(req)
|
restjson.UnmarshalMeta(req)
|
||||||
restjson.Unmarshal(req)
|
restjson.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2121,15 +2202,29 @@ func TestOutputService9ProtocolTestSupportsHeaderMapsCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restjson.UnmarshalMeta(req)
|
restjson.UnmarshalMeta(req)
|
||||||
restjson.Unmarshal(req)
|
restjson.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "10", *out.AllHeaders["Content-Length"])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "boo", *out.AllHeaders["X-Bam"])
|
}
|
||||||
assert.Equal(t, "bar", *out.AllHeaders["X-Foo"])
|
if e, a := "10", *out.AllHeaders["Content-Length"]; e != a {
|
||||||
assert.Equal(t, "boo", *out.PrefixedHeaders["Bam"])
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, "bar", *out.PrefixedHeaders["Foo"])
|
}
|
||||||
|
if e, a := "boo", *out.AllHeaders["X-Bam"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "bar", *out.AllHeaders["X-Foo"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "boo", *out.PrefixedHeaders["Bam"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "bar", *out.PrefixedHeaders["Foo"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2146,12 +2241,20 @@ func TestOutputService10ProtocolTestJSONPayloadCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restjson.UnmarshalMeta(req)
|
restjson.UnmarshalMeta(req)
|
||||||
restjson.Unmarshal(req)
|
restjson.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", *out.Data.Foo)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "baz", *out.Header)
|
}
|
||||||
|
if e, a := "abc", *out.Data.Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "baz", *out.Header; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2167,11 +2270,17 @@ func TestOutputService11ProtocolTestStreamingPayloadCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restjson.UnmarshalMeta(req)
|
restjson.UnmarshalMeta(req)
|
||||||
restjson.Unmarshal(req)
|
restjson.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", string(out.Stream))
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
if e, a := "abc", string(out.Stream); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2188,9 +2297,13 @@ func TestOutputService12ProtocolTestJSONValueTraitCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restjson.UnmarshalMeta(req)
|
restjson.UnmarshalMeta(req)
|
||||||
restjson.Unmarshal(req)
|
restjson.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
reflect.DeepEqual(out.Attr, map[string]interface{}{"Foo": "Bar"})
|
reflect.DeepEqual(out.Attr, map[string]interface{}{"Foo": "Bar"})
|
||||||
}
|
}
|
||||||
|
42
vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go
generated
vendored
42
vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_bench_test.go
generated
vendored
@ -55,13 +55,6 @@ func BenchmarkRESTXMLBuild_Complex_CFCreateDistro(b *testing.B) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkRESTXMLRequest_Complex_CFCreateDistro(b *testing.B) {
|
|
||||||
benchRESTXMLRequest(b, func() *request.Request {
|
|
||||||
req, _ := cloudfrontSvc.CreateDistributionRequest(cloudfrontCreateDistributionInput())
|
|
||||||
return req
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkRESTXMLBuild_Simple_CFDeleteDistro(b *testing.B) {
|
func BenchmarkRESTXMLBuild_Simple_CFDeleteDistro(b *testing.B) {
|
||||||
params := cloudfrontDeleteDistributionInput()
|
params := cloudfrontDeleteDistributionInput()
|
||||||
|
|
||||||
@ -71,13 +64,6 @@ func BenchmarkRESTXMLBuild_Simple_CFDeleteDistro(b *testing.B) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkRESTXMLRequest_Simple_CFDeleteDistro(b *testing.B) {
|
|
||||||
benchRESTXMLRequest(b, func() *request.Request {
|
|
||||||
req, _ := cloudfrontSvc.DeleteDistributionRequest(cloudfrontDeleteDistributionInput())
|
|
||||||
return req
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkRESTXMLBuild_REST_S3HeadObject(b *testing.B) {
|
func BenchmarkRESTXMLBuild_REST_S3HeadObject(b *testing.B) {
|
||||||
params := s3HeadObjectInput()
|
params := s3HeadObjectInput()
|
||||||
|
|
||||||
@ -87,13 +73,6 @@ func BenchmarkRESTXMLBuild_REST_S3HeadObject(b *testing.B) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkRESTXMLRequest_REST_S3HeadObject(b *testing.B) {
|
|
||||||
benchRESTXMLRequest(b, func() *request.Request {
|
|
||||||
req, _ := s3Svc.HeadObjectRequest(s3HeadObjectInput())
|
|
||||||
return req
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkRESTXMLBuild_XML_S3PutObjectAcl(b *testing.B) {
|
func BenchmarkRESTXMLBuild_XML_S3PutObjectAcl(b *testing.B) {
|
||||||
params := s3PutObjectAclInput()
|
params := s3PutObjectAclInput()
|
||||||
|
|
||||||
@ -103,6 +82,27 @@ func BenchmarkRESTXMLBuild_XML_S3PutObjectAcl(b *testing.B) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func BenchmarkRESTXMLRequest_Complex_CFCreateDistro(b *testing.B) {
|
||||||
|
benchRESTXMLRequest(b, func() *request.Request {
|
||||||
|
req, _ := cloudfrontSvc.CreateDistributionRequest(cloudfrontCreateDistributionInput())
|
||||||
|
return req
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkRESTXMLRequest_Simple_CFDeleteDistro(b *testing.B) {
|
||||||
|
benchRESTXMLRequest(b, func() *request.Request {
|
||||||
|
req, _ := cloudfrontSvc.DeleteDistributionRequest(cloudfrontDeleteDistributionInput())
|
||||||
|
return req
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkRESTXMLRequest_REST_S3HeadObject(b *testing.B) {
|
||||||
|
benchRESTXMLRequest(b, func() *request.Request {
|
||||||
|
req, _ := s3Svc.HeadObjectRequest(s3HeadObjectInput())
|
||||||
|
return req
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func BenchmarkRESTXMLRequest_XML_S3PutObjectAcl(b *testing.B) {
|
func BenchmarkRESTXMLRequest_XML_S3PutObjectAcl(b *testing.B) {
|
||||||
benchRESTXMLRequest(b, func() *request.Request {
|
benchRESTXMLRequest(b, func() *request.Request {
|
||||||
req, _ := s3Svc.PutObjectAclRequest(s3PutObjectAclInput())
|
req, _ := s3Svc.PutObjectAclRequest(s3PutObjectAclInput())
|
||||||
|
277
vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go
generated
vendored
277
vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/build_test.go
generated
vendored
@ -24,7 +24,6 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/private/protocol/restxml"
|
"github.com/aws/aws-sdk-go/private/protocol/restxml"
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
||||||
"github.com/aws/aws-sdk-go/private/util"
|
"github.com/aws/aws-sdk-go/private/util"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ bytes.Buffer // always import bytes
|
var _ bytes.Buffer // always import bytes
|
||||||
@ -4918,10 +4917,14 @@ func TestInputService1ProtocolTestBasicXMLSerializationCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">bar</Description><Name xmlns="https://foo/">foo</Name></OperationRequest>`, util.Trim(string(body)), InputService1TestShapeInputService1TestCaseOperation2Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">bar</Description><Name xmlns="https://foo/">foo</Name></OperationRequest>`, util.Trim(string(body)), InputService1TestShapeInputService1TestCaseOperation2Input{})
|
||||||
|
|
||||||
@ -4943,10 +4946,14 @@ func TestInputService1ProtocolTestBasicXMLSerializationCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">bar</Description><Name xmlns="https://foo/">foo</Name></OperationRequest>`, util.Trim(string(body)), InputService1TestShapeInputService1TestCaseOperation2Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">bar</Description><Name xmlns="https://foo/">foo</Name></OperationRequest>`, util.Trim(string(body)), InputService1TestShapeInputService1TestCaseOperation2Input{})
|
||||||
|
|
||||||
@ -4965,7 +4972,9 @@ func TestInputService1ProtocolTestBasicXMLSerializationCase3(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
|
awstesting.AssertURL(t, "https://test/2014-01-01/hostedzone", r.URL.String())
|
||||||
@ -4987,10 +4996,14 @@ func TestInputService2ProtocolTestSerializeOtherScalarTypesCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><First xmlns="https://foo/">true</First><Fourth xmlns="https://foo/">3</Fourth><Second xmlns="https://foo/">false</Second><Third xmlns="https://foo/">1.2</Third></OperationRequest>`, util.Trim(string(body)), InputService2TestShapeInputService2TestCaseOperation1Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><First xmlns="https://foo/">true</First><Fourth xmlns="https://foo/">3</Fourth><Second xmlns="https://foo/">false</Second><Third xmlns="https://foo/">1.2</Third></OperationRequest>`, util.Trim(string(body)), InputService2TestShapeInputService2TestCaseOperation1Input{})
|
||||||
|
|
||||||
@ -5015,10 +5028,14 @@ func TestInputService3ProtocolTestNestedStructuresCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">baz</Description><SubStructure xmlns="https://foo/"><Bar xmlns="https://foo/">b</Bar><Foo xmlns="https://foo/">a</Foo></SubStructure></OperationRequest>`, util.Trim(string(body)), InputService3TestShapeInputService3TestCaseOperation2Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">baz</Description><SubStructure xmlns="https://foo/"><Bar xmlns="https://foo/">b</Bar><Foo xmlns="https://foo/">a</Foo></SubStructure></OperationRequest>`, util.Trim(string(body)), InputService3TestShapeInputService3TestCaseOperation2Input{})
|
||||||
|
|
||||||
@ -5042,10 +5059,14 @@ func TestInputService3ProtocolTestNestedStructuresCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">baz</Description><SubStructure xmlns="https://foo/"><Foo xmlns="https://foo/">a</Foo></SubStructure></OperationRequest>`, util.Trim(string(body)), InputService3TestShapeInputService3TestCaseOperation2Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">baz</Description><SubStructure xmlns="https://foo/"><Foo xmlns="https://foo/">a</Foo></SubStructure></OperationRequest>`, util.Trim(string(body)), InputService3TestShapeInputService3TestCaseOperation2Input{})
|
||||||
|
|
||||||
@ -5067,10 +5088,14 @@ func TestInputService4ProtocolTestNestedStructuresCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">baz</Description><SubStructure xmlns="https://foo/"></SubStructure></OperationRequest>`, util.Trim(string(body)), InputService4TestShapeInputService4TestCaseOperation1Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><Description xmlns="https://foo/">baz</Description><SubStructure xmlns="https://foo/"></SubStructure></OperationRequest>`, util.Trim(string(body)), InputService4TestShapeInputService4TestCaseOperation1Input{})
|
||||||
|
|
||||||
@ -5095,10 +5120,14 @@ func TestInputService5ProtocolTestNonFlattenedListsCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><ListParam xmlns="https://foo/"><member xmlns="https://foo/">one</member><member xmlns="https://foo/">two</member><member xmlns="https://foo/">three</member></ListParam></OperationRequest>`, util.Trim(string(body)), InputService5TestShapeInputService5TestCaseOperation1Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><ListParam xmlns="https://foo/"><member xmlns="https://foo/">one</member><member xmlns="https://foo/">two</member><member xmlns="https://foo/">three</member></ListParam></OperationRequest>`, util.Trim(string(body)), InputService5TestShapeInputService5TestCaseOperation1Input{})
|
||||||
|
|
||||||
@ -5123,10 +5152,14 @@ func TestInputService6ProtocolTestNonFlattenedListsWithLocationNameCase1(t *test
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><AlternateName xmlns="https://foo/"><NotMember xmlns="https://foo/">one</NotMember><NotMember xmlns="https://foo/">two</NotMember><NotMember xmlns="https://foo/">three</NotMember></AlternateName></OperationRequest>`, util.Trim(string(body)), InputService6TestShapeInputService6TestCaseOperation1Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><AlternateName xmlns="https://foo/"><NotMember xmlns="https://foo/">one</NotMember><NotMember xmlns="https://foo/">two</NotMember><NotMember xmlns="https://foo/">three</NotMember></AlternateName></OperationRequest>`, util.Trim(string(body)), InputService6TestShapeInputService6TestCaseOperation1Input{})
|
||||||
|
|
||||||
@ -5151,10 +5184,14 @@ func TestInputService7ProtocolTestFlattenedListsCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><ListParam xmlns="https://foo/">one</ListParam><ListParam xmlns="https://foo/">two</ListParam><ListParam xmlns="https://foo/">three</ListParam></OperationRequest>`, util.Trim(string(body)), InputService7TestShapeInputService7TestCaseOperation1Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><ListParam xmlns="https://foo/">one</ListParam><ListParam xmlns="https://foo/">two</ListParam><ListParam xmlns="https://foo/">three</ListParam></OperationRequest>`, util.Trim(string(body)), InputService7TestShapeInputService7TestCaseOperation1Input{})
|
||||||
|
|
||||||
@ -5179,10 +5216,14 @@ func TestInputService8ProtocolTestFlattenedListsWithLocationNameCase1(t *testing
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><item xmlns="https://foo/">one</item><item xmlns="https://foo/">two</item><item xmlns="https://foo/">three</item></OperationRequest>`, util.Trim(string(body)), InputService8TestShapeInputService8TestCaseOperation1Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><item xmlns="https://foo/">one</item><item xmlns="https://foo/">two</item><item xmlns="https://foo/">three</item></OperationRequest>`, util.Trim(string(body)), InputService8TestShapeInputService8TestCaseOperation1Input{})
|
||||||
|
|
||||||
@ -5213,10 +5254,14 @@ func TestInputService9ProtocolTestListOfStructuresCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><item xmlns="https://foo/"><value xmlns="https://foo/">one</value></item><item xmlns="https://foo/"><value xmlns="https://foo/">two</value></item><item xmlns="https://foo/"><value xmlns="https://foo/">three</value></item></OperationRequest>`, util.Trim(string(body)), InputService9TestShapeInputService9TestCaseOperation1Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><item xmlns="https://foo/"><value xmlns="https://foo/">one</value></item><item xmlns="https://foo/"><value xmlns="https://foo/">two</value></item><item xmlns="https://foo/"><value xmlns="https://foo/">three</value></item></OperationRequest>`, util.Trim(string(body)), InputService9TestShapeInputService9TestCaseOperation1Input{})
|
||||||
|
|
||||||
@ -5240,10 +5285,14 @@ func TestInputService10ProtocolTestBlobAndTimestampShapesCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><StructureParam xmlns="https://foo/"><b xmlns="https://foo/">Zm9v</b><t xmlns="https://foo/">2015-01-25T08:00:00Z</t></StructureParam></OperationRequest>`, util.Trim(string(body)), InputService10TestShapeInputService10TestCaseOperation1Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><StructureParam xmlns="https://foo/"><b xmlns="https://foo/">Zm9v</b><t xmlns="https://foo/">2015-01-25T08:00:00Z</t></StructureParam></OperationRequest>`, util.Trim(string(body)), InputService10TestShapeInputService10TestCaseOperation1Input{})
|
||||||
|
|
||||||
@ -5267,14 +5316,20 @@ func TestInputService11ProtocolTestHeaderMapsCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "b", r.Header.Get("x-foo-a"))
|
if e, a := "b", r.Header.Get("x-foo-a"); e != a {
|
||||||
assert.Equal(t, "d", r.Header.Get("x-foo-c"))
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "d", r.Header.Get("x-foo-c"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -5291,7 +5346,9 @@ func TestInputService12ProtocolTestQuerystringListOfStringsCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/path?item=value1&item=value2", r.URL.String())
|
awstesting.AssertURL(t, "https://test/path?item=value1&item=value2", r.URL.String())
|
||||||
@ -5314,7 +5371,9 @@ func TestInputService13ProtocolTestStringToStringMapsInQuerystringCase1(t *testi
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", r.URL.String())
|
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", r.URL.String())
|
||||||
@ -5343,7 +5402,9 @@ func TestInputService14ProtocolTestStringToStringListMapsInQuerystringCase1(t *t
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String())
|
awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String())
|
||||||
@ -5362,7 +5423,9 @@ func TestInputService15ProtocolTestBooleanInQuerystringCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/path?bool-query=true", r.URL.String())
|
awstesting.AssertURL(t, "https://test/path?bool-query=true", r.URL.String())
|
||||||
@ -5381,7 +5444,9 @@ func TestInputService15ProtocolTestBooleanInQuerystringCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/path?bool-query=false", r.URL.String())
|
awstesting.AssertURL(t, "https://test/path?bool-query=false", r.URL.String())
|
||||||
@ -5400,12 +5465,18 @@ func TestInputService16ProtocolTestStringPayloadCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
assert.Equal(t, `bar`, util.Trim(string(body)))
|
if e, a := "bar", util.Trim(string(body)); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
@ -5424,12 +5495,18 @@ func TestInputService17ProtocolTestBlobPayloadCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
assert.Equal(t, `bar`, util.Trim(string(body)))
|
if e, a := "bar", util.Trim(string(body)); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
@ -5446,7 +5523,9 @@ func TestInputService17ProtocolTestBlobPayloadCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
@ -5467,10 +5546,14 @@ func TestInputService18ProtocolTestStructurePayloadCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<foo><baz>bar</baz></foo>`, util.Trim(string(body)), InputService18TestShapeInputService18TestCaseOperation4Input{})
|
awstesting.AssertXML(t, `<foo><baz>bar</baz></foo>`, util.Trim(string(body)), InputService18TestShapeInputService18TestCaseOperation4Input{})
|
||||||
|
|
||||||
@ -5489,7 +5572,9 @@ func TestInputService18ProtocolTestStructurePayloadCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
@ -5508,10 +5593,14 @@ func TestInputService18ProtocolTestStructurePayloadCase3(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<foo></foo>`, util.Trim(string(body)), InputService18TestShapeInputService18TestCaseOperation4Input{})
|
awstesting.AssertXML(t, `<foo></foo>`, util.Trim(string(body)), InputService18TestShapeInputService18TestCaseOperation4Input{})
|
||||||
|
|
||||||
@ -5530,7 +5619,9 @@ func TestInputService18ProtocolTestStructurePayloadCase4(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
awstesting.AssertURL(t, "https://test/", r.URL.String())
|
||||||
@ -5554,10 +5645,14 @@ func TestInputService19ProtocolTestXMLAttributeCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<Grant xmlns:_xmlns="xmlns" _xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:XMLSchema-instance="http://www.w3.org/2001/XMLSchema-instance" XMLSchema-instance:type="CanonicalUser"><Grantee><EmailAddress>foo@example.com</EmailAddress></Grantee></Grant>`, util.Trim(string(body)), InputService19TestShapeInputService19TestCaseOperation1Input{})
|
awstesting.AssertXML(t, `<Grant xmlns:_xmlns="xmlns" _xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:XMLSchema-instance="http://www.w3.org/2001/XMLSchema-instance" XMLSchema-instance:type="CanonicalUser"><Grantee><EmailAddress>foo@example.com</EmailAddress></Grantee></Grant>`, util.Trim(string(body)), InputService19TestShapeInputService19TestCaseOperation1Input{})
|
||||||
|
|
||||||
@ -5579,7 +5674,9 @@ func TestInputService20ProtocolTestGreedyKeysCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/my%2Fbucket/testing%20/123", r.URL.String())
|
awstesting.AssertURL(t, "https://test/my%2Fbucket/testing%20/123", r.URL.String())
|
||||||
@ -5596,7 +5693,9 @@ func TestInputService21ProtocolTestOmitsNullQueryParamsButSerializesEmptyStrings
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/path", r.URL.String())
|
awstesting.AssertURL(t, "https://test/path", r.URL.String())
|
||||||
@ -5615,7 +5714,9 @@ func TestInputService21ProtocolTestOmitsNullQueryParamsButSerializesEmptyStrings
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/path?abc=mno¶m-name=", r.URL.String())
|
awstesting.AssertURL(t, "https://test/path?abc=mno¶m-name=", r.URL.String())
|
||||||
@ -5636,10 +5737,14 @@ func TestInputService22ProtocolTestRecursiveShapesCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></RecursiveStruct></OperationRequest>`, util.Trim(string(body)), InputService22TestShapeInputService22TestCaseOperation6Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></RecursiveStruct></OperationRequest>`, util.Trim(string(body)), InputService22TestShapeInputService22TestCaseOperation6Input{})
|
||||||
|
|
||||||
@ -5664,10 +5769,14 @@ func TestInputService22ProtocolTestRecursiveShapesCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></RecursiveStruct></RecursiveStruct></OperationRequest>`, util.Trim(string(body)), InputService22TestShapeInputService22TestCaseOperation6Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></RecursiveStruct></RecursiveStruct></OperationRequest>`, util.Trim(string(body)), InputService22TestShapeInputService22TestCaseOperation6Input{})
|
||||||
|
|
||||||
@ -5696,10 +5805,14 @@ func TestInputService22ProtocolTestRecursiveShapesCase3(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></RecursiveStruct></RecursiveStruct></RecursiveStruct></RecursiveStruct></OperationRequest>`, util.Trim(string(body)), InputService22TestShapeInputService22TestCaseOperation6Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></RecursiveStruct></RecursiveStruct></RecursiveStruct></RecursiveStruct></OperationRequest>`, util.Trim(string(body)), InputService22TestShapeInputService22TestCaseOperation6Input{})
|
||||||
|
|
||||||
@ -5729,10 +5842,14 @@ func TestInputService22ProtocolTestRecursiveShapesCase4(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveList xmlns="https://foo/"><member xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></member><member xmlns="https://foo/"><NoRecurse xmlns="https://foo/">bar</NoRecurse></member></RecursiveList></RecursiveStruct></OperationRequest>`, util.Trim(string(body)), InputService22TestShapeInputService22TestCaseOperation6Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveList xmlns="https://foo/"><member xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></member><member xmlns="https://foo/"><NoRecurse xmlns="https://foo/">bar</NoRecurse></member></RecursiveList></RecursiveStruct></OperationRequest>`, util.Trim(string(body)), InputService22TestShapeInputService22TestCaseOperation6Input{})
|
||||||
|
|
||||||
@ -5764,10 +5881,14 @@ func TestInputService22ProtocolTestRecursiveShapesCase5(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveList xmlns="https://foo/"><member xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></member><member xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><NoRecurse xmlns="https://foo/">bar</NoRecurse></RecursiveStruct></member></RecursiveList></RecursiveStruct></OperationRequest>`, util.Trim(string(body)), InputService22TestShapeInputService22TestCaseOperation6Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveList xmlns="https://foo/"><member xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></member><member xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><NoRecurse xmlns="https://foo/">bar</NoRecurse></RecursiveStruct></member></RecursiveList></RecursiveStruct></OperationRequest>`, util.Trim(string(body)), InputService22TestShapeInputService22TestCaseOperation6Input{})
|
||||||
|
|
||||||
@ -5797,10 +5918,14 @@ func TestInputService22ProtocolTestRecursiveShapesCase6(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveMap xmlns="https://foo/"><entry xmlns="https://foo/"><key xmlns="https://foo/">foo</key><value xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></value></entry><entry xmlns="https://foo/"><key xmlns="https://foo/">bar</key><value xmlns="https://foo/"><NoRecurse xmlns="https://foo/">bar</NoRecurse></value></entry></RecursiveMap></RecursiveStruct></OperationRequest>`, util.Trim(string(body)), InputService22TestShapeInputService22TestCaseOperation6Input{})
|
awstesting.AssertXML(t, `<OperationRequest xmlns="https://foo/"><RecursiveStruct xmlns="https://foo/"><RecursiveMap xmlns="https://foo/"><entry xmlns="https://foo/"><key xmlns="https://foo/">foo</key><value xmlns="https://foo/"><NoRecurse xmlns="https://foo/">foo</NoRecurse></value></entry><entry xmlns="https://foo/"><key xmlns="https://foo/">bar</key><value xmlns="https://foo/"><NoRecurse xmlns="https://foo/">bar</NoRecurse></value></entry></RecursiveMap></RecursiveStruct></OperationRequest>`, util.Trim(string(body)), InputService22TestShapeInputService22TestCaseOperation6Input{})
|
||||||
|
|
||||||
@ -5821,13 +5946,17 @@ func TestInputService23ProtocolTestTimestampInHeaderCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert URL
|
// assert URL
|
||||||
awstesting.AssertURL(t, "https://test/path", r.URL.String())
|
awstesting.AssertURL(t, "https://test/path", r.URL.String())
|
||||||
|
|
||||||
// assert headers
|
// assert headers
|
||||||
assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg"))
|
if e, a := "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg"); e != a {
|
||||||
|
t.Errorf("expect %v to be %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -5841,10 +5970,14 @@ func TestInputService24ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<InputShape><Token>abc123</Token></InputShape>`, util.Trim(string(body)), InputService24TestShapeInputService24TestCaseOperation2Input{})
|
awstesting.AssertXML(t, `<InputShape><Token>abc123</Token></InputShape>`, util.Trim(string(body)), InputService24TestShapeInputService24TestCaseOperation2Input{})
|
||||||
|
|
||||||
@ -5863,10 +5996,14 @@ func TestInputService24ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) {
|
|||||||
|
|
||||||
// build request
|
// build request
|
||||||
restxml.Build(req)
|
restxml.Build(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect no error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert body
|
// assert body
|
||||||
assert.NotNil(t, r.Body)
|
if r.Body == nil {
|
||||||
|
t.Errorf("expect body not to be nil")
|
||||||
|
}
|
||||||
body := util.SortXML(r.Body)
|
body := util.SortXML(r.Body)
|
||||||
awstesting.AssertXML(t, `<InputShape><Token>00000000-0000-4000-8000-000000000000</Token></InputShape>`, util.Trim(string(body)), InputService24TestShapeInputService24TestCaseOperation2Input{})
|
awstesting.AssertXML(t, `<InputShape><Token>00000000-0000-4000-8000-000000000000</Token></InputShape>`, util.Trim(string(body)), InputService24TestShapeInputService24TestCaseOperation2Input{})
|
||||||
|
|
||||||
|
297
vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go
generated
vendored
297
vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/unmarshal_test.go
generated
vendored
@ -24,7 +24,6 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/private/protocol/restxml"
|
"github.com/aws/aws-sdk-go/private/protocol/restxml"
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
||||||
"github.com/aws/aws-sdk-go/private/util"
|
"github.com/aws/aws-sdk-go/private/util"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ bytes.Buffer // always import bytes
|
var _ bytes.Buffer // always import bytes
|
||||||
@ -2028,21 +2027,47 @@ func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "a", *out.Char)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, 1.3, *out.Double)
|
}
|
||||||
assert.Equal(t, false, *out.FalseBool)
|
if e, a := "a", *out.Char; e != a {
|
||||||
assert.Equal(t, 1.2, *out.Float)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, "test", *out.ImaHeader)
|
}
|
||||||
assert.Equal(t, "abc", *out.ImaHeaderLocation)
|
if e, a := 1.3, *out.Double; e != a {
|
||||||
assert.Equal(t, int64(200), *out.Long)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, int64(123), *out.Num)
|
}
|
||||||
assert.Equal(t, "myname", *out.Str)
|
if e, a := false, *out.FalseBool; e != a {
|
||||||
assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, true, *out.TrueBool)
|
}
|
||||||
|
if e, a := 1.2, *out.Float; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "test", *out.ImaHeader; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "abc", *out.ImaHeaderLocation; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(200), *out.Long; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(123), *out.Num; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "myname", *out.Str; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := true, *out.TrueBool; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2060,21 +2085,47 @@ func TestOutputService1ProtocolTestScalarMembersCase2(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "a", *out.Char)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, 1.3, *out.Double)
|
}
|
||||||
assert.Equal(t, false, *out.FalseBool)
|
if e, a := "a", *out.Char; e != a {
|
||||||
assert.Equal(t, 1.2, *out.Float)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, "test", *out.ImaHeader)
|
}
|
||||||
assert.Equal(t, "abc", *out.ImaHeaderLocation)
|
if e, a := 1.3, *out.Double; e != a {
|
||||||
assert.Equal(t, int64(200), *out.Long)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, int64(123), *out.Num)
|
}
|
||||||
assert.Equal(t, "", *out.Str)
|
if e, a := false, *out.FalseBool; e != a {
|
||||||
assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, true, *out.TrueBool)
|
}
|
||||||
|
if e, a := 1.2, *out.Float; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "test", *out.ImaHeader; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "abc", *out.ImaHeaderLocation; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(200), *out.Long; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(123), *out.Num; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "", *out.Str; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := true, *out.TrueBool; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2090,11 +2141,17 @@ func TestOutputService2ProtocolTestBlobCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "value", string(out.Blob))
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
if e, a := "value", string(out.Blob); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2110,12 +2167,20 @@ func TestOutputService3ProtocolTestListsCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "123", *out.ListMember[1])
|
}
|
||||||
|
if e, a := "abc", *out.ListMember[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "123", *out.ListMember[1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2131,12 +2196,20 @@ func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "123", *out.ListMember[1])
|
}
|
||||||
|
if e, a := "abc", *out.ListMember[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "123", *out.ListMember[1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2152,12 +2225,20 @@ func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", *out.ListMember[0])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "123", *out.ListMember[1])
|
}
|
||||||
|
if e, a := "abc", *out.ListMember[0]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "123", *out.ListMember[1]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2173,12 +2254,20 @@ func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "bam", *out.Map["baz"].Foo)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "bar", *out.Map["qux"].Foo)
|
}
|
||||||
|
if e, a := "bam", *out.Map["baz"].Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "bar", *out.Map["qux"].Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2194,12 +2283,20 @@ func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "bam", *out.Map["baz"])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "bar", *out.Map["qux"])
|
}
|
||||||
|
if e, a := "bam", *out.Map["baz"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "bar", *out.Map["qux"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2215,12 +2312,20 @@ func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "bam", *out.Map["baz"])
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "bar", *out.Map["qux"])
|
}
|
||||||
|
if e, a := "bam", *out.Map["baz"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "bar", *out.Map["qux"]; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2237,12 +2342,20 @@ func TestOutputService9ProtocolTestXMLPayloadCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", *out.Data.Foo)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, "baz", *out.Header)
|
}
|
||||||
|
if e, a := "abc", *out.Data.Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "baz", *out.Header; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2258,11 +2371,17 @@ func TestOutputService10ProtocolTestStreamingPayloadCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "abc", string(out.Stream))
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
if e, a := "abc", string(out.Stream); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2287,19 +2406,41 @@ func TestOutputService11ProtocolTestScalarMembersInHeadersCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "a", *out.Char)
|
t.Errorf("expect not to be nil")
|
||||||
assert.Equal(t, 1.5, *out.Double)
|
}
|
||||||
assert.Equal(t, false, *out.FalseBool)
|
if e, a := "a", *out.Char; e != a {
|
||||||
assert.Equal(t, 1.5, *out.Float)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, int64(1), *out.Integer)
|
}
|
||||||
assert.Equal(t, int64(100), *out.Long)
|
if e, a := 1.5, *out.Double; e != a {
|
||||||
assert.Equal(t, "string", *out.Str)
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
|
}
|
||||||
assert.Equal(t, true, *out.TrueBool)
|
if e, a := false, *out.FalseBool; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := 1.5, *out.Float; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(1), *out.Integer; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := int64(100), *out.Long; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := "string", *out.Str; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String(); e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
if e, a := true, *out.TrueBool; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2315,10 +2456,16 @@ func TestOutputService12ProtocolTestEmptyStringCase1(t *testing.T) {
|
|||||||
// unmarshal response
|
// unmarshal response
|
||||||
restxml.UnmarshalMeta(req)
|
restxml.UnmarshalMeta(req)
|
||||||
restxml.Unmarshal(req)
|
restxml.Unmarshal(req)
|
||||||
assert.NoError(t, req.Error)
|
if req.Error != nil {
|
||||||
|
t.Errorf("expect not error, got %v", req.Error)
|
||||||
|
}
|
||||||
|
|
||||||
// assert response
|
// assert response
|
||||||
assert.NotNil(t, out) // ensure out variable is used
|
if out == nil {
|
||||||
assert.Equal(t, "", *out.Foo)
|
t.Errorf("expect not to be nil")
|
||||||
|
}
|
||||||
|
if e, a := "", *out.Foo; e != a {
|
||||||
|
t.Errorf("expect %v, got %v", e, a)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
1861
vendor/github.com/aws/aws-sdk-go/service/appstream/api.go
generated
vendored
1861
vendor/github.com/aws/aws-sdk-go/service/appstream/api.go
generated
vendored
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user