diff --git a/samples/dynamo-db-export-as-csv/readme.md b/samples/dynamo-db-export-as-csv/readme.md
index bdc44b6..88299ae 100644
--- a/samples/dynamo-db-export-as-csv/readme.md
+++ b/samples/dynamo-db-export-as-csv/readme.md
@@ -5,6 +5,6 @@ Steps to run the pipeline using the cli.
 1) aws datapipeline create-pipeline --name ddb-backup --unique-id some-unique-id
 => Returns a pipeline-id
 
-2) aws datapipeline put-pipeline-definition --pipeline-id --pipeline-definition file:///home/user/ddb-to-csv.json
+2) aws datapipeline put-pipeline-definition --pipeline-id <pipeline-id> --pipeline-definition file:///home/user/ddb-to-csv.json
 
-3) aws datapipeline activate-pipeline --pipeline-id
+3) aws datapipeline activate-pipeline --pipeline-id <pipeline-id>
diff --git a/samples/dynamo-db-export/DynamoDB-export.json b/samples/dynamo-db-export/DynamoDB-export.json
index 6624eb5..b6a53bb 100644
--- a/samples/dynamo-db-export/DynamoDB-export.json
+++ b/samples/dynamo-db-export/DynamoDB-export.json
@@ -40,9 +40,9 @@
       "masterInstanceType": "m1.medium",
       "coreInstanceType": "#{myInstanceType}",
       "coreInstanceCount": "#{myInstanceCount}",
-      "region" : "#{myRegion}",
-      "terminateAfter" : "12 hours",
-      "keyPair" : "ramsug-test-desktop"
+      "region" : "#{myRegion}",
+      "terminateAfter" : "12 hours",
+      "keyPair" : "#{myKeyPair}"
     }
   ],
   "parameters": [
@@ -71,15 +71,21 @@
     },
     {
       "description": "Instance Count",
-      "watermark" : " (IOPS / 300) for m1.medium.(IOPS / 1500) for m3.xlarge",
+      "watermark" : " (IOPS / 300) for m1.medium.(IOPS / 1500) for m3.xlarge",
       "id": "myInstanceCount",
       "type": "Integer"
     },
-    {
-      "description" : "Region",
-      "watermark" : "Region of DynamoDB Table/EMR cluster",
+    {
+      "description" : "Region",
+      "watermark" : "Region of DynamoDB Table/EMR cluster",
       "id" : "myRegion",
       "type" : "String"
-    }
+    },
+    {
+      "description" : "KeyPair",
+      "watermark" : "KeyPair for EC2 instances",
+      "id" : "myKeyPair",
+      "type" : "String"
+    }
   ]
 }
diff --git a/samples/dynamo-db-export/example-parameters.json b/samples/dynamo-db-export/example-parameters.json
index a32388b..65934fd 100644
--- a/samples/dynamo-db-export/example-parameters.json
+++ b/samples/dynamo-db-export/example-parameters.json
@@ -6,6 +6,7 @@
     "myDDBTableName" : "dynamo-table-name",
     "myInstanceType" : "m1.medium",
     "myInstanceCount" : "1",
-    "myRegion" : "eu-west-1"
+    "myRegion" : "eu-west-1",
+    "myKeyPair" : "key-for-ddb-backup"
   }
 }
diff --git a/samples/dynamo-db-export/readme.md b/samples/dynamo-db-export/readme.md
index e0b6e9c..157ed76 100644
--- a/samples/dynamo-db-export/readme.md
+++ b/samples/dynamo-db-export/readme.md
@@ -2,9 +2,11 @@ This pipeline exports data from a Dynamo DB Table to a S3 location using an EMR
 Steps to run the pipeline using the cli.
 
-1) aws datapipeline create-pipeline --name ddb-backup --unique-id some-unique-id
+1) aws ec2 create-key-pair --key-name key-for-ddb-backup
+
+2) aws datapipeline create-pipeline --name ddb-backup --unique-id some-unique-id
 => Returns a pipeline-id
 
-2) aws datapipeline put-pipeline-definition --pipeline-id --pipeline-definition file:///home/user/DynamoDB-export.json --parameter-values-uri file:///home/user/example-parameters.json
+3) aws datapipeline put-pipeline-definition --pipeline-id <pipeline-id> --pipeline-definition file:///home/user/DynamoDB-export.json --parameter-values-uri file:///home/user/example-parameters.json
 
-3) aws datapipeline activate-pipeline --pipeline-id
+4) aws datapipeline activate-pipeline --pipeline-id <pipeline-id>
diff --git a/samples/dynamo-db-to-redshift/readme.md b/samples/dynamo-db-to-redshift/readme.md
index 79f5e2e..8d775ed 100644
--- a/samples/dynamo-db-to-redshift/readme.md
+++ b/samples/dynamo-db-to-redshift/readme.md
@@ -4,6 +4,6 @@ Steps to run the pipeline using the cli.
 1) aws datapipeline create-pipeline --name ddb-backup --unique-id some-unique-id
 => Returns a pipeline-id
 
-2) aws datapipeline put-pipeline-definition --pipeline-id --pipeline-definition file:///home/user/dynamo-db-to-redshift.json
+2) aws datapipeline put-pipeline-definition --pipeline-id <pipeline-id> --pipeline-definition file:///home/user/dynamo-db-to-redshift.json
 
-3) aws datapipeline activate-pipeline --pipeline-id
+3) aws datapipeline activate-pipeline --pipeline-id <pipeline-id>
diff --git a/samples/rds-to-rds-copy/readme.md b/samples/rds-to-rds-copy/readme.md
index b336cda..d236c7a 100644
--- a/samples/rds-to-rds-copy/readme.md
+++ b/samples/rds-to-rds-copy/readme.md
@@ -7,6 +7,6 @@ Steps to run the pipeline using the cli.
 1) aws datapipeline create-pipeline --name ddb-backup --unique-id some-unique-id
 => Returns a pipeline-id
 
-2) aws datapipeline put-pipeline-definition --pipeline-id --pipeline-definition file:///home/user/rds-to-rds-copy.json
+2) aws datapipeline put-pipeline-definition --pipeline-id <pipeline-id> --pipeline-definition file:///home/user/rds-to-rds-copy.json
 
-3) aws datapipeline activate-pipeline --pipeline-id
+3) aws datapipeline activate-pipeline --pipeline-id <pipeline-id>
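A note on the steps above: each readme still leaves <pipeline-id> as a manual copy-paste between commands. As a minimal end-to-end sketch for the dynamo-db-export sample, assuming the AWS CLI is configured for the target account and region (names and file paths reuse the readme's examples), the id can be captured with the CLI's standard --query/--output options instead:

    # Create the key pair referenced by myKeyPair and save the private key.
    aws ec2 create-key-pair --key-name key-for-ddb-backup \
        --query KeyMaterial --output text > key-for-ddb-backup.pem
    chmod 400 key-for-ddb-backup.pem

    # Create the pipeline and capture the returned pipeline-id.
    PIPELINE_ID=$(aws datapipeline create-pipeline \
        --name ddb-backup --unique-id some-unique-id \
        --query pipelineId --output text)

    # Upload the pipeline definition together with the parameter values.
    aws datapipeline put-pipeline-definition \
        --pipeline-id "$PIPELINE_ID" \
        --pipeline-definition file:///home/user/DynamoDB-export.json \
        --parameter-values-uri file:///home/user/example-parameters.json

    # Activate the pipeline.
    aws datapipeline activate-pipeline --pipeline-id "$PIPELINE_ID"

The same capture pattern applies to the other sample readmes; only the definition file passed to put-pipeline-definition changes.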