CSV import between databases

 

(MySQL 8.0.29)
show variables like 'secure_file_priv';

drop table tab1;
create table tab1(col1 numeric(10,2), col2 varchar(1), col3 varchar(10), col4 datetime(3) );

insert into tab1 values(1.01, 'あ', '1234567890', now(3) );
insert into tab1 values(1.02, ',', '', now(3) );
insert into tab1 values(1.03, 'X', NULL, now(3) );

select * from tab1;

-- Create the CSV file for MySQL, Oracle, and PostgreSQL targets

select * into outfile '/tmp/a.csv'
fields terminated by ',' optionally enclosed by '"'
from tab1;
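
For reference, with this test data /tmp/a.csv should look roughly like the following (timestamps are illustrative): NULL is written as \N, and the empty string as "".

cat /tmp/a.csv

1.01,"あ","1234567890",2022-09-11 05:44:54.123
1.02,",","",2022-09-11 05:44:54.124
1.03,"X",\N,2022-09-11 05:44:54.125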

-- Create the CSV file for the SQL Server target
select * into outfile '/tmp/b.csv'
fields terminated by '|'
from tab1;

yum -y install nkf
nkf -s -Lw b.csv > c.csv
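
If nkf is not available, iconv plus a CR append gives a similar Shift_JIS/CRLF conversion (a sketch; -s roughly corresponds to CP932 output and -Lw to CRLF line endings):

iconv -f UTF-8 -t CP932 b.csv | sed 's/$/\r/' > c.csv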

 

-- ①MySQL -> MySQL

show variables like 'secure_file_priv';


drop table tab1;
create table tab1(col1 numeric(10,2), col2 varchar(1), col3 varchar(10), col4 datetime(3) );

load data infile '/tmp/a.csv' into table tab1
fields terminated by ',' optionally enclosed by '"'
;

select * from tab1;

-- ②MySQL -> Oracle

drop table tab1 purge;
create table tab1(col1 number(10,2), col2 varchar2(3), col3 varchar2(10), col4 timestamp(3) );

vim a.ctl

load data
infile '/tmp/a.csv'  "str '\n'"
into table tab1
insert
fields terminated by ','
optionally enclosed by '"'
NULLIF = "\\N"
trailing nullcols
( col1
 ,col2 char(3)
 ,col3 char(10)
 ,col4 timestamp "YYYY/MM/DD HH24:MI:SS.FF3"
)

sqlldr test/test@pdb1 control=a.ctl log=a.log

alter session set nls_timestamp_format = 'YYYY/MM/DD HH24:MI:SS.FF3';
select * from tab1;


-- ③MySQL -> PostgreSQL

drop table tab1;
create table tab1(col1 numeric(10,2), col2 varchar(1), col3 varchar(10), col4 timestamp(3) );

\copy tab1 from '/tmp/a.csv' with (format csv, null '\N');
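
The same \copy can also be run non-interactively from the shell (a sketch; the database name test is an assumption):

psql -d test -c "\copy tab1 from '/tmp/a.csv' with (format csv, null '\N')"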

\pset null '<<null>>'

select * from tab1;


-- ④MySQL -> SQL Server


drop table tab1;
create table tab1(col1 numeric(10,2) , col2 nvarchar(1), col3 varchar(10), col4 datetime2(3) );

bcp test.dbo.tab1 in "C:\c.csv"  -T -c -t"|"


select * from tab1;

-- Fix up empty strings and NULLs (bcp loads empty fields as NULL and \N as a literal string)
update tab1 set col3 = '' where col3 is null;
update tab1 set col3 = null where col3 = '\N';
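
To verify the fix, DATALENGTH distinguishes an empty string (0) from NULL (a sketch; assumes sqlcmd with Windows authentication, matching the -T used by bcp):

sqlcmd -E -Q "select col1, col3, datalength(col3) as len from test.dbo.tab1"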

 

(Oracle 19c)

drop table tab1 purge;
create table tab1(col1 number(10,2), col2 varchar2(3), col3 varchar2(10), col4 timestamp(3) );

alter session set nls_timestamp_format = 'YYYY/MM/DD HH24:MI:SS.FF3';

insert into tab1 values(1.01, 'あ', '1234567890', systimestamp);
insert into tab1 values(1.02, ',', '', systimestamp);
insert into tab1 values(1.03, 'X', NULL, systimestamp);

commit;
select * from tab1;

-- Create the CSV file for MySQL, Oracle, and PostgreSQL targets

sqlplus -S test/test@pdb1 <<EOF
alter session set nls_timestamp_format = 'YYYY/MM/DD HH24:MI:SS.FF3';
set head off
set markup csv on
set trimspool off
set echo off
spool a.csv
select * from tab1;
spool off
EOF

cat a.csv
sed -i '/^$/d' a.csv
cat a.csv


-- Create the CSV file for the SQL Server target

sqlplus -S test/test@pdb1 <<EOF
alter session set nls_timestamp_format = 'YYYY-MM-DD HH24:MI:SS.FF3';
set head off
set markup csv on delimiter | quote off
set trimspool off
set echo off
spool b.csv
select * from tab1;
spool off
EOF

cat b.csv
sed -i '/^$/d' b.csv
cat b.csv

sudo yum install -y nkf

nkf -s -Lw b.csv > c.csv


-- ⑤Oracle -> MySQL

show variables like 'secure_file_priv';

vim /etc/my.cnf
secure_file_priv = ''

systemctl restart mysqld


drop table tab1;
create table tab1(col1 numeric(10,2), col2 varchar(1), col3 varchar(10), col4 datetime(3) );

load data infile '/tmp/a.csv' into table tab1
fields terminated by ',' optionally enclosed by '"'
;

select * from tab1;

NULLs are migrated as empty strings (the CSV spool writes NULL as an empty field, which MySQL loads as '').


-- ⑥Oracle -> Oracle

drop table tab1 purge;
create table tab1(col1 number(10,2), col2 varchar2(3), col3 varchar2(10), col4 timestamp(3) );

vim a.ctl

load data
infile 'a.csv'  "str '\n'"
into table tab1
insert
fields terminated by ','
optionally enclosed by '"'
trailing nullcols
( col1 
 ,col2 char(3)
 ,col3 char(10)
 ,col4 timestamp "YYYY/MM/DD HH24:MI:SS.FF3"
)

sqlldr test/test@pdb1 control=a.ctl log=a.log

alter session set nls_timestamp_format = 'YYYY/MM/DD HH24:MI:SS.FF3';
select * from tab1;

-- ⑦Oracle -> PostgreSQL

drop table tab1;
create table tab1(col1 numeric(10,2), col2 varchar(1), col3 varchar(10), col4 timestamp(3) );

\copy tab1 from 'a.csv' with (format csv, null '');

\pset null '<<null>>'

select * from tab1;

NULLs are migrated as NULLs.


-- ⑧Oracle -> SQL Server


drop table tab1;
create table tab1(col1 numeric(10,2) , col2 nvarchar(1), col3 varchar(10), col4 datetime2(3) );

bcp test.dbo.tab1 in "C:\c.csv"  -T -c -t"|"


select * from tab1;

NULLs are migrated as NULLs.

 

(PostgreSQL 14)

drop table tab1;
create table tab1(col1 numeric(10,2), col2 varchar(1), col3 varchar(10), col4 timestamp(3) );


insert into tab1 values(1.01, 'あ', '1234567890', clock_timestamp() );
insert into tab1 values(1.02, ',', '', clock_timestamp() );
insert into tab1 values(1.03, 'X', NULL, clock_timestamp() );

\pset null '<<null>>'

select * from tab1;

-- Create the CSV file for Oracle and PostgreSQL targets

\copy tab1 to '/tmp/a.csv' with ( format csv,  null '', quote '"' );


-- Create the CSV file for the MySQL target

\copy tab1 to '/tmp/b.csv' with ( format csv,  null '\N', quote '"' );


-- Create the CSV file for the SQL Server target

\copy tab1 to '/tmp/c.csv' with ( format csv,  null '', delimiter '|' );

cat c.csv

sudo dnf -y --enablerepo=powertools install nkf

nkf -s -Lw c.csv > d.csv


-- ⑨PostgreSQL -> MySQL

show variables like 'secure_file_priv';


drop table tab1;
create table tab1(col1 numeric(10,2), col2 varchar(1), col3 varchar(10), col4 datetime(3) );

load data infile '/tmp/b.csv' into table tab1
fields terminated by ',' optionally enclosed by '"'
;

select * from tab1;

-- ⑩PostgreSQL -> Oracle

drop table tab1 purge;
create table tab1(col1 number(10,2), col2 varchar2(3), col3 varchar2(10), col4 timestamp(3) );

vim a.ctl

load data
infile '/tmp/a.csv'  "str '\n'"
into table tab1
insert
fields terminated by ','
optionally enclosed by '"'
trailing nullcols
( col1 
 ,col2 char(3)
 ,col3 char(10)
 ,col4 timestamp "YYYY-MM-DD HH24:MI:SS.FF3"
)

sqlldr test/test@pdb1 control=a.ctl log=a.log

alter session set nls_timestamp_format = 'YYYY/MM/DD HH24:MI:SS.FF3';
select * from tab1;


-- ⑪PostgreSQL -> PostgreSQL


drop table tab1;
create table tab1(col1 numeric(10,2), col2 varchar(1), col3 varchar(10), col4 timestamp(3) );

\copy tab1 from '/tmp/a.csv' with ( format csv,  null '', quote '"' );

\pset null '<<null>>'

select * from tab1;

 

-- ⑫PostgreSQL -> SQL Server

drop table tab1;
create table tab1(col1 numeric(10,2) , col2 nvarchar(1), col3 varchar(10), col4 datetime2(3) );

bcp test.dbo.tab1 in "C:\d.csv"  -T -c -t"|"


select * from tab1;

-- Fix empty strings (bcp loads the quoted empty string "" literally)
update tab1 set col3 = '' where col3 = '""';

 

(SQL Server 2019)

drop table tab1;
create table tab1(col1 numeric(10,2) , col2 nvarchar(1), col3 varchar(10), col4 datetime2(3) );


insert into tab1 values(1.01, 'あ', '1234567890', getdate() );
insert into tab1 values(1.02, ',', '', getdate() );
insert into tab1 values(1.03, 'X', NULL, getdate() );

select * from tab1;


bcp test.dbo.tab1 out "C:\a.csv"  -T -c -t"|"


-- ⑬SQL Server -> MySQL

nkf -w -Lu a.csv > b.csv

In the bcp output, empty strings are written as a \0 (NUL) byte (NULL is written as a completely empty field).

od -c b.csv
grep -Pa "\x00" b.csv

cat b.csv | sed 's/^|/\\N|/g' | sed 's/||/|\\N|/g'  | sed 's/|$/|\\N/g'  > c.csv
sed -i 's/\x00//g' c.csv
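
As a final check, confirm that no NUL bytes remain (grep exits non-zero when nothing matches):

grep -Pa "\x00" c.csv || echo "OK: no NUL bytes remain"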

 

show variables like 'secure_file_priv';

drop table tab1;
create table tab1(col1 numeric(10,2), col2 varchar(1), col3 varchar(10), col4 datetime(3) );

load data infile '/tmp/c.csv' into table tab1
fields terminated by '|' optionally enclosed by '"'
;

select * from tab1;


-- ⑭SQL Server -> Oracle

nkf -w -Lu a.csv > b.csv

Empty strings in the bcp output contain a \0 (NUL) byte.

od -c b.csv
grep -Pa "\x00" b.csv

sed -i 's/\x00/""/g' b.csv


drop table tab1 purge;
create table tab1(col1 number(10,2), col2 varchar2(3), col3 varchar2(10), col4 timestamp(3) );

vim a.ctl

load data
infile '/tmp/b.csv'  "str '\n'"
into table tab1
insert
fields terminated by '|'
optionally enclosed by '"'
trailing nullcols
( col1 
 ,col2 char(3)
 ,col3 char(10)
 ,col4 timestamp "YYYY-MM-DD HH24:MI:SS.FF3"
)

sqlldr test/test@pdb1 control=a.ctl log=a.log

alter session set nls_timestamp_format = 'YYYY/MM/DD HH24:MI:SS.FF3';
select * from tab1;

 

-- ⑮SQL Server -> PostgreSQL

nkf -w -Lu a.csv > b.csv

Empty strings in the bcp output contain a \0 (NUL) byte.

od -c b.csv
grep -Pa "\x00" b.csv

sed -i 's/\x00/""/g' b.csv


drop table tab1;
create table tab1(col1 numeric(10,2), col2 varchar(1), col3 varchar(10), col4 timestamp(3) );

\copy tab1 from '/tmp/b.csv' with ( format csv,  null '', quote '"' , delimiter '|');

\pset null '<<null>>'

select * from tab1;

 


-- ⑯SQL Server -> SQL Server

drop table tab1;
create table tab1(col1 numeric(10,2) , col2 nvarchar(1), col3 varchar(10), col4 datetime2(3) );

bcp test.dbo.tab1 in "C:\a.csv"  -T -c -t"|"

select * from tab1;

 

{SageMaker}Build, train, and deploy a machine learning model with Amazon SageMaker

 

https://dev.classmethod.jp/articles/a-10-minute-tutorial-from-building-to-deploying-a-machine-learning-model-with-amazon-sagemaker/

https://aws.amazon.com/jp/getting-started/hands-on/build-train-deploy-machine-learning-model-sagemaker/

 

-- 1. Install commands and tools

-- 1.1 Install AWS CLI version 2

curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
aws --version


-- 1.2 Install jq
sudo yum -y install jq

-- 1.3 Install Python libraries

pip3 install boto3
pip3 install sagemaker
pip3 install matplotlib
pip3 install IPython


-- 2. Create an S3 bucket

aws s3 mb s3://bucket123

aws s3 ls

-- 3. Create a role

-- 3.1 Create a policy


vim policy01.json

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "s3:GetObject",
                "s3:PutObject",
                "s3:DeleteObject",
                "s3:ListBucket"
            ],
            "Resource": [
                "arn:aws:s3:::*"
            ]
        }
    ]
}


aws iam create-policy \
--policy-name policy01 \
--policy-document file://policy01.json
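
The policy ARN is needed in step 3.3; jq (installed in 1.2) can pull it out instead of copying it by hand (a sketch):

POLICY_ARN=$(aws iam list-policies --scope Local | jq -r '.Policies[] | select(.PolicyName=="policy01") | .Arn')
echo "$POLICY_ARN"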


-- 3.2 Create the role

vim role01.json

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Service": "sagemaker.amazonaws.com"
            },
            "Action": "sts:AssumeRole"
        }
    ]
}

 

aws iam create-role \
--role-name role01 \
--assume-role-policy-document file://role01.json

-- 3.3 Attach the policies to the role

aws iam attach-role-policy --policy-arn arn:aws:iam::999999999999:policy/policy01 --role-name role01
aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AmazonSageMakerFullAccess --role-name role01
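
Optionally, confirm that both policies are attached:

aws iam list-attached-role-policies --role-name role01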

 


-- 4. Prepare the data
-- 4.1 Import libraries

python3


# import libraries
import boto3, re, sys, math, json, os, sagemaker, urllib.request
from sagemaker import get_execution_role
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import Image
from IPython.display import display
from time import gmtime, strftime
# from sagemaker.predictor import csv_serializer  # not needed here; CSVSerializer (imported in step 6.2) is used instead

# Define IAM role
#role = get_execution_role()
role = 'role01'

prefix = 'sagemaker/DEMO-xgboost-dm'
my_region = boto3.session.Session().region_name # set the region of the instance

# this line automatically looks for the XGBoost image URI and builds an XGBoost container.
xgboost_container = sagemaker.image_uris.retrieve("xgboost", my_region, "latest")

print("Success - the MySageMakerInstance is in the " + my_region + " region. You will use the " + xgboost_container + " container for your SageMaker endpoint.")

-- 4.2 Create the S3 bucket (already created in step 2, so the boto3 code is left commented out)


bucket_name = 'bucket123' # <--- CHANGE THIS VARIABLE TO A UNIQUE NAME FOR YOUR BUCKET

#s3 = boto3.resource('s3')
#try:
#    if  my_region == 'us-east-1':
#      s3.create_bucket(Bucket=bucket_name)
#    else: 
#      s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={ 'LocationConstraint': my_region })
#    print('S3 bucket created successfully')
#except Exception as e:
#    print('S3 error: ',e)

-- 4.3 Download the test data


try:
  urllib.request.urlretrieve ("https://d1.awsstatic.com/tmt/build-train-deploy-machine-learning-model-sagemaker/bank_clean.27f01fbbdf43271788427f3682996ae29ceca05d.csv", "bank_clean.csv")
  print('Success: downloaded bank_clean.csv.')
except Exception as e:
  print('Data load error: ',e)

try:
  model_data = pd.read_csv('./bank_clean.csv',index_col=0)
  print('Success: Data loaded into dataframe.')
except Exception as e:
  print('Data load error: ',e)

-- 4.4 Split into training and test data

train_data, test_data = np.split(model_data.sample(frac=1, random_state=1729), [int(0.7 * len(model_data))])
print(train_data.shape, test_data.shape)

-- 5. Train the ML model

-- 5.1 Upload the training data to S3

pd.concat([train_data['y_yes'], train_data.drop(['y_no', 'y_yes'], axis=1)], axis=1).to_csv('train.csv', index=False, header=False)
boto3.Session().resource('s3').Bucket(bucket_name).Object(os.path.join(prefix, 'train/train.csv')).upload_file('train.csv')
s3_input_train = sagemaker.inputs.TrainingInput(s3_data='s3://{}/{}/train'.format(bucket_name, prefix), content_type='csv')

-- 5.2 Create an instance of the XGBoost model (estimator)

sess = sagemaker.Session()
xgb = sagemaker.estimator.Estimator(xgboost_container,role, instance_count=1, instance_type='ml.m4.xlarge',output_path='s3://{}/{}/output'.format(bucket_name, prefix),sagemaker_session=sess)
xgb.set_hyperparameters(max_depth=5,eta=0.2,gamma=4,min_child_weight=6,subsample=0.8,silent=0,objective='binary:logistic',num_round=100)

-- 5.3 Start the training job

xgb.fit({'train': s3_input_train})

 

-- 6. Deploy the model
-- 6.1 Deploy the model (create a SageMaker endpoint)

xgb_predictor = xgb.deploy(initial_instance_count=1,instance_type='ml.m4.xlarge')

-- 6.2 Run predictions on the test data

from sagemaker.serializers import CSVSerializer

test_data_array = test_data.drop(['y_no', 'y_yes'], axis=1).values #load the data into an array
xgb_predictor.serializer = CSVSerializer() # set the serializer type
predictions = xgb_predictor.predict(test_data_array).decode('utf-8') # predict!
predictions_array = np.fromstring(predictions[1:], sep=',') # and turn the prediction into an array
print(predictions_array.shape)

-- 7. Evaluate model performance


cm = pd.crosstab(index=test_data['y_yes'], columns=np.round(predictions_array), rownames=['Observed'], colnames=['Predicted'])
tn = cm.iloc[0,0]; fn = cm.iloc[1,0]; tp = cm.iloc[1,1]; fp = cm.iloc[0,1]; p = (tp+tn)/(tp+tn+fp+fn)*100
print("\n{0:<20}{1:<4.1f}%\n".format("Overall Classification Rate: ", p))
print("{0:<15}{1:<15}{2:>8}".format("Predicted", "No Purchase", "Purchase"))
print("Observed")
print("{0:<15}{1:<2.0f}% ({2:<}){3:>6.0f}% ({4:<})".format("No Purchase", tn/(tn+fn)*100,tn, fp/(tp+fp)*100, fp))
print("{0:<16}{1:<1.0f}% ({2:<}){3:>7.0f}% ({4:<}) \n".format("Purchase", fn/(tn+fn)*100,fn, tp/(tp+fp)*100, tp))


-- 8. Clean up


-- Delete the SageMaker endpoint
xgb_predictor.delete_endpoint(delete_endpoint_config=True)

 

Ctrl-D  (exit the python3 session)


-- Delete the role
aws iam list-roles | grep role01

aws iam detach-role-policy \
--role-name role01 \
--policy-arn arn:aws:iam::999999999999:policy/policy01

aws iam detach-role-policy \
--role-name role01 \
--policy-arn arn:aws:iam::aws:policy/AmazonSageMakerFullAccess

aws iam delete-role --role-name role01

-- Delete the policy
aws iam list-policies | grep policy01

aws iam delete-policy \
--policy-arn arn:aws:iam::999999999999:policy/policy01

 

-- Delete the S3 bucket

aws s3 ls

aws s3 rb s3://bucket123 --force


-- Delete the model

aws sagemaker list-models

aws sagemaker delete-model \
--model-name xgboost-2022-09-11-05-44-54-123

 

 

{DataSync}Using the AWS Command Line Interface with AWS DataSync


https://docs.aws.amazon.com/ja_jp/datasync/latest/userguide/using-cli.html

AWS DataSync is an online data transfer service that simplifies, automates, and accelerates moving data between storage systems and services.


S3 in the Tokyo region ---> S3 in the Osaka region

 

-- 1. Install commands and tools

-- 1.1 Install AWS CLI version 2

curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
aws --version


-- 1.2 Install jq
sudo yum -y install jq

 


-- 2. Create the source S3 bucket

aws s3 mb s3://bucket123src

aws s3 ls


-- 3. Create the destination S3 bucket
export AWS_DEFAULT_REGION=ap-northeast-3

aws s3 mb s3://bucket123dst
aws s3 ls

export AWS_DEFAULT_REGION=ap-northeast-1
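
Toggling AWS_DEFAULT_REGION back and forth can be avoided by passing --region per command (a sketch):

aws s3 mb s3://bucket123dst --region ap-northeast-3
aws s3 ls --region ap-northeast-3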


-- 4. Create a role (for the source S3 bucket)

-- 4.1 Create a policy


vim policy01.json

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Action": [
                "s3:GetBucketLocation",
                "s3:ListBucket",
                "s3:ListBucketMultipartUploads"
            ],
            "Effect": "Allow",
            "Resource": "arn:aws:s3:::bucket123src"
        },
        {
            "Action": [
                "s3:AbortMultipartUpload",
                "s3:DeleteObject",
                "s3:GetObject",
                "s3:ListMultipartUploadParts",
                "s3:PutObjectTagging",
                "s3:GetObjectTagging",
                "s3:PutObject"
            ],
            "Effect": "Allow",
            "Resource": "arn:aws:s3:::bucket123src/*"
        }
    ]
}


aws iam create-policy \
--policy-name policy01 \
--policy-document file://policy01.json


-- 4.2 Create the role

vim role01.json

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "datasync.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}

 

aws iam create-role \
--role-name role01 \
--assume-role-policy-document file://role01.json

-- 4.3 Attach the policy to the role

aws iam attach-role-policy --policy-arn arn:aws:iam::999999999999:policy/policy01 --role-name role01


-- 5. Create a role (for the destination S3 bucket)

-- 5.1 Create a policy


vim policy02.json

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Action": [
                "s3:GetBucketLocation",
                "s3:ListBucket",
                "s3:ListBucketMultipartUploads"
            ],
            "Effect": "Allow",
            "Resource": "arn:aws:s3:::bucket123dst"
        },
        {
            "Action": [
                "s3:AbortMultipartUpload",
                "s3:DeleteObject",
                "s3:GetObject",
                "s3:ListMultipartUploadParts",
                "s3:PutObjectTagging",
                "s3:GetObjectTagging",
                "s3:PutObject"
            ],
            "Effect": "Allow",
            "Resource": "arn:aws:s3:::bucket123dst/*"
        }
    ]
}


aws iam create-policy \
--policy-name policy02 \
--policy-document file://policy02.json


-- 5.2 Create the role

vim role02.json

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "datasync.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}

 

aws iam create-role \
--role-name role02 \
--assume-role-policy-document file://role02.json

-- 5.3 Attach the policy to the role

aws iam attach-role-policy --policy-arn arn:aws:iam::999999999999:policy/policy02 --role-name role02

 

 


-- 6. Create the source S3 location

aws datasync create-location-s3 \
--s3-bucket-arn 'arn:aws:s3:::bucket123src' \
--s3-storage-class 'STANDARD' \
--s3-config 'BucketAccessRoleArn=arn:aws:iam::999999999999:role/role01'

aws datasync list-locations
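
list-locations returns JSON, so jq (installed in 1.2) can extract just the ARNs for use in the following commands (a sketch):

aws datasync list-locations | jq -r '.Locations[].LocationArn'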

aws datasync describe-location-s3 \
--location-arn arn:aws:datasync:ap-northeast-1:999999999999:location/loc-11111111111111111


-- 7. Create the destination S3 location
export AWS_DEFAULT_REGION=ap-northeast-3

aws datasync create-location-s3 \
--s3-bucket-arn 'arn:aws:s3:::bucket123dst' \
--s3-storage-class 'STANDARD' \
--s3-config 'BucketAccessRoleArn=arn:aws:iam::999999999999:role/role02'

aws datasync list-locations

aws datasync describe-location-s3 \
--location-arn arn:aws:datasync:ap-northeast-3:999999999999:location/loc-22222222222222222

export AWS_DEFAULT_REGION=ap-northeast-1

 

-- 8. Create the task

-- 8.1 Create a log group

aws logs create-log-group --log-group-name lg01

aws logs describe-log-groups --log-group-name-prefix lg01

-- 8.2 Set a log group resource policy

vim policy03.json

{
    "Statement": [
        {
            "Sid": "DataSyncLogsToCloudWatchLogs",
            "Effect": "Allow",
            "Action": [
                "logs:PutLogEvents",
                "logs:CreateLogStream"
            ],
            "Principal": {
                "Service": "datasync.amazonaws.com"
            },
            "Condition": {
                "ArnLike": {
                    "aws:SourceArn": [
                        "arn:aws:datasync:ap-northeast-1:999999999999:task/*"
                    ]
                },
                "StringEquals": {
                    "aws:SourceAccount": "999999999999"
                }
            },
            "Resource": "arn:aws:logs:ap-northeast-1:999999999999:log-group:*:*"
        }
    ],
    "Version": "2012-10-17"
}

aws logs put-resource-policy \
--policy-name policy03 \
--policy-document file://policy03.json

aws logs describe-resource-policies

 

-- 8.3 Create the task

aws datasync create-task \
--source-location-arn 'arn:aws:datasync:ap-northeast-1:999999999999:location/loc-11111111111111111' \
--destination-location-arn 'arn:aws:datasync:ap-northeast-3:999999999999:location/loc-22222222222222222' \
--cloud-watch-log-group-arn 'arn:aws:logs:ap-northeast-1:999999999999:log-group:lg01:*' \
--name task01 \
--options '{
  "VerifyMode": "NONE",
  "OverwriteMode": "ALWAYS",
  "Atime": "BEST_EFFORT",
  "Mtime": "PRESERVE",
  "PreserveDeletedFiles": "REMOVE",
  "PreserveDevices": "NONE",
  "TaskQueueing": "ENABLED",
  "LogLevel": "TRANSFER",
  "TransferMode": "CHANGED",
  "ObjectTags": "PRESERVE"
}'

 

 


aws datasync list-tasks

aws datasync describe-task \
--task-arn arn:aws:datasync:ap-northeast-1:999999999999:task/task-33333333333333333

-- 9. Create test files

dd if=/dev/urandom of=20M.dmp bs=1M count=20


aws s3api put-object --bucket bucket123src --key test01.dmp --body 20M.dmp
aws s3api put-object --bucket bucket123src --key test02.dmp --body 20M.dmp
aws s3api put-object --bucket bucket123src --key test03.dmp --body 20M.dmp


aws s3 ls s3://bucket123src --recursive
aws s3 ls s3://bucket123dst --recursive

 

-- 10. Start the task

aws datasync start-task-execution \
--task-arn arn:aws:datasync:ap-northeast-1:999999999999:task/task-33333333333333333

aws datasync list-task-executions

 

-- 11. Monitor the task

aws datasync describe-task-execution \
--task-execution-arn arn:aws:datasync:ap-northeast-1:999999999999:task/task-33333333333333333/execution/exec-44444444444444444
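
A simple polling loop can watch the status until the execution finishes (a sketch; uses the same placeholder execution ARN as above):

while true
do
    aws datasync describe-task-execution \
    --task-execution-arn arn:aws:datasync:ap-northeast-1:999999999999:task/task-33333333333333333/execution/exec-44444444444444444 \
    | jq -r '.Status'
    sleep 10
done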


aws s3 ls s3://bucket123src --recursive
aws s3 ls s3://bucket123dst --recursive

 

-- 12. Check behavior when source files change

dd if=/dev/urandom of=1M.dmp bs=1M count=1

aws s3api delete-object --bucket bucket123src --key test01.dmp
aws s3api put-object --bucket bucket123src --key test04.dmp --body 20M.dmp
aws s3api put-object --bucket bucket123src --key test03.dmp --body 1M.dmp

aws s3 ls s3://bucket123src --recursive
aws s3 ls s3://bucket123dst --recursive


aws datasync start-task-execution \
--task-arn arn:aws:datasync:ap-northeast-1:999999999999:task/task-33333333333333333

aws datasync list-task-executions

aws datasync describe-task-execution \
--task-execution-arn arn:aws:datasync:ap-northeast-1:999999999999:task/task-33333333333333333/execution/exec-55555555555555555


aws s3 ls s3://bucket123src --recursive
aws s3 ls s3://bucket123dst --recursive

 


-- 13. Clean up

 

-- Delete the task

aws datasync list-tasks

aws datasync delete-task \
--task-arn arn:aws:datasync:ap-northeast-1:999999999999:task/task-33333333333333333


-- Delete the log group resource policy
aws logs describe-resource-policies

aws logs delete-resource-policy \
--policy-name policy03


-- Delete the log group
aws logs describe-log-groups --log-group-name-prefix lg01
aws logs delete-log-group --log-group-name lg01

 

 

-- Delete the destination S3 location
export AWS_DEFAULT_REGION=ap-northeast-3

aws datasync list-locations

aws datasync delete-location \
--location-arn arn:aws:datasync:ap-northeast-3:999999999999:location/loc-22222222222222222

export AWS_DEFAULT_REGION=ap-northeast-1

-- Delete the source S3 location

aws datasync list-locations

aws datasync delete-location \
--location-arn arn:aws:datasync:ap-northeast-1:999999999999:location/loc-11111111111111111


-- Delete the role (for the destination S3 bucket)
aws iam list-roles | grep role02

aws iam detach-role-policy \
--role-name role02 \
--policy-arn arn:aws:iam::999999999999:policy/policy02

aws iam delete-role --role-name role02


-- Delete the policy (for the destination S3 bucket)
aws iam list-policies | grep policy02

aws iam delete-policy \
--policy-arn arn:aws:iam::999999999999:policy/policy02

 


-- Delete the role (for the source S3 bucket)
aws iam list-roles | grep role01

aws iam detach-role-policy \
--role-name role01 \
--policy-arn arn:aws:iam::999999999999:policy/policy01

aws iam delete-role --role-name role01

-- Delete the policy (for the source S3 bucket)
aws iam list-policies | grep policy01

aws iam delete-policy \
--policy-arn arn:aws:iam::999999999999:policy/policy01

 

-- Delete the destination S3 bucket
export AWS_DEFAULT_REGION=ap-northeast-3

aws s3 ls

aws s3 rb s3://bucket123dst --force

export AWS_DEFAULT_REGION=ap-northeast-1


-- Delete the source S3 bucket
aws s3 ls

aws s3 rb s3://bucket123src --force

 

{Kinesis}Getting started with Kinesis Video Streams


https://docs.aws.amazon.com/ja_jp/kinesisvideostreams/latest/dg/getting-started.html

https://qiita.com/yh1224/items/596fa5d14175746c73d8


Prerequisites
Working instance OS = Amazon Linux 2
Working instance type = t3.micro

 

-- 1. Install commands and tools

-- 1.1 Install AWS CLI version 2

curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
aws --version


-- 1.2 Install jq
sudo yum -y install jq

-- 1.3 Install docker

sudo amazon-linux-extras install docker -y


sudo systemctl start docker
sudo systemctl enable docker
sudo systemctl status docker
sudo usermod -a -G docker ec2-user

 


-- 2. Create a video stream

aws kinesisvideo create-stream \
--stream-name vs01 \
--data-retention-in-hours 12

★ Setting the retention period to 0 causes an error at playback time.


aws kinesisvideo list-streams

aws kinesisvideo describe-stream \
--stream-name vs01

-- 3. Send data to the Kinesis video stream

Upload sample.mp4 to the current directory.

aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin 546150905175.dkr.ecr.us-west-2.amazonaws.com

docker pull 546150905175.dkr.ecr.us-west-2.amazonaws.com/kinesis-video-producer-sdk-cpp-amazon-linux:latest

docker run -it -v $(pwd):/work --network="host" \
546150905175.dkr.ecr.us-west-2.amazonaws.com/kinesis-video-producer-sdk-cpp-amazon-linux \
/bin/bash

 


vim a.sh


export LD_LIBRARY_PATH=/opt/awssdk/amazon-kinesis-video-streams-producer-sdk-cpp/kinesis-video-native-build/downloads/local/lib:$LD_LIBRARY_PATH
export PATH=/opt/awssdk/amazon-kinesis-video-streams-producer-sdk-cpp/kinesis-video-native-build/downloads/local/bin:$PATH
export GST_PLUGIN_PATH=/opt/awssdk/amazon-kinesis-video-streams-producer-sdk-cpp/kinesis-video-native-build/downloads/local/lib:$GST_PLUGIN_PATH


export AWS_ACCESS_KEY_ID=XXXXXXXXXXXXXXXXXXXX
export AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
export AWS_DEFAULT_REGION=ap-northeast-1

 

STREAM_NAME=vs01
MOVIE_FILE=/work/sample.mp4

while [ 1 ]
do
    gst-launch-1.0 \
        filesrc location="${MOVIE_FILE}" \
        ! qtdemux \
        ! queue \
        ! h264parse \
        ! video/x-h264,stream-format=avc,alignment=au \
        ! kvssink stream-name="${STREAM_NAME}" storage-size=512
done

 

chmod +x a.sh
./a.sh

 


Check the video from the console.


★ An error occurred:
StartTimestamp must be before the current time for LIVE_REPLAY PlaybackMode.

→ Pressing the back button made the video display.
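
Playback can also be checked from the CLI by requesting an HLS streaming session URL (a sketch; the stream must be receiving data at the time):

EP=$(aws kinesisvideo get-data-endpoint \
--stream-name vs01 \
--api-name GET_HLS_STREAMING_SESSION_URL | jq -r '.DataEndpoint')

aws kinesis-video-archived-media get-hls-streaming-session-url \
--endpoint-url "$EP" \
--stream-name vs01 \
--playback-mode LIVE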

 

-- 4. Clean up

-- Delete the video stream

aws kinesisvideo list-streams

aws kinesisvideo delete-stream \
--stream-arn arn:aws:kinesisvideo:ap-northeast-1:999999999999:stream/vs01/1111111111111

 

{Proton}Getting started with AWS Proton using the AWS CLI


https://docs.aws.amazon.com/ja_jp/proton/latest/userguide/ag-getting-started-cli.html

https://dev.classmethod.jp/articles/aws-proton-general-availability/


AWS Proton is a fully managed service that lets platform engineers scale their processes by providing
infrastructure templates for self-service deployments and updates.

 

-- 1. Install commands and tools

-- 1.1 Install AWS CLI version 2

curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
aws --version

 

-- 1.2 Install jq
sudo yum -y install jq

-- 1.3 Install git
sudo yum -y install git

 


-- 2. Create a role

※ The manual distinguishes the two roles below, but since their contents are identical, a single role is created without distinguishing them:
AWS Proton service role
AWS Proton pipeline service role

-- 2.1 Create a policy


vim policy01.json

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "cloudformation:CancelUpdateStack",
                "cloudformation:ContinueUpdateRollback",
                "cloudformation:CreateChangeSet",
                "cloudformation:CreateStack",
                "cloudformation:DeleteChangeSet",
                "cloudformation:DeleteStack",
                "cloudformation:DescribeChangeSet",
                "cloudformation:DescribeStackDriftDetectionStatus",
                "cloudformation:DescribeStackEvents",
                "cloudformation:DescribeStackResourceDrifts",
                "cloudformation:DescribeStacks",
                "cloudformation:DetectStackResourceDrift",
                "cloudformation:ExecuteChangeSet",
                "cloudformation:ListChangeSets",
                "cloudformation:ListStackResources",
                "cloudformation:UpdateStack"
            ],
            "Resource": "arn:aws:cloudformation:*:999999999999:stack/AWSProton-*"
        },
        {
            "Effect": "Allow",
            "NotAction": [
                "organizations:*",
                "account:*"
            ],
            "Resource": "*",
            "Condition": {
                "ForAnyValue:StringEquals": {
                    "aws:CalledVia": ["cloudformation.amazonaws.com"]
                }
            }
        },
        {
            "Effect": "Allow",
            "Action": [
                "organizations:DescribeOrganization",
                "account:ListRegions"
            ],
            "Resource": "*",
            "Condition": {
                "ForAnyValue:StringEquals": {
                    "aws:CalledVia": ["cloudformation.amazonaws.com"]
                }
            }
        }
    ]
}

aws iam create-policy \
--policy-name policy01 \
--policy-document file://policy01.json


-- 2.2 Create the role

vim role01.json

{
    "Version": "2012-10-17",
    "Statement": {
        "Sid": "ServiceTrustRelationshipWithConfusedDeputyPrevention",
        "Effect": "Allow",
        "Principal": {"Service": "proton.amazonaws.com"},
        "Action": "sts:AssumeRole"
    }
}


★ Including the Condition block shown in the manual causes an error.


aws iam create-role \
--role-name role01 \
--assume-role-policy-document file://role01.json

-- 2.3 Attach the policy to the role

aws iam attach-role-policy --policy-arn arn:aws:iam::999999999999:policy/policy01 --role-name role01

 


-- 3. Create a CodeStar connection

aws codestar-connections list-connections

aws codestar-connections create-connection \
--provider-type GitHub \
--connection-name con01
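
The connection ARN appears in many later steps; jq (installed in 1.2) can capture it instead of copying the placeholder by hand (a sketch):

CON_ARN=$(aws codestar-connections list-connections | jq -r '.Connections[] | select(.ConnectionName=="con01") | .ConnectionArn')
echo "$CON_ARN"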

 

 

-- 4. Register an environment template

-- 4.1 Fork the following repository

https://github.com/aws-samples/aws-proton-cloudformation-sample-templates/

 


-- 4.2 Create a repository link

aws proton create-repository \
--name username/aws-proton-cloudformation-sample-templates \
--connection-arn "arn:aws:codestar-connections:ap-northeast-1:999999999999:connection/11111111-1111-1111-1111-111111111111" \
--provider "GITHUB"

aws proton list-repositories

aws proton get-repository \
--name username/aws-proton-cloudformation-sample-templates \
--provider "GITHUB"

 

In the console, the connection status shows as "Pending".
→ Complete the setup as instructed via "Update pending connection".


-- 4.3 Configure account settings


aws proton update-account-settings \
--pipeline-provisioning-repository '{
            "branch": "main",
            "name": "username/aws-proton-cloudformation-sample-templates",
            "provider": "GITHUB"
        }' \
--pipeline-service-role-arn arn:aws:iam::999999999999:role/role01


aws proton get-account-settings

 


-- 4.4 Create the environment template


aws proton create-environment-template \
--name fargate-env \
--display-name fargate-env \
--description "fargate-env"

aws proton list-environment-templates


-- 4.5 Create the environment template sync config

aws proton create-template-sync-config \
--template-name fargate-env \
--template-type "ENVIRONMENT" \
--repository-name username/aws-proton-cloudformation-sample-templates \
--repository-provider "GITHUB" \
--branch "main" \
--subdirectory "environment-templates/fargate-env"

aws proton get-template-sync-config \
--template-name fargate-env \
--template-type "ENVIRONMENT"


-- 4.6 Create an environment template version

※ If no version gets created, committing README.md at the repository root on GitHub appears to trigger the sync.

aws proton wait environment-template-version-registered \
--template-name fargate-env \
--major-version "1" \
--minor-version "0"

 

aws proton update-environment-template-version \
--template-name fargate-env \
--major-version "1" \
--minor-version "0" \
--status "PUBLISHED"

aws proton list-environment-template-versions \
--template-name fargate-env

aws proton get-environment-template-version \
--template-name fargate-env \
--major-version "1" \
--minor-version "0"

 

-- 5. Register a service template

-- 5.1 Create the service template

aws proton create-service-template \
--name load-balanced-fargate-svc \
--display-name load-balanced-fargate-svc \
--description "load-balanced-fargate-svc"

aws proton list-service-templates


-- 5.2 Create the service template sync config

aws proton create-template-sync-config \
--template-name load-balanced-fargate-svc \
--template-type "SERVICE" \
--repository-name username/aws-proton-cloudformation-sample-templates \
--repository-provider "GITHUB" \
--branch "main" \
--subdirectory "service-templates/load-balanced-fargate-svc"

 

aws proton get-template-sync-config \
--template-name load-balanced-fargate-svc \
--template-type "SERVICE"


-- 5.3 Create a service template version


aws proton wait service-template-version-registered \
--template-name load-balanced-fargate-svc \
--major-version "1" \
--minor-version "0"

aws proton update-service-template-version \
--template-name load-balanced-fargate-svc \
--major-version "1" \
--minor-version "0" \
--status "PUBLISHED"


aws proton list-service-template-versions \
--template-name load-balanced-fargate-svc

aws proton get-service-template-version \
--template-name load-balanced-fargate-svc \
--major-version "1" \
--minor-version "0"

 

 

-- 6. Deploy an environment

-- 6.1 Get the sample spec file from the registered environment template

environment-templates/fargate-env/spec/spec.yaml


vim spec.yaml

proton: EnvironmentSpec
spec:
  vpc_cidr: 10.0.0.0/16
  public_subnet_one_cidr: 10.0.0.0/18
  public_subnet_two_cidr: 10.0.64.0/18
  private_subnet_one_cidr: 10.0.128.0/18
  private_subnet_two_cidr: 10.0.192.0/18

 

-- 6.2 Create the environment

aws proton create-environment \
--name env01 \
--template-name "fargate-env" \
--template-major-version 1 \
--proton-service-role-arn arn:aws:iam::999999999999:role/role01 \
--spec file://spec.yaml \
--component-role-arn  arn:aws:iam::999999999999:role/role01


aws proton wait environment-deployed --name env01


aws proton list-environments

aws proton get-environment --name env01

 


-- 7. Deploy a service [application developer]

-- 7.1 Get the sample spec file from the registered service template

service-templates/load-balanced-fargate-svc/spec/spec.yaml

vim spec.yaml

proton: ServiceSpec
pipeline:
  service_dir: ecs-static-website
  dockerfile: Dockerfile
  unit_test_command: echo 'add your unit test command here'
instances:
  - name: load-balanced-fargate-svc-prod
    environment: env01


★ The "environment" value in the spec must match the name of the environment created in step 6.2.


-- 7.2 Fork the following repository

https://github.com/aws-samples/aws-proton-sample-services/

-- 7.3 Create the service

aws proton create-service \
--name svc01 \
--repository-connection-arn "arn:aws:codestar-connections:ap-northeast-1:999999999999:connection/11111111-1111-1111-1111-111111111111" \
--repository-id username/aws-proton-sample-services \
--branch-name "main" \
--template-major-version 1 \
--template-name load-balanced-fargate-svc \
--spec file://spec.yaml

aws proton wait service-created --name svc01

aws proton list-services


-- 7.4 Verify the deployment

aws proton list-service-instances

aws proton list-service-instance-outputs \
--service-name svc01 \
--service-instance-name load-balanced-fargate-svc-prod

 

 

-- 8. Clean up

-- Delete the service
aws proton list-services

aws proton delete-service \
--name svc01


-- Delete the environment
aws proton list-environments

aws proton delete-environment \
--name env01

-- Confirm that the CloudFormation stacks have been deleted


-- Delete the service template version

aws proton list-service-template-versions \
--template-name load-balanced-fargate-svc

aws proton delete-service-template-version \
--template-name load-balanced-fargate-svc \
--major-version "1" \
--minor-version "0"

 

-- Delete the service template sync config

aws proton get-template-sync-config \
--template-name load-balanced-fargate-svc \
--template-type "SERVICE"

aws proton delete-template-sync-config \
--template-name load-balanced-fargate-svc \
--template-type "SERVICE"

-- Delete the service template

aws proton list-service-templates

aws proton delete-service-template \
--name load-balanced-fargate-svc

 

-- Delete the environment template version

aws proton list-environment-template-versions \
--template-name fargate-env

aws proton delete-environment-template-version \
--template-name fargate-env \
--major-version "1" \
--minor-version "0"


-- Delete the environment template sync config

aws proton get-template-sync-config \
--template-name fargate-env \
--template-type "ENVIRONMENT"

aws proton delete-template-sync-config \
--template-name fargate-env \
--template-type "ENVIRONMENT"

 

-- Delete the environment template

aws proton list-environment-templates


aws proton delete-environment-template \
--name fargate-env

 

-- Delete the repository link

aws proton list-repositories

aws proton delete-repository \
--name username/aws-proton-cloudformation-sample-templates \
--provider "GITHUB"

 

-- Delete the CodeStar connection

aws codestar-connections list-connections

aws codestar-connections delete-connection \
--connection-arn arn:aws:codestar-connections:ap-northeast-1:999999999999:connection/11111111-1111-1111-1111-111111111111


-- Delete the role
aws iam list-roles | grep role01

aws iam detach-role-policy \
--role-name role01 \
--policy-arn arn:aws:iam::999999999999:policy/policy01

aws iam delete-role --role-name role01

 

-- Delete the policy
aws iam list-policies | grep policy01

aws iam delete-policy \
--policy-arn arn:aws:iam::999999999999:policy/policy01

 

-- Delete the S3 buckets


-- Delete the forked repositories