{Alibaba ACK} Create an ACK Serverless cluster by using Alibaba Cloud CLI

https://www.alibabacloud.com/help/ja/ack/serverless-kubernetes/user-guide/create-an-ask-cluster-by-using-alibaba-cloud-cli

 


前提: 
1. コンソールより下記実施済み (実施不要の可能性あり)

1.1 下記権限付与

Cloud Resource Access Authorization
To modify the permissions of a RAM role, log on to the RAM console and go to the RAM Roles page. Make sure that you grant the Alibaba Cloud service the required permissions.

Authorize CS to use the following roles to access your cloud resources.
AliyunCSManagedVKRole
AliyunCSDefaultRole
AliyunCSManagedKubernetesRole
AliyunCSManagedLogRole
AliyunCSManagedCmsRole
AliyunCSManagedCsiRole
AliyunCSKubernetesAuditRole
AliyunCSManagedNetworkRole
AliyunCSManagedArmsRole
AliyunCSServerlessKubernetesRole


1.2 下記権限付与
Below are the roles created by the system for its own use. After authorization, the service is granted the appropriate access to your cloud resources.
AliyunOOSLifecycleHook4CSRole

 

1.3 下記アクティベート済み
プロフェッショナルマネージド Kubernetes クラスターサービスおよび標準マネージド Kubernetes クラスターサービスの両方

Both the professional managed Kubernetes cluster service and the standard managed Kubernetes cluster service will be activated


1.4 下記権限付与
以下は、システムで使用するために作成されたロールです。許可後、サービスにはクラウドリソースに対する適切な権限が付与されます。
AliyunCISDefaultRole

1.5 下記アクティベート済み
Managed Service for Prometheus (従量課金) - 観測可能なデータに基づく課金

1.6 下記アクティベート済み
Apsara File Storage NAS - 従量課金


1.7 下記権限付与
ARMS が権限付与を要求しています。
以下の権限情報を確認し、[権限付与] または [キャンセル] をクリックして操作が完了するまでお待ちください。

AliyunServiceRoleForARMS


2. kubectlインストール済み

 


-- 1. Create an ACK Serverless cluster


# Build the CreateCluster request body.
# NOTE(review): the CreateCluster API reference names the zone field "zone_id";
# "zoneid" is kept as-is because it matches the linked help-page sample —
# confirm against the current API documentation before relying on it.
cat <<-'EOF' > create.json

{
    "cluster_type": "Ask",
    "name": "cluster11",
    "region_id": "ap-northeast-1",
    "zoneid": "ap-northeast-1a",
    "nat_gateway": true,
    "private_zone": false,
    "tags": [
        {"key": "env", "value": "test"}
    ]
}

EOF

cat create.json

# Create the cluster (asynchronous; poll with DescribeClusters / GET below).
aliyun cs  POST /clusters --header "Content-Type=application/json" --body "$(cat create.json)" 

aliyun cs DescribeClusters 

# Replace the path segment with the cluster_id returned by the create call.
aliyun cs GET /clusters/111111111111111111111111111111111

ls -l $HOME/.kube/config

KUBECONFIG=$HOME/.kube/config
# Back up any existing kubeconfig before overwriting it — the redirect below
# clobbers the file with the new cluster's credentials.
[ -f "${KUBECONFIG}" ] && cp -p "${KUBECONFIG}" "${KUBECONFIG}.bak"
aliyun cs GET /k8s/111111111111111111111111111111111/user_config | jq -r '.config' > ${KUBECONFIG}

cat ${KUBECONFIG}


-- 2. Test the ACK Serverless cluster


# Serverless clusters have no managed worker nodes; this may list only
# virtual nodes — TODO confirm expected output for this cluster type.
kubectl get nodes

# Smoke test: run a one-shot pod, check its logs, then clean it up.
kubectl run hello-world --image hello-world --restart=Never
kubectl get pod
kubectl logs pod/hello-world
kubectl delete pod/hello-world
kubectl get pod


-- 3. Delete and release resources


# Replace the path segment with the actual cluster_id from the create step.
aliyun cs DELETE /clusters/111111111111111111111111111111111

# Confirm the cluster is gone (deletion is asynchronous).
aliyun cs DescribeClusters 

 

 

scpのProxyCommand

 

https://qiita.com/ponsuke0531/items/4721ac64f82a8e191580
https://qiita.com/S-T/items/18af2bfcc4e5a72202da

https://webmasters-journal.com/application/winscp-ssh-tunnel/
https://docs.oracle.com/ja/learn/generate_ssh_keys/index.html#use-putty

 

前提: 
subnet01,vm01,sl01,rt01,パブリックサブネット
subnet02,vm02,sl02,rt02,プライベートサブネット,サービス・ゲートウェイのみ

subnet01~subnet02間の全通信許可


-- 1. 環境準備 (OCIを使用する)

cat <<-'EOF' > variables.tf

locals {
  tenancy_ocid = "ocid1.tenancy.oc1..111111111111111111111111111111111111111111111111111111111111"
# MYIP
  myip = "192.0.2.1/32"

}

variable "compartment_name" {
  description = "compartment_name"
  type = string
  default = "cmp01"
}

EOF

 

cat <<-'EOF' > main.tf

terraform {
  required_version = ">= 1.0.0, < 2.0.0"
  required_providers {
    oci = {
       source  = "hashicorp/oci"
       version = "= 5.23.0"
    }
  }
}

provider "oci" {
  tenancy_ocid = local.tenancy_ocid
  user_ocid = "ocid1.user.oc1..111111111111111111111111111111111111111111111111111111111111" 
  private_key_path = "~/.oci/oci_api_key.pem"
  fingerprint = "45:ed:22:e6:cc:fd:63:97:12:9d:62:7a:90:12:65:7a"
  region = "us-ashburn-1"
}


resource "oci_identity_compartment" "cmp01" {
    # Required
    compartment_id = local.tenancy_ocid
    description = var.compartment_name
    name = var.compartment_name
    
    enable_delete = true
}

resource "oci_core_vcn" "vcn01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id

    #Optional
    cidr_block = "10.0.0.0/16"
    display_name = "vcn01"
    dns_label = "vcn01"

}


resource "oci_core_internet_gateway" "igw01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    enabled = true
    display_name = "igw01"
}

 

 

resource "oci_core_route_table" "rt01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "rt01"
    route_rules {
        #Required
        network_entity_id = oci_core_internet_gateway.igw01.id
        #Optional
        destination = "0.0.0.0/0"
    }
    
}


resource "oci_core_route_table" "rt02" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "rt02"
    
}

resource "oci_core_security_list" "sl01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "sl01"
    
    egress_security_rules {
        destination = "0.0.0.0/0"
        protocol = "all"
        stateless = false
    }
    
    ingress_security_rules {
        protocol = "6"
        source = local.myip
        stateless = false
        tcp_options {
            max = 22
            min = 22
        }
    }
    ingress_security_rules {
        protocol = "all"
        source = "10.0.2.0/24"
        stateless = false
    }
}


resource "oci_core_security_list" "sl02" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "sl02"
    
    egress_security_rules {
        destination = "0.0.0.0/0"
        protocol = "all"
        stateless = false
    }
    

    ingress_security_rules {
        protocol = "all"
        source = "10.0.1.0/24"
        stateless = false
    }

}

 

resource "oci_core_subnet" "subnet01" {
    #Required
    cidr_block = "10.0.1.0/24"
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional

    display_name = "subnet01"
    dns_label = "subnet01"
    route_table_id = oci_core_route_table.rt01.id
    security_list_ids = [oci_core_security_list.sl01.id]
}

resource "oci_core_subnet" "subnet02" {
    #Required
    cidr_block = "10.0.2.0/24"
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional

    display_name = "subnet02"
    dns_label = "subnet02"
    route_table_id = oci_core_route_table.rt02.id
    security_list_ids = [oci_core_security_list.sl02.id]
}

 


EOF

 


# インスタンス(always free)
# Canonical-Ubuntu-22.04-aarch64-2023.10.13-0
# VM.Standard.A1.Flex


cat <<-'EOF' > instance.tf


resource "oci_core_instance" "vm01" {
    #Required
    availability_domain = "OEIw:US-ASHBURN-AD-1"
    compartment_id = oci_identity_compartment.cmp01.id
    shape = "VM.Standard.A1.Flex"

    shape_config {

        memory_in_gbs = 6
        ocpus = 1
    }
        
    #Optional

    create_vnic_details {
        #Optional
        assign_public_ip = true
        subnet_id = oci_core_subnet.subnet01.id
    }

    display_name = "vm01"

    metadata = {
        ssh_authorized_keys = file("~/.ssh/id_rsa.pub")
    } 

    source_details {
        #Required
        source_id = "ocid1.image.oc1.iad.aaaaaaaamphrdqdgcjfdmo5fzql4m6ewcuxkbepjbobgky254svsk3ueppfa"
        source_type = "image"

        #Optional
        boot_volume_size_in_gbs = 50
    }
    preserve_boot_volume = false
}

resource "oci_core_instance" "vm02" {
    #Required
    availability_domain = "OEIw:US-ASHBURN-AD-1"
    compartment_id = oci_identity_compartment.cmp01.id
    shape = "VM.Standard.A1.Flex"

    shape_config {

        memory_in_gbs = 6
        ocpus = 1
    }
    
    #Optional

    create_vnic_details {
        #Optional
        assign_public_ip = false
        subnet_id = oci_core_subnet.subnet02.id
    }

    display_name = "vm02"

    metadata = {
        ssh_authorized_keys = file("~/.ssh/id_rsa.pub")
    } 

    source_details {
        #Required
        source_id = "ocid1.image.oc1.iad.aaaaaaaamphrdqdgcjfdmo5fzql4m6ewcuxkbepjbobgky254svsk3ueppfa"
        source_type = "image"

        #Optional
        boot_volume_size_in_gbs = 50
    }
    preserve_boot_volume = false
}

EOF

 


cat <<-'EOF' > outputs.tf

output "cmp01_id" {
  value = oci_identity_compartment.cmp01.id
  description = "cmp01.id"
}

output "vcn01_id" {
  value = oci_core_vcn.vcn01.id
  description = "vcn01.id"
}

output "igw01_id" {
  value = oci_core_internet_gateway.igw01.id
  description = "igw01.id"
}


output "rt01_id" {
  value = oci_core_route_table.rt01.id
  description = "rt01.id"
}
output "rt02_id" {
  value = oci_core_route_table.rt02.id
  description = "rt02.id"
}


output "sl01_id" {
  value = oci_core_security_list.sl01.id
  description = "sl01.id"
}

output "sl02_id" {
  value = oci_core_security_list.sl02.id
  description = "sl02.id"
}

output "subnet01_id" {
  value = oci_core_subnet.subnet01.id
  description = "subnet01.id"
}
output "subnet02_id" {
  value = oci_core_subnet.subnet02.id
  description = "subnet02.id"
}

output "vm01_id" {
  value = oci_core_instance.vm01.id
  description = "vm01.id"
}

output "vm02_id" {
  value = oci_core_instance.vm02.id
  description = "vm02.id"
}


output "vm01_public_ip" {
  value = oci_core_instance.vm01.public_ip
  description = "vm01.public_ip"
}

output "vm01_private_ip" {
  value = oci_core_instance.vm01.private_ip
  description = "vm01.private_ip"
}

output "vm02_private_ip" {
  value = oci_core_instance.vm02.private_ip
  description = "vm02.private_ip"
}

 

EOF

 


terraform init
terraform fmt
terraform -version

terraform plan

terraform apply -auto-approve

 

# terraform destroy -auto-approve

 


-- 2. 動作確認


-- 2.1 多段接続する場合

scp -i {接続先鍵} -r -o ProxyCommand='ssh -i {踏み台鍵} {踏み台ユーザ}@{踏み台アドレス} -W %h:%p' {ローカルファイル} {接続先ユーザ}@{接続先アドレス}:{接続先パス}

ssh -i {接続先鍵}    -o ProxyCommand='ssh -i {踏み台鍵} {踏み台ユーザ}@{踏み台アドレス} -W %h:%p'                    {接続先ユーザ}@{接続先アドレス}

 

scp -i $HOME/.ssh/id_rsa -r -o ProxyCommand='ssh -i $HOME/.ssh/id_rsa ubuntu@192.0.2.2 -W %h:%p' oci-cli-3.39.0-Oracle-Linux-9-Offline.zip ubuntu@10.0.2.89:/home/ubuntu

ssh -i $HOME/.ssh/id_rsa    -o ProxyCommand='ssh -i $HOME/.ssh/id_rsa ubuntu@192.0.2.2 -W %h:%p' ubuntu@10.0.2.89

 

-- 2.2 順番に接続する場合

scp -i $HOME/.ssh/id_rsa $HOME/.ssh/id_rsa ubuntu@192.0.2.2:/home/ubuntu
ssh -i $HOME/.ssh/id_rsa ubuntu@192.0.2.2
ssh -i $HOME/id_rsa ubuntu@10.0.2.89

 

 


WinSCPを使用する場合、ppkファイルを使用する必要がある

ノート: PuTTYは、OpenSSH互換形式でキーを保存しません。
したがって、OpenSSHを使用してPuTTYで作成された公開キー・ファイルをLinux/Unixシステムにアップロードすると、キーは正しく読み取られません。
ただし、PuTTYgenアプリケーションから直接コピーすると、キー情報自体は、貼り付けされたフィールドに対して正しく動作し、
その情報を使用して適切なOpenSSH互換キーを作成します。
たとえば、OCIでインスタンスを作成する場合は、PuTTYからSSHキーを貼り付け、正常に動作します。

https://docs.oracle.com/ja/learn/generate_ssh_keys/index.html#use-putty

 

 

 

{OCI 仮想クラウド・ネットワーク} アップグレードされたDRGを介したリモートVCNピアリング

 

https://docs.oracle.com/ja-jp/iaas/Content/Network/Tasks/scenario_e.htm

https://blogs.techvan.co.jp/oci/2021/05/25/%E3%83%AA%E3%83%A2%E3%83%BC%E3%83%88vcn%E3%83%94%E3%82%A2%E3%83%AA%E3%83%B3%E3%82%B0%E3%81%A7%E6%9D%B1%E4%BA%AC%EF%BD%9E%E5%A4%A7%E9%98%AA%E3%83%AA%E3%83%BC%E3%82%B8%E3%83%A7%E3%83%B3%E9%96%93/

https://qiita.com/dingtianhongjie/items/485c8abf7b3af9529f69

https://docs.oracle.com/en-us/iaas/tools/oci-cli/3.39.1/oci_cli_docs/cmdref/network/remote-peering-connection.html

https://docs.public.oneportal.content.oci.oraclecloud.com/en-us/iaas/Content/Network/Tasks/drg-rpc-connect.htm


確認事項: ashburnリージョンでインターネットを使用せずにtokyoリージョンのオブジェクトAPIを実行する

 

前提: 
-- ashburn
subnet01,vm01,sl01,rt01,パブリックサブネット
subnet02,vm02,sl02,rt02,プライベートサブネット,サービス・ゲートウェイのみ

subnet01~subnet02間の全通信許可
subnet02~subnet12間の全通信許可

-- tokyo
subnet11,vm11,sl11,rt11,パブリックサブネット
subnet12,vm12,sl12,rt12,プライベートサブネット,サービス・ゲートウェイのみ

subnet11~subnet12間の全通信許可
subnet02~subnet12間の全通信許可


DRGはrt02やrt12に紐づける

drg01
drg0101 <== VCNアタッチメント名
drg0102 <== RPCアタッチメント名

drg11
drg1101 <== VCNアタッチメント名
drg1102 <== RPCアタッチメント名


-- 1. VCN作成 (ashburn)

mkdir ashburn
cd ashburn


cat <<-'EOF' > variables.tf

locals {
  tenancy_ocid = "ocid1.tenancy.oc1..111111111111111111111111111111111111111111111111111111111111"
# MYIP
  myip = "192.0.2.1/32"

}

variable "compartment_name" {
  description = "compartment_name"
  type = string
  default = "cmp20240421"
}

EOF

 

cat <<-'EOF' > main.tf

terraform {
  required_version = ">= 1.0.0, < 2.0.0"
  required_providers {
    oci = {
       source  = "hashicorp/oci"
       version = "= 5.23.0"
    }
  }
}

provider "oci" {
  tenancy_ocid = local.tenancy_ocid
  user_ocid = "ocid1.user.oc1..111111111111111111111111111111111111111111111111111111111111" 
  private_key_path = "~/.oci/oci_api_key.pem"
  fingerprint = "45:ed:22:e6:cc:fd:63:97:12:9d:62:7a:90:12:65:7a"
  region = "us-ashburn-1"
}


resource "oci_identity_compartment" "cmp01" {
    # Required
    compartment_id = local.tenancy_ocid
    description = var.compartment_name
    name = var.compartment_name
    
    enable_delete = true
}

resource "oci_core_vcn" "vcn01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id

    #Optional
    cidr_block = "10.0.0.0/16"
    display_name = "vcn01"
    dns_label = "vcn01"

}


resource "oci_core_internet_gateway" "igw01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    enabled = true
    display_name = "igw01"
}


data "oci_core_services" "svc01" {
  filter {
    name   = "name"
    values = ["All .* Services In Oracle Services Network"]
    regex  = true
  }
}


resource "oci_core_service_gateway" "sgw01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    services {
        #Required
        service_id = data.oci_core_services.svc01.services.0.id
    }
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "sgw01"
}

 

resource "oci_core_route_table" "rt01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "rt01"
    route_rules {
        #Required
        network_entity_id = oci_core_internet_gateway.igw01.id
        #Optional
        destination = "0.0.0.0/0"
    }
    
}


resource "oci_core_route_table" "rt02" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "rt02"
    route_rules {
        #Required
        network_entity_id = oci_core_service_gateway.sgw01.id
        #Optional
        destination = "all-iad-services-in-oracle-services-network"
        destination_type = "SERVICE_CIDR_BLOCK"
    }
    
}

resource "oci_core_security_list" "sl01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "sl01"
    
    egress_security_rules {
        destination = "0.0.0.0/0"
        protocol = "all"
        stateless = false
    }
    
    ingress_security_rules {
        protocol = "6"
        source = local.myip
        stateless = false
        tcp_options {
            max = 22
            min = 22
        }
    }
    ingress_security_rules {
        protocol = "all"
        source = "10.0.2.0/24"
        stateless = false
    }
}


resource "oci_core_security_list" "sl02" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "sl02"
    
    egress_security_rules {
        destination = "0.0.0.0/0"
        protocol = "all"
        stateless = false
    }
    

    ingress_security_rules {
        protocol = "all"
        source = "10.0.1.0/24"
        stateless = false
    }
    ingress_security_rules {
        protocol = "all"
        source = "10.1.2.0/24"
        stateless = false
    }
}

 

resource "oci_core_subnet" "subnet01" {
    #Required
    cidr_block = "10.0.1.0/24"
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional

    display_name = "subnet01"
    dns_label = "subnet01"
    route_table_id = oci_core_route_table.rt01.id
    security_list_ids = [oci_core_security_list.sl01.id]
}

resource "oci_core_subnet" "subnet02" {
    #Required
    cidr_block = "10.0.2.0/24"
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional

    display_name = "subnet02"
    dns_label = "subnet02"
    route_table_id = oci_core_route_table.rt02.id
    security_list_ids = [oci_core_security_list.sl02.id]
}

 


EOF

 

 

cat <<-'EOF' > instance.tf


resource "oci_core_instance" "vm01" {
    #Required
    availability_domain = "OEIw:US-ASHBURN-AD-1"
    compartment_id = oci_identity_compartment.cmp01.id
    shape = "VM.Standard.E2.1"
    
    #Optional

    create_vnic_details {
        #Optional
        assign_public_ip = true
        subnet_id = oci_core_subnet.subnet01.id
    }

    display_name = "vm01"

    metadata = {
        ssh_authorized_keys = file("~/.ssh/id_rsa.pub")
    } 

    source_details {
        #Required
        source_id = "ocid1.image.oc1.iad.111111111111111111111111111111111111111111111111111111111111"
        source_type = "image"

        #Optional
        boot_volume_size_in_gbs = 50
    }
    preserve_boot_volume = false
}

resource "oci_core_instance" "vm02" {
    #Required
    availability_domain = "OEIw:US-ASHBURN-AD-1"
    compartment_id = oci_identity_compartment.cmp01.id
    shape = "VM.Standard.E2.1"
    
    #Optional

    create_vnic_details {
        #Optional
        assign_public_ip = false
        subnet_id = oci_core_subnet.subnet02.id
    }

    display_name = "vm02"

    metadata = {
        ssh_authorized_keys = file("~/.ssh/id_rsa.pub")
    } 

    source_details {
        #Required
        source_id = "ocid1.image.oc1.iad.111111111111111111111111111111111111111111111111111111111111"
        source_type = "image"

        #Optional
        boot_volume_size_in_gbs = 50
    }
    preserve_boot_volume = false
}

EOF


cat <<-'EOF' > bucket.tf


data "oci_objectstorage_namespace" "ns01" {
    compartment_id = local.tenancy_ocid
}

resource "oci_objectstorage_bucket" "bucket01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    name = "bucket01"
    namespace = data.oci_objectstorage_namespace.ns01.namespace

    #Optional
    access_type = "NoPublicAccess"
    auto_tiering = "Disabled"
    object_events_enabled = false
    storage_tier = "Standard"
    versioning = "Disabled"
    
}


EOF

 

cat <<-'EOF' > outputs.tf

output "cmp01_id" {
  value = oci_identity_compartment.cmp01.id
  description = "cmp01.id"
}

output "vcn01_id" {
  value = oci_core_vcn.vcn01.id
  description = "vcn01.id"
}

output "igw01_id" {
  value = oci_core_internet_gateway.igw01.id
  description = "igw01.id"
}

output "sgw01_id" {
  value = oci_core_service_gateway.sgw01.id
  description = "sgw01.id"
}

output "rt01_id" {
  value = oci_core_route_table.rt01.id
  description = "rt01.id"
}
output "rt02_id" {
  value = oci_core_route_table.rt02.id
  description = "rt02.id"
}


output "sl01_id" {
  value = oci_core_security_list.sl01.id
  description = "sl01.id"
}

output "sl02_id" {
  value = oci_core_security_list.sl02.id
  description = "sl02.id"
}

output "subnet01_id" {
  value = oci_core_subnet.subnet01.id
  description = "subnet01.id"
}
output "subnet02_id" {
  value = oci_core_subnet.subnet02.id
  description = "subnet02.id"
}

output "vm01_id" {
  value = oci_core_instance.vm01.id
  description = "vm01.id"
}

output "vm02_id" {
  value = oci_core_instance.vm02.id
  description = "vm02.id"
}


output "vm01_public_ip" {
  value = oci_core_instance.vm01.public_ip
  description = "vm01.public_ip"
}

output "vm01_private_ip" {
  value = oci_core_instance.vm01.private_ip
  description = "vm01.private_ip"
}

output "vm02_private_ip" {
  value = oci_core_instance.vm02.private_ip
  description = "vm02.private_ip"
}

 

EOF

 


terraform init
terraform fmt
terraform -version

terraform plan

terraform apply -auto-approve

 

 

-- 2. OCIインストール (ashburn)

-- 2.1 vm01からvm02にログイン

scp -i $HOME/.ssh/id_rsa $HOME/.ssh/id_rsa opc@192.0.2.2:/home/opc
scp -i $HOME/.ssh/id_rsa  oci-cli-3.39.0-Oracle-Linux-9-Offline.zip opc@192.0.2.2:/home/opc

ssh -i $HOME/.ssh/id_rsa opc@192.0.2.2

scp -i $HOME/id_rsa oci-cli-3.39.0-Oracle-Linux-9-Offline.zip opc@10.0.2.162:/home/opc

ssh -i $HOME/id_rsa opc@10.0.2.162


-- 2.2 vm02からインターネットアクセスできないことを確認

curl --connect-timeout 10 https://www.oracle.com/

-- 2.3 OCIインストール (vm02での作業)

unzip oci-cli-3.39.0-Oracle-Linux-9-Offline.zip

cd oci-cli-installation

bash install.sh --offline-install
exec -l $SHELL
oci -v

 

 


-- 3. VCN作成 (tokyo)

mkdir tokyo
cd tokyo


cat <<-'EOF' > variables.tf

locals {
  tenancy_ocid = "ocid1.tenancy.oc1..111111111111111111111111111111111111111111111111111111111111"
# MYIP
  myip = "192.0.2.1/32"

}

#variable "compartment_name" {
#  description = "compartment_name"
#  type = string
#  default = "cmp21240419"
#}

variable "compartment_id" {
  description = "compartment_id"
  type = string
  default = "ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111"
}

EOF

 

cat <<-'EOF' > main.tf

terraform {
  required_version = ">= 1.0.0, < 2.0.0"
  required_providers {
    oci = {
       source  = "hashicorp/oci"
       version = "= 5.23.0"
    }
  }
}

provider "oci" {
  tenancy_ocid = local.tenancy_ocid
  user_ocid = "ocid1.user.oc1..111111111111111111111111111111111111111111111111111111111111" 
  private_key_path = "~/.oci/oci_api_key.pem"
  fingerprint = "45:ed:22:e6:cc:fd:63:97:12:9d:62:7a:90:12:65:7a"
  region = "ap-tokyo-1"
}


#resource "oci_identity_compartment" "cmp11" {
#    # Required
#    compartment_id = local.tenancy_ocid
#    description = var.compartment_name
#    name = var.compartment_name
#    
#    enable_delete = true
#}

 

resource "oci_core_vcn" "vcn11" {
    #Required
    compartment_id = var.compartment_id

    #Optional
    cidr_block = "10.1.0.0/16"
    display_name = "vcn11"
    dns_label = "vcn11"

}


resource "oci_core_internet_gateway" "igw11" {
    #Required
    compartment_id = var.compartment_id
    vcn_id = oci_core_vcn.vcn11.id

    #Optional
    enabled = true
    display_name = "igw11"
}


data "oci_core_services" "svc11" {
  filter {
    name   = "name"
    values = ["All .* Services In Oracle Services Network"]
    regex  = true
  }
}


resource "oci_core_service_gateway" "sgw11" {
    #Required
    compartment_id = var.compartment_id
    services {
        #Required
        service_id = data.oci_core_services.svc11.services.0.id
    }
    vcn_id = oci_core_vcn.vcn11.id

    #Optional
    display_name = "sgw11"
}

 

resource "oci_core_route_table" "rt11" {
    #Required
    compartment_id = var.compartment_id
    vcn_id = oci_core_vcn.vcn11.id

    #Optional
    display_name = "rt11"
    route_rules {
        #Required
        network_entity_id = oci_core_internet_gateway.igw11.id
        #Optional
        destination = "0.0.0.0/0"
    }
    
}


resource "oci_core_route_table" "rt12" {
    #Required
    compartment_id = var.compartment_id
    vcn_id = oci_core_vcn.vcn11.id

    #Optional
    display_name = "rt12"
    route_rules {
        #Required
        network_entity_id = oci_core_service_gateway.sgw11.id
        #Optional
        destination = "all-nrt-services-in-oracle-services-network"
        destination_type = "SERVICE_CIDR_BLOCK"
    }
    
}

resource "oci_core_security_list" "sl11" {
    #Required
    compartment_id = var.compartment_id
    vcn_id = oci_core_vcn.vcn11.id

    #Optional
    display_name = "sl11"
    
    egress_security_rules {
        destination = "0.0.0.0/0"
        protocol = "all"
        stateless = false
    }
    
    ingress_security_rules {
        protocol = "6"
        source = local.myip
        stateless = false
        tcp_options {
            max = 22
            min = 22
        }
    }
    ingress_security_rules {
        protocol = "all"
        source = "10.1.2.0/24"
        stateless = false
    }
}


resource "oci_core_security_list" "sl12" {
    #Required
    compartment_id = var.compartment_id
    vcn_id = oci_core_vcn.vcn11.id

    #Optional
    display_name = "sl12"
    
    egress_security_rules {
        destination = "0.0.0.0/0"
        protocol = "all"
        stateless = false
    }
    

    ingress_security_rules {
        protocol = "all"
        source = "10.1.1.0/24"
        stateless = false
    }
    ingress_security_rules {
        protocol = "all"
        source = "10.0.2.0/24"
        stateless = false
    }
}

 

resource "oci_core_subnet" "subnet11" {
    #Required
    cidr_block = "10.1.1.0/24"
    compartment_id = var.compartment_id
    vcn_id = oci_core_vcn.vcn11.id

    #Optional

    display_name = "subnet11"
    dns_label = "subnet11"
    route_table_id = oci_core_route_table.rt11.id
    security_list_ids = [oci_core_security_list.sl11.id]
}

resource "oci_core_subnet" "subnet12" {
    #Required
    cidr_block = "10.1.2.0/24"
    compartment_id = var.compartment_id
    vcn_id = oci_core_vcn.vcn11.id

    #Optional

    display_name = "subnet12"
    dns_label = "subnet12"
    route_table_id = oci_core_route_table.rt12.id
    security_list_ids = [oci_core_security_list.sl12.id]
}

 


EOF

 

 

cat <<-'EOF' > instance.tf


resource "oci_core_instance" "vm11" {
    #Required
    availability_domain = "OEIw:AP-TOKYO-1-AD-1"
    compartment_id = var.compartment_id
    shape = "VM.Standard.E2.1"
    
    #Optional

    create_vnic_details {
        #Optional
        assign_public_ip = true
        subnet_id = oci_core_subnet.subnet11.id
    }

    display_name = "vm11"

    metadata = {
        ssh_authorized_keys = file("~/.ssh/id_rsa.pub")
    } 

    source_details {
        #Required
        source_id = "ocid1.image.oc1.ap-tokyo-1.111111111111111111111111111111111111111111111111111111111111"
        source_type = "image"

        #Optional
        boot_volume_size_in_gbs = 50
    }
    preserve_boot_volume = false
}

resource "oci_core_instance" "vm12" {
    #Required
    availability_domain = "OEIw:AP-TOKYO-1-AD-1"
    compartment_id = var.compartment_id
    shape = "VM.Standard.E2.1"
    
    #Optional

    create_vnic_details {
        #Optional
        assign_public_ip = false
        subnet_id = oci_core_subnet.subnet12.id
    }

    display_name = "vm12"

    metadata = {
        ssh_authorized_keys = file("~/.ssh/id_rsa.pub")
    } 

    source_details {
        #Required
        source_id = "ocid1.image.oc1.ap-tokyo-1.111111111111111111111111111111111111111111111111111111111111"
        source_type = "image"

        #Optional
        boot_volume_size_in_gbs = 50
    }
    preserve_boot_volume = false
}

EOF


cat <<-'EOF' > bucket.tf


data "oci_objectstorage_namespace" "ns11" {
    compartment_id = local.tenancy_ocid
}

resource "oci_objectstorage_bucket" "bucket11" {
    #Required
    compartment_id = var.compartment_id
    name = "bucket11"
    namespace = data.oci_objectstorage_namespace.ns11.namespace

    #Optional
    access_type = "NoPublicAccess"
    auto_tiering = "Disabled"
    object_events_enabled = false
    storage_tier = "Standard"
    versioning = "Disabled"
    
}


EOF

 

cat <<-'EOF' > outputs.tf

#output "cmp11_id" {
#  value = oci_identity_compartment.cmp11.id
#  description = "cmp11.id"
#}

output "vcn11_id" {
  value = oci_core_vcn.vcn11.id
  description = "vcn11.id"
}

output "igw11_id" {
  value = oci_core_internet_gateway.igw11.id
  description = "igw11.id"
}

output "sgw11_id" {
  value = oci_core_service_gateway.sgw11.id
  description = "sgw11.id"
}

output "rt11_id" {
  value = oci_core_route_table.rt11.id
  description = "rt11.id"
}
output "rt12_id" {
  value = oci_core_route_table.rt12.id
  description = "rt12.id"
}


output "sl11_id" {
  value = oci_core_security_list.sl11.id
  description = "sl11.id"
}

output "sl12_id" {
  value = oci_core_security_list.sl12.id
  description = "sl12.id"
}

output "subnet11_id" {
  value = oci_core_subnet.subnet11.id
  description = "subnet11.id"
}
output "subnet12_id" {
  value = oci_core_subnet.subnet12.id
  description = "subnet12.id"
}

output "vm11_id" {
  value = oci_core_instance.vm11.id
  description = "vm11.id"
}

output "vm12_id" {
  value = oci_core_instance.vm12.id
  description = "vm12.id"
}


output "vm11_public_ip" {
  value = oci_core_instance.vm11.public_ip
  description = "vm11.public_ip"
}

output "vm11_private_ip" {
  value = oci_core_instance.vm11.private_ip
  description = "vm11.private_ip"
}

output "vm12_private_ip" {
  value = oci_core_instance.vm12.private_ip
  description = "vm12.private_ip"
}

 

EOF

 


terraform init
terraform fmt
terraform -version

terraform plan

terraform apply -auto-approve

 


-- 4. OCIインストール (tokyo)

-- 4.1 vm11からvm12にログイン

scp -i $HOME/.ssh/id_rsa $HOME/.ssh/id_rsa opc@192.0.2.3:/home/opc
scp -i $HOME/.ssh/id_rsa  oci-cli-3.39.0-Oracle-Linux-9-Offline.zip opc@192.0.2.3:/home/opc

ssh -i $HOME/.ssh/id_rsa opc@192.0.2.3

scp -i $HOME/id_rsa oci-cli-3.39.0-Oracle-Linux-9-Offline.zip opc@10.1.2.10:/home/opc

ssh -i $HOME/id_rsa opc@10.1.2.10


-- 4.2 vm12からインターネットアクセスできないことを確認

curl --connect-timeout 10 https://www.oracle.com/

-- 4.3 OCIインストール (vm12での作業)

unzip oci-cli-3.39.0-Oracle-Linux-9-Offline.zip

cd oci-cli-installation

bash install.sh --offline-install
exec -l $SHELL
oci -v

 

 

 

-- 5. DRG作成

oci network drg list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region us-ashburn-1 


oci network drg create \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region us-ashburn-1 \
--display-name drg01 

 

oci network drg list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region ap-tokyo-1 


oci network drg create \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region ap-tokyo-1 \
--display-name drg11 

 

-- 6. DRGアタッチメント(VCN側)作成

oci network drg-attachment create  --generate-full-command-json-input

 

oci network drg-attachment list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region us-ashburn-1 

oci network drg-attachment create \
--region us-ashburn-1 \
--drg-id ocid1.drg.oc1.iad.111111111111111111111111111111111111111111111111111111111111 \
--display-name drg0101 \
--network-details '{
        "id": "ocid1.vcn.oc1.iad.111111111111111111111111111111111111111111111111111111111111",
        "route-table-id": null,
        "type": "VCN",
        "vcn-route-type": "SUBNET_CIDRS"
      }' 

 


oci network drg-attachment list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region ap-tokyo-1 

oci network drg-attachment create \
--region ap-tokyo-1 \
--drg-id ocid1.drg.oc1.ap-tokyo-1.111111111111111111111111111111111111111111111111111111111111 \
--display-name drg1101 \
--network-details '{
        "id": "ocid1.vcn.oc1.ap-tokyo-1.111111111111111111111111111111111111111111111111111111111111",
        "route-table-id": null,
        "type": "VCN",
        "vcn-route-type": "SUBNET_CIDRS"
      }' 

 

 


-- 7. DRGアタッチメント(RPC側)作成

oci network remote-peering-connection list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region us-ashburn-1 

oci network remote-peering-connection create \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region us-ashburn-1 \
--drg-id ocid1.drg.oc1.iad.111111111111111111111111111111111111111111111111111111111111 \
--display-name drg0102 

 


oci network remote-peering-connection list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region ap-tokyo-1 

 

oci network remote-peering-connection create \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region ap-tokyo-1 \
--drg-id ocid1.drg.oc1.ap-tokyo-1.111111111111111111111111111111111111111111111111111111111111 \
--display-name drg1102

 


-- 8. RPC接続

oci network remote-peering-connection connect \
--peer-id ocid1.remotepeeringconnection.oc1.ap-tokyo-1.111111111111111111111111111111111111111111111111111111111111 \
--peer-region-name ap-tokyo-1 \
--remote-peering-connection-id ocid1.remotepeeringconnection.oc1.iad.111111111111111111111111111111111111111111111111111111111111 \
--region us-ashburn-1 

 

oci network remote-peering-connection list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region us-ashburn-1 

oci network remote-peering-connection list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region ap-tokyo-1 

 

 

-- 9. ルートテーブルにルートルール追加

 


cd ashburn
vi main.tf

resource "oci_core_route_table" "rt02" {

    route_rules {
        #Required
        network_entity_id = "ocid1.drg.oc1.iad.111111111111111111111111111111111111111111111111111111111111"
        #Optional
        destination = "10.1.0.0/16"
    }


terraform plan
terraform apply -auto-approve

 

cd tokyo
vi main.tf

resource "oci_core_route_table" "rt12" {

    route_rules {
        #Required
        network_entity_id = "ocid1.drg.oc1.ap-tokyo-1.111111111111111111111111111111111111111111111111111111111111"
        #Optional
        destination = "10.0.0.0/16"
    }

terraform plan
terraform apply -auto-approve

 

-- 10. 疎通確認


ping 10.1.2.10

ping 10.0.2.162

 

 

 

-- 11. 動的グループ作成

 

# List existing dynamic groups before creating a new one.
oci iam dynamic-group list 


# dg01 matches every instance in the target compartment, so vm02/vm12 can
# authenticate via instance principal without API keys.
oci iam dynamic-group create \
--description dg01 \
--matching-rule "Any {instance.compartment.id = 'ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111'}" \
--name dg01 


-- 12. 動的グループポリシー作成


oci iam policy list \
--compartment-id ocid1.tenancy.oc1..111111111111111111111111111111111111111111111111111111111111 

# NOTE(review): "manage all-resources in tenancy" grants tenancy-wide admin to
# every instance matched by dg01 — acceptable for this lab, but scope the
# statement down (e.g. to object reads in one compartment) for real use.
oci iam policy create \
--compartment-id ocid1.tenancy.oc1..111111111111111111111111111111111111111111111111111111111111 \
--description policy11 \
--name policy11 \
--statements '[
"Allow dynamic-group dg01 to manage all-resources in tenancy"
]' 

 


-- 13. squidインストール (vm12での作業)


※サービスゲートウェイがあるため、dnfでsquidインストール可能な模様

sudo su -
dnf install squid

 

# Keep a copy of the distribution config before replacing it.
cp /etc/squid/squid.conf /etc/squid/squid.conf.org

# Minimal squid.conf: only whitelisted destination domains pass the proxy.
# Squid's implicit default already denies requests that fall through the last
# rule, but the explicit "deny all" below makes the intent unambiguous.
cat <<-'EOF' > /etc/squid/squid.conf

# Source network that should be allowed (ashburn VCN).
# NOTE(review): this acl is declared but not referenced by any http_access
# rule, so requests are currently filtered by destination only — add
# "http_access allow localnet whitelist" style rules to restrict by source.
acl localnet src 10.0.0.0/16

# Squid normally listens to port 3128
http_port 3128

# White list of destination domains (one per line in the referenced file)
acl whitelist dstdomain "/etc/squid/whitelist"
http_access allow whitelist

# Explicitly reject everything else.
http_access deny all

EOF

# Destination domains permitted through the proxy: the Tokyo Object Storage
# data endpoint and its authentication endpoint.
cat <<-'EOF' > /etc/squid/whitelist

objectstorage.ap-tokyo-1.oraclecloud.com
auth.ap-tokyo-1.oraclecloud.com


EOF

# Apply the new configuration and make squid start on boot.
systemctl restart squid
systemctl status squid
systemctl enable squid

# NOTE(review): disabling firewalld entirely is a lab shortcut; in a real
# environment open only the proxy port instead
# (e.g. firewall-cmd --permanent --add-port=3128/tcp && firewall-cmd --reload).
systemctl stop firewalld
systemctl status firewalld
systemctl disable firewalld


# Watch proxy activity while testing from vm02.
tail -f /var/log/squid/access.log

 

 


-- 14. 動作確認 (vm02での作業)
# Authenticate as the instance principal (dynamic group dg01 + policy11)
# instead of API keys.
export OCI_CLI_AUTH=instance_principal

インスタンスプリンシパル反映まで時間がかかる場合あり

 

# First attempt without the proxy: subnet02 has no internet route, so this is
# expected to fail to reach the Tokyo endpoint — TODO confirm the exact error.
oci os object list \
--bucket-name bucket11 \
--region  ap-tokyo-1 

tokyoのプライベートサブネットのプロキシを使用する
※auth.us-ashburn-1.oraclecloud.com への通信が発生するので、ashburnのサービスゲートウェイも必要
※auth.us-ashburn-1.oraclecloud.com への通信はプロキシ対象外とする必要あり


# Route HTTPS through the squid proxy on vm12 (10.1.2.10, reachable over the
# RPC), but keep the local auth endpoint off-proxy so it uses the ashburn
# service gateway directly.
export https_proxy="http://10.1.2.10:3128"
export no_proxy=auth.us-ashburn-1.oraclecloud.com

env | grep proxy

# Retry via the proxy: this call should now succeed and appear in
# /var/log/squid/access.log on vm12.
oci os object list \
--bucket-name bucket11 \
--region  ap-tokyo-1 

 

 

 


-- 15. クリーンアップ

-- 動的グループポリシー削除

# Cleanup order matters: delete the policy before the dynamic group it names.
oci iam policy list \
--compartment-id ocid1.tenancy.oc1..111111111111111111111111111111111111111111111111111111111111 

# --force skips the interactive confirmation prompt.
oci iam policy delete \
--policy-id ocid1.policy.oc1..111111111111111111111111111111111111111111111111111111111111 \
--force 


-- 動的グループ削除

oci iam dynamic-group list 

oci iam dynamic-group delete \
--dynamic-group-id ocid1.dynamicgroup.oc1..111111111111111111111111111111111111111111111111111111111111 \
--force 

 


-- DRGアタッチメント(RPC側)削除

oci network remote-peering-connection list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region us-ashburn-1 


oci network remote-peering-connection delete \
--region us-ashburn-1 \
--remote-peering-connection-id ocid1.remotepeeringconnection.oc1.iad.111111111111111111111111111111111111111111111111111111111111 \
--force

oci network remote-peering-connection list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region ap-tokyo-1 


oci network remote-peering-connection delete \
--region ap-tokyo-1 \
--remote-peering-connection-id ocid1.remotepeeringconnection.oc1.ap-tokyo-1.111111111111111111111111111111111111111111111111111111111111 \
--force

 


-- DRGアタッチメント(VCN側)削除

oci network drg-attachment list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region us-ashburn-1 


oci network drg-attachment delete \
--region us-ashburn-1 \
--drg-attachment-id ocid1.drgattachment.oc1.iad.111111111111111111111111111111111111111111111111111111111111 \
--force 


oci network drg-attachment list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region ap-tokyo-1 

 

oci network drg-attachment delete \
--region ap-tokyo-1 \
--drg-attachment-id ocid1.drgattachment.oc1.ap-tokyo-1.111111111111111111111111111111111111111111111111111111111111 \
--force 

 

-- DRG削除

oci network drg list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region us-ashburn-1 

oci network drg delete \
--region us-ashburn-1 \
--drg-id ocid1.drg.oc1.iad.111111111111111111111111111111111111111111111111111111111111 \
--force 

oci network drg list \
--compartment-id ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111 \
--region ap-tokyo-1 


oci network drg delete \
--region ap-tokyo-1 \
--drg-id ocid1.drg.oc1.ap-tokyo-1.111111111111111111111111111111111111111111111111111111111111 \
--force 

 

 


cd tokyo
terraform destroy -auto-approve

 

cd ashburn

terraform destroy -auto-approve

 

 

 

 

 

 

OCI CLIでプロキシ使用

https://qiita.com/dingtianhongjie/items/485c8abf7b3af9529f69

https://qiita.com/shirok/items/dc3908c85b6bd448fed9

 

前提: 
subnet01,vm01,sl01,rt01,パブリックサブネット
subnet02,vm02,sl02,rt02,プライベートサブネット,サービス・ゲートウェイのみ

subnet01~subnet02間の全通信許可

vm01にsquidをインストールし、vm02からプロキシ経由でociコマンド実行

 

-- 1. VCN作成

mkdir ashburn
cd ashburn


cat <<-'EOF' > variables.tf

locals {
  tenancy_ocid = "ocid1.tenancy.oc1..111111111111111111111111111111111111111111111111111111111111"
# MYIP
  myip = "192.0.2.1/32"

}

variable "compartment_name" {
  description = "compartment_name"
  type = string
  default = "cmp01"
}

EOF

 

cat <<-'EOF' > main.tf

terraform {
  required_version = ">= 1.0.0, < 2.0.0"
  required_providers {
    oci = {
       source  = "hashicorp/oci"
       version = "= 5.23.0"
    }
  }
}

provider "oci" {
  tenancy_ocid = local.tenancy_ocid
  user_ocid = "ocid1.user.oc1..111111111111111111111111111111111111111111111111111111111111" 
  private_key_path = "~/.oci/oci_api_key.pem"
  fingerprint = "45:ed:22:e6:cc:fd:63:97:12:9d:62:7a:90:12:65:7a"
  region = "us-ashburn-1"
}


resource "oci_identity_compartment" "cmp01" {
    # Required
    compartment_id = local.tenancy_ocid
    description = var.compartment_name
    name = var.compartment_name
    
    enable_delete = true
}

resource "oci_core_vcn" "vcn01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id

    #Optional
    cidr_block = "10.0.0.0/16"
    display_name = "vcn01"
    dns_label = "vcn01"

}


resource "oci_core_internet_gateway" "igw01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    enabled = true
    display_name = "igw01"
}


data "oci_core_services" "svc01" {
  filter {
    name   = "name"
    values = ["All .* Services In Oracle Services Network"]
    regex  = true
  }
}


resource "oci_core_service_gateway" "sgw01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    services {
        #Required
        service_id = data.oci_core_services.svc01.services.0.id
    }
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "sgw01"
}

 

resource "oci_core_route_table" "rt01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "rt01"
    route_rules {
        #Required
        network_entity_id = oci_core_internet_gateway.igw01.id
        #Optional
        destination = "0.0.0.0/0"
    }
    
}


resource "oci_core_route_table" "rt02" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "rt02"
#    route_rules {
#        #Required
#        network_entity_id = oci_core_service_gateway.sgw01.id
#        #Optional
#        destination = "all-iad-services-in-oracle-services-network"
#        destination_type = "SERVICE_CIDR_BLOCK"
#    }
    
}

resource "oci_core_security_list" "sl01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "sl01"
    
    egress_security_rules {
        destination = "0.0.0.0/0"
        protocol = "all"
        stateless = false
    }
    
    ingress_security_rules {
        protocol = "6"
        source = local.myip
        stateless = false
        tcp_options {
            max = 22
            min = 22
        }
    }
    ingress_security_rules {
        protocol = "all"
        source = "10.0.2.0/24"
        stateless = false
    }
}


resource "oci_core_security_list" "sl02" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional
    display_name = "sl02"
    
    egress_security_rules {
        destination = "0.0.0.0/0"
        protocol = "all"
        stateless = false
    }
    

    ingress_security_rules {
        protocol = "all"
        source = "10.0.1.0/24"
        stateless = false
    }
}

 

resource "oci_core_subnet" "subnet01" {
    #Required
    cidr_block = "10.0.1.0/24"
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional

    display_name = "subnet01"
    dns_label = "subnet01"
    route_table_id = oci_core_route_table.rt01.id
    security_list_ids = [oci_core_security_list.sl01.id]
}

resource "oci_core_subnet" "subnet02" {
    #Required
    cidr_block = "10.0.2.0/24"
    compartment_id = oci_identity_compartment.cmp01.id
    vcn_id = oci_core_vcn.vcn01.id

    #Optional

    display_name = "subnet02"
    dns_label = "subnet02"
    route_table_id = oci_core_route_table.rt02.id
    security_list_ids = [oci_core_security_list.sl02.id]
}

 


EOF

 

 

cat <<-'EOF' > instance.tf


resource "oci_core_instance" "vm01" {
    #Required
    availability_domain = "OEIw:US-ASHBURN-AD-1"
    compartment_id = oci_identity_compartment.cmp01.id
    shape = "VM.Standard.E2.1"
    
    #Optional

    create_vnic_details {
        #Optional
        assign_public_ip = true
        subnet_id = oci_core_subnet.subnet01.id
    }

    display_name = "vm01"

    metadata = {
        ssh_authorized_keys = file("~/.ssh/id_rsa.pub")
    } 

    source_details {
        #Required
        source_id = "ocid1.image.oc1.iad.111111111111111111111111111111111111111111111111111111111111"
        source_type = "image"

        #Optional
        boot_volume_size_in_gbs = 50
    }
    preserve_boot_volume = false
}

resource "oci_core_instance" "vm02" {
    #Required
    availability_domain = "OEIw:US-ASHBURN-AD-1"
    compartment_id = oci_identity_compartment.cmp01.id
    shape = "VM.Standard.E2.1"
    
    #Optional

    create_vnic_details {
        #Optional
        assign_public_ip = false
        subnet_id = oci_core_subnet.subnet02.id
    }

    display_name = "vm02"

    metadata = {
        ssh_authorized_keys = file("~/.ssh/id_rsa.pub")
    } 

    source_details {
        #Required
        source_id = "ocid1.image.oc1.iad.111111111111111111111111111111111111111111111111111111111111"
        source_type = "image"

        #Optional
        boot_volume_size_in_gbs = 50
    }
    preserve_boot_volume = false
}

EOF


cat <<-'EOF' > bucket.tf


data "oci_objectstorage_namespace" "ns01" {
    compartment_id = local.tenancy_ocid
}

resource "oci_objectstorage_bucket" "bucket01" {
    #Required
    compartment_id = oci_identity_compartment.cmp01.id
    name = "bucket01"
    namespace = data.oci_objectstorage_namespace.ns01.namespace

    #Optional
    access_type = "NoPublicAccess"
    auto_tiering = "Disabled"
    object_events_enabled = false
    storage_tier = "Standard"
    versioning = "Disabled"
    
}


EOF

 

cat <<-'EOF' > outputs.tf

output "cmp01_id" {
  value = oci_identity_compartment.cmp01.id
  description = "cmp01.id"
}

output "vcn01_id" {
  value = oci_core_vcn.vcn01.id
  description = "vcn01.id"
}

output "igw01_id" {
  value = oci_core_internet_gateway.igw01.id
  description = "igw01.id"
}

output "sgw01_id" {
  value = oci_core_service_gateway.sgw01.id
  description = "sgw01.id"
}

output "rt01_id" {
  value = oci_core_route_table.rt01.id
  description = "rt01.id"
}
output "rt02_id" {
  value = oci_core_route_table.rt02.id
  description = "rt02.id"
}


output "sl01_id" {
  value = oci_core_security_list.sl01.id
  description = "sl01.id"
}

output "sl02_id" {
  value = oci_core_security_list.sl02.id
  description = "sl02.id"
}

output "subnet01_id" {
  value = oci_core_subnet.subnet01.id
  description = "subnet01.id"
}
output "subnet02_id" {
  value = oci_core_subnet.subnet02.id
  description = "subnet02.id"
}

output "vm01_id" {
  value = oci_core_instance.vm01.id
  description = "vm01.id"
}

output "vm02_id" {
  value = oci_core_instance.vm02.id
  description = "vm02.id"
}


output "vm01_public_ip" {
  value = oci_core_instance.vm01.public_ip
  description = "vm01.public_ip"
}

output "vm01_private_ip" {
  value = oci_core_instance.vm01.private_ip
  description = "vm01.private_ip"
}

output "vm02_private_ip" {
  value = oci_core_instance.vm02.private_ip
  description = "vm02.private_ip"
}

 

EOF

 


terraform init
terraform fmt
terraform -version

terraform plan

terraform apply -auto-approve

 

 

-- 2. 動的グループ作成

 

oci iam dynamic-group list 


oci iam dynamic-group create \
--description dg01 \
--matching-rule "Any {instance.compartment.id = 'ocid1.compartment.oc1..111111111111111111111111111111111111111111111111111111111111'}" \
--name dg01 


-- 3. 動的グループポリシー作成


oci iam policy list \
--compartment-id ocid1.tenancy.oc1..111111111111111111111111111111111111111111111111111111111111 

oci iam policy create \
--compartment-id ocid1.tenancy.oc1..111111111111111111111111111111111111111111111111111111111111 \
--description policy11 \
--name policy11 \
--statements '[
"Allow dynamic-group dg01 to manage all-resources in tenancy"
]' 

 

-- 4. OCI CLIインストール

-- 4.1 vm01からvm02にログイン

# Copy the SSH private key and the offline CLI installer to vm01 (public IP).
# NOTE(review): copying a private key onto a bastion host is risky; prefer
# ssh agent forwarding (ssh -A) or a dedicated key pair for the hop to vm02.
scp -i $HOME/.ssh/id_rsa $HOME/.ssh/id_rsa opc@192.0.2.2:/home/opc
scp -i $HOME/.ssh/id_rsa  oci-cli-3.39.0-Oracle-Linux-9-Offline.zip opc@192.0.2.2:/home/opc

ssh -i $HOME/.ssh/id_rsa opc@192.0.2.2

# From vm01, forward the installer to the private instance vm02.
scp -i $HOME/id_rsa oci-cli-3.39.0-Oracle-Linux-9-Offline.zip opc@10.0.2.166:/home/opc

ssh -i $HOME/id_rsa opc@10.0.2.166


-- 4.2 vm02からインターネットアクセスできないことを確認

# Should time out: subnet02's route table (rt02) has no internet/NAT route.
curl --connect-timeout 10 https://www.oracle.com/

-- 4.3 OCI CLIインストール (vm02での作業)

# Offline install: no network access is required on vm02 for this step.
unzip oci-cli-3.39.0-Oracle-Linux-9-Offline.zip

cd oci-cli-installation

bash install.sh --offline-install
# Reload the login shell so PATH changes made by the installer take effect.
exec -l $SHELL
oci -v

 


-- 5. squidインストール (vm01での作業)

# Become root on vm01; the squid install and config writes require root.
sudo su -
dnf install squid

# Keep a pristine copy of the distribution config before overwriting it.
cp /etc/squid/squid.conf /etc/squid/squid.conf.org

cat <<-'EOF' > /etc/squid/squid.conf

# should be allowed
acl localnet src 10.0.0.0/16

# Squid normally listens to port 3128
http_port 3128

# Wite List
acl whitelist dstdomain "/etc/squid/whitelist"
http_access allow whitelist

EOF

cat <<-'EOF' > /etc/squid/whitelist

objectstorage.us-ashburn-1.oraclecloud.com
auth.us-ashburn-1.oraclecloud.com

EOF

# Apply the new config and make squid persistent across reboots.
systemctl restart squid
systemctl status squid
systemctl enable squid

# Disable the host firewall so vm02 can reach port 3128.
# NOTE(review): allowing only 3128/tcp in firewalld would be safer.
systemctl stop firewalld
systemctl status firewalld
systemctl disable firewalld

# Watch proxy hits while testing from vm02.
tail -f /var/log/squid/access.log

-- 6. 動作確認 (vm02での作業)
# Authenticate via instance principal (dynamic group dg01 / policy11).
export OCI_CLI_AUTH=instance_principal

インスタンスプリンシパル反映まで時間がかかる場合あり


# First attempt without a proxy — presumably shown to fail, since rt02 has
# no active service-gateway route (it is commented out in main.tf); confirm.
oci os object list \
--bucket-name bucket01 \
--region us-ashburn-1 


# Send HTTPS through squid on vm01 (10.0.1.237:3128).
export https_proxy="http://10.0.1.237:3128"


# Retry through the proxy: should list bucket01 successfully.
oci os object list \
--bucket-name bucket01 \
--region us-ashburn-1 

 

 

-- 7. クリーンアップ

oci iam policy list \
--compartment-id ocid1.tenancy.oc1..111111111111111111111111111111111111111111111111111111111111 

oci iam policy delete \
--policy-id ocid1.policy.oc1..111111111111111111111111111111111111111111111111111111111111 \
--force 

oci iam dynamic-group list 

oci iam dynamic-group delete \
--dynamic-group-id ocid1.dynamicgroup.oc1..111111111111111111111111111111111111111111111111111111111111 \
--force 

 


terraform destroy -auto-approve

 

 

 

OEM Management Repository Views

Enterprise Manager Cloud Control Management Repository Views Reference
https://docs.oracle.com/cd/cloud-control-13.3/EMVWS/toc.htm

How To Query Target Status From The OEM Repository DB ? (Doc ID 2669538.1)

 


-- SQL*Plus formatting directive (12.2+): render all query output as CSV.
SET MARKUP CSV ON


-- 2 Blackout Views

-- Full dumps of the EM repository blackout views; SELECT * is intentional
-- here (exploratory export of every column).
select * from sysman.MGMT$BLACKOUT_HISTORY;
select * from sysman.MGMT$BLACKOUTS;


-- 4 Compliance Views

select * from sysman.MGMT$COMPLIANCE_STANDARD_RULE;
select * from sysman.MGMT$COMPLIANCE_STANDARD;
select * from sysman.MGMT$COMPLIANCE_STANDARD_GROUP;
select * from sysman.MGMT$CS_EVAL_SUMMARY;
select * from sysman.MGMT$COMPOSITE_CS_EVAL_SUMMARY;
select * from sysman.MGMT$CS_RULE_EVAL_SUMMARY;
select * from sysman.MGMT$CS_GROUP_EVAL_SUMMARY;
select * from sysman.MGMT$CS_TARGET_ASSOC;
select * from sysman.MGMT$CSR_CURRENT_VIOLATION;
select * from sysman.MGMT$CSR_VIOLATION_CONTEXT;
select * from sysman.MGMT$EM_RULE_VIOL_CTXT_DEF;
select * from sysman.MGMT$RULE_KEYWORD;
select * from sysman.MGMT$CS_KEYWORD;
select * from sysman.MGMT$CS_GROUP_KEYWORD;
select * from sysman.MGMT$CS_RULE_ATTRS;
select * from sysman.MGMT$CS_HIERARCHY;
select * from sysman.MGMT$CS_RQS_HIERARCHY;
select * from sysman.MGMT$CS_RULEFOLDER;
select * from sysman.MGMT$CSG_HIERARCHY;
select * from sysman.MGMT$CSG_SUBGROUP;
select * from sysman.MGMT$CSR_TARGET_ASSOC;
select * from sysman.MGMT$CSRF_TARGET_ASSOC;
select * from sysman.MGMT$REPO_RULE_CHK_DETAILS;
select * from sysman.MGMT$REPOSITORY_RULE_BIND_VARS;
select * from sysman.MGMT$REPOSITORY_RULE_PARAMS;
select * from sysman.MGMT$CS_TGT_ASSOC_TXF_REQ;
select * from sysman.MGMT$EM_CS_RULE_EVENT_ERROR;
select * from sysman.MGMT$CCC_ALL_OBS_BUNDLES;
select * from sysman.MGMT$CCC_ALL_OBSERVATIONS;
select * from sysman.MGMT$CCC_ALL_VIOLATIONS;
select * from sysman.MGMT$CCC_ALL_AGENT_WARNINGS;
select * from sysman.MGMT$CCC_ALL_WATCHDOG_ALERTS;
select * from sysman.MGMT$COMPLIANT_TARGETS;
select * from sysman.MGMT$COMPLIANCE_SUMMARY;
select * from sysman.MGMT$COMPLIANCE_TREND;
select * from sysman.MGMT$CCC_DIAG_ANALYTICS;
select * from sysman.MGMT$CCC_DIAG_QUEUEBACKLOG;

-- 5 Configuration Management Views

-- 5.1 Custom Configuration Specification Views

select * from sysman.MGMT$CCS_DATA;
select * from sysman.MGMT$CCS_DATA_SOURCE;
select * from sysman.MGMT$CCS_DATA_VISIBLE;


-- 5.2  Database Configuration Views

select * from sysman.MGMT$DB_TABLESPACES;
select * from sysman.MGMT$DB_DATAFILES;
select * from sysman.MGMT$DB_CONTROLFILES;
select * from sysman.MGMT$DB_DBNINSTANCEINFO;
select * from sysman.MGMT$DB_FEATUREUSAGE;
select * from sysman.MGMT$DB_INIT_PARAMS;
select * from sysman.MGMT$DB_LICENSE;
select * from sysman.MGMT$DB_REDOLOGS;
select * from sysman.MGMT$DB_ROLLBACK_SEGS;
select * from sysman.MGMT$DB_SGA;
select * from sysman.MGMT$DB_TABLESPACES_ALL;
select * from sysman.MGMT$DB_OPTIONS;


-- 5.3  Enterprise Configuration Management Views

select * from sysman.MGMT$ECM_CMP_JOBS;
select * from sysman.MGMT$ECM_CMP_JOB_LAST_RESULTS;
select * from sysman.MGMT$ECM_CMP_RPT_CCS_DS;
select * from sysman.MGMT$ECM_CMP_RPT_CCS_DS_DTLS;
select * from sysman.MGMT$ECM_CMP_RPT_CCS_PD_ALL;
select * from sysman.MGMT$ECM_CMP_RPT_CCS_PD_DIFFS;
select * from sysman.MGMT$ECM_CMP_RPT_CI_DIFFS;
select * from sysman.MGMT$ECM_CMP_VISIBLE_CONFIGS;
select * from sysman.MGMT$ECM_CURRENT_SNAPSHOTS;
select * from sysman.MGMT$ECM_VISIBLE_SNAPSHOTS;

 

-- 6 Events Views

select * from sysman.MGMT$INCIDENTS;
select * from sysman.MGMT$INCIDENT_CATEGORY;
select * from sysman.MGMT$INCIDENT_TARGET;
select * from sysman.MGMT$INCIDENT_ANNOTATION;
select * from sysman.MGMT$EVENTS_LATEST;
select * from sysman.MGMT$EVENTS;
select * from sysman.MGMT$EVENT_ANNOTATION;
select * from sysman.MGMT$PROBLEMS;
select * from sysman.MGMT$PROBLEM_ANNOTATION;

-- 7 Hardware Views

-- 7.1  Hardware Views

select * from sysman.MGMT$HW_CPU_DETAILS;
select * from sysman.MGMT$HW_NIC;
select * from sysman.MGMT$HW_NIC_BONDS;
select * from sysman.MGMT$HW_IO_DEVICES;
select * from sysman.MGMT$EM_ECM_HOST_VIRTUAL;
select * from sysman.MGMT$HW_HOSTS_FILE;


-- 7.2 Service Tag Views

select * from sysman.MGMT$SERVICETAG_INSTANCES;
select * from sysman.MGMT$SERVICETAG_REGISTRY;

 

-- 8 Inventory Views

-- 8.1 Inventory Views

select * from sysman.MGMT$TARGET;
select * from sysman.MGMT$TARGET_TYPE;
select * from sysman.MGMT$TARGET_TYPE_DEF;
select * from sysman.MGMT$TARGET_ASSOCIATIONS;
select * from sysman.MGMT$TARGET_MEMBERS;
select * from sysman.MGMT$TARGET_FLAT_MEMBERS;
select * from sysman.MGMT$TARGET_TYPE_PROPERTIES;
select * from sysman.MGMT$TARGET_PROPERTIES;


-- 8.2 Oracle Home Directory Patching Views

select * from sysman.MGMT$EM_HOMES_PLATFORM;
select * from sysman.MGMT$APPL_PATCH_AND_PATCHSET;
select * from sysman.MGMT$HOMES_AFFECTED;
select * from sysman.MGMT$APPLIED_PATCHES;
select * from sysman.MGMT$APPLIED_PATCHSETS;


-- 8.3 Oracle Home Directory Views

select * from sysman.MGMT$OH_HOME_INFO;
select * from sysman.MGMT$OH_DEP_HOMES;
select * from sysman.MGMT$OH_CRS_NODES;
select * from sysman.MGMT$OH_CLONE_PROPERTIES;
select * from sysman.MGMT$OH_COMPONENT;
select * from sysman.MGMT$OH_COMP_INST_TYPE;
select * from sysman.MGMT$OH_COMP_DEP_RULE;
select * from sysman.MGMT$OH_PATCHSET;
select * from sysman.MGMT$OH_VERSIONED_PATCH;
select * from sysman.MGMT$OH_PATCH;
select * from sysman.MGMT$OH_PATCHED_COMPONENT;
select * from sysman.MGMT$OH_PATCH_FIXED_BUG;
select * from sysman.MGMT$OH_PATCHED_FILE;
select * from sysman.MGMT$OH_FILE;
select * from sysman.MGMT$PA_RECOM_METRIC_SOURCE;
select * from sysman.MGMT$OH_INV_SUMMARY;
select * from sysman.MGMT$OH_INSTALLED_TARGETS;

 

-- 9 Job Views

select * from sysman.MGMT$CA_TARGETS;
select * from sysman.MGMT$CA_EXECUTIONS;
select * from sysman.MGMT$JOBS;
select * from sysman.MGMT$JOB_TARGETS;
select * from sysman.MGMT$JOB_EXECUTION_HISTORY;
select * from sysman.MGMT$JOB_STEP_HISTORY;
select * from sysman.MGMT$JOB_ANNOTATIONS;
select * from sysman.MGMT$JOB_NOTIFICATION_LOG;

-- 10 Linux Patching Views

select * from sysman.MGMT$HOSTPATCH_HOSTS;
select * from sysman.MGMT$HOSTPATCH_GROUPS;
select * from sysman.MGMT$HOSTPATCH_GRP_COMPL_HIST;
select * from sysman.MGMT$HOSTPATCH_HOST_COMPL;

-- 11 Management Template Views

select * from sysman.MGMT$TEMPLATES;
select * from sysman.MGMT$TEMPLATE_POLICY_SETTINGS;
select * from sysman.MGMT$TEMPLATE_METRICCOLLECTION;
select * from sysman.MGMT$TEMPLATE_METRIC_SETTINGS;

-- 12 Metric Views

select * from sysman.MGMT$METRIC_CATEGORIES;
select * from sysman.MGMT$METRIC_COLLECTION;
select * from sysman.MGMT$METRIC_ERROR_CURRENT;
select * from sysman.MGMT$METRIC_ERROR_HISTORY;

-- 13 Middleware Management Views

-- 13.1 Application Deployment Views

select * from sysman.MGMT$J2EE_APPLICATION;
select * from sysman.MGMT$J2EEAPP_EJBCOMPONENT;
select * from sysman.MGMT$J2EEAPP_JRFWS;
select * from sysman.MGMT$J2EEAPP_JRFWSOPER;
select * from sysman.MGMT$J2EEAPP_JRFWSPOLICY;
select * from sysman.MGMT$J2EEAPP_JRFWSPORT;
select * from sysman.MGMT$J2EEAPP_WEBAPPCOMPONENT;
select * from sysman.MGMT$J2EEAPP_WSCONFIG;
select * from sysman.MGMT$J2EEAPP_WSPORTCONFIG;


-- 13.2 Glassfish Views

select * from sysman.MGMT$EMAS_GLASSFISH_DOMAIN;
select * from sysman.MGMT$EMAS_GLASSFISH_NODES;
select * from sysman.MGMT$EMAS_GLASSFISH_SERVER;
select * from sysman.MGMT$EMAS_GLASSFISH_SVR_PROP;
select * from sysman.MGMT$EMAS_GLASSFISH_NW_LSTNR;
select * from sysman.MGMT$EMAS_GLASSFISH_DATASOURCE;
select * from sysman.MGMT$EMAS_GLASSFISH_DS_PROP;


-- 13.3 Oracle WebLogic Server Views

select * from sysman.MGMT$WEBLOGIC_APPLICATIONS;
select * from sysman.MGMT$WEBLOGIC_EJBCOMPONENT;
select * from sysman.MGMT$WEBLOGIC_FILESTORE;
select * from sysman.MGMT$WEBLOGIC_JDBCDATASOURCE;
select * from sysman.MGMT$WEBLOGIC_JDBCMULTIDS;
select * from sysman.MGMT$WEBLOGIC_JMSCONNFACTORY;
select * from sysman.MGMT$WEBLOGIC_JMSQUEUE;
select * from sysman.MGMT$WEBLOGIC_JMSSERVER;
select * from sysman.MGMT$WEBLOGIC_JMSTOPIC;
select * from sysman.MGMT$WEBLOGIC_JOLTCONNPOOL;
select * from sysman.MGMT$WEBLOGIC_JVMSYSPROPS;
select * from sysman.MGMT$WEBLOGIC_MACHINE;
select * from sysman.MGMT$WEBLOGIC_NETWORK_CHANNELS;
select * from sysman.MGMT$WEBLOGIC_NODEMANAGER;
select * from sysman.MGMT$WEBLOGIC_RACONFIG;
select * from sysman.MGMT$WEBLOGIC_RAOUTBOUNDCONFIG;
select * from sysman.MGMT$WEBLOGIC_RESOURCECONFIG;
select * from sysman.MGMT$WEBLOGIC_SERVER;
select * from sysman.MGMT$WEBLOGIC_STARTSHUTCLASSES;
select * from sysman.MGMT$WEBLOGIC_VIRTUALHOST;
select * from sysman.MGMT$WEBLOGIC_WEBAPPCOMPONENT;
select * from sysman.MGMT$WEBLOGIC_WORKMANAGER;
select * from sysman.MGMT$WEBLOGIC_WSCONFIG;
select * from sysman.MGMT$WEBLOGIC_WSPORTCONFIG;


-- 13.4 Oracle WebLogic Domain Views

select * from sysman.MGMT$WEBLOGIC_DOMAIN;
select * from sysman.MGMT$WEBLOGIC_OPSSSYSPROP;
select * from sysman.MGMT$WEBLOGIC_OAMCONFIG;


-- 13.5 Oracle WebLogic Cluster View

select * from sysman.MGMT$WEBLOGIC_CLUSTER;

 

-- 14 Monitoring Views

select * from sysman.MGMT$ALERT_CURRENT;
select * from sysman.MGMT$TARGET_METRIC_COLLECTIONS;
select * from sysman.MGMT$TARGET_METRIC_SETTINGS;
select * from sysman.MGMT$AVAILABILITY_CURRENT;
select * from sysman.MGMT$AVAILABILITY_HISTORY;
select * from sysman.MGMT$ALERT_HISTORY;
select * from sysman.MGMT$AVAIL_ALERT_HISTORY;
select * from sysman.MGMT$METRIC_DETAILS;
select * from sysman.MGMT$METRIC_CURRENT;
select * from sysman.MGMT$METRIC_HOURLY;
select * from sysman.MGMT$METRIC_DAILY;

-- 15 Operating System Views

select * from sysman.MGMT$OS_SUMMARY;
select * from sysman.MGMT$OS_COMPONENTS;
select * from sysman.MGMT$OS_HW_SUMMARY;
select * from sysman.MGMT$OS_PATCH_SUMMARY;
select * from sysman.MGMT$OS_FS_MOUNT;
select * from sysman.MGMT$OS_KERNEL_PARAMS;
select * from sysman.MGMT$OS_PATCHES;
select * from sysman.MGMT$OS_PROPERTIES;
select * from sysman.MGMT$OS_MODULES;
select * from sysman.MGMT$OS_LIMITS;
select * from sysman.MGMT$OS_INIT_SERVICES;

-- 16 Security Views

select * from sysman.MGMT$ESA_ALL_PRIVS_REPORT;
select * from sysman.MGMT$ESA_ANY_DICT_REPORT;
select * from sysman.MGMT$ESA_ANY_PRIV_REPORT;
select * from sysman.MGMT$ESA_AUDIT_SYSTEM_REPORT;
select * from sysman.MGMT$ESA_BECOME_USER_REPORT;
select * from sysman.MGMT$ESA_CATALOG_REPORT;
select * from sysman.MGMT$ESA_CONN_PRIV_REPORT;
select * from sysman.MGMT$ESA_CREATE_PRIV_REPORT;
select * from sysman.MGMT$ESA_DBA_GROUP_REPORT;
select * from sysman.MGMT$ESA_DBA_ROLE_REPORT;
select * from sysman.MGMT$ESA_DIRECT_PRIV_REPORT;
select * from sysman.MGMT$ESA_EXMPT_ACCESS_REPORT;
select * from sysman.MGMT$ESA_KEY_OBJECTS_REPORT;
select * from sysman.MGMT$ESA_OH_OWNERSHIP_REPORT;
select * from sysman.MGMT$ESA_OH_PERMISSION_REPORT;
select * from sysman.MGMT$ESA_POWER_PRIV_REPORT;
select * from sysman.MGMT$ESA_PUB_PRIV_REPORT;
select * from sysman.MGMT$ESA_SYS_PUB_PKG_REPORT;
select * from sysman.MGMT$ESA_TABSP_OWNERS_REPORT;
select * from sysman.MGMT$ESA_TRC_AUD_PERM_REPORT;
select * from sysman.MGMT$ESA_WITH_ADMIN_REPORT;
select * from sysman.MGMT$ESA_WITH_GRANT_REPORT;
select * from sysman.MGMT$ESM_COLLECTION_LATEST;
select * from sysman.MGMT$ESM_FILE_SYSTEM_LATEST;
select * from sysman.MGMT$ESM_PORTS_LATEST;
select * from sysman.MGMT$ESM_SERVICE_LATEST;
select * from sysman.MGMT$ESM_STACK_LATEST;

-- 17 Storage Reporting Views

select * from sysman.MGMT$STORAGE_REPORT_DATA;
select * from sysman.MGMT$STORAGE_REPORT_KEYS;
select * from sysman.MGMT$STORAGE_REPORT_PATHS;
select * from sysman.MGMT$STORAGE_REPORT_ISSUES;
select * from sysman.MGMT$STORAGE_REPORT_DISK;
select * from sysman.MGMT$STORAGE_REPORT_VOLUME;
select * from sysman.MGMT$STORAGE_REPORT_LOCALFS;
select * from sysman.MGMT$STORAGE_REPORT_NFS;

-- 18 Target Views

select * from sysman.MGMT$AGENTS_MONITORING_TARGETS;
select * from sysman.MGMT$EM_ECM_MOS_PROPERTIES;
select * from sysman.MGMT$EM_ECM_TARGET_FRESHNESS;
select * from sysman.MGMT$MANAGEABLE_ENTITIES;