ansible – ad-hoc commands

#ansible-playbook -l host_subset playbook.yml

 

Make changes to just one server

ovi@work:~$ ansible open -a "free -m" -k
SSH password:
n1 | success | rc=0 >>
total       used       free     shared    buffers     cached
Mem:          7942       6484       1457         10        235       4377
-/+ buffers/cache:       1871       6070
Swap:         4061          0       4061

work | success | rc=0 >>
total       used       free     shared    buffers     cached
Mem:         16004      15845        159          0         33      14142
-/+ buffers/cache:       1668      14335
Swap:         8147        691       7456

use limit to make changes to just one server

ovi@work:~$ ansible open -a "free -m" -k --limit n1
SSH password:
n1 | success | rc=0 >>
total       used       free     shared    buffers     cached
Mem:          7942       6484       1457         10        235       4377
-/+ buffers/cache:       1871       6070
Swap:         4061          0       4061

create a directory

ovi@work:~$ ansible open -m file -a "dest=/tmp/test mode=644 state=directory" -k
SSH password:
work | success >> {
“changed”: true,
“gid”: 1001,
“group”: “asix”,
“mode”: “0644”,
“owner”: “asix”,
“path”: “/tmp/test”,
“size”: 4096,
“state”: “directory”,
“uid”: 1001
}

n1 | success >> {
“changed”: true,
“gid”: 1001,
“group”: “asix”,
“mode”: “0644”,
“owner”: “asix”,
“path”: “/tmp/test”,
“size”: 4096,
“state”: “directory”,
“uid”: 1001
}

 

ovi@work:~$ ansible open -m stat -a "path=/etc/hosts" -k
SSH password:
n1 | success >> {
“changed”: false,
“stat”: {
“atime”: 1470711574.6343062,
“ctime”: 1469933973.2155738,
“dev”: 2049,
“exists”: true,
“gid”: 0,
“inode”: 62128485,
“isblk”: false,
“ischr”: false,
“isdir”: false,
“isfifo”: false,
“isgid”: false,
“islnk”: false,
“isreg”: true,
“issock”: false,
“isuid”: false,
“md5”: “8a22e2c2a4eb70dabb08c3527c5f8dfb”,
“mode”: “0644”,
“mtime”: 1469933973.2155738,
“nlink”: 1,
“pw_name”: “root”,
“rgrp”: true,
“roth”: true,
“rusr”: true,
“size”: 245,
“uid”: 0,
“wgrp”: false,
“woth”: false,
“wusr”: true,
“xgrp”: false,
“xoth”: false,
“xusr”: false
}
}

work | success >> {
“changed”: false,
“stat”: {
“atime”: 1470730625.26867,
“ctime”: 1467791669.1463304,
“dev”: 2049,
“exists”: true,
“gid”: 0,
“inode”: 45875357,
“isblk”: false,
“ischr”: false,
“isdir”: false,
“isfifo”: false,
“isgid”: false,
“islnk”: false,
“isreg”: true,
“issock”: false,
“isuid”: false,
“md5”: “f4abed992d2152fbb99e6c5a3bc4343d”,
“mode”: “0644”,
“mtime”: 1467791669.1463304,
“nlink”: 1,
“pw_name”: “root”,
“rgrp”: true,
“roth”: true,
“rusr”: true,
“size”: 238,
“uid”: 0,
“wgrp”: false,
“woth”: false,
“wusr”: true,
“xgrp”: false,
“xoth”: false,
“xusr”: false
}
}

[root@ip-172-..-126 ~]# ansible localhost -m setup | grep distribution

“ansible_distribution”: “Amazon”,
“ansible_distribution_major_version”: “NA”,
“ansible_distribution_release”: “NA”,
“ansible_distribution_version”: “2017.03”,

[root@ip-172-..-126 ~]# ansible localhost -m setup -a 'filter=ansible_dist*'

localhost | SUCCESS => {
“ansible_facts”: {
“ansible_distribution”: “Amazon”,
“ansible_distribution_major_version”: “NA”,
“ansible_distribution_release”: “NA”,
“ansible_distribution_version”: “2017.03”
},
“changed”: false
}

OpenStack – Neutron debug

root@osc:/# nova list --all_tenants | grep ov1
| 3b49799f-4149-4f17-8f04-55e0a683066a | ov1        | ACTIVE | –          | Running     | net_ext2=192.168.122.6  |

root@n1:/var/lib/nova/instances/3b49799f-4149-4f17-8f04-55e0a683066a# grep -i tap libvirt.xml
<target dev=”tapdc783dde-d2“/>

 

root@n1:/var/lib/nova/instances/3b49799f-4149-4f17-8f04-55e0a683066a# iptables -S | grep dc783dde
-N neutron-openvswi-idc783dde-d
-N neutron-openvswi-odc783dde-d
-N neutron-openvswi-sdc783dde-d
-A neutron-openvswi-FORWARD -m physdev –physdev-out tapdc783dde-d2 –physdev-is-bridged -j neutron-openvswi-sg-chain
-A neutron-openvswi-FORWARD -m physdev –physdev-in tapdc783dde-d2 –physdev-is-bridged -j neutron-openvswi-sg-chain
-A neutron-openvswi-INPUT -m physdev –physdev-in tapdc783dde-d2 –physdev-is-bridged -j neutron-openvswi-odc783dde-d
-A neutron-openvswi-idc783dde-d -m state –state RELATED,ESTABLISHED -j RETURN
-A neutron-openvswi-idc783dde-d -s 192.168.122.3/32 -p udp -m udp –sport 67 –dport 68 -j RETURN
-A neutron-openvswi-idc783dde-d -p tcp -m tcp –dport 22 -j RETURN
-A neutron-openvswi-idc783dde-d -p icmp -j RETURN
-A neutron-openvswi-idc783dde-d -m set –match-set NETIPv44ccfe044-b6e2-423 src -j RETURN
-A neutron-openvswi-idc783dde-d -m state –state INVALID -j DROP
-A neutron-openvswi-idc783dde-d -j neutron-openvswi-sg-fallback
-A neutron-openvswi-odc783dde-d -p udp -m udp --sport 68 --dport 67 -j RETURN
-A neutron-openvswi-odc783dde-d -j neutron-openvswi-sdc783dde-d
-A neutron-openvswi-odc783dde-d -p udp -m udp –sport 67 –dport 68 -j DROP
-A neutron-openvswi-odc783dde-d -m state –state RELATED,ESTABLISHED -j RETURN
-A neutron-openvswi-odc783dde-d -j RETURN
-A neutron-openvswi-odc783dde-d -m state –state INVALID -j DROP
-A neutron-openvswi-odc783dde-d -j neutron-openvswi-sg-fallback
-A neutron-openvswi-sdc783dde-d -s 192.168.122.6/32 -m mac –mac-source FA:16:3E:4F:FD:53 -j RETURN
-A neutron-openvswi-sdc783dde-d -j DROP
-A neutron-openvswi-sg-chain -m physdev –physdev-out tapdc783dde-d2 –physdev-is-bridged -j neutron-openvswi-idc783dde-d
-A neutron-openvswi-sg-chain -m physdev –physdev-in tapdc783dde-d2 –physdev-is-bridged -j neutron-openvswi-odc783dde-d

 

root@n1:~# brctl show
bridge name                        bridge id        STP          enabled    interfaces
qbr68056772-f9        8000.fa13574c4ed1    no        qvb68056772-f9
tap68056772-f9
qbr73435e86-ea        8000.ce293c1486a0    no        qvb73435e86-ea
tap73435e86-ea
qbr9774fd60-c9        8000.468449c27608    no        qvb9774fd60-c9
tap9774fd60-c9
qbrd5b4ec48-7a        8000.76161ed62b88    no        qvbd5b4ec48-7a
tapd5b4ec48-7a
qbrdc783dde-d2        8000.3ef673f5c71d    no        qvbdc783dde-d2
tapdc783dde-d2
virbr0        8000.000000000000    yes

 

root@n1:~# ovs-vsctl show
99f3e195-b0e4-4d2b-af97-0ad02c8e0125
Bridge br-int
fail_mode: secure
Port br-int
Interface br-int
type: internal
Port patch-tun
Interface patch-tun
type: patch
options: {peer=patch-int}
Port “qvodc783dde-d2”
tag: 1
Interface “qvodc783dde-d2”
Port “qvo73435e86-ea”
tag: 1
Interface “qvo73435e86-ea”
Port “qvo9774fd60-c9”
tag: 4
Interface “qvo9774fd60-c9”
Port “qvo68056772-f9”
tag: 2
Interface “qvo68056772-f9”
Port “qvod5b4ec48-7a”
tag: 3
Interface “qvod5b4ec48-7a”
Bridge br-tun
fail_mode: secure
Port br-tun
Interface br-tun
type: internal
Port “gre-c0a8640a”
Interface “gre-c0a8640a”
type: gre
options: {df_default=”true”, in_key=flow, local_ip=”192.168.100.12″, out_key=flow, remote_ip=”192.168.100.10″}
Port patch-int
Interface patch-int
type: patch
options: {peer=patch-tun}
ovs_version: “2.0.2”

 

root@work:~# ip netns list
qdhcp-79521d7b-5be6-46ec-ad26-2cef476e238c
qdhcp-3d08e795-22cf-4210-841e-749670aae23a
qdhcp-5a443267-f277-4d3d-b19b-9d5540c38cd4
qdhcp-8fba63e3-7650-4b23-9495-1ba81ee1b310
qdhcp-473bfc4b-866f-4fbb-bd11-a975d300f710 ——–>>>>  Network ID ( EXT 2 )
qdhcp-947108a9-b157-44ac-bfa0-5652fb6e3480
qrouter-2078f2ea-3b8a-4811-8173-b38bb84c9b6b
qrouter-b69e49fa-a09a-4d4a-927e-f764f25a2778

root@work:~# ip netns exec qdhcp-473bfc4b-866f-4fbb-bd11-a975d300f710 ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
20: tap480bbf91-8b: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default
link/ether fa:16:3e:c2:d4:87 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.3/24 brd 192.168.122.255 scope global tap480bbf91-8b
valid_lft forever preferred_lft forever
inet6 fe80::f816:3eff:fec2:d487/64 scope link
valid_lft forever preferred_lft forever

 

root@work:~# ps -auxww | grep 473bfc4b-866f-4fbb-bd11-a975d300f710
nobody    3979  0.0  0.0  28208  1352 ?        S    aug05   0:00 dnsmasq –no-hosts –no-resolv –strict-order –bind-interfaces –interface=tap480bbf91-8b –except-interface=lo –pid-file=/var/lib/neutron/dhcp/473bfc4b-866f-4fbb-bd11-a975d300f710/pid –dhcp-hostsfile=/var/lib/neutron/dhcp/473bfc4b-866f-4fbb-bd11-a975d300f710/host –addn-hosts=/var/lib/neutron/dhcp/473bfc4b-866f-4fbb-bd11-a975d300f710/addn_hosts –dhcp-optsfile=/var/lib/neutron/dhcp/473bfc4b-866f-4fbb-bd11-a975d300f710/opts –dhcp-leasefile=/var/lib/neutron/dhcp/473bfc4b-866f-4fbb-bd11-a975d300f710/leases –dhcp-range=set:tag0,192.168.122.0,static,86400s –dhcp-lease-max=256 –conf-file= –domain=openstacklocal
root     17864  0.0  0.0  17552  2536 pts/7    S+   19:40   0:00 grep –color=auto 473bfc4b-866f-4fbb-bd11-a975d300f710

 

root@work:~# ip netns exec qdhcp-473bfc4b-866f-4fbb-bd11-a975d300f710 tcpdump port 67 or port 68 -lne
tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
listening on tap480bbf91-8b, link-type EN10MB (Ethernet), capture size 65535 bytes

 

root@work:~# ip netns exec qdhcp-473bfc4b-866f-4fbb-bd11-a975d300f710 ip li
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
20: tap480bbf91-8b: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN mode DEFAULT group default
link/ether fa:16:3e:c2:d4:87 brd ff:ff:ff:ff:ff:ff

root@work:~# ovs-vsctl show | grep -A1 tap480bbf91-8b
        Port “tap480bbf91-8b”
tag: 1
Interface “tap480bbf91-8b”
type: internal

 

root@work:~# ip netns exec qrouter-b69e49fa-a09a-4d4a-927e-f764f25a2778 ip rule
0:    from all lookup local
32766:    from all lookup main
32767:    from all lookup default

root@work:~# ip netns exec qrouter-2078f2ea-3b8a-4811-8173-b38bb84c9b6b ip rule
0:    from all lookup local
32766:    from all lookup main
32767:    from all lookup default

 

root@osc:/etc/nova# neutron router-list

root@osc:/etc/nova# neutron l3-agent-list-hosting-router router_dev2
+————————————–+—————- – +—————-      +——-+
| id                                                                     | host  | admin_state_up | alive  |
+————————————–                  +——+—————-+——-+
| e6071dcc-4c12-45ba-a7b1-01e64af15c42 | work  | True                   |    🙂    |
+————————————–+     —–           ——+—————-+——-+

 

root@osc:/home/asix# nova list
+————————————–+——–+——–+————+————-+————————————-+
| ID | Name | Status | Task State | Power State | Networks |
+————————————–+——–+——–+————+————-+————————————-+
| 0bf8961d-6326-49b9-a501-16cb7bdcd48c | asix1 | ACTIVE | – | Running | lingesh-net=192.168.4.10 |
| b581acb0-337c-497e-ad35-09da84492277 | i1 | ACTIVE | – | Running | lingesh-net=192.168.4.9 |
| 718f28af-4126-4af3-8f69-4505874216b9 | ovi777 | ACTIVE | – | Running | test-metadata-in-dhcp=192.168.111.4 |
| e6c1bbdf-3bcd-42ba-a70d-d9bad0b7de14 | ovi778 | ACTIVE | – | Running | test-metadata-in-dhcp=192.168.111.5 |
+————————————–+——–+——–+————+————-+————————————-+
root@osc:/home/asix# nova show asix1
+————————————–+———————————————————-+
| Property | Value |
+————————————–+———————————————————-+
| OS-DCF:diskConfig | AUTO |
| OS-EXT-AZ:availability_zone | nova |
| OS-EXT-SRV-ATTR:host | n1 |
| OS-EXT-SRV-ATTR:hypervisor_hostname | n1 |
| OS-EXT-SRV-ATTR:instance_name | instance-00000064 |
| OS-EXT-STS:power_state | 1 |
| OS-EXT-STS:task_state | – |
| OS-EXT-STS:vm_state | active |
| OS-SRV-USG:launched_at | 2016-08-20T20:56:12.000000 |
| OS-SRV-USG:terminated_at | – |
| accessIPv4 | |
| accessIPv6 | |
| config_drive | |
| created | 2016-08-20T20:55:39Z |
| flavor | m1.small (2) |
| hostId | 410946e5d0a018e7b0327d3072ee33edc19b066d2024d804e3a62e07 |
| id | 0bf8961d-6326-49b9-a501-16cb7bdcd48c |
| image | CentOS (6d3b314b-de25-4f09-aefa-64cbb762937d) |
| key_name | k1 |
| lingesh-net network | 192.168.4.10 |
| metadata | {} |
| name | asix1 |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| security_groups | default |
| status | ACTIVE |
| tenant_id | 859ccf28d2824d4a844f819b4f33e257 |
| updated | 2016-08-20T20:56:12Z |
| user_id | 81bf40676b004e2cba0cd583ac9d4dc8 |
+————————————–+———————————————————-+

 

root@osc:/home/asix# nova-manage service list
Binary           Host                                 Zone             Status     State Updated_At
nova-cert        osc                                  internal         enabled    XXX   2016-08-21 17:18:19
nova-consoleauth osc                                  internal         enabled    🙂   2016-08-21 20:01:10
nova-scheduler   osc                                  internal         enabled    XXX   2016-08-21 17:18:14
nova-conductor   osc                                  internal         enabled    🙂   2016-08-21 20:01:10
nova-compute     n1                                   nova             enabled    🙂   2016-08-21 20:01:04

root@osc:/home/asix# service nova-scheduler status
nova-scheduler stop/waiting

root@osc:/home/asix# service nova-scheduler start
nova-scheduler start/running, process 4075

root@osc:/home/asix# service nova-cert status
nova-cert stop/waiting

root@osc:/home/asix# service nova-cert start
nova-cert start/running, process 4105

root@osc:/home/asix# service nova-cert status
nova-cert start/running, process 4105

 

aws – Placement Groups

Placement Groups

A placement group is a logical grouping of instances within a single Availability Zone.

Placement Groups are recommended for applications that benefit from low network latency, high network throughput, or both

Placement Groups Limitations 

Placement groups have the following limitations:

  • A placement group can’t span multiple AZ’s
  • Not all instance types support placement groups
  • The name of a placement group must be unique within your AWS account
  • Not all the instance types that can be launched into a placement group can take full advantage of the 10 Gbps network

You can delete a placement group if you no longer need it. Before you can delete your placement group, you must terminate all instances that you launched into the placement group.

aws – Direct Connect

AWS Direct Connect is a network service that provides an alternative to using the Internet to utilize AWS cloud services. With AWS Direct Connect, you can provision a direct link between your internal network and an AWS region using a high-throughput, dedicated connection

  • reduce your network cost
  • improve your throughput
  • provide more consistent network experience

AWS Direct connect speed

1 Gbps and 10 Gbps ports are available. Speeds of 50 Mbps, 100 Mbps, 200 Mbps, 300 Mbps, 400 Mbps and 500 Mbps can be ordered from any APN partner

 

OpenStack

root@osc:~# glance –version
0.14.0

root@osc:~# neutron –version
2.3.8

root@osc:~# nova –version
2.19.0

 

root@osc:~# nova service-list
+—-+——————+——+———-+———+——-+—————————-+—-+
| Id    | Binary               | Host | Zone     | Status | State | Updated_at | Disabled Reason |
+—-+——————+——+———-+———+——-+—————————-+—–+
| 1 | nova-cert                | osc | internal | enabled | up | 2016-07-28T12:48:15.000000 | – |
| 2 | nova-consoleauth | osc | internal | enabled | up | 2016-07-28T12:48:21.000000 | – |
| 3 | nova-scheduler  | osc | internal | enabled | up | 2016-07-28T12:48:15.000000 | – |
| 4 | nova-conductor | osc | internal | enabled | up | 2016-07-28T12:48:21.000000 | – |
| 5 | nova-compute    | n1   | nova      | enabled | up | 2016-07-28T12:47:25.000000 | – |
+—-+——————+——+———-+———+——-+—————————-+—-+

 

IP Address allocations

 

IP_address_allocations

 

 

AWS – Elastic MapReduce (Amazon EMR)

Amazon Elastic MapReduce (Amazon EMR) is a web service that makes it easy to quickly and cost-effectively process vast amounts of data.

Amazon EMR simplifies big data processing, providing a managed Hadoop framework that makes it easy, fast, and cost-effective for you to distribute and process vast amounts of your data across dynamically scalable Amazon EC2 instances. You can also run other popular distributed frameworks such as Apache Spark and Presto in Amazon EMR, and interact with data in other AWS data stores such as Amazon S3 and Amazon DynamoDB.

Amazon EMR securely and reliably handles your big data use cases, including log analysis, web indexing, data warehousing, machine learning, financial analysis, scientific simulation, and bioinformatics.

AWS Elastic Beanstalk

AWS Elastic Beanstalk – is the fastest and easiest way to get an application up and running on AWS. Developers can simply upload their application code and the service automatically handles all the details, such as resource provisioning, load balancing, auto scaling, and monitoring.

 

AWS Elastic Beanstalk supports the following languages and development stacks:

-Apache Tomcat for Java apps

-Apache HTTP Server for PHP applications

-Apache HTTP Server for Python applications

-Nginx or Apache HTTP Server for Node.js applications

– Microsoft IIS for .NET applications

 

Reference:

https://aws.amazon.com/faqs/

AWS – Cloud HSM

The AWS CloudHSM service helps you meet corporate, contractual and regulatory compliance requirements for data security by using dedicated Hardware Security Module (HSM) appliances within the AWS cloud.

CloudHSM complements existing data protection solutions and allows you to protect your encryption keys within HSMs that are designed and validated to government standards for secure key management. CloudHSM allows you to securely generate, store and manage cryptographic keys used for data encryption in a way that keys are accessible only by you.

– use CloudHSM to store keys or encrypt data used by other AWS services?

You can write custom applications and integrate them with CloudHSM, or you can leverage one of the third party encryption solutions available from AWS Technology Partners. Examples include EBS volume encryption and S3 object encryption and key management.

 

– other AWS services use CloudHSM to store and manage keys
Amazon  (RDS) for Oracle Database and Amazon Redshift can be configured to store master keys in CloudHSM instances.

AWS – Auto Scaling

Auto Scaling helps you to maintain application availability and allows you to scale your Amazon EC2 capacity up or down automatically according to conditions you define

Steps to create an Auto Scaling

  1. Create an Auto Scaling Group
  2. Configure your Auto Scaling Group
  3. Add an Elastic Load Balancer ( Optional)
  4. Configure Scaling Policies

Auto-scaling improves availability and will keep your infrastructure at the size needed to run your application.

Auto Scaling Components 

Groups 

Launch Configuration 

Your group uses a launch configuration as a template for its EC2 instances. When you create a launch configuration, you can specify information such as:

  • AMI ID
  • instance type
  • key pair
  • security groups
  • block device mapping for your instance

When you create an Auto Scaling Group, you must specify the launch configuration. You can use the same launch configuration with multiple Auto Scaling Groups

You can’t modify a launch configuration after you’ve created it

Scaling Plans 

A scaling plan tells Auto Scaling when and how to scale. For example, you can base a scaling plan on the occurrence of specific conditions (dynamic scaling) or on a schedule.

 

 

Attach EC2 Instances to Your Auto Scaling Group

Auto Scaling provides you with an option to Enable Auto Scaling Group for one or more EC2 Instances by attaching them to your existing Auto Scaling Group. After the instances are attached they become part of Auto Scaling group

The instance that you want to attach must meet the following criteria

  • the instance is in the running state
  • The AMI used to launch the instance must still exist
  • The instance is not a member of Auto Scaling group
  • The instance is in the same Availability Zone as the Auto Scaling Group
  • If the Auto Scaling Group has an attached load balancer, the instance and the load balancer must both be in EC2- Classic or the same VPC

 

Auto Scaling lifecycle hooks enable you to perform custom actions as Auto Scaling launches or terminates instances. For example, you could install or configure software on newly launched instances, or download log files from an instance before it terminates.

Adding lifecycle hooks to an Auto Scaling group gives you greater control over how instances launch and terminate. Here are some things to consider when adding a lifecycle hook to your Auto Scaling group, to help ensure that the group continues to perform as expected.

considerations :

  • keep instance in a wait state
  • cooldowns and custom actions
  • health check grace period
  • Lifecycle Action Result
  • Spot Instances

When we create an Auto Scaling group, we must specify a launch configuration, and we can only specify one for an auto scaling group at a time.

you can only specify one launch configuration for an Auto Scaling group at a time, and you can’t modify a launch configuration after you’ve created it

AWS – Resource Group

In Amazon Web Services, a resource is an entity that you can work with, such as an Amazon Elastic Compute Cloud (Amazon EC2) instance, an AWS CloudFormation stack, an Amazon Simple Storage Service (Amazon S3) bucket, and so on. If you oversee more than one of these resources, you might find it useful to manage them as a group rather than move from one AWS service to another for each task.

-With the Resource Groups tool, you use a single page to view and manage your resources