Friday, 30 August 2013

CREATE NAMENODE BACKUP




#!/bin/bash

nameNode=namenode.Cloudera.com
timeStamp=$(date +%Y-%m-%d-%H)
workDir="/Backup/Hadoop_Namenode_backup/Backup"
targetDir="/Backup/Hadoop_Namenode_backup/zipfiles"
logfile="/var/log/namenode/${nameNode}.log.${timeStamp}"

# Fetch the current fsimage and edits files from the NameNode HTTP interface
curl -s "http://${nameNode}:50070/getimage?getimage=1" > ${workDir}/fsimage
curl -s "http://${nameNode}:50070/getimage?getedit=1" > ${workDir}/edits

zip -j $targetDir/namenode.${timeStamp}.zip $workDir/* 1>> ${logfile} 2>> ${logfile}

rm -f $workDir/edits 1>> ${logfile} 2>> ${logfile}
rm -f $workDir/fsimage 1>> ${logfile} 2>> ${logfile}

### Retention policy: remove zip archives older than 5 days
find ${targetDir} -name "*.zip" -mtime +5 -exec rm -f {} \; 1>> ${logfile} 2>> ${logfile}

exit 0
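
To run the backup on a schedule, a minimal cron entry could look like the following (assuming the script is saved at a hypothetical path such as /usr/local/bin/namenode_backup.sh and is executable):

# Hypothetical cron entry: run the NameNode backup at the top of every hour
0 * * * * /usr/local/bin/namenode_backup.sh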

Thursday, 29 August 2013

MONGODB USER CREATION


                                        

STEP 1 :-

Connect to MongoDB

[ nitin@nitin-ubuntu:~] # mongo
MongoDB shell version: 2.4.4
connecting to: test
>


STEP 2:-

Switch to the admin database

> use admin ;

STEP 3 :-

Create the admin user

> db.addUser('USERNAME','PASSWORD');

STEP 4 :-

Enable authentication so that MongoDB asks for a password

[ nitin@nitin-ubuntu:~] # sudo vim /etc/mongodb.conf

Uncomment the parameter below:
auth = true

STEP 5:-

Restart Mongodb

[ nitin@nitin-ubuntu:~] # sudo /etc/init.d/mongodb restart

STEP 6 :-

[ nitin@nitin-ubuntu:~] # mongo -u USERNAME localhost/admin -p
Enter password:
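
You can also authenticate from inside an already open shell; a minimal sketch using the same admin user:

> use admin
switched to db admin
> db.auth('USERNAME','PASSWORD')
1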


Tuesday, 27 August 2013

CREATING STATIC WEB-HOSTING ON AMAZON S3




                                 

Step 1 :-

             Login to Amazon console

Step 2 :-

            Go to the S3 tab, click on Create Bucket, and give your bucket a name.

Step 3 :-

            Click on the Actions tab and go to Properties. On the left side you will see the bucket property configuration.

Step 4 :-

Click on Static Website Hosting

Select the Enable website hosting radio button, edit the Index Document field to add index.html, and click the Save button.

Step 5 :-

                Click on Permissions on the same Properties page, then click Edit bucket policy.

                Add the policy below:

              {
                "Version": "2008-10-17",
                "Statement": [
                  {
                    "Sid": "PublicReadGetObject",
                    "Effect": "Allow",
                    "Principal": {
                      "AWS": "*"
                    },
                    "Action": "s3:GetObject",
                    "Resource": "arn:aws:s3:::<BUCKET-NAME>/*"
                  }
                ]
              }

Click on Save

Step 6 :-

Test your configuration

Click on Static Website Hosting and check the Endpoint.

It shows the URL used to access your website:

bucket-name.s3-website-<region>.amazonaws.com

For example:

Endpoint :- Test-bucket.s3-website-ap-southeast-1.amazonaws.com

Step 7 :- 

Go to Browser and access the URL :

http://Test-bucket.s3-website-ap-southeast-1.amazonaws.com
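
The same setup can also be scripted. A rough sketch using the AWS CLI (assuming it is installed and configured; Test-bucket and policy.json are the example bucket name and the policy shown above saved to a local file):

# upload the index page
aws s3 cp index.html s3://Test-bucket/
# enable static website hosting with index.html as the index document
aws s3 website s3://Test-bucket/ --index-document index.html
# apply the public-read bucket policy saved locally as policy.json
aws s3api put-bucket-policy --bucket Test-bucket --policy file://policy.json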



Friday, 16 August 2013

CREATE A SNAPSHOT AND DELETE OLD SNAPSHOT FROM AWS





Wednesday, 14 August 2013

INSTALL MONGODB ON RHEL AND CENTOS



STEP 1:- 
          Configure Package Management System (YUM)

Create a /etc/yum.repos.d/10gen.repo file 
[ nitin@nitin-ubuntu: ~ ] $ sudo vim /etc/yum.repos.d/10gen.repo
[10gen]
name=10gen Repository
baseurl=http://downloads-distro.mongodb.org/repo/redhat/os/x86_64
gpgcheck=0
enabled=1


[ nitin@nitin-ubuntu: ~ ] $ sudo yum clean all
[ nitin@nitin-ubuntu: ~ ] $ sudo yum install mongo-10gen mongo-10gen-server

STEP 2:-

Configure MongoDB
These packages configure MongoDB using the /etc/mongod.conf file in conjunction with the control script. You can find the init script at /etc/rc.d/init.d/mongod.
This MongoDB instance stores its data files in /var/lib/mongo, writes its log files to /var/log/mongo, and runs under the mongod user account.

STEP 3:- 
Start MongoDB

[ nitin@nitin-ubuntu: ~ ] $ sudo service mongod start


STEP 4:- 
Stop MongoDB

[ nitin@nitin-ubuntu: ~ ] $ sudo service mongod stop
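
To have MongoDB start automatically at boot and to verify that the instance responds, something like the following should work on RHEL/CentOS (a sketch, not part of the original steps):

[ nitin@nitin-ubuntu: ~ ] $ sudo chkconfig mongod on
[ nitin@nitin-ubuntu: ~ ] $ mongo --eval "db.runCommand({ ping: 1 })"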




USE AWS S3 WITH S3CMD

One of the most popular Amazon S3 command line clients is s3cmd, which is written in Python. As a simple AWS S3 command line tool, s3cmd is ideal when you want to run scripted cron jobs such as daily backups.

STEP 1:- 


  • To install s3cmd on Ubuntu or Debian:
     [  nitin@nitin-ubuntu: ~ ] $   sudo apt-get install s3cmd

STEP 2:-
  •  Configure:
      [  nitin@nitin-ubuntu: ~ ] $ s3cmd --configure 
Enter new values or accept defaults in brackets with Enter.
Refer to user manual for detailed description of all options.

Access key and Secret key are your identifiers for Amazon S3

Access Key: (Access Key for the admin S3 user created earlier)
Secret Key: (Secret Key for the admin S3 user created earlier)

Encryption password is used to protect your files from reading

by unauthorized persons while in transfer to S3
Encryption password: (your-password)
Path to GPG program [/usr/bin/gpg]:

When using secure HTTPS protocol all communication with Amazon S3

servers is protected from 3rd party eavesdropping. This method is
slower than plain HTTP and can't be used if you're behind a proxy
Use HTTPS protocol [No]:

On some networks all internet access must go through a HTTP proxy.

Try setting it here if you can't connect to S3 directly
HTTP Proxy server name:

New settings:

  Access Key: ***********************
  Secret Key: ***************************
  Encryption password: ***********
  Path to GPG program: /usr/bin/gpg
  Use HTTPS protocol: False
  HTTP Proxy server name:
  HTTP Proxy server port: 0

Test access with supplied credentials? [Y/n] y

Please wait...
Success. Your access key and secret key worked fine :-)

Now verifying that encryption works...

Success. Encryption and decryption worked fine :-)

Save settings? [y/N] y

Configuration saved to '/home/nitin/.s3cfg'

STEP 3:- 



  • Listing Buckets:
   [  nitin@nitin-ubuntu: ~ ] $ s3cmd ls 


  • Listing Bucket contents (folders):
   [  nitin@nitin-ubuntu: ~ ] $ s3cmd ls s3://Bucket-Name 


  • Listing Bucket contents (files):
   [  nitin@nitin-ubuntu: ~ ] $  s3cmd ls s3://Bucket-Name/Folder-Name 


  • Download all folder content:
   [  nitin@nitin-ubuntu: ~ ] $  s3cmd get --recursive s3://Bucket-Name/Folder-Name/


  • Delete all folder content:
   [  nitin@nitin-ubuntu: ~ ] $ s3cmd del --recursive s3://Bucket-Name/Folder-Name/
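

A few common upload commands are also worth noting; the names below are placeholders, so adjust them for your own bucket:

  • Create a bucket:
   [  nitin@nitin-ubuntu: ~ ] $ s3cmd mb s3://Bucket-Name


  • Upload a file:
   [  nitin@nitin-ubuntu: ~ ] $ s3cmd put local-file.txt s3://Bucket-Name/Folder-Name/


  • Sync a local directory to a bucket:
   [  nitin@nitin-ubuntu: ~ ] $ s3cmd sync local-dir/ s3://Bucket-Name/Folder-Name/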












BACKUP MONGODB AND UPLOAD TO AWS S3




#!/bin/bash



########


        # purpose :- To take a backup of MongoDB Collections.

        # Requirement :- Make Sure database.config file is present in /data/Backup/mongodb
        # cat /data/Backup/mongodb/database.config
        #  db1
        #  db2
########

PROGNAME=$(basename $0)

BACKUP_DIR="/data/Backup/mongodb/Dump"
#DATE=$(date +"%F")
DATE=$(date +%Y_%m_%d)
S3_DEL=$(date --date "30 days ago" +%Y_%m_%d)
BOX=$(uname -n)
DATABASE_FILE=$(dirname ${BACKUP_DIR})/database.config
LOGDIR=$(dirname ${BACKUP_DIR})/logs/Dump/
LOGFILE="backup_${DATE}.log"
LOCKFILE="/tmp/${PROGNAME}.lock"
REMOVE_FILE_DAYS=7
MONGODUMP_PATH="/usr/bin/mongodump"
MONGO_HOST="localhost"
MONGO_PORT="27017"
MONGO_USER=""
MONGO_PASSWORD=""
BUCKET="s3://working/mongo-backup/"
LOG_BUCKET="s3://working/mongo-backup/logs/"
MAILTO="<MAIL-ADDRESS>"

LOCK_FILE ()

{
        if [ "$1" = "create" ]; then
                if [ -f $LOCKFILE ]; then
                        SEND_MAIL "ERROR::" "Unable to create lock may lock file not removed"
                        exit 0
                fi
                touch $LOCKFILE
        fi
        if [ "$1" = "remove" ]; then
                rm -fr $LOCKFILE
        fi
}

SEND_MAIL ()

{
        mail -s "${BOX} :: ${PROGNAME} : $1 $2" "${MAILTO}" < $LOGDIR/$LOGFILE
        LOCK_FILE "remove"
        exit 1
}

LOCK_FILE "create"

echo "Script started at :- $(date)" 1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE

####### Backup and log Directory checking and creations


for dir in "${BACKUP_DIR}" "${LOGDIR}"


do

        if [ ! -d "${dir}" ]; then
                mkdir -p ${dir} 1>/dev/null 2>/dev/null
                        if [ $? -ne 0 ]; then
                                SEND_MAIL "ERROR::" "Unable to Create ${dir} Directory :"
                                LOCK_FILE "remove"
                                exit 1
                        fi
        fi
done

####### Collection file  checking


if [ ! -f "${DATABASE_FILE}" ]; then

        SEND_MAIL "ERROR::" " DATABASE Config file is not Present :"
else
        if [ ! -s "${DATABASE_FILE}" ]; then
        SEND_MAIL "ERROR ::" "DATABASE Config file is ZERO byte"
        fi

####### Dump logic started

        for MONGO_DATABASE in $(cat ${DATABASE_FILE})
        do

                ${MONGODUMP_PATH} --db ${MONGO_DATABASE}  --host ${MONGO_HOST} --port ${MONGO_PORT}  --out ${BACKUP_DIR}/${DATE}  1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE

                        if [ $? -ne 0 ]; then
                                SEND_MAIL "ERROR ::" " Unable to take dump for database ${MONGO_DATABASE}"
                        fi
        done

###### Dump Logic ended


###### Compression Logic started

        tar -zvcf ${BACKUP_DIR}/${DATE}.tgz ${BACKUP_DIR}/${DATE} 1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE
        if [ $? -ne 0 ]; then
                SEND_MAIL "ERROR ::" " Unable to compress ${BACKUP_DIR}/${MONGO_DATABASE}_${DATE} directory"

        else

                rm -fr ${BACKUP_DIR}/${DATE}
                find ${BACKUP_DIR} -name "*.tgz" -mtime +${REMOVE_FILE_DAYS} -exec rm {} \; 1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE
                if [ $? -eq 0 ]; then
                        echo "Removed old backup files." 1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE
                fi
                find ${LOGDIR} -name "*.log" -mtime +${REMOVE_FILE_DAYS} -exec rm {} \; 1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE
                if [ $? -eq 0 ]; then
                        echo "Removed old log files." 1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE
                fi
        fi

######################### Pushing data to S3 bucket


       s3cmd put ${BACKUP_DIR}/${DATE}.tgz ${BUCKET} 1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE

       if [ $? -ne 0 ];then
       SEND_MAIL "ERROR ::" " Unable to send data to S3 Bucket"
       fi

fi

       s3cmd put $LOGDIR/$LOGFILE ${LOG_BUCKET} 1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE
       if [ $? -ne 0 ];then
       SEND_MAIL "ERROR ::" " Unable to send logs to S3 Bucket"
       fi

       s3cmd del ${BUCKET}${S3_DEL}.tgz 1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE

       if [ $? -ne 0 ];then
       SEND_MAIL "ERROR ::" " Unable to  delete data from S3 Bucket"
       fi

 echo "Script Ended at $(date)" 1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE
 LOCK_FILE "remove"
        if [ $? -eq 0 ]; then
                echo "Removed lock file file." 1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE
                rm -vf ${BACKUP_DIR}/${DATE}.tgz 1>> $LOGDIR/$LOGFILE 2>> $LOGDIR/$LOGFILE
                SEND_MAIL "Success::" "Backup Script executed successfully."
        fi
exit 0
#### END OF LOGIC
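
To schedule this backup, a sample cron entry could look like the following (assuming the script is saved at a hypothetical path such as /data/Backup/mongodb/mongo_backup.sh and is executable):

# Hypothetical cron entry: run the MongoDB backup every day at 01:00
0 1 * * * /data/Backup/mongodb/mongo_backup.sh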




BULK INSERT INTO HBASE


The script below inserts bulk data into HBase using the importtsv method.
Script considerations :-

1) INPUT_file.tsv is tab-separated

2) Proper environment variables are set

Environment setup

[  nitin@nitin-R15: ~ ]$ vim  .bashrc
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$HBASE_HOME/lib/guava-<version>.jar
[  nitin@nitin-R15: ~ ]$ source .bashrc 


#!/bin/bash


INPUT_FILE="<INPUT_file.tsv>"

# Abort early if the input file is missing (before the file is modified below)
if [ ! -f ${INPUT_FILE} ]
then
        echo " ERROR :: INPUT FILE is MISSING "
        exit 2
fi

# Build the importtsv column list from the header row:
# take the first line, turn tabs into commas, and prefix every column with the cf1: column family
head -n 1 ${INPUT_FILE} > schema.txt
sed -i -e 's/\t/,/g' schema.txt
sed -i -e 's/,/,cf1:/g' schema.txt
sed -i -e 's/^/cf1:/g' schema.txt

HADOOP_LOC_FILE=$(basename ${INPUT_FILE})

# Drop the header row from the input file now that the schema has been captured
sed -i -e '1d' ${INPUT_FILE}
COLUMN_FAMILY_NAME="cf1"
HADOOP_BIN="sudo -E -u hdfs hadoop "
TABLE_NAME="NT_AMAZON"
INPUT_DIR_PATH="/user/input/"
JAR_NAME="/usr/lib/hbase/hbase.jar"
CF_FINAL_NAME=$(cat schema.txt)


create_table ()

{
hbase shell<<_EOF_
create '${TABLE_NAME}','${COLUMN_FAMILY_NAME}'
_EOF_
}

create_table

if [ $? -ne 0 ];
then
        echo "ERROR :: Unable to create table ${TABLE_NAME} on HBASE"
        exit 2
fi

sed -i -e 's/\t//g' ${INPUT_FILE}



${HADOOP_BIN} fs -put ${INPUT_FILE} ${INPUT_DIR_PATH}

if [ $? -ne 0 ];
then
        echo "ERROR :: Unable to copy file ${INPUT_FILE} on Hadoop"
        exit 2
fi

${HADOOP_BIN} jar ${JAR_NAME} importtsv -Dimporttsv.columns=HBASE_ROW_KEY,${CF_FINAL_NAME} '-Dimporttsv.separator='  ${TABLE_NAME} ${INPUT_DIR_PATH}${HADOOP_LOC_FILE}


if [ $? -ne 0 ];

then
        echo "ERROR :: Unable to insert data into Hbase"
        exit 2
fi
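
To illustrate the schema.txt transformation with a hypothetical tab-separated header row:

INPUT_file.tsv header :  name<TAB>price<TAB>url
schema.txt            :  cf1:name,cf1:price,cf1:url
importtsv columns     :  HBASE_ROW_KEY,cf1:name,cf1:price,cf1:url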

AMAZON EC2 WITH COMMAND LINE


                                
Step 1 :-

  • Sign Up for Amazon EC2

When you create an Amazon Web Services (AWS) account, AWS automatically signs up the account for all AWS services, including Amazon EC2. With Amazon EC2, you pay only for what you use. New AWS customers can get started with Amazon EC2 for free. For more information, see Amazon EC2 Pricing.

If you have an AWS account already, skip to the next step. If you don't have an AWS account, use the following procedure to create one.

To create an AWS account
1. Go to http://aws.amazon.com and click Sign Up Now.
2. Follow the on-screen instructions.
Part of the sign-up process involves receiving a phone call and entering a PIN using the phone keypad.

Step 2:

  • Launch an Amazon EC2 Instance Command line :-

Download

nitin@nitin-ubuntu:~$ wget "http://www.amazon.com/gp/redirect.html/ref=aws_rc_ec2tools?location=http://s3.amazonaws.com/ec2-downloads/ec2-api-tools.zip&token=A80325AA4DAB186C80828ED5138633E3F49160D9"


nitin@nitin-ubuntu:~$ sudo unzip ec2-api-tools.zip

nitin@nitin-ubuntu:~$ sudo mv ec2-api-tools-1.6.7.4 ec2

nitin@nitin-ubuntu:~$ sudo mv ec2 /usr/bin/

  • Configure environment variables

nitin@nitin-ubuntu:~$ sudo vim /etc/bash.bashrc
export JAVA_HOME=/usr/local/java
export EC2_HOME=/usr/bin/ec2/
export AWS_ACCESS_KEY=<Give your Access key>
export AWS_SECRET_KEY=<Give your Secret key>
export PATH=/usr/bin/ec2/bin/:$EC2_HOME:$JAVA_HOME:$PATH

Step 3:-

  • Test Your Configuration :-

nitin@nitin-ubuntu:~$ sudo ec2-describe-regions
REGION eu-west-1 ec2.eu-west-1.amazonaws.com
REGION sa-east-1 ec2.sa-east-1.amazonaws.com
REGION us-east-1 ec2.us-east-1.amazonaws.com
REGION ap-northeast-1 ec2.ap-northeast-1.amazonaws.com
REGION us-west-2 ec2.us-west-2.amazonaws.com
REGION us-west-1 ec2.us-west-1.amazonaws.com
REGION ap-southeast-1 ec2.ap-southeast-1.amazonaws.com
REGION ap-southeast-2 ec2.ap-southeast-2.amazonaws.com

Make sure you get output similar to the above.

  • Check available images from AWS

nitin@nitin-ubuntu:~$ ec2-describe-images -a

You will get all Amazon Machine Images (AMIs) available from AWS.

Step 4:-

  • To Create new instance on EC2

Key Pair additions :-

In order to log in securely to a public AMI instance, you will need a public/private keypair. Amazon EC2 public images use this feature to provide secure access without passwords. There is a command in the API tools for this; you only have to choose a name for the keypair as a parameter.

nitin@nitin-ubuntu:~$ ec2-add-keypair keypair_for_ubuntu > keypair_for_ubuntu.pem

  • Instance Creations :-

nitin@nitin-ubuntu:~$ ec2run <AMI Image ID> -k <Key pair name> -t <Type of instance>

nitin@nitin-ubuntu:~$ ec2run ami-76057f24 -k keypair_for_ubuntu -t t1.micro

Option Explanation :-

ami-76057f24 -> AMI image
keypair_for_ubuntu -> Key pair name generated by ec2-add-keypair
t1.micro -> Type of instance




The instance ID returned by the ec2-run-instances command is an identifier that will be used later to manipulate this instance (in this example, i-27494b46).

  • Check Instance Status

nitin@nitin-ubuntu:~$ ec2-describe-instance-status i-XXXXXX

The instance state should show running before you continue.

Step 5:

  • Access the instance with ssh

nitin@nitin-ubuntu:~$ sudo ssh -i <PATH of Key Pair> user@<Public DNS name reported by ec2-describe-instances>

nitin@nitin-ubuntu:~$ sudo ssh -i .ec2/keypair_for_ubuntu.pem ubuntu@ec2-<DNS-NAME-IPADDRESS>
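
Once you are finished with the instance, the same API tools can list and terminate it; a sketch using the example instance ID from above:

nitin@nitin-ubuntu:~$ ec2-describe-instances
nitin@nitin-ubuntu:~$ ec2-terminate-instances i-27494b46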

Install Ansible  # yum install ansible Host file configuration  File  [ansible@kuber2 ~]$ cat /etc/ansible/hosts     [loca...