Thursday, June 27, 2013

ESXi / KVM command equivalents

KVM                              ESXi
dbus-uuidgen --get               esxcli system uuid get
dmidecode                        smbiosDump
virt-top                         esxtop

virsh list --all                 vim-cmd vmsvc/getallvms
virsh start <VM>                 vim-cmd vmsvc/power.on
virsh destroy <VM>               vim-cmd vmsvc/power.off
virsh dumpxml <VM>               vim-cmd vmsvc/getinfo
ps | grep qemu                   ps | grep vmx
virsh net-list --all             esxcfg-vswitch -l
ovs-vsctl show

virsh pool-list --all            esxcfg-scsidevs -a
virsh vol-list <poolname>        esxcfg-scsidevs -m

virsh version / uname -a         vmware -v

virsh -c qemu+ssh://root@10.3.200.20/system list    - run virsh commands against a remote node
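
For ad-hoc checks across several hosts, the same remote URI can be wrapped in a small loop. A minimal sketch, assuming SSH access as root; the host IPs are placeholders:

#!/usr/bin/env bash
# Run the same virsh query against a list of KVM hosts (IPs are examples)
for host in 10.3.200.20 10.3.200.21; do
    echo "== ${host} =="
    virsh -c "qemu+ssh://root@${host}/system" list --all
done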

Nutanix Network Config - KVM

virsh # net-list
Name                 State      Autostart     Persistent
--------------------------------------------------
NTNX-Local-Network   active     yes           yes
VM-Network           active     yes           yes

virsh # net-dumpxml VM-Network
<network connections='6'>
  <name>VM-Network</name>
  <uuid>2869d684-71ba-a598-5c4c-bff4a1566282</uuid>
  <forward mode='bridge'/>
  <bridge name='br0' />
  <mac address='52:54:00:53:5C:E1'/>
  <virtualport type='openvswitch'/>
  <portgroup name='VM-Network' default='yes'>
  </portgroup>
</network>

virsh #  net-dumpxml NTNX-Local-Network
<network connections='1'>
  <name>NTNX-Local-Network</name>
  <uuid>74ee8a15-4400-05e5-4924-a4f073437b4d</uuid>
  <forward mode='bridge'/>
  <bridge name='brNutanix' />
  <mac address='52:54:00:F1:FB:26'/>
  <virtualport type='openvswitch'/>
  <portgroup name='svm-iscsi-pg' default='yes'>
  </portgroup>
</network>



virsh # net-info NTNX-Local-Network
Name            NTNX-Local-Network
UUID            74ee8a15-4400-05e5-4924-a4f073437b4d
Active:         yes
Persistent:     yes
Autostart:      yes
Bridge:         brNutanix

virsh # net-info VM-Network
Name            VM-Network
UUID            2869d684-71ba-a598-5c4c-bff4a1566282
Active:         yes
Persistent:     yes
Autostart:      yes
Bridge:         br0

[root@NTNX-12AM2K480036-A ~]# ifconfig br0
br0       Link encap:Ethernet  HWaddr 00:25:90:84:1D:21
          inet addr:10.3.200.19  Bcast:10.3.203.255  Mask:255.255.252.0
          inet6 addr: fe80::225:90ff:fe84:1d21/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:100441421 errors:0 dropped:0 overruns:0 frame:0
          TX packets:38563532 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:249788959789 (232.6 GiB)  TX bytes:153229990823 (142.7 GiB)

[root@NTNX-12AM2K480036-A ~]# cat  /etc/sysconfig/network-scripts/ifcfg-br0
DEVICE="br0"
NM_CONTROLLED="no"
ONBOOT="yes"
BOOTPROTO=none
IPADDR=10.3.200.19
NETMASK=255.255.252.0
GATEWAY=10.3.200.1


[root@NTNX-12AM2K480036-C ~]# virsh dominfo 1
Id:             1
Name:           NTNX-12AM2K480036-C-CVM
UUID:           7508fc1b-d7de-5331-102a-4a41832831ce
OS Type:        hvm
State:          running
CPU(s):         8
CPU time:       3814975.4s
Max memory:     12574720 KiB
Used memory:    12574720 KiB
Persistent:     yes
Autostart:      enable
Managed save:   no
Security model: selinux
Security DOI:   0
Security label: system_u:system_r:svirt_t:s0:c162,c801 (enforcing)


ovs-vsctl show --- a Bridge is a vSwitch, each vnet Port is an individual VM port, and a bond is NIC teaming.
bee10d10-c5f2-4ede-a7d0-2b219507ff3b
    Bridge "br0" ----- VM-Network
        Port "vnet5"
            Interface "vnet5"
        Port "vnet6"
            Interface "vnet6"
        Port "br0"
            Interface "br0"
                type: internal
        Port "vnet9"
            Interface "vnet9"
        Port "vnet10"
            Interface "vnet10"
        Port "vnet11"
            Interface "vnet11"
        Port "vnet4"
            Interface "vnet4"
        Port "vnet3"
            Interface "vnet3"
        Port "vnet7"
            Interface "vnet7"
        Port "vnet0"
            Interface "vnet0"
        Port "vnet8"
            Interface "vnet8"
        Port "bond-10g"
            Interface "eth2"
            Interface "eth3"
        Port "vnet2"
            Interface "vnet2"
    Bridge brNutanix -----------------Nutanix vSwitch with one port
        Port "vnet1"
            Interface "vnet1"
        Port brNutanix
            Interface brNutanix
                type: internal
    Bridge "br1"     ---
        Port "bond-1g"
            Interface "eth1"
            Interface "eth0"
        Port "br1"
            Interface "br1"
                type: internal
    ovs_version: "1.9.0"


ovs-appctl           ovsdb-tool           ovs-pki
ovs-benchmark        ovs-dpctl            ovs-tcpundump
ovs-brcompatd        ovs-ofctl            ovs-vlan-test
ovs-bugtool          ovs-parse-backtrace  ovs-vsctl
ovsdb-client         ovs-parse-leaks      ovs-vswitchd
ovsdb-server         ovs-pcap

Active/Passive
ovsdb-tool query

http://libvirt.org/formatnetwork.html


vswitch 1: br1 --- bond-1g --- eth0, eth1 (we don't use this)
vswitch 2: br0 --- bond-10g --- eth2 and eth3 (not there in the 24xx)
vswitch 3: brNutanix --- internal interface only
ifconfig br0 and ifconfig eth2 show the same MAC address
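
To see which VM actually owns each vnetX port shown by ovs-vsctl, libvirt can map them back per domain. A small sketch using only standard virsh subcommands:

#!/usr/bin/env bash
# For every running domain, print its interfaces (vnetX), source network and MAC
for dom in $(virsh list --name); do
    echo "== ${dom} =="
    virsh domiflist "${dom}"
done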



[root@NTNX-12AM2K480036-B ~]# virsh net-dumpxml VM-VLAN60
<network>
  <name>VM-VLAN60</name>
  <uuid>afd477fe-6f2f-0b63-f83c-6bc3442684aa</uuid>
  <forward mode='bridge'/>
  <bridge name='br0' />
  <virtualport type='openvswitch'/>
  <portgroup name='VM-VLAN60' default='yes'>
    <vlan>
      <tag id='60'/>
    </vlan>
  </portgroup>
</network>
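
For reference, a network like VM-VLAN60 can be defined from a trimmed copy of that XML. A sketch: the uuid is omitted so libvirt generates one, and the VLAN tag is pushed to OVS when a VM attaches through the portgroup.

cat > /tmp/vm-vlan60.xml <<'EOF'
<network>
  <name>VM-VLAN60</name>
  <forward mode='bridge'/>
  <bridge name='br0'/>
  <virtualport type='openvswitch'/>
  <portgroup name='VM-VLAN60' default='yes'>
    <vlan>
      <tag id='60'/>
    </vlan>
  </portgroup>
</network>
EOF
virsh net-define /tmp/vm-vlan60.xml
virsh net-start VM-VLAN60
virsh net-autostart VM-VLAN60     # optional: bring it up after a host reboot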


ovs-appctl bond/show bond-1g
---- bond-1g ----
bond_mode: active-backup
bond-hash-basis: 0
updelay: 0 ms
downdelay: 0 ms
lacp_status: off

slave eth0: enabled
        active slave
        may_enable: true

slave eth1: disabled
        may_enable: false

[root@NTNX-12AM2K480036-B ~]# ovs-appctl bond/list
bond    type    slaves
bond-1g active-backup   eth1, eth0
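
The bond can also be inspected and adjusted from the command line. A sketch, assuming these appctl/vsctl knobs are available in this OVS (1.9) build:

ovs-appctl bond/show bond-10g                        # same view for the 10g bond
ovs-appctl bond/set-active-slave bond-1g eth1        # force a failover to eth1
ovs-vsctl set port bond-1g bond_mode=active-backup   # make the bond mode explicit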

Add an additional management port

ovs-vsctl set port br1 tag=60
ifconfig br1
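
To actually use br1 as a tagged management interface it still needs an address. A sketch that mirrors the ifcfg-br0 file shown earlier; the 10.3.60.x address and netmask are placeholders:

ifconfig br1 10.3.60.19 netmask 255.255.255.0 up

# make it persistent across reboots
cat > /etc/sysconfig/network-scripts/ifcfg-br1 <<'EOF'
DEVICE="br1"
NM_CONTROLLED="no"
ONBOOT="yes"
BOOTPROTO=none
IPADDR=10.3.60.19
NETMASK=255.255.255.0
EOF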



=====

Create two VMs and add them to the same private VLAN

virsh net-dumpxml VM-Network > /tmp/win20.net.xml

[root@NTNX-12AM2K480036-B ~]# cat /tmp/win20.net.xml
<network connections='10'>
  <name>Bridge-20</name>
  <forward mode='bridge'/>
  <bridge name='br20' />
  <virtualport type='openvswitch'/>
  <portgroup name='Bridge-20' default='yes'>
  </portgroup>
</network>

virsh net-define /tmp/win20.net.xml
virsh net-start Bridge-20

 [root@NTNX-12AM2K480036-B ~]# virsh net-list
Name                 State      Autostart     Persistent
--------------------------------------------------
Bridge-20            active     no            yes
NTNX-Local-Network   active     yes           yes
VM-Network           active     yes           yes
VM-VLAN60            active     no            yes

Edit the VM XML (virsh edit <vm name>) and change the interface's network to Bridge-20, or add an additional
interface on that network, then virsh destroy and virsh start the VM - see the sketch below.
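
A sketch of the change inside virsh edit - either repoint the existing interface or add a second one on the new network (the MAC is omitted so libvirt generates one):

    <interface type='network'>
      <source network='Bridge-20'/>
      <model type='virtio'/>
    </interface>

# apply it with a full restart of the domain
virsh destroy <vm name>
virsh start <vm name>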


http://openvswitch.org/pipermail/dev/2011-July/009993.html







Nutanix Controller VM Config on KVM

CVM config:

virsh # dumpxml 1
<domain type='kvm' id='1'>
  <name>NTNX-12AM2K480036-A-CVM</name>
  <uuid>72933dc9-0ee7-5b3a-708f-2b54c0d7e016</uuid>
  <memory unit='KiB'>12574720</memory>
  <currentMemory unit='KiB'>12574720</currentMemory>
  <vcpu placement='static'>8</vcpu>
  <os>
    <type arch='x86_64' machine='rhel6.3.0'>hvm</type>
    <boot dev='cdrom'/>
    <bootmenu enable='yes'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <cpu mode='custom' match='exact'>
    <model fallback='allow'>kvm64</model>
    <vendor>Intel</vendor>
    <feature policy='require' name='pbe'/>
    <feature policy='require' name='rdtscp'/>
    <feature policy='require' name='est'/>
    <feature policy='require' name='vmx'/>
    <feature policy='require' name='ds'/>
    <feature policy='require' name='smx'/>
    <feature policy='require' name='ss'/>
    <feature policy='require' name='vme'/>
    <feature policy='require' name='dtes64'/>
    <feature policy='require' name='tm2'/>
    <feature policy='require' name='ht'/>
    <feature policy='require' name='dca'/>
    <feature policy='require' name='pcid'/>
    <feature policy='require' name='tm'/>
    <feature policy='require' name='pdcm'/>
    <feature policy='require' name='pdpe1gb'/>
    <feature policy='require' name='ds_cpl'/>
    <feature policy='require' name='xtpr'/>
    <feature policy='require' name='acpi'/>
    <feature policy='require' name='monitor'/>
  </cpu>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/libexec/qemu-kvm</emulator>
    <disk type='file' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <source file='/var/lib/libvirt/NTNX-CVM/svmboot.iso'/>
      <target dev='hdc' bus='ide'/>
      <readonly/>
      <alias name='ide0-1-0'/>
      <address type='drive' controller='0' bus='1' target='0' unit='0'/>
    </disk>
    <controller type='usb' index='0'>
      <alias name='usb0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
    </controller>
    <controller type='ide' index='0'>
      <alias name='ide0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
    </controller>
    <interface type='network'>
      <mac address='52:54:00:3c:09:ff'/>
      <source network='VM-Network'/>
      <target dev='vnet0'/>
      <model type='virtio'/>
      <alias name='net0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
    </interface>
    <interface type='network'>
      <mac address='52:54:00:01:e5:2b'/>
      <source network='NTNX-Local-Network'/>
      <target dev='vnet1'/>
      <model type='virtio'/>
      <alias name='net1'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
    </interface>
    <serial type='file'>
      <source path='/tmp/NTNX.serial.out.0'/>
      <target port='0'/>
      <alias name='serial0'/>
    </serial>
    <console type='file'>
      <source path='/tmp/NTNX.serial.out.0'/>
      <target type='serial' port='0'/>
      <alias name='serial0'/>
    </console>
    <input type='mouse' bus='ps2'/>
    <graphics type='vnc' port='5900' autoport='yes' listen='127.0.0.1'>
      <listen type='address' address='127.0.0.1'/>
    </graphics>
    <video>
      <model type='cirrus' vram='9216' heads='1'/>
      <alias name='video0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
    </video>
    <hostdev mode='subsystem' type='pci' managed='yes'>
      <source>
        <address domain='0x0000' bus='0x00' slot='0x1f' function='0x2'/>
      </source>
      <alias name='hostdev0'/>
      <rom bar='off'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
    </hostdev>
    <hostdev mode='subsystem' type='pci' managed='yes'>
      <source>
        <address domain='0x0000' bus='0x05' slot='0x00' function='0x0'/>
      </source>
      <alias name='hostdev1'/>
      <rom bar='off'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
    </hostdev>
    <memballoon model='virtio'>
      <alias name='balloon0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
    </memballoon>
  </devices>
  <seclabel type='dynamic' model='selinux' relabel='yes'>
    <label>system_u:system_r:svirt_t:s0:c559,c759</label>
    <imagelabel>system_u:object_r:svirt_image_t:s0:c559,c759</imagelabel>
  </seclabel>
</domain>



Remove the storage pool:


virsh # pool-destroy kvm-training-disk27
Pool kvm-training-disk27 destroyed

virsh # pool-undefine kvm-training-disk27
Pool kvm-training-disk27 has been undefined

virsh dumpxml First >First.xml
virsh define First.xml (register)
virsh reset First (hard reset - power off and back on, without a guest shutdown)
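
The one step missing from that list is the unregister side. A minimal sketch using the same example domain:

virsh dumpxml First > First.xml    # keep a copy of the config first
virsh destroy First                # hard power-off (or virsh shutdown for a clean one)
virsh undefine First               # unregister; storage pools and volumes are untouched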

KVM: virt-manager startup failure





virt-manager fails with a "failed to connect to socket /tmp/dbus-..." error.
Detailed error:
 Error starting Virtual Machine Manager: Failed to contact configuration server; some possible causes are that you need to enable TCP/IP networking for ORBit, or you have stale NFS locks due to a system crash. See http://projects.gnome.org/gconf/ for information. (Details -  1: Failed to get connection to session: Failed to connect to socket /tmp/dbus-H3dWpSBlXJ: Connection refused)

Traceback (most recent call last):
  File "/usr/share/virt-manager/virt-manager.py", line 383, in <module>
    main()
  File "/usr/share/virt-manager/virt-manager.py", line 315, in main
    config = virtManager.config.vmmConfig(appname, appversion, glade_dir)
  File "/usr/share/virt-manager/virtManager/config.py", line 98, in __init__
    self.conf.add_dir(self.conf_dir, gconf.CLIENT_PRELOAD_NONE)
GError: Failed to contact configuration server; some possible causes are that you need to enable TCP/IP networking for ORBit, or you have stale NFS locks due to a system crash. See http://projects.gnome.org/gconf/ for information. (Details -  1: Failed to get connection to session: Failed to connect to socket /tmp/dbus-H3dWpSBlXJ: Connection refused)


X server: Xming running on Windows 7; virt-manager started via an SSH connection (PuTTY with X11 forwarding enabled).

Even though it shows the correct UUID:

[root@NTNX-12AM2K480036-D ~]# dbus-uuidgen --get
cd36ef14c5f40ace93f5e0ed0000001a
[root@NTNX-12AM2K480036-D ~]# cat /var/lib/dbus/machine-id
cd36ef14c5f40ace93f5e0ed0000001a


Fix: recreate the machine ID (dbus-uuidgen > /var/lib/dbus/machine-id) and then start virt-manager again.

More details: https://bugzilla.redhat.com/show_bug.cgi?id=598200




Wednesday, June 26, 2013

How to verify that your CentOS host is KVM ready?

1. Verify that the kernel is 2.6.20 or newer (KVM was merged into the mainline kernel in 2.6.20)
[root@NTNX-12AM2K480036-A ~]# uname -r

2.6.32-358.6.2.el6.x86_64

2. Check whether your CPU supports hardware-assisted virtualization (Intel VT-x shows the vmx flag, AMD-V shows svm); a consolidated check script follows step 10 below.
egrep "vmx|svm" /proc/cpuinfo  --color=always

flags           : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 popcnt lahf_lm ida arat epb dts tpr_shadow vnmi flexpriority ept vpid

3. [root@NTNX-12AM2K480036-A ~]# lsb_release -a

LSB Version:    :base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch
Distributor ID: CentOS
Description:    CentOS release 6.4 (Final)
Release:        6.4
Codename:       Final

4.  yum info libvirt

Installed Packages
Name        : libvirt
Arch        : x86_64
Version     : 0.10.2
Release     : 18.el6_4.5
Size        : 5.6 M
Repo        : installed
From repo   : /libvirt-0.10.2-18.el6_4.5.x86_64
Summary     : Library providing a simple virtualization API
URL         : http://libvirt.org/
License     : LGPLv2+
Description : Libvirt is a C toolkit to interact with the virtualization capabilities
            : of recent versions of Linux (and other OSes). The main package includes
            : the libvirtd server exporting the virtualization support.


5.
[root@NTNX-12AM2K480036-A ~]# yum info qemu-kvm

Installed Packages
Name        : qemu-kvm
Arch        : x86_64
Epoch       : 2
Version     : 0.12.1.2
Release     : 2.355.0.1.el6.centos.2
Size        : 4.1 M
Repo        : installed
From repo   : updates
Summary     : Userspace component of KVM
URL         : http://www.linux-kvm.org
License     : GPLv2+ and LGPLv2+ and BSD
Description : KVM (for Kernel-based Virtual Machine) is a full virtualization solution
            : for Linux on x86 hardware.
            :
            : Using KVM, one can run multiple virtual machines running unmodified Linux
            : or Windows images. Each virtual machine has private virtualized hardware:
            : a network card, disk, graphics adapter, etc.


6. [root@NTNX-12AM2K480036-A ~]# service libvirtd status

libvirtd (pid  4351) is running...

7. chkconfig --list libvirtd
libvirtd        0:off   1:off   2:off   3:on    4:on    5:on    6:off

8.  lsmod |grep kvm
kvm_intel              53484  32
kvm                   316602  1 kvm_intel


9.
[root@NTNX-12AM2K480036-A ~]# virsh sysinfo
<sysinfo type='smbios'>
  <bios>
    <entry name='vendor'>American Megatrends Inc.</entry>
    <entry name='version'>2.1b      </entry>
    <entry name='date'>10/28/2011</entry>
    <entry name='release'>8.16</entry>
  </bios>
  <system>
    <entry name='manufacturer'>Supermicro</entry>
    <entry name='product'>X8DTT-H</entry>
    <entry name='version'>1234567890</entry>
    <entry name='serial'>12AM2K480036</entry>
    <entry name='uuid'>54443858-4E54-2500-9083-00259083FC54</entry>
    <entry name='sku'>1234567890</entry>
    <entry name='family'>Server</entry>
  </system>
  <processor>
    <entry name='socket_destination'>CPU 1</entry>
    <entry name='type'>Central Processor</entry>
    <entry name='family'>Xeon</entry>
    <entry name='manufacturer'>Intel</entry>
    <entry name='signature'>Type 0, Family 6, Model 44, Stepping 2</entry>
    <entry name='version'>Intel(R) Xeon(R) CPU           X5650  @ 2.67GHz</entry>
    <entry name='external_clock'>133 MHz</entry>
    <entry name='max_speed'>2666 MHz</entry>
    <entry name='status'>Populated, Enabled</entry>
    <entry name='serial_number'>To Be Filled By O.E.M.</entry>
    <entry name='part_number'>To Be Filled By O.E.M.</entry>
  </processor>
  <processor>
    <entry name='socket_destination'>CPU 2</entry>
    <entry name='type'>Central Processor</entry>
    <entry name='family'>Xeon</entry>
    <entry name='manufacturer'>Intel</entry>
    <entry name='signature'>Type 0, Family 6, Model 44, Stepping 2</entry>
    <entry name='version'>Intel(R) Xeon(R) CPU           X5650  @ 2.67GHz</entry>
    <entry name='external_clock'>133 MHz</entry>
    <entry name='max_speed'>2666 MHz</entry>
    <entry name='status'>Populated, Enabled</entry>
    <entry name='serial_number'>To Be Filled By O.E.M.</entry>
    <entry name='part_number'>To Be Filled By O.E.M.</entry>
  </processor>
  <memory_device>
    <entry name='size'>8192 MB</entry>
    <entry name='form_factor'>DIMM</entry>
    <entry name='locator'>P1-DIMM1A</entry>
    <entry name='bank_locator'>BANK0</entry>
    <entry name='type'>DDR3</entry>
    <entry name='type_detail'>Other</entry>
    <entry name='speed'>1333 MHz</entry>
    <entry name='manufacturer'>Hyundai</entry>
    <entry name='serial_number'>8BD98C1E</entry>
    <entry name='part_number'>HMT31GR7CFR4A-H9</entry>
  </memory_device>
  <memory_device>
    <entry name='size'>8192 MB</entry>
    <entry name='form_factor'>DIMM</entry>
    <entry name='locator'>P1-DIMM2A</entry>
    <entry name='bank_locator'>BANK2</entry>
    <entry name='type'>DDR3</entry>
    <entry name='type_detail'>Other</entry>
    <entry name='speed'>1333 MHz</entry>
    <entry name='manufacturer'>Hyundai</entry>
    <entry name='serial_number'>3FD13521</entry>
    <entry name='part_number'>HMT31GR7CFR4A-H9</entry>
  </memory_device>
  <memory_device>
    <entry name='size'>8192 MB</entry>
    <entry name='form_factor'>DIMM</entry>
    <entry name='locator'>P1-DIMM3A</entry>
    <entry name='bank_locator'>BANK4</entry>
    <entry name='type'>DDR3</entry>
    <entry name='type_detail'>Other</entry>
    <entry name='speed'>1333 MHz</entry>
    <entry name='manufacturer'>Hyundai</entry>
    <entry name='serial_number'>32D17521</entry>
    <entry name='part_number'>HMT31GR7CFR4A-H9</entry>
  </memory_device>
  <memory_device>
    <entry name='size'>8192 MB</entry>
    <entry name='form_factor'>DIMM</entry>
    <entry name='locator'>P2-DIMM1A</entry>
    <entry name='bank_locator'>BANK6</entry>
    <entry name='type'>DDR3</entry>
    <entry name='type_detail'>Other</entry>
    <entry name='speed'>1333 MHz</entry>
    <entry name='manufacturer'>Hyundai</entry>
    <entry name='serial_number'>2DD13521</entry>
    <entry name='part_number'>HMT31GR7CFR4A-H9</entry>
  </memory_device>
  <memory_device>
    <entry name='size'>8192 MB</entry>
    <entry name='form_factor'>DIMM</entry>
    <entry name='locator'>P2-DIMM2A</entry>
    <entry name='bank_locator'>BANK8</entry>
    <entry name='type'>DDR3</entry>
    <entry name='type_detail'>Other</entry>
    <entry name='speed'>1333 MHz</entry>
    <entry name='manufacturer'>Hyundai</entry>
    <entry name='serial_number'>8CD91C1E</entry>
    <entry name='part_number'>HMT31GR7CFR4A-H9</entry>
  </memory_device>
  <memory_device>
    <entry name='size'>8192 MB</entry>
    <entry name='form_factor'>DIMM</entry>
    <entry name='locator'>P2-DIMM3A</entry>
    <entry name='bank_locator'>BANK10</entry>
    <entry name='type'>DDR3</entry>
    <entry name='type_detail'>Other</entry>
    <entry name='speed'>1333 MHz</entry>
    <entry name='manufacturer'>Hyundai</entry>
    <entry name='serial_number'>38D15521</entry>
    <entry name='part_number'>HMT31GR7CFR4A-H9</entry>
  </memory_device>
</sysinfo>


10.  virsh nodeinfo
CPU model:           x86_64
CPU(s):              24
CPU frequency:       1600 MHz
CPU socket(s):       1
Core(s) per socket:  6
Thread(s) per core:  2
NUMA cell(s):        2
Memory size:         49486468 KiB
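
Pulling the checks above together, a minimal readiness script (a sketch for this CentOS 6 setup; adjust paths and service names for other distributions):

#!/usr/bin/env bash
# Quick KVM readiness summary for a CentOS 6 host
echo "Kernel:      $(uname -r)"
if egrep -q 'vmx|svm' /proc/cpuinfo; then
    echo "HW virt:     supported (vmx/svm flag present)"
else
    echo "HW virt:     NOT supported"
fi
if lsmod | grep -q '^kvm'; then
    echo "kvm module:  loaded"
else
    echo "kvm module:  not loaded"
fi
service libvirtd status
virsh nodeinfo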

How to create a VM on a Nutanix cluster running KVM?

I will go in depth later on the configuration changes made to run a Nutanix cluster on KVM.
(If you are interested, I like this doc: KVM Architecture Overview - Google Drive.)

For now, let us take a Nutanix cluster running KVM and create a VM on it.


[root@NTNX-12AM2K480036-A Create]# lsb_release    ( vmware -v )
LSB Version:    :base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch

[root@NTNX-12AM2K480036-A Create]# virsh nodeinfo
CPU model:           x86_64
CPU(s):              24
CPU frequency:       1600 MHz
CPU socket(s):       1
Core(s) per socket:  6
Thread(s) per core:  2
NUMA cell(s):        2
Memory size:         49486468 KiB

virsh sysinfo (smbiosDump) - dmidecode on Linux works as well.
<sysinfo type='smbios'>
  <bios>
    <entry name='vendor'>American Megatrends Inc.</entry>
    <entry name='version'>2.1b      </entry>
    <entry name='date'>10/28/2011</entry>
    <entry name='release'>8.16</entry>
  </bios>

Step 1.

- Log in to the Nutanix Controller VM and create an iSCSI disk on a Nutanix container ( ncli ctr ls lists the containers)
a. ncli vdisk create name=kvm-training-disk9 ctr-name=xyz max-capacity=16

ncli> vdisk ls names=kvm-training-disk9
    Name                      : kvm-training-disk9
    Container ID              : 779
    Max Capacity              : 16 GB (17,179,869,184 bytes)
    ISCSI Target              : iqn.2010-06.com.nutanix:kvm-training-disk9-e3878625
    ISCSI LUN                 : 0









b. On the KVM host terminal, verify that you are able to see the iSCSI targets.

[root@NTNX-12AM2K480036-A ~]# sudo  iscsiadm -m discovery -t sendtargets -p 192.168.5.2:3260|egrep "iso|disk9" ( esxcfg-scsidevs -m)
192.168.5.2:3260,1 iqn.2010-06.com.nutanix:gasmith-training-cdrom-centos-6.4-x86_64-bin-dvd1.iso-bca6c6aa
192.168.5.2:3260,1 iqn.2010-06.com.nutanix:CentOS-6.4-x86_64-bin-DVD1.iso-c0e9bd87
192.168.5.2:3260,1 iqn.2010-06.com.nutanix:kvm-training-disk9-e3878625


c. Define the pool - similar to creating a datastore ( vmkfstools -C )

 virsh pool-define-as --name kvm-training-disk9 --type iscsi --source-host 192.168.5.2 \
--source-dev iqn.2010-06.com.nutanix:kvm-training-disk9-e3878625 \
--target /dev/disk/by-path
Pool kvm-training-disk9 defined

where the name can be specific to the VM name, 192.168.5.2 is the internal CVM IP, the source device is the IQN of the
iSCSI LUN, and the target puts the device nodes under /dev/disk/by-path.

[root@NTNX-12AM2K480036-A by-path]# cd /dev/disk/by-path
[root@NTNX-12AM2K480036-A by-path]# ls
ip-192.168.5.2:3260-iscsi-iqn.2010-06.com.nutanix:CentOS-6.4-x86_64-bin-DVD1.iso-c0e9bd87-lun-0
ip-192.168.5.2:3260-iscsi-iqn.2010-06.com.nutanix:kvm-training-disk9-e3878625-lun-0

d. Activate the pool

[root@NTNX-12AM2K480036-A ~]# virsh pool-list
Name                 State      Autostart
-----------------------------------------
CentOS-6.4.iso       active     no
default              active     yes
(it shows only active pools)


[root@NTNX-12AM2K480036-A ~]# virsh pool-list --all    -- shows all the pools ( esxcfg-scsidevs -a )
Name                 State      Autostart
-----------------------------------------
CentOS-6.4.iso       active     no
default              active     yes
kvm-training-disk9   inactive   no


Activate the pool
virsh # pool-start kvm-training-disk9
Pool kvm-training-disk9 started

Autostart the pool if there is a reboot
virsh # pool-autostart kvm-training-disk9
Pool kvm-training-disk9 marked as autostarted

virsh # pool-list ( esxcfg-scsidevs -m)
Name                 State      Autostart
-----------------------------------------
CentOS-6.4.iso       active     no
default              active     yes
kvm-training-disk9   active     yes

Verify the config
[root@NTNX-12AM2K480036-A ~]# virsh pool-dumpxml kvm-training-disk9
<pool type='iscsi'>
  <name>kvm-training-disk9</name>
  <uuid>3e42d29d-9037-1faa-12e1-af450904b5ab</uuid>
  <capacity unit='bytes'>17179869184</capacity>
  <allocation unit='bytes'>17179869184</allocation>
  <available unit='bytes'>0</available>
  <source>
    <host name='192.168.5.2'/>
    <device path='iqn.2010-06.com.nutanix:kvm-training-disk9-e3878625'/>
  </source>
  <target>
    <path>/dev/disk/by-path</path>
    <permissions>
      <mode>0755</mode>
      <owner>-1</owner>
      <group>-1</group>
    </permissions>
  </target>
</pool>

List the volume

[root@NTNX-12AM2K480036-A ~]# virsh vol-list --pool kvm-training-disk9 (esxcfg-scsidevs -m)

Name                 Path
-----------------------------------------
unit:0:0:0           /dev/disk/by-path/ip-192.168.5.2:3260-iscsi-iqn.2010-06.com.nutanix:kvm-training-disk9-e3878625-lun-0


[root@NTNX-12AM2K480036-A ~]# virsh vol-info --pool kvm-training-disk9 unit:0:0:0
Name:           unit:0:0:0
Type:           block
Capacity:       16.00 GiB
Allocation:     16.00 GiB


Create the VM with the following config
cat ~/KVM/Create/disk9    ( make it executable first: chmod +x )

#!/usr/bin/env bash

virt-install \
  --description "CentOS 6.4 - minimal desktop" \
  --connect qemu:///system \
  --name kvm-training9 \
  --disk vol=kvm-training-disk9/unit:0:0:0,format=raw,cache=none,io=native,bus=virtio \
  --ram 1024 \
  --vcpu 1 \
  --graphics vnc,port=5905,listen=0.0.0.0 \
  --os-type linux \
  --os-variant rhel6 \
  --disk vol=CentOS-6.4.iso/unit:0:0:0,format=raw,io=native,bus=ide,device=cdrom \
  --noautoconsole \
  --wait 0  --network network=VM-Network,model=virtio \
  --force


run ~/KVM/Create/disk9

[root@NTNX-12AM2K480036-A by-path]# virsh list    ( similar to vim-cmd vmsvc/getallvms, vm-support -V, or esxcli vm process list )
 Id    Name                           State
----------------------------------------------------
 1     NTNX-12AM2K480036-A-CVM        running
 52    kvm-training9                  running

virsh # dumpxml 52    ---- like the vmx file
<domain type='kvm' id='52'>
  <name>kvm-training9</name>
  <uuid>81f4f17f-b9e8-d533-1b89-6295c5ff6048</uuid>
  <description>CentOS 6.4 - minimal desktop</description>
  <memory unit='KiB'>1048576</memory>
  <currentMemory unit='KiB'>1048576</currentMemory>
  <vcpu placement='static'>1</vcpu>
  <os>
    <type arch='x86_64' machine='rhel6.4.0'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/libexec/qemu-kvm</emulator>
    <disk type='block' device='disk'>
      <driver name='qemu' type='raw' cache='none' io='native'/>
      <source dev='/dev/disk/by-path/ip-192.168.5.2:3260-iscsi-iqn.2010-06.com.nutanix:kvm-training-disk9-e3878625-lun-0'/>
      <target dev='vda' bus='virtio'/>
      <alias name='virtio-disk0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
    </disk>
    <disk type='block' device='cdrom'>
      <driver name='qemu' type='raw' io='native'/>
      <source dev='/dev/disk/by-path/ip-192.168.5.2:3260-iscsi-iqn.2010-06.com.nutanix:CentOS-6.4-x86_64-bin-DVD1.iso-c0e9bd87-lun-0'/>
      <target dev='hdc' bus='ide'/>
      <readonly/>
      <alias name='ide0-1-0'/>
      <address type='drive' controller='0' bus='1' target='0' unit='0'/>
    </disk>
    <controller type='usb' index='0'>
      <alias name='usb0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
    </controller>
    <controller type='ide' index='0'>
      <alias name='ide0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
    </controller>
    <interface type='network'>
      <mac address='52:54:00:3e:4c:f5'/>
      <source network='VM-Network'/>
      <target dev='vnet6'/>
      <model type='virtio'/>
      <alias name='net0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
    </interface>
    <serial type='pty'>
      <source path='/dev/pts/6'/>
      <target port='0'/>
      <alias name='serial0'/>
    </serial>
    <console type='pty' tty='/dev/pts/6'>
      <source path='/dev/pts/6'/>
      <target type='serial' port='0'/>
      <alias name='serial0'/>
    </console>
    <input type='tablet' bus='usb'>
      <alias name='input0'/>
    </input>
    <input type='mouse' bus='ps2'/>
    <graphics type='vnc' port='5909' autoport='no' listen='0.0.0.0'>
      <listen type='address' address='0.0.0.0'/>
    </graphics>
    <video>
      <model type='cirrus' vram='9216' heads='1'/>
      <alias name='video0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
    </video>
    <memballoon model='virtio'>
      <alias name='balloon0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
    </memballoon>
  </devices>
  <seclabel type='dynamic' model='selinux' relabel='yes'>
    <label>unconfined_u:system_r:svirt_t:s0:c399,c943</label>
    <imagelabel>unconfined_u:object_r:svirt_image_t:s0:c399,c943</imagelabel>
  </seclabel>
</domain>


[root@NTNX-12AM2K480036-A Create]# ps -ef | grep qemu | grep training9    ( similar to the vmx process in VMware )
qemu     14276     1  0 15:44 ?        00:00:42 /usr/libexec/qemu-kvm -name kvm-training9 -S -M rhel6.4.0 -enable-kvm -m 1024 -smp 1,sockets=1,cores=1,threads=1 -uuid 81f4f17f-b9e8-d533-1b89-6295c5ff6048 -nodefconfig -nodefaults -chardev socket,id=charmonitor,path=/var/lib/libvirt/qemu/kvm-training9.monitor,server,nowait -mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc -no-shutdown -device piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 -drive file=/dev/disk/by-path/ip-192.168.5.2:3260-iscsi-iqn.2010-06.com.nutanix:kvm-training-disk9-e3878625-lun-0,if=none,id=drive-virtio-disk0,format=raw,cache=none,aio=native -device virtio-blk-pci,scsi=off,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1 -drive file=/dev/disk/by-path/ip-192.168.5.2:3260-iscsi-iqn.2010-06.com.nutanix:CentOS-6.4-x86_64-bin-DVD1.iso-c0e9bd87-lun-0,if=none,media=cdrom,id=drive-ide0-1-0,readonly=on,format=raw,aio=native -device ide-drive,bus=ide.1,unit=0,drive=drive-ide0-1-0,id=ide0-1-0 -netdev tap,fd=36,id=hostnet0,vhost=on,vhostfd=39 -device virtio-net-pci,netdev=hostnet0,id=net0,mac=52:54:00:3e:4c:f5,bus=pci.0,addr=0x3 -chardev pty,id=charserial0 -device isa-serial,chardev=charserial0,id=serial0 -device usb-tablet,id=input0 -vnc 0.0.0.0:9 -vga cirrus -device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x5


virt-top (esxtop) - press 1, 2, 3 to switch display views
 virt-top 17:40:34 - x86_64 24/24CPU 1600MHz 48326MB
9 domains, 8 active, 8 running, 0 sleeping, 0 paused, 1 inactive D:0 O:0 X:0
CPU: 1.0%  Mem: 22008 MB (22008 MB by guests)

   ID S RXBY TXBY RXPK TXPK DOMAIN       INTERFACE
    1 R  23K  25K   96   89 NTNX-12AM2K4 vnet0
   43 R  723    0    9    0 gasmith-trai vnet2
    52 R    0    0    0    0 kvm-training vnet6

We have virt_install in the Nutanix CVM to automate these steps (create the iSCSI disk, create the pool, and install the VM):


 .CVM:10.3.202.19:~/nutanix_kvm/bin$ ./virt_install --cdrom /ImageStore/win7.iso --disk 128 --nic VM-Network --vnc_port 5999 --os_type windows --os_variant win7 --name kvm-testing-win27


2013-06-27 11:21:43 INFO batch_worker.py:190 Preparing nutanix disks: 0%
2013-06-27 11:21:46 INFO batch_worker.py:190 Preparing nutanix disks: 50%
2013-06-27 11:21:46 INFO batch_worker.py:190 Preparing nutanix disks: 100%
2013-06-27 11:21:46 INFO batch_worker.py:190 Creating libvirt storage pools: 0%
2013-06-27 11:21:50 INFO batch_worker.py:190 Creating libvirt storage pools: 50%
2013-06-27 11:21:52 INFO batch_worker.py:190 Creating libvirt storage pools: 100%
2013-06-27 11:21:52 INFO kvm_domain_template.py:156 Running virt-install

( connect to VNC display :99; disable "Adapt" and select max quality in the VNC viewer )
Connect to the console and install CentOS (virt-manager works as well).
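
For example, from a workstation with a VNC client installed (a sketch; the host IP is a placeholder, and display :99 corresponds to TCP port 5999 as requested with --vnc_port above):

vncviewer 10.3.200.20:99     # display :99 = TCP port 5999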

virsh # list --all (vmsvc/getallvms)
 Id    Name                           State
----------------------------------------------------
 1     NTNX-12AM2K480036-C-CVM        running
 40    kvm-training03                 running
 41    kvm-training6                  running
 42    kvm-training4                  running
 47    kvm-testing-win21              running
 48    kvm-testing-win24              running
 -     kvm-testing-win99              shut off



virsh # start kvm-testing-win99  -- vim-cmd vmsvc/power.on
Domain kvm-testing-win99 started