05-kvm企业实战¶
是否支持虚拟化确认
X86服务器查看是否支持虚拟化:
# Check CPU virtualization support (vmx = Intel VT-x, svm = AMD-V).
# Non-empty output means VT is available; otherwise enable it in the BIOS.
# NOTE: egrep is deprecated — use grep -E.
grep -E 'vmx|svm' /proc/cpuinfo
#信创arm服务器查看是否支持虚拟化:
[root@localhost ~]# dmesg | grep kvm
[ 0.718369] kvm [1]: Hisi ncsnp: enabled
[ 0.718372] kvm [1]: 16-bit VMID
[ 0.718373] kvm [1]: IPA Size Limit: 48bits
[ 0.718377] kvm [1]: GICv4 support disabled
[ 0.718378] kvm [1]: vgic-v2@9b020000
[ 0.718379] kvm [1]: GIC system register CPU interface enabled
[ 0.718394] kvm [1]: vgic interrupt IRQ1
基础包安装
# X86 servers:
yum install qemu-kvm qemu-kvm-tools virt-manager libvirt virt-install vim -y
# The CentOS 7.6 repo normally ships libvirt 4.5
## Check whether the kvm kernel module is loaded
lsmod |grep kvm
# If there is no output, the module is not loaded; load it with:
modprobe kvm
# Xinchuang/ARM servers running Kylin V10 SP1:
yum install virt-manager virt-install vim libgcrypt -y
yum provides qemu-kvm # install a qemu-kvm version of 2.12.0 or newer
# Replace "xxx" below with the actual versioned package name from `yum provides`
yum install qemu-kvm-xxx -y
## libvirt comes in 6.2 and 6.9 builds (ctyunos / kylinos vendor builds — coordinate which)
yum provides libvirt
yum install libvirt-xxx -y
删除多余网桥操作(选做--一般不需要)
如果名字不对,参照下面的步骤进行修改。
先用ifconfig br_name down 停掉网桥
再用brctl delbr br_name 删除网桥
备份/etc/sysconfig/network-scripts/下的桥的网桥配置文件。
重启网络。
创建完成新的桥后,将备份文件中的ip恢复回新的桥(如果桥上配置有ip)
libvirt配置
X86 && Centos7 &&自研Ctyun2.0 && libvirt-4.5 操作步骤:
##X86 && Centos7 &&自研Ctyun2.0 && libvirt-4.5 操作步骤:
# Enable the libvirtd plain-TCP listener (libvirt 4.5 style, /etc/sysconfig driven).
# FIX: removed the backslashes before the double quotes inside the single-quoted sed
# programs — inside '…' a \" is a literal backslash+quote, which only matched by
# GNU sed's lenient BRE handling and is undefined per POSIX.
sed -i 's/^#listen_tls = 0/listen_tls = 0/' /etc/libvirt/libvirtd.conf
sed -i 's/^#listen_tcp = 1/listen_tcp = 1/' /etc/libvirt/libvirtd.conf
sed -i 's/^#tcp_port = "16509"/tcp_port = "16509"/' /etc/libvirt/libvirtd.conf
sed -i 's/^#listen_addr = "192.168.0.1"/listen_addr = "0.0.0.0"/' /etc/libvirt/libvirtd.conf
# WARNING: auth_tcp = "none" disables authentication on the TCP socket — only for trusted networks.
sed -i 's/^#auth_tcp = "sasl"/auth_tcp = "none"/' /etc/libvirt/libvirtd.conf
sed -i 's/^#mdns_adv = 1/mdns_adv = 0/' /etc/libvirt/libvirtd.conf
# Make libvirtd actually listen (pre-socket-activation style)
sed -ri 's/^#LIBVIRTD_ARGS=.*/LIBVIRTD_ARGS="--listen"/' /etc/sysconfig/libvirtd
grep 'LIBVIRTD_ARGS' /etc/sysconfig/libvirtd
# Expose VNC consoles on all interfaces
sed -ri 's/^#vnc_listen.*/vnc_listen = "0.0.0.0"/' /etc/libvirt/qemu.conf
grep 'vnc_listen' /etc/libvirt/qemu.conf
systemctl restart libvirtd
ss -tnl|grep 16509
# Port 16509 should now be listening
信创 arm && kylinos && libvirt-6.2/libvirt-6.9.1 操作步骤:
##信创 arm && libvirt-6.2/libvirt-6.9.1 操作步骤:
# Extra step required on the ARM architecture: provide AAVMF UEFI firmware paths
# expected by libvirt, pointing at the edk2 aarch64 images.
mkdir -p /usr/share/AAVMF
chown qemu:qemu /usr/share/AAVMF
# FIX: -sf makes the links idempotent — re-running this setup no longer fails
# because the links already exist.
ln -sf /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw /usr/share/AAVMF/AAVMF_CODE.fd
ln -sf /usr/share/edk2/aarch64/vars-template-pflash.raw /usr/share/AAVMF/AAVMF_VARS.fd
#编辑qemu配置
vim /etc/libvirt/qemu.conf
在末尾添加如下一行:
nvram = ["/usr/share/AAVMF/AAVMF_CODE.fd:/usr/share/AAVMF/AAVMF_VARS.fd","/usr/share/edk2/aarch64/QEMU_EFI-pflash.raw:/usr/share/edk2/aarch64/vars-template-pflash.raw"]
还有qemu.conf中vnc_tls要注释掉或者配置为0
### libvirt-6.2 / libvirt-6.9.1 configuration-file steps ########
# libvirtd configuration.
# FIX: removed the backslashes before double quotes inside single-quoted sed programs
# (non-POSIX; only worked via GNU sed leniency), and removed the stray space that the
# original wrote into the config as `log_outputs ="…"`.
sed -i 's/^#listen_tcp = 1/listen_tcp = 1/' /etc/libvirt/libvirtd.conf
sed -i 's/^#log_level = 3/log_level = 3/' /etc/libvirt/libvirtd.conf
# WARNING: auth_tcp = "none" disables authentication on the TCP socket — only for trusted networks.
sed -i 's/^#auth_tcp = "sasl"/auth_tcp = "none"/' /etc/libvirt/libvirtd.conf
sed -i 's|^#log_outputs="3:syslog:libvirtd"|log_outputs="3:file:/var/log/libvirt/libvirtd.log"|' /etc/libvirt/libvirtd.conf
sed -i 's|^#LIBVIRTD_ARGS=.*|LIBVIRTD_ARGS="--config /etc/libvirt/libvirtd.conf"|' /etc/sysconfig/libvirtd
# Enable the TLS listener (16514) and the TCP listener (16509) via socket activation
systemctl start libvirtd.socket libvirtd-ro.socket libvirtd-admin.socket libvirtd-tcp.socket
systemctl enable libvirtd.socket libvirtd-ro.socket libvirtd-admin.socket libvirtd-tcp.socket
systemctl is-active libvirtd.socket libvirtd-ro.socket libvirtd-admin.socket libvirtd-tcp.socket
# Start libvirtd
systemctl restart libvirtd
ss -tnl|grep 16509
# Port 16509 should now be listening
KVM宿主机初始化:(批量创建网桥和存储pool)
kvm-manage脚本批量初始化:(适用于centos7.6)
## Install the bc package on the hypervisor (required by the kvm-manage script)
yum install -y bc
# Edit the host info below — configure this on ALL hypervisor hosts.
# host: management IPs of every host; port/user: ssh access parameters;
# ImageDir: directory holding qcow2 templates; bridge lines: <bridge name> <backing VLAN sub-interface>.
cat > /etc/vm_manages.conf <<EOF
host: 10.27.0.17 10.27.0.16 10.27.0.15 10.27.0.14
port: 10000
user: root
ImageDir: /data/images/
bridge CN2 bond0.199
bridge os_manage bond0.150
bridge storage_manage bond0.151
bridge sdn_manage bond0.152
bridge zabbix_eShield bond0.153
bridge storage_ext bond2.300
bridge storage_inside bond1.302
bridge ext_net bond2.200
bridge compute_inside bond1.301
bridge provision bond1.309
EOF
###############以上配置文件标红部分根据实际需要修改################
host: 所有管理服务器的openstack管理地址(bond0.150)。
port:ssh登录用的port,根据实际修改。
user:ssh登录用的用户,根据实际修改。
ImageDir: 镜像存放的路径,创建虚拟机的时候需要。根据实际情况修改
初始化条件:
1》脚本所在的机器,到另外所有机器配置ssh免密登录。免密用户是上面配置文件中写的user。
2》网卡的子接口已经准备好
ip a |grep bond0 # check which sub-interfaces are missing on bond0
ip a |grep bond1
ip a |grep bond2
3》在脚本所在机器上的/etc/hosts中写入所有管理服务器的解析。
#####使用脚本进行kvm初始化#########
sh kvm-manage.sh install --all # 配置文件中的所有服务器都初始化
或者
kvm-manage.sh install --host host_ip # 一次初始化一台
非kvm-manage脚本初始化:(所有宿主机)
创建网桥
根据缺少的子接口,选择具体执行的脚本
通过
ip a |grep bond0 # check which sub-interfaces are missing on bond0
ip a |grep bond1
ip a |grep bond2
使用iface-bridge 创建(适用于centos和ctyunos2)
# Create VLAN sub-interfaces on the bond devices and persist their ifcfg files.
# Refactor of eleven copy-pasted stanzas into two helpers; the generated files and
# the live `ip` commands are identical to the originals. Run only the entries for
# sub-interfaces that are actually missing on this host.
# NETCFG_DIR may be overridden (e.g. for testing); defaults to the RHEL network-scripts dir.
NETCFG_DIR="${NETCFG_DIR:-/etc/sysconfig/network-scripts}"

# Write the persistent ifcfg file for a VLAN sub-interface.
# $1 = parent bond device, $2 = VLAN id, $3 = MTU
# NOTE(review): BONDING_MASTER=yes on a VLAN sub-interface looks wrong (it is a
# bond-master option), but it is kept verbatim from the original files — confirm.
write_vlan_ifcfg() {
  local parent=$1 vlan=$2 mtu=$3
  local dev="${parent}.${vlan}"
  cat > "${NETCFG_DIR}/ifcfg-${dev}" <<EOF
DEVICE=${dev}
TYPE=Vlan
PHYSDEV=${parent}
ONBOOT=yes
BOOTPROTO=none
REORDER_HDR=yes
IPV6INIT=no
BONDING_MASTER=yes
VLAN=yes
MTU=${mtu}
EOF
}

# Create the VLAN device live, bring it up, then persist its config.
# $1 = parent bond device, $2 = VLAN id, $3 = MTU
create_vlan_subif() {
  local parent=$1 vlan=$2 mtu=$3
  ip l add link "$parent" name "${parent}.${vlan}" type vlan id "$vlan"
  ip l set "${parent}.${vlan}" up
  write_vlan_ifcfg "$parent" "$vlan" "$mtu"
}

# bond0 management VLANs (MTU 1500)
create_vlan_subif bond0 150 1500
create_vlan_subif bond0 151 1500
create_vlan_subif bond0 152 1500
create_vlan_subif bond0 153 1500
create_vlan_subif bond0 199 1500
# bond1.301: the original only wrote the config file (interface assumed to exist)
write_vlan_ifcfg bond1 301 9000
# bond1 / bond2 storage & data VLANs (MTU 9000)
create_vlan_subif bond1 302 9000
create_vlan_subif bond1 309 9000
create_vlan_subif bond2 200 9000
create_vlan_subif bond2 300 9000
### Confirm whether the VLAN-208 sub-interface belongs on bond1 or bond2.
### Needed when deploying object-storage or file-storage related VMs.
create_vlan_subif bond2 208 9000
(适用于centos和ctyunos2)
根据实际缺少的情况,选择具体的命令
# Bridge each VLAN sub-interface; run only the commands for bridges missing on this host.
virsh iface-bridge bond0.199 CN2 --no-stp
virsh iface-bridge bond0.150 os_manage --no-stp
virsh iface-bridge bond0.151 storage_manage --no-stp
virsh iface-bridge bond0.152 sdn_manage --no-stp
# FIX: bridge name must match /etc/vm_manages.conf and the live domain config
# ("zabbix_eShield", not all-lowercase "zabbix_eshield")
virsh iface-bridge bond0.153 zabbix_eShield --no-stp
virsh iface-bridge bond2.300 storage_ext --no-stp
virsh iface-bridge bond1.302 storage_inside --no-stp
virsh iface-bridge bond2.200 ext_net --no-stp
virsh iface-bridge bond1.301 compute_inside --no-stp
virsh iface-bridge bond1.309 provision --no-stp
virsh iface-bridge bond2.208 storage_forward --no-stp
使用配置文件的方式创建(适用kylinos)
# Create bond0.150 attached to bridge os_manage (kylinos: pure config-file method)
cat > /etc/sysconfig/network-scripts/ifcfg-bond0.150 <<EOF
DEVICE=bond0.150
NAME=bond0.150
TYPE=Vlan
PHYSDEV=bond0
ONBOOT=yes
BOOTPROTO=none
REORDER_HDR=yes
IPV6INIT=no
BONDING_MASTER=yes
VLAN=yes
MTU=1500
BRIDGE=os_manage
EOF
# Create the bridge itself
# NOTE(review): IPADDR/NETMASK below are site-specific example values — adjust per host
cat > /etc/sysconfig/network-scripts/ifcfg-os_manage <<EOF
TYPE=Bridge
BOOTPROTO=static
DEFROUTE=yes
PEERDNS=yes
PEERROUTES=yes
NAME=os_manage
DEVICE=os_manage
ONBOOT=yes
IPADDR=10.1.136.13
NETMASK=255.255.248.0
#GATEWAY=10.1.143.254
EOF
### Bring up the bridge and the sub-interface
ifup os_manage && ifup bond0.150
pool 配置 (通用)
# Format a data disk, mount it persistently, and register it as a libvirt dir pool.
# Refactor of three copy-pasted stanzas; $() replaces backticks and all expansions
# are quoted. WARNING: mkfs.xfs -f destroys any data on the device.
# $1 = block device name under /dev, $2 = mount point / pool target, $3 = pool name
setup_dir_pool() {
  local dev=$1 target=$2 pool=$3
  mkdir -p "$target"
  mkfs.xfs -f "/dev/$dev" >/dev/null
  local uuid
  # Extract the bare UUID value from `blkid` output (field between the quotes)
  uuid=$(blkid -s UUID "/dev/$dev" | awk -F '"' '{print $2}')
  echo "UUID=$uuid $target xfs defaults 0 0" >>/etc/fstab
  mount -a
  virsh pool-define-as "$pool" dir --target "$target" --source-path "$target"
  virsh pool-start "$pool"
  virsh pool-autostart "$pool"
}

setup_dir_pool nvme0n1 /data/ssd-nvme0n1 ssd-nvme0n1
setup_dir_pool nvme1n1 /data/ssd-nvme1n1 ssd-nvme1n1
setup_dir_pool sdb /data/disk_sdb disk_sdb
### Deleting a storage pool ######################
# A pool must be stopped before it can be removed
virsh pool-destroy --pool vm_zp
# Remove the pool's backing data directory (the pool definition still remains)
virsh pool-delete --pool vm_zp
# Remove the pool definition file
virsh pool-undefine --pool vm_zp
执行批量创建脚本--单节点执行
准备配置文件
(单机版kvm批量脚本需要)
# Edit the host info below (required by the single-node KVM batch script).
# host: management IPs; port/user: ssh access; ImageDir: qcow2 template dir;
# bridge lines: <bridge name> <backing VLAN sub-interface>.
cat > /etc/vm_manages.conf <<EOF
host: 10.27.0.17 10.27.0.16 10.27.0.15 10.27.0.14
port: 10000
user: root
ImageDir: /data/images/
bridge CN2 bond0.199
bridge os_manage bond0.150
bridge storage_manage bond0.151
bridge sdn_manage bond0.152
bridge zabbix_eShield bond0.153
bridge storage_ext bond2.300
bridge storage_inside bond1.302
bridge ext_net bond2.200
bridge compute_inside bond1.301
bridge provision bond1.309
EOF
放置qcow2镜像模板
#放置镜像模板
[root@test-linux-node110 ~]# mkdir -p /data/images/
[root@test-linux-node110 ~]# mv centos_base.qcow2 /data/images/
[root@test-linux-node110 ~]# ls -l /data/images/
total 1181760
-rw-rw-r-- 1 secure secure 1210122240 Sep 16 09:33 centos_base.qcow2
备注:必须得存在/data/images/目录中
手动给KVM虚机添加一块网卡
# Attach a NIC (a third NIC cannot be hot-plugged: drop --live and reboot the guest)
virsh attach-interface --domain zj02-controller-10e84e205e176 --type bridge --source sdn_manage2 --model virtio --config --live
#查看虚机网络
[root@mgmt1 ~]# virsh domiflist hw-zabbix-os
Interface Type Source Model MAC
-------------------------------------------------------
vnet36 bridge zabbix_eShield virtio 52:54:00:cc:f1:2d
# Detach a NIC, identified by its MAC address
virsh detach-interface hw-zabbix-os --type bridge --mac 52:54:00:ac:e9:7b --config --live
# Bridge operations:
virsh iface-unbridge br0 # removes the bridge; the source NIC's config is restored automatically
系统磁盘扩容
# 1. On the hypervisor, grow the disk image file:
virsh blockresize --path /data/disk_sdb/jx04-nfvm-control-03-10e46e203e107.qcow2 --size 100G jx04-nfvm-control-03-10e46e203e107
# 2. Inside the guest, grow the partition and the XFS filesystem
yum -y install cloud-init cloud-utils-growpart
growpart /dev/sda 2
xfs_growfs /dev/sda2
# NOTE(review): this also removes cloud-init — confirm the guest does not need it
yum -y remove cloud-init cloud-utils-growpart
给虚机添加磁盘
[root@mgmt3 ~]# qemu-img create -f qcow2 /data/disk_sdi/hw-control-10e100e2e54.qcow2 200G
Formatting '/data/disk_sdi/hw-control-10e100e2e54.qcow2', fmt=qcow2 size=214748364800 cluster_size=65536 lazy_refcounts=off refcount_bits=16
[root@mgmt3 ~]# virsh attach-disk hw-control-10e100e2e54 /data/disk_sdi/hw-control-10e100e2e54.qcow2 --targetbus scsi sdi --sourcetype file --driver qemu --subdriver qcow2 --type disk --live --config
Disk attached successfully
删除磁盘
[root@mgmt3 ~]# virsh detach-disk hw-control-10e100e2e54 sdb --live --config ##热删除
Disk detached successfully
备份qcow2 文件
# Back up every qcow2 image into /data/disk_sdl.
# FIX: prune the destination so images already in /data/disk_sdl are not copied onto
# themselves; use NUL-delimited paths (handles spaces) and -I{} instead of the
# deprecated xargs -i.
find / -path /data/disk_sdl -prune -o -name '*.qcow2' -print0 | xargs -0 -I{} cp -- {} /data/disk_sdl
虚机打快照
[root@localhost ~]# virsh snapshot-list vm1
名称 生成时间 状态
------------------------------------------------------------
centos1_sn1 2019-02-28 10:15:47 +0800 running
centos1_sn2 2019-02-28 10:15:23 +0800 shutoff
其中running代表是开机状态创建的,shutoff是关机状态创建的
2、回滚
virsh snapshot-revert --domain vm1 centos1_sn1
3、快照删除
virsh snapshot-delete vm1 centos1_sn1
虚拟机开机自启/不自启
[root@localhost ~]# virsh autostart rh8-vm1 #设置虚拟机开机自动启动
[root@localhost ~]# virsh autostart --disable rh8-vm1 #设置取消虚拟机自动启动
批量启动虚拟机
# Start every domain that is currently shut off.
# FIX: quoted expansions, read loop instead of word-splitting a backtick substitution,
# dropped the useless 2>/dev/null on grep (it never writes to stderr here).
virsh list --all | awk '/shut off/ {print $2}' | while read -r dom; do echo "$dom"; virsh start "$dom" 2>/dev/null; done
批量停止虚拟机
# Gracefully shut down every running domain.
# BUG FIX: the original redirected to /dev/nul (missing "l"), which silently created a
# stray file named "nul" instead of discarding stderr. Also quoted all expansions.
virsh list --all | awk '/running/ {print $2}' | while read -r dom; do echo "$dom"; virsh shutdown "$dom" 2>/dev/null; done