500 KVM hosts, ~5,000 KVM guests
A key trait of KVM guests: the inside of a guest is opaque to the host, while the inside of a container is visible from the host.
Typical asset questions:
How many guests run CentOS 7.6?
What is each guest's IP address?
How many 4C8G guests, how many 2C4G guests?
Asset inventory: an Excel file
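At this scale the questions above are easiest to answer with a small script. A minimal sketch, assuming passwordless SSH to every hypervisor and a hosts.txt listing all 500 of them (both the file name and the SSH setup are assumptions, not part of these notes):
#!/bin/bash
# count running guests on every KVM host, then the grand total
total=0
while read -r host; do
    n=$(ssh "$host" 'virsh list --name | grep -c .')
    echo "$host: $n guests"
    total=$((total + n))
done < hosts.txt
echo "total: $total guests"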
KVM management platforms, small scale: oVirt, WebVirtMgr, CloudStack, ZStack, Proxmox VE, ...
Large scale: OpenStack, Alibaba dedicated cloud, Tencent dedicated cloud, QingCloud, UCloud, ...
KVM management platform + billing system == public cloud
Amazon launched the AWS public cloud in 2006
Alibaba Cloud launched in 2011
OpenStack took off rapidly from 2012; around 2017 domestic cloud vendors shut down in large numbers
OpenStack is released under the Apache 2.0 license
It started by imitating AWS, later added its own features while staying AWS-compatible; it is open source with a very active community
A new version is released every six months
Releases are named alphabetically: A, B, C, D, ..., K (Kilo), L (Liberty), M (Mitaka), N (Newton), O (Ocata), Pike, Q (Queens), R (Rocky), S (Stein), T (Train), U, V, W, X, Y, Z
Liberty official Chinese docs: https://docs.openstack.org/liberty/zh_CN/install-guide-rdo/
Mitaka official Chinese docs: https://docs.openstack.org/mitaka/zh_CN/install-guide-rdo/
Newton official Chinese docs: https://docs.openstack.org/newton/zh_CN/install-guide-rdo/
The last Chinese edition (Ocata): https://docs.openstack.org/ocata/zh_CN/install-guide-rdo/
From Pike onward the official docs changed drastically and are less suitable for beginners
From Train onward, CentOS 8 is required
In short: an AWS-style, large-scale cloud platform product
| Role | Hostname | IP | Virtualization | Memory |
| --- | --- | --- | --- | --- |
| Control node | controller | 10.0.0.11 | enabled | 4G |
| Compute node | compute1 | 10.0.0.31 | enabled | 1G |
| Compute node | compute2 | 10.0.0.32 | enabled | 1G |
Be sure to set the IP address, hostname, and host entries on every node first.
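A minimal prep sketch for the control node (repeat with the matching name on each compute node; the values come from the table above):
hostnamectl set-hostname controller
cat >>/etc/hosts <<EOF
10.0.0.11 controller
10.0.0.31 compute1
10.0.0.32 compute2
EOF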
Prepare the yum repositories
echo '10.0.0.1 mirrors.aliyun.com' >>/etc/hosts
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum makecache
yum install centos-release-openstack-ocata.noarch -y
# comment out mirrorlist and use baseurl instead:
vim /etc/yum.repos.d/CentOS-QEMU-EV.repo
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=virt-kvm-common
baseurl=http://mirror.centos.org/$contentdir/$releasever/virt/$basearch/kvm-common/
vim /etc/hosts
10.0.0.1 mirrors.aliyun.com mirror.centos.org
yum makecache
yum repolist
Install the base services
Install the OpenStack client
# all nodes
yum install python-openstackclient -y
Install the database
# control node
# install
yum install mariadb mariadb-server python2-PyMySQL -y
# configure
vi /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 10.0.0.11
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
# start
systemctl start mariadb
systemctl enable mariadb
# secure the installation
mysql_secure_installation
# answers: Enter (blank current root password), n (do not change the
# root password), then y to the remaining four prompts (remove
# anonymous users, disallow remote root login, drop the test
# database, reload privilege tables)
# verify
[root@controller ~]# netstat -lntup|grep 3306
tcp 0 0 10.0.0.11:3306 0.0.0.0:* LISTEN 23304/mysqld
Install the message queue
# control node
# install
yum install rabbitmq-server -y
# start
systemctl start rabbitmq-server.service
systemctl enable rabbitmq-server.service
# verify
[root@controller ~]# netstat -lntup|grep 5672
tcp 0 0 0.0.0.0:25672 0.0.0.0:* LISTEN 23847/beam
tcp6 0 0 :::5672 :::* LISTEN 23847/beam
# create the openstack user and grant permissions
rabbitmqctl add_user openstack RABBIT_PASS
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
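A quick check that the user and its permissions landed (rabbitmqctl ships with the server):
rabbitmqctl list_users          # should include: openstack
rabbitmqctl list_permissions    # openstack  .*  .*  .*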
Install the memcached cache
# control node
# install
yum install memcached python-memcached -y
# configure
vim /etc/sysconfig/memcached
OPTIONS="-l 10.0.0.11"
# start
systemctl start memcached.service
systemctl enable memcached.service
# verify
[root@controller ~]# netstat -lntup|grep 11211
tcp 0 0 10.0.0.11:11211 0.0.0.0:* LISTEN 24519/memcached
udp 0 0 10.0.0.11:11211 0.0.0.0:* 24519/memcached
Keystone (identity service)
# control node
Create the database
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
IDENTIFIED BY 'KEYSTONE_DBPASS';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
IDENTIFIED BY 'KEYSTONE_DBPASS';
Install the Keystone packages
yum install openstack-keystone httpd mod_wsgi -y
Edit the configuration file
cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak
>/etc/keystone/keystone.conf
vim /etc/keystone/keystone.conf
[DEFAULT]
[assignment]
[auth]
[cache]
[catalog]
[cors]
[cors.subdomain]
[credential]
[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[federation]
[fernet_tokens]
[healthcheck]
[identity]
[identity_mapping]
[kvs]
[ldap]
[matchmaker_redis]
[memcache]
[oauth1]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
[policy]
[profiler]
[resource]
[revoke]
[role]
[saml]
[security_compliance]
[shadow_users]
[signing]
[token]
provider = fernet
[tokenless_auth]
[trust]
Sync the database
su -s /bin/sh -c "keystone-manage db_sync" keystone
Initialize the Fernet keys
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
Bootstrap Keystone
keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
--bootstrap-admin-url http://controller:35357/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne
Configure httpd
echo "ServerName controller" >>/etc/httpd/conf/httpd.conf
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
Start httpd
systemctl start httpd
systemctl enable httpd
Verify Keystone
vim admin-openrc
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
source admin-openrc
Request a token to test
openstack token issue
Create the service project
openstack project create --domain default \
--description "Service Project" service
Glance (image service)
# control node
1. Create the database and grant privileges
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
IDENTIFIED BY 'GLANCE_DBPASS';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
IDENTIFIED BY 'GLANCE_DBPASS';
2. Create the service user in Keystone and assign the admin role
openstack user create --domain default --password GLANCE_PASS glance
openstack role add --project service --user glance admin
3. Register the API endpoints in Keystone
openstack service create --name glance \
--description "OpenStack Image" image
openstack endpoint create --region RegionOne \
image public http://controller:9292
openstack endpoint create --region RegionOne \
image internal http://controller:9292
openstack endpoint create --region RegionOne \
image admin http://controller:9292
4. Install the packages
yum install openstack-glance -y
5. Edit the configuration files
Edit glance-api.conf
cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
>/etc/glance/glance-api.conf
vim /etc/glance/glance-api.conf
[DEFAULT]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[image_format]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = GLANCE_PASS
[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
Edit glance-registry.conf
cp /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.bak
>/etc/glance/glance-registry.conf
vim /etc/glance/glance-registry.conf
[DEFAULT]
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = GLANCE_PASS
[matchmaker_redis]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
6. Sync the database
su -s /bin/sh -c "glance-manage db_sync" glance
7. Start the services
systemctl start openstack-glance-api.service openstack-glance-registry.service
systemctl enable openstack-glance-api.service openstack-glance-registry.service
8. Verify
# upload the cirros-0.3.4-x86_64-disk.img image
openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --public
Nova (compute service) on the control node
1. Create the databases and grant privileges
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \
IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \
IDENTIFIED BY 'NOVA_DBPASS';
2. Create the service users in Keystone and assign the admin role
openstack user create --domain default --password NOVA_PASS nova
openstack role add --project service --user nova admin
openstack user create --domain default --password PLACEMENT_PASS placement
openstack role add --project service --user placement admin
3. Register the API endpoints in Keystone
openstack service create --name nova \
--description "OpenStack Compute" compute
openstack endpoint create --region RegionOne \
compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne \
compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne \
compute admin http://controller:8774/v2.1
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
4. Install the packages
yum install openstack-nova-api openstack-nova-conductor \
openstack-nova-console openstack-nova-novncproxy \
openstack-nova-scheduler openstack-nova-placement-api -y
5. Edit the configuration files
Edit nova.conf
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
>/etc/nova/nova.conf
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller
my_ip = 10.0.0.11
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[console]
[consoleauth]
[cors]
[cors.subdomain]
[crypto]
[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = NOVA_PASS
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:35357/v3
username = placement
password = PLACEMENT_PASS
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip
[workarounds]
[wsgi]
[xenserver]
[xvp]
Edit the Placement config
vim /etc/httpd/conf.d/00-nova-placement-api.conf
# insert the following before </VirtualHost>
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
Start Placement
systemctl restart httpd
6. Sync the databases
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
Verify
nova-manage cell_v2 list_cells
7. Start the services
systemctl start openstack-nova-api.service \
openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl enable openstack-nova-api.service \
openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service
8. Verify
nova service-list
Nova on the compute node
1. Install
yum install openstack-nova-compute -y
2. Configure
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
>/etc/nova/nova.conf
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller
my_ip = 10.0.0.31
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[console]
[consoleauth]
[cors]
[cors.subdomain]
[crypto]
[database]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = NOVA_PASS
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:35357/v3
username = placement
password = PLACEMENT_PASS
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
3. Start
systemctl start libvirtd openstack-nova-compute.service
systemctl enable libvirtd openstack-nova-compute.service
On the control node, register the new compute host:
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Neutron (networking service) on the control node
1. Create the database and grant privileges
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
IDENTIFIED BY 'NEUTRON_DBPASS';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
IDENTIFIED BY 'NEUTRON_DBPASS';
2. Create the service user in Keystone and assign the admin role
openstack user create --domain default --password NEUTRON_PASS neutron
openstack role add --project service --user neutron admin
3. Register the API endpoints in Keystone
openstack service create --name neutron \
--description "OpenStack Networking" network
openstack endpoint create --region RegionOne \
network public http://controller:9696
openstack endpoint create --region RegionOne \
network internal http://controller:9696
openstack endpoint create --region RegionOne \
network admin http://controller:9696
4. Install the packages
yum install openstack-neutron openstack-neutron-ml2 \
openstack-neutron-linuxbridge ebtables -y
5. Edit the configuration files
Edit neutron.conf
cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
>/etc/neutron/neutron.conf
vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins =
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[agent]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS
[matchmaker_redis]
[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = NOVA_PASS
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[qos]
[quotas]
[ssl]
Edit ml2_conf.ini
cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak
>/etc/neutron/plugins/ml2/ml2_conf.ini
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[ml2]
type_drivers = flat,vlan
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
[ml2_type_vxlan]
[securitygroup]
enable_ipset = true
Edit the linuxbridge-agent config
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
>/etc/neutron/plugins/ml2/linuxbridge_agent.ini
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eth0
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = false
Edit the DHCP agent config
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
Edit the metadata-agent config
vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = METADATA_SECRET
Edit nova.conf
# control node
vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET
6. Sync the database
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
7. Start the services
systemctl restart openstack-nova-api.service
systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
8. Verify
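The notes leave this step empty; a reasonable check is to list the agents and confirm the Linux bridge, DHCP, and metadata agents are alive:
openstack network agent list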
Neutron on the compute node
1. Install
yum install openstack-neutron-linuxbridge ebtables ipset -y
2. Configure
Edit neutron.conf
cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
>/etc/neutron/neutron.conf
vim /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
[agent]
[cors]
[cors.subdomain]
[database]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[qos]
[quotas]
[ssl]
Edit linuxbridge_agent.ini
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
>/etc/neutron/plugins/ml2/linuxbridge_agent.ini
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eth0
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = false
Edit nova.conf
# compute node
vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS
3. Start
systemctl restart openstack-nova-compute.service
systemctl start neutron-linuxbridge-agent.service
systemctl enable neutron-linuxbridge-agent.service
4. Verify
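Back on the control node, the compute node's Linux bridge agent should now show up as well:
openstack network agent list   # expect a linuxbridge agent line for compute1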
Horizon (dashboard)
1. Install
# compute node
yum install openstack-dashboard -y
2. Configure
# for the details, see the official documentation:
/etc/openstack-dashboard/local_settings
3. Start
systemctl start httpd
systemctl enable httpd
4. Access the dashboard
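Assuming the RDO packaging default WEBROOT of /dashboard, browse to the node where Horizon's httpd runs (compute1 in this setup):
http://10.0.0.31/dashboard
# log in with domain default, user admin, password ADMIN_PASS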
Launch an instance
1. Create a network
Via the CLI:
openstack network create --share --external \
--provider-physical-network provider \
--provider-network-type flat wan
(or create it in the web UI)
Create a subnet
Via the CLI:
openstack subnet create --network wan \
--allocation-pool start=10.0.0.100,end=10.0.0.200 \
--dns-nameserver 180.76.76.76 --gateway 10.0.0.254 \
--subnet-range 10.0.0.0/24 10.0.0.0
Create a flavor (the VM hardware profile)
Via the CLI:
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
(or create it in the web UI)
Create a keypair (referenced as mykey by the launch command; see the sketch below)
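Following the official guide, the keypair can be created from a fresh SSH key (the default key path is assumed):
ssh-keygen -q -N ""
openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
openstack keypair list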
Launch the instance
Via the CLI:
openstack server create --flavor m1.nano --image cirros \
--nic net-id=0d517510-2e7e-4be4-a948-d0e4b207b2cd --security-group default \
--key-name mykey oldboy
Remember to add the host entries to your Windows workstation's hosts file (C:\Windows\System32\drivers\etc\hosts).
If the instance cannot boot because the compute node itself lacks hardware virtualization (e.g. it is a VM without nested KVM), fall back to pure QEMU:
# compute node
vim /etc/nova/nova.conf
[libvirt]
cpu_mode = none
virt_type = qemu
systemctl restart openstack-nova-compute.service
Hard-reboot the instance for the change to take effect.
To switch back to full KVM acceleration (e.g. on real hardware):
# compute node
vim /etc/nova/nova.conf
[libvirt]
virt_type = kvm
hw_machine_type = x86_64=pc-i440fx-rhel7.2.0
cpu_mode = host-passthrough
systemctl restart openstack-nova-compute.service
Upload a custom CentOS image:
openstack image create "centos7.2" --file web03.qcow2 --disk-format qcow2 --container-format bare --public
# add image metadata so the QEMU guest agent channel is enabled
hw_qemu_guest_agent=yes
Test changing the password
First check that the qga (qemu-guest-agent) service is running inside the instance; a quick check is sketched below.
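A minimal check, assuming the instance's libvirt domain name is instance-00000001 (hypothetical; read the real name from virsh list):
# inside the instance
systemctl status qemu-guest-agent
# on the compute node, ping the agent through libvirt
virsh qemu-agent-command instance-00000001 '{"execute":"guest-ping"}'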
nova set-password centos7
New password:
Again:
Reset the instance state
nova reset-state centos7 --active
Cinder (block storage)
# control node
1. Create the database and grant privileges
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
IDENTIFIED BY 'CINDER_DBPASS';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
IDENTIFIED BY 'CINDER_DBPASS';
2. Create the service user in Keystone and assign the admin role
openstack user create --domain default --password CINDER_PASS cinder
openstack role add --project service --user cinder admin
3. Register the API endpoints in Keystone
openstack service create --name cinderv2 \
--description "OpenStack Block Storage" volumev2
openstack endpoint create --region RegionOne \
volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne \
volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne \
volumev2 admin http://controller:8776/v2/%\(project_id\)s
4. Install the packages
yum install openstack-cinder -y
5. Edit the configuration file
cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
>/etc/cinder/cinder.conf
vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
my_ip = 10.0.0.11
[backend]
[barbican]
[brcd_fabric_example]
[cisco_fabric_example]
[coordination]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[fc-zone-manager]
[healthcheck]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = CINDER_PASS
[matchmaker_redis]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[profiler]
[ssl]
6. Sync the database
su -s /bin/sh -c "cinder-manage db sync" cinder
7. Start the services
Edit nova.conf on the control node first:
vim /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne
Then start the services:
systemctl restart openstack-nova-api.service
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
8. Verify
cinder service-list
Install the NFS server (on the control node, 10.0.0.11, matching /etc/cinder/nfs_shares below)
yum install nfs-utils.x86_64 -y
mkdir -p /data
echo '/data 10.0.0.0/24(rw,sync,no_root_squash,no_all_squash)' >/etc/exports
systemctl start nfs
systemctl enable nfs
Install cinder-volume
# compute node
1. Install
yum install openstack-cinder targetcli python-keystone -y
2. Configure
cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
>/etc/cinder/cinder.conf
vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
my_ip = 10.0.0.31
enabled_backends = nfs
glance_api_servers = http://controller:9292
[backend]
[barbican]
[brcd_fabric_example]
[cisco_fabric_example]
[coordination]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[fc-zone-manager]
[healthcheck]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = CINDER_PASS
[matchmaker_redis]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[profiler]
[ssl]
[nfs]
volume_driver=cinder.volume.drivers.nfs.NfsDriver
nfs_shares_config=/etc/cinder/nfs_shares
volume_backend_name=nfs
nfs_qcow2_volumes=True
Create the /etc/cinder/nfs_shares file
vim /etc/cinder/nfs_shares
10.0.0.11:/data
3. Start
systemctl enable openstack-cinder-volume.service
systemctl start openstack-cinder-volume.service
chown -R cinder:cinder /var/lib/cinder/mnt/
4. Verify
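A sketch that exercises the NFS backend end to end (the volume name is arbitrary):
openstack volume create --size 1 test_nfs
openstack volume list                  # status should become available
# on the compute node, the backing file appears under the NFS mountpoint
ls /var/lib/cinder/mnt/*/volume-*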
Cold migration requires passwordless SSH between the nova users on all compute nodes
usermod -s /bin/bash nova
su - nova
ssh-keygen -t rsa -N '' -q -f ~/.ssh/id_rsa
cd ~/.ssh && cp -a id_rsa.pub authorized_keys
ssh nova@10.0.0.11
scp -rp .ssh root@10.0.0.31:`pwd`
# on node 10.0.0.31
[root@compute1 cinder]# chown -R nova:nova /var/lib/nova/.ssh
# back on the original node, test the login
ssh nova@10.0.0.31
Resize (flavor upgrade)
# control node
vim /etc/nova/nova.conf
[DEFAULT]
allow_resize_to_same_host=true
systemctl restart openstack-nova-api.service
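With the flag above in place, a resize can be exercised like this (m1.small is a hypothetical second flavor; create it first if it does not exist):
openstack flavor create --id 1 --vcpus 1 --ram 128 --disk 1 m1.small
openstack server resize --flavor m1.small oldboy
openstack server show oldboy           # wait for status VERIFY_RESIZE
openstack server resize --confirm oldboy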
For building custom images, see the official documentation: https://docs.openstack.org/image-guide/centos-image.html
Prepare the Ceph environment
Add a 200 GB disk
Set the IP address
Set the hostname
Add the host entries
Run the prep script
Detailed steps
# install the Jewel release of Ceph
rm -fr /etc/yum.repos.d/local.repo
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
vim /etc/yum.repos.d/ceph.repo
[ceph-tool]
name=ceph-tools
baseurl=https://mirror.tuna.tsinghua.edu.cn/ceph/rpm-jewel/el7/noarch/
gpgcheck=0
[ceph]
name=ceph
baseurl=https://mirror.tuna.tsinghua.edu.cn/ceph/rpm-jewel/el7/x86_64/
gpgcheck=0
yum install ceph ceph-radosgw ceph-deploy -y
mkdir cluster
cd cluster/
ceph-deploy new ceph02
vim ceph.conf
# append the following lines
osd pool default size = 1
osd crush chooseleaf type = 0
osd max object name len = 256
osd journal size = 128
ceph-deploy mon create-initial
ceph-deploy osd prepare ceph02:/dev/sdb
ceph-deploy osd activate ceph02:/dev/sdb1
Check the Ceph cluster
[root@ceph01 ~]# ceph -s
cluster d3853d3e-2864-4fe5-88ef-92e64d77cef3
health HEALTH_OK
monmap e1: 1 mons at {ceph01=10.0.0.14:6789/0}
election epoch 2, quorum 0 ceph01
osdmap e5: 1 osds: 1 up, 1 in
pgmap v8: 64 pgs, 1 pools, 0 bytes data, 0 objects
33880 kB used, 199 GB / 199 GB avail
64 active+clean
Create a storage pool
ceph osd pool create openstack 128 128
Create an RBD disk
rbd create --pool openstack test.raw --size 1024
rbd ls --pool openstack
rbd info --pool openstack test.raw
rbd map openstack/test.raw
mkfs.xfs /dev/rbd0
rbd resize openstack/test.raw --size 2048
Glance on Ceph
# control node
yum install ceph-common -y
scp -rp root@10.0.0.14:/etc/ceph/ceph.conf /etc/ceph/
scp -rp root@10.0.0.14:/etc/ceph/ceph.client.admin.keyring /etc/ceph/
chmod 777 /etc/ceph/ceph.client.admin.keyring
Delete all existing instances and images first
Edit the glance-api configuration file
vim /etc/glance/glance-api.conf
[DEFAULT]
show_image_direct_url = True
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = openstack
rbd_store_user = admin
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8
#stores = file,http
#default_store = file
#filesystem_store_datadir = /var/lib/glance/images/
# restart the service
systemctl restart openstack-glance-api.service
# test: convert the qcow2 image to raw (an rbd-backed store works best with raw) and upload it
qemu-img convert -f qcow2 -O raw cirros-0.3.4-x86_64-disk.img cirros-0.3.4-x86_64-disk.raw
openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.raw --disk-format raw --container-format bare --public
Verify
[root@ceph02 ~]# rbd ls --pool openstack
f6d3b60b-46ce-4eaf-8ed4-38a388c99e81
[root@ceph02 ~]# rbd info --pool openstack f6d3b60b-46ce-4eaf-8ed4-38a388c99e81
rbd image 'f6d3b60b-46ce-4eaf-8ed4-38a388c99e81':
size 40162 kB in 10 objects
order 22 (4096 kB objects)
block_name_prefix: rbd_data.10155a5e5f74
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
flags:
Cinder on Ceph
# storage node (here: the compute node)
yum install ceph-common -y
scp -rp root@10.0.0.14:/etc/ceph/ceph.conf /etc/ceph/
scp -rp root@10.0.0.14:/etc/ceph/ceph.client.admin.keyring /etc/ceph/
chmod 777 /etc/ceph/ceph.client.admin.keyring
vim /etc/cinder/cinder.conf
[DEFAULT]
enabled_backends = nfs,ceph
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = openstack
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = admin
rbd_secret_uuid = 457eb676-33da-42ec-9a8c-9293d545c337
systemctl restart openstack-cinder-volume.service
Verify:
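The notes leave verification empty; a sketch that targets the ceph backend through a dedicated volume type:
cinder type-create ceph
cinder type-key ceph set volume_backend_name=ceph
openstack volume create --type ceph --size 1 test_ceph
rbd ls --pool openstack                # a volume-<uuid> image should appear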
Nova on Ceph
# compute node (the [libvirt] settings and the libvirt secret belong on the nova-compute host)
vim /etc/nova/nova.conf
[DEFAULT]
force_raw_images = True
disk_cachemodes = writeback
[libvirt]
#...
images_type = rbd
images_rbd_pool = openstack
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = admin
rbd_secret_uuid = 457eb676-33da-42ec-9a8c-9293d545c337
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
<uuid>457eb676-33da-42ec-9a8c-9293d545c337</uuid>
<usage type='ceph'>
<name>client.admin secret</name>
</usage>
</secret>
EOF
virsh secret-define --file secret.xml
virsh secret-set-value --secret 457eb676-33da-42ec-9a8c-9293d545c337 --base64 AQDWB3lgVzaUIxAA1OuVEQTJOjrfChXNBq++Wg==
systemctl restart openstack-nova-compute
Verify
Launch a new instance (a sketch follows)
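A sketch of the check (the net-id comes from openstack network list; the instance name is arbitrary):
openstack server create --flavor m1.nano --image cirros \
  --nic net-id=<net-id> --security-group default ceph_test
rbd ls --pool openstack                # expect a <instance-uuid>_disk image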
Follow-up exercises:
Add another flat network segment; layer-3 networking with VXLAN; configure LBaaS: https://oldqiang.com/archives/157.html
Manila shared-filesystem service (NAS storage); Swift object storage service (OSS-style objects); Heat orchestration service