Commit d055f842 authored by Jan Siersch

massive bugfixes based on clusterlab installation

- fix: qpid config file layout and location change
- rem: ceilometer
- rem: trove
- fix: cinder, nova, neutron lock path errors
- new: script to configure firewalld (see the sketch below)
- fix: openvswitch bridge layout change
- fix: script error when basic networking was not performed
- fix: dashboard crash due to timezone setting
- fix: mysql hostname not resolved in some instances (now uses the IP address instead)
- fix: various config parameter adjustments for glance, nova, neutron, cinder
- fix: also unified some inconsistencies across config files for different node types
parent b9f8fe46
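The firewalld script added in this commit is not shown in this excerpt. As a rough orientation, a setup along the following lines would open the usual OpenStack API and messaging ports on the control node; this is a hypothetical sketch, and the port list and zone handling in the actual script may differ.

#!/bin/bash
# Hypothetical firewalld setup for the control node (not the script from this commit).
# 80 horizon, 3306 mysql, 5000/35357 keystone, 5672 qpid, 8774 nova,
# 8776 cinder, 9292 glance, 9696 neutron
for port in 80 3306 5000 35357 5672 8774 8776 9292 9696; do
    firewall-cmd --permanent --add-port=${port}/tcp
done
firewall-cmd --reload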
@@ -7,7 +7,7 @@ syslog_log_facility = LOG_LOCAL0
# local paths
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
lock_path = /var/lib/cinder/lock
volumes_dir = /var/lib/cinder/volumes
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_confg = /etc/cinder/api-paste.ini
@@ -16,7 +16,7 @@ api_paste_confg = /etc/cinder/api-paste.ini
auth_strategy = keystone
notification_driver = cinder.openstack.common.notifier.rpc_notifier
rpc_backend = qpid
qpid_hostname = controlnode
qpid_hostname = ${CONTROLNODE_IP_CTRL}
qpid_username = guest
qpid_password = ${QPID_PASS}
control_exchange = cinder
@@ -33,7 +33,7 @@ iscsi_helper = lioadm
my_ip = ${COMPUTENODE_IP_CTRL}
[database]
connection = mysql://cinder:${CINDER_DBPASS}@controlnode/cinder
connection = mysql://cinder:${CINDER_DBPASS}@${CONTROLNODE_IP_CTRL}/cinder
[keystone_authtoken]
auth_uri = http://controlnode:5000/v2.0
......
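The lock_path change above (from /var/lock/cinder to /var/lib/cinder/lock) only takes effect if the new directory exists and is writable by the service user. A minimal preparation sketch, assuming the stock cinder user and group from the distribution packages; nova and neutron need the analogous directories:

# Hypothetical preparation of the new lock directory (names assume the stock packages).
mkdir -p /var/lib/cinder/lock
chown cinder:cinder /var/lib/cinder/lock
chmod 750 /var/lib/cinder/lock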
@@ -7,14 +7,14 @@ syslog_log_facility = LOG_LOCAL0
# local paths
state_path = /var/lib/neutron
lock_path = /var/lock/neutron
lock_path = /var/lib/neutron/lock
api_paste_confg = /etc/neutron/api-paste.ini
# auth & message queue
auth_strategy = keystone
notification_driver = neutron.openstack.common.notifier.rpc_notifier
rpc_backend = qpid
qpid_hostname = controlnode
qpid_hostname = ${CONTROLNODE_IP_CTRL}
qpid_username = guest
qpid_password = ${QPID_PASS}
@@ -29,7 +29,7 @@ root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
report_interval = 5
[database]
connection = mysql://neutron:${NEUTRON_DBPASS}@controlnode/neutron
connection = mysql://neutron:${NEUTRON_DBPASS}@${CONTROLNODE_IP_CTRL}/neutron
[keystone_authtoken]
auth_uri = http://controlnode:5000/v2.0
......
[ml2]
type_drivers = vlan
tenant_network_types = vlan
type_drivers = flat,gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ml2_type_vlan]
network_vlan_ranges = physnet1:${SDN_VLAN_RANGES}
tenant_network_type = vlan
bridge_mappings = physnet1:br-data
enable_tunneling = false
[ml2_type_vxlan]
@@ -22,22 +19,21 @@ password = admin
session_timeout = 30
timeout = 10
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[agent]
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
[ovs]
enable_tunneling = false
tenant_network_type = vlan
network_vlan_ranges = physnet1:${SDN_VLAN_RANGES}
bridge_mappings = physnet1:br-data
local_ip = ${COMPUTENODE_IP_DATA}
enable_tunneling = true
tunnel_type = gre
[odl]
controllers = ${SDNCONTROL_IP_CTRL}:6633:admin:admin
integration_bridge = br-int
tenant_network_type = vlan
[agent]
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
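With the switch above from VLAN provider networks to GRE tunnelling, the per-node br-data bridge and its physnet1 mapping are no longer used; only the integration bridge has to exist, and the agent creates the tunnel bridge itself. A rough sketch of the matching Open vSwitch setup, assuming the stock neutron-openvswitch-agent behaviour (the deployment scripts may do this differently):

# Hypothetical bridge cleanup for the GRE layout.
ovs-vsctl --may-exist add-br br-int      # integration bridge used by neutron
ovs-vsctl --if-exists del-br br-data     # VLAN data bridge from the old layout is obsolete
systemctl restart neutron-openvswitch-agent
ovs-vsctl show                           # br-tun and its GRE ports should appear after the restart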
@@ -11,7 +11,7 @@ notify_on_state_change = vm_and_task_state
# local paths
logdir = /var/log/nova
state_path = /var/lib/nova
lock_path = /var/lock/nova
lock_path = /var/lib/nova/lock
volumes_path = /var/lib/nova/volumes
api_paste_config = /etc/nova/api-paste.ini
root_helper = sudo nova-rootwrap /etc/nova/rootwrap.conf
@@ -21,7 +21,7 @@ auth_strategy = keystone
notification_driver = ceilometer.compute.nova_notifier
notification_driver = nova.openstack.common.notifier.rpc_notifier
rpc_backend = qpid
qpid_hostname = controlnode
qpid_hostname = ${CONTROLNODE_IP_CTRL}
qpid_username = guest
qpid_password = ${QPID_PASS}
@@ -58,7 +58,7 @@ ec2_private_dns_show_ip = True
enabled_apis = ec2,osapi_compute,metadata
[database]
connection = mysql://nova:${NOVA_DBPASS}@controlnode/nova
connection = mysql://nova:${NOVA_DBPASS}@${CONTROLNODE_IP_CTRL}/nova
[glance]
host = controlnode
......
@@ -7,7 +7,7 @@ syslog_log_facility = LOG_LOCAL0
# local paths
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
lock_path = /var/lib/cinder/lock
volumes_dir = /var/lib/cinder/volumes
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_confg = /etc/cinder/api-paste.ini
@@ -16,7 +16,7 @@ api_paste_confg = /etc/cinder/api-paste.ini
auth_strategy = keystone
notification_driver = cinder.openstack.common.notifier.rpc_notifier
rpc_backend = qpid
qpid_hostname = controlnode
qpid_hostname = ${CONTROLNODE_IP_CTRL}
qpid_username = guest
qpid_password = ${QPID_PASS}
control_exchange = cinder
@@ -33,7 +33,7 @@ iscsi_helper = lioadm
my_ip = ${CONTROLNODE_IP_CTRL}
[database]
connection = mysql://cinder:${CINDER_DBPASS}@controlnode/cinder
connection = mysql://cinder:${CINDER_DBPASS}@${CONTROLNODE_IP_CTRL}/cinder
[keystone_authtoken]
auth_uri = http://controlnode:5000/v2.0
......
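The controlnode-to-${CONTROLNODE_IP_CTRL} swaps in the qpid and database settings above address nodes on which the controlnode hostname does not resolve. A hypothetical check-and-render snippet (the actual deployment scripts may substitute the placeholder differently):

# Check whether the hostname resolves on this node.
getent hosts controlnode || echo "controlnode does not resolve, using the IP instead"
# Hypothetical template rendering: replace the placeholder with the control node IP.
CONTROLNODE_IP_CTRL=192.0.2.10   # example address only
sed -i "s/\${CONTROLNODE_IP_CTRL}/${CONTROLNODE_IP_CTRL}/g" /etc/cinder/cinder.conf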
@@ -8,7 +8,7 @@ syslog_log_facility = LOG_LOCAL0
# message queue
notification_driver = messaging
rpc_backend = qpid
qpid_hostname = controlnode
qpid_hostname = ${CONTROLNODE_IP_CTRL}
qpid_username = guest
qpid_password = ${QPID_PASS}
@@ -18,7 +18,7 @@ swift_store_user = swift
swift_store_key = ${SWIFT_PASS}
[database]
connection = mysql://glance:${GLANCE_DBPASS}@controlnode/glance
connection = mysql://glance:${GLANCE_DBPASS}@${CONTROLNODE_IP_CTRL}/glance
[keystone_authtoken]
auth_uri = http://controlnode:5000/v2.0
......
@@ -6,7 +6,7 @@ use_syslog = True
syslog_log_facility = LOG_LOCAL0
[database]
connection = mysql://glance:${GLANCE_DBPASS}@controlnode/glance
connection = mysql://glance:${GLANCE_DBPASS}@${CONTROLNODE_IP_CTRL}/glance
[keystone_authtoken]
auth_uri = http://controlnode:5000/v2.0
......
@@ -7,7 +7,7 @@ log_file = keystone.log
log_dir = /var/log/keystone
# message queue
qpid_hostname = controlnode
qpid_hostname = ${CONTROLNODE_IP_CTRL}
qpid_username = guest
qpid_password = ${QPID_PASS}
@@ -61,5 +61,5 @@ oauth1 = keystone.auth.plugins.oauth1.OAuth
config_file = /etc/keystone/api-paste.ini
[database]
connection = mysql://keystone:${KEYSTONE_DBPASS}@controlnode/keystone
connection = mysql://keystone:${KEYSTONE_DBPASS}@${CONTROLNODE_IP_CTRL}/keystone
@@ -7,14 +7,14 @@ syslog_log_facility = LOG_LOCAL0
# local paths
state_path = /var/lib/neutron
lock_path = /var/lock/neutron
lock_path = /var/lib/neutron/lock
api_paste_confg = /etc/neutron/api-paste.ini
# auth & message queue
auth_strategy = keystone
notification_driver = neutron.openstack.common.notifier.rpc_notifier
rpc_backend = qpid
qpid_hostname = controlnode
qpid_hostname = ${CONTROLNODE_IP_CTRL}
qpid_username = guest
qpid_password = ${QPID_PASS}
@@ -39,7 +39,7 @@ root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
report_interval = 5
[database]
connection = mysql://neutron:${NEUTRON_DBPASS}@controlnode/neutron
connection = mysql://neutron:${NEUTRON_DBPASS}@${CONTROLNODE_IP_CTRL}/neutron
[keystone_authtoken]
auth_uri = http://controlnode:5000/v2.0
......
[ml2]
type_drivers = vlan
tenant_network_types = vlan
type_drivers = flat,gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ml2_type_vlan]
network_vlan_ranges = physnet1:${SDN_VLAN_RANGES}
tenant_network_type = vlan
bridge_mappings = physnet1:br-data
enable_tunneling = false
[ml2_type_vxlan]
@@ -22,22 +19,20 @@ password = admin
session_timeout = 30
timeout = 10
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[agent]
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
[ovs]
enable_tunneling = false
tenant_network_type = vlan
network_vlan_ranges = physnet1:${SDN_VLAN_RANGES}
bridge_mappings = physnet1:br-data
enable_tunneling = true
tunnel_type = gre
[odl]
controllers = ${SDNCONTROL_IP_CTRL}:6633:admin:admin
integration_bridge = br-int
tenant_network_type = vlan
[agent]
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
@@ -11,7 +11,7 @@ notify_on_state_change = vm_and_task_state
# local paths
logdir = /var/log/nova
state_path = /var/lib/nova
lock_path = /var/lock/nova
lock_path = /var/lib/nova/lock
volumes_path = /var/lib/nova/volumes
api_paste_config = /etc/nova/api-paste.ini
root_helper = sudo nova-rootwrap /etc/nova/rootwrap.conf
@@ -21,7 +21,7 @@ auth_strategy = keystone
notification_driver = ceilometer.compute.nova_notifier
notification_driver = nova.openstack.common.notifier.rpc_notifier
rpc_backend = qpid
qpid_hostname = controlnode
qpid_hostname = ${CONTROLNODE_IP_CTRL}
qpid_username = guest
qpid_password = ${QPID_PASS}
@@ -58,7 +58,7 @@ ec2_private_dns_show_ip = True
enabled_apis = ec2,osapi_compute,metadata
[database]
connection = mysql://nova:${NOVA_DBPASS}@controlnode/nova
connection = mysql://nova:${NOVA_DBPASS}@${CONTROLNODE_IP_CTRL}/nova
[glance]
host = controlnode
......
@@ -50,7 +50,7 @@ OPENSTACK_NEUTRON_NETWORK = {
}
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
TIME_ZONE = "DE"
TIME_ZONE = "UTC"
LOGGING = {
'version': 1,
# When set to True this will disable all logging except
......
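The dashboard crash from the commit message apparently traces back to TIME_ZONE = "DE", which is not a valid tz database name; Horizon resolves the value through pytz, so only names such as "UTC" or "Europe/Berlin" work. A quick sanity check, assuming pytz is installed:

python -c 'import pytz; pytz.timezone("DE")'           # raises UnknownTimeZoneError
python -c 'import pytz; print(pytz.timezone("UTC"))'   # valid, as would be "Europe/Berlin"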
acl deny all create link
acl allow all all
cluster-mechanism=DIGEST-MD5 ANONYMOUS
acl-file=/etc/qpid/qpidd.acl
auth=yes
realm=QPID
......
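The qpidd.conf fragment above reflects the config layout and location change noted in the commit message; assuming this refers to newer qpid-cpp packages reading /etc/qpid/qpidd.conf instead of the old /etc/qpidd.conf (the acl-file path above points into /etc/qpid/ as well), installing the files could look like this hypothetical sketch:

# Hypothetical installation of the broker config; paths assume a recent qpid-cpp package.
install -D -m 0644 qpidd.conf /etc/qpid/qpidd.conf
install -D -m 0644 qpidd.acl  /etc/qpid/qpidd.acl
systemctl restart qpidd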
[DEFAULT]
api_paste_config=/etc/trove/trove-api-paste.ini
rpc_backend = qpid
qpid_hostname = controlnode
qpid_username = guest
qpid_password = MQh5zKvpYRyJG1Ky
[composite:trove]
use = call:trove.common.wsgi:versioned_urlmap
/: versions
/v1.0: troveapi
[app:versions]
paste.app_factory = trove.versions:app_factory
[pipeline:troveapi]
pipeline = faultwrapper authtoken authorization contextwrapper ratelimit extensions troveapp
#pipeline = debug extensions troveapp
[filter:extensions]
paste.filter_factory = trove.common.extensions:factory
[filter:authtoken]
signing_dir=/var/cache/trove
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = controlnode
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = trove
admin_password = ${TROVE_PASS}
admin_token=${ADMIN_TOKEN}
[filter:authorization]
paste.filter_factory = trove.common.auth:AuthorizationMiddleware.factory
[filter:contextwrapper]
paste.filter_factory = trove.common.wsgi:ContextMiddleware.factory
[filter:faultwrapper]
paste.filter_factory = trove.common.wsgi:FaultWrapper.factory
[filter:ratelimit]
paste.filter_factory = trove.common.limits:RateLimitingMiddleware.factory
[app:troveapp]
paste.app_factory = trove.common.api:app_factory
#Add this filter to log request and response for debugging
[filter:debug]
paste.filter_factory = trove.common.wsgi:Debug
[DEFAULT]
trove_auth_url=http://controlnode:35357/v2.0
nova_proxy_admin_tenant_name=service
nova_proxy_admin_pass=${ADMIN_PASS}
nova_proxy_admin_user=admin
rpc_backend=trove.openstack.common.rpc.impl_qpid
qpid_password=${QPID_PASS}
qpid_username=guest
qpid_hostname=controlnode
qpid_host=controlnode
[DEFAULT]
nova_proxy_admin_tenant_name=service
nova_proxy_admin_pass=${ADMIN_PASS}
nova_proxy_admin_user=admin
notifier_queue_hostname=controlnode
sql_connection=mysql://trove:${TROVE_DBPASS}@controlnode/trove
swift_url=http://controlnode:8080/v1/AUTH_
cinder_url=http://controlnode:8776/v1
nova_compute_url=http://controlnode:8774/v2
trove_auth_url=http://controlnode:5000/v2.0
log_dir=/var/log/trove
# Show more verbose log output (sets INFO log level output)
verbose = True
# Show debugging output in logs (sets DEBUG log level output)
debug = True
# Updates service and instance task statuses if instance failed become active
update_status_on_fail = False
# AMQP Connection info
rabbit_password=f7999d1955c5014aa32c
# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
# sql_connection = mysql://trove:trove@localhost/trove
# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
sql_idle_timeout = 3600
#DB Api Implementation
db_api_implementation = trove.db.sqlalchemy.api
# Configuration options for talking to nova via the novaclient.
# Config options for enabling volume service
trove_volume_support = True
block_device_mapping = vdb
device_path = /dev/vdb
mount_point = /var/lib/mysql
volume_time_out=30
server_delete_time_out=480
# Configuration options for talking to nova via the novaclient.
# These options are for an admin user in your keystone config.
# It proxy's the token received from the user to send to nova via this admin users creds,
# basically acting like the client via that proxy token.
# Manager impl for the taskmanager
taskmanager_manager=trove.taskmanager.manager.Manager
# Manager sends Exists Notifications
exists_notification_transformer = trove.extensions.mgmt.instances.models.NovaNotificationTransformer
exists_notification_ticks = 30
notification_service_id = mysql:2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b
# Trove DNS
trove_dns_support = False
dns_account_id = 123456
dns_auth_url = http://127.0.0.1:5000/v2.0
dns_username = user
dns_passkey = password
dns_ttl = 3600
dns_domain_name = 'trove.com.'
dns_domain_id = 11111111-1111-1111-1111-111111111111
dns_driver = trove.dns.designate.driver.DesignateDriver
dns_instance_entry_factory = trove.dns.designate.driver.DesignateInstanceEntryFactory
dns_endpoint_url = http://127.0.0.1/v1/
dns_service_type = dns
# Trove Security Groups for Instances
trove_security_groups_support = True
trove_security_group_rule_cidr = 0.0.0.0/0
# Guest related conf
agent_heartbeat_time = 10
agent_call_low_timeout = 5
agent_call_high_timeout = 150
# Whether to use nova's contrib api for create server with volume
use_nova_server_volume = False
# Config option for filtering the IP address that DNS uses
network_label_regex = ^private$
#ip_regex = ^(15.|123.)
# Datastore templates
template_path = /etc/trove/templates/
# ============ notifer queue kombu connection options ========================
notifier_queue_userid = guest
notifier_queue_password = guest
notifier_queue_ssl = False
notifier_queue_port = 5672
notifier_queue_virtual_host = /
notifier_queue_transport = memory
# usage notifications
notification_driver=trove.openstack.common.notifier.rpc_notifier
control_exchange=trove
# ============ Logging information =============================
#log_dir = /integration/report
#log_file = trove-taskmanager.log
# ============ PyDev remote dubugging =============================
# Enable or disable pydev remote debugging.
# There are three values allowed: 'disabled', 'enabled' and 'auto'
# If value is 'auto' tries to connect to remote debugger server,
# but in case of error continue running with disabled debugging
pydev_debug = disabled
# remote debug server host and port options
#pydev_debug_host = localhost
#pydev_debug_port = 5678
# path to pydevd library. It will be used if pydevd is absent in sys.path
#pydev_path = <path>
# ================= Guestagent related ========================
#guest_config = $pybasedir/etc/trove/trove-guestagent.conf.sample
#cloudinit_location = /etc/trove/cloudinit
# ================= Security groups related ========================
# Each future datastore implementation should implement
# its own oslo group with defined in it:
# - tcp_ports; upd_ports;
[mysql]
# Format (single port or port range): A, B-C
# where C greater than B
tcp_ports = 3306
[redis]
# Format (single port or port range): A, B-C
# where C greater than B
tcp_ports = 6379
[cassandra]
tcp_ports = 7000, 7001, 9042, 9160
[couchbase]
tcp_ports = 8091, 8092, 4369, 11209-11211, 21100-21199
[DEFAULT]
rpc_backend = qpid
qpid_hostname = controlnode
qpid_username = guest
qpid_password = ${QPID_PASS}
[DEFAULT]
network_label_regex=^NETWORK_LABEL$
add_addresses=True
default_datastore=mysql
notifier_queue_hostname=controlnode
sql_connection=mysql://trove:${TROVE_DBPASS}@controlnode/trove
swift_url=http://controlnode:8080/v1/AUTH_
cinder_url=http://controlnode:8776/v1
nova_compute_url=http://controlnode:8774/v2
trove_auth_url=http://controlnode:5000/v2.0
log_dir=/var/log/trove
# Show more verbose log output (sets INFO log level output)
#verbose=True
# Show debugging output in logs (sets DEBUG log level output)
#debug=True
# Address to bind the API server
#bind_host=0.0.0.0
# Port the bind the API server to
#bind_port=8779
# Number of child processes to run
#trove_api_workers=5
# AMQP Connection info
#rabbit_password=f7999d1955c5014aa32c
# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
# sql_connection = sqlite:///trove_test.sqlite
#sql_connection=mysql://trove:trove@localhost/trove
#sql_connection=postgresql://trove:trove@localhost/trove
# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
#sql_idle_timeout=3600
# Maximum line size of message headers to be accepted.
# max_header_line may need to be increased when using large tokens
# (typically those generated by the Keystone v3 API with big service
# catalogs)
# max_header_line = 16384
#DB Api Implementation