Delete elements moved to tripleo-image-elements.

Change-Id: I062d3dfb538682d72011307e8b96672e8f0844a5

commit e826e77a16 (parent ddcf3db028)
README.md (15 changed lines)

@@ -1,9 +1,18 @@
Image building tools for Openstack
==================================

These tools are the components of tripleo (https://github.com/tripleo/incubator)
that do the plumbing involved in building disk images. Specific configs live
in the incubator repository, while the reusable tools live here.
These tools are the components of TripleO (https://github.com/tripleo/incubator)
umbrella project that do the plumbing involved in building disk images.

This repository has the core functionality for building disk images, file
system images and ramdisk images for use with OpenStack (both virtual and bare
metal). The core functionality includes the various operating system specific
modules for disk/filesystem images, and deployment and hardware inventory
ramdisks.

The TripleO project also develops elements that can be used to deploy
OpenStack itself. These live in the TripleO elements repository
(https://github.com/stackforge/tripleo-image-elements).

What tools are there?
---------------------

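For orientation, a minimal sketch of how the two entry points described above
are invoked (the element names are taken verbatim from the CI examples in
docs/ci.md below):

    # Build a bootable Ubuntu VM image from the 'vm' and 'base' elements.
    disk-image-create vm base -o base -a i386

    # Build a deployment ramdisk.
    ramdisk-image-create deploy
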
docs/ci.md (32 changed lines)

@@ -14,42 +14,18 @@ Jenkins
* Grant jenkins builders sudo [may want lxc containers or cloud instances for
  security isolation]
* Jobs to build:
  * bootstrap VM from-scratch (archive bootstrap.qcow2).
  * base ubuntu VM.

        disk-image-create vm base devstack -o bootstrap -a i386
        disk-image-create vm base -o base -a i386

  * devstack nova-bm execution (archive the resulting image).
    Chained off of the bootstrap vm build

        ssh into the node, run demo/scripts/demo

  * bootstrap VM via image-build chain (archive bm-cloud.qcow2).

        disk-image-create vm base glance nova-bm swift mysql haproxy-api \
            haproxy-mysql cinder quantum rabbitmq -o bootstrap-prod

  * baremetal SPOF node build (archive the resulting image).

        disk-image-create base mysql haproxy-mysql haproxy-api local-boot \
            rabbitmq -o baremetal-spof

  * baremetal demo node build (archive the resulting image).

        disk-image-create base vm glance nova-bm swift cinder quantum \
            -o bootstrap-prod

  * ramdisk deploy image buil
  * ramdisk deploy image build

        ramdisk-image-create deploy

* Tempest w/baremetal using libvirt networking as the power API.
  take a bootstrap baremetal devstack from above, N VM 'bare metal' nodes,
  and run tempest in that environment.

Copyright
=========

Copyright 2012 Hewlett-Packard Development Company, L.P.
Copyright 2012, 2013 Hewlett-Packard Development Company, L.P.
Copyright (c) 2012 NTT DOCOMO, INC.

All Rights Reserved.
@@ -1,61 +0,0 @@
#############
# OpenStack #
#############

[composite:osapi_volume]
use = call:cinder.api:root_app_factory
/: apiversions
/v1: openstack_volume_api_v1
/v2: openstack_volume_api_v2

[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = faultwrap sizelimit noauth apiv1
keystone = faultwrap sizelimit authtoken keystonecontext apiv1
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1

[composite:openstack_volume_api_v2]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = faultwrap sizelimit noauth apiv2
keystone = faultwrap sizelimit authtoken keystonecontext apiv2
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv2

[filter:faultwrap]
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory

[filter:noauth]
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory

[filter:sizelimit]
paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory

[app:apiv1]
paste.app_factory = cinder.api.v1.router:APIRouter.factory

[app:apiv2]
paste.app_factory = cinder.api.v2.router:APIRouter.factory

[pipeline:apiversions]
pipeline = faultwrap osvolumeversionapp

[app:osvolumeversionapp]
paste.app_factory = cinder.api.versions:Versions.factory

##########
# Shared #
##########

[filter:keystonecontext]
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory

[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
service_protocol = http
service_host = {{keystone.host}}
service_port = 5000
auth_host = {{keystone.host}}
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = cinder
admin_password = {{service-password}}
@@ -1,20 +0,0 @@
[DEFAULT]
debug = True

state_path = /var/run/cinder

rootwrap_config=/etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini

iscsi_helper=tgtadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone

sql_connection={{cinder.db}}

rabbit_host = {{rabbit.host}}
rabbit_port = 5672
rabbit_userid = {{rabbit.user}}
rabbit_password = {{rabbit.password}}
@@ -1,34 +0,0 @@
{
    "context_is_admin": [["role:admin"]],
    "admin_or_owner": [["is_admin:True"], ["project_id:%(project_id)s"]],
    "default": [["rule:admin_or_owner"]],

    "admin_api": [["is_admin:True"]],

    "volume:create": [],
    "volume:get_all": [],
    "volume:get_volume_metadata": [],
    "volume:get_snapshot": [],
    "volume:get_all_snapshots": [],

    "volume_extension:types_manage": [["rule:admin_api"]],
    "volume_extension:types_extra_specs": [["rule:admin_api"]],
    "volume_extension:extended_snapshot_attributes": [],
    "volume_extension:volume_image_metadata": [],

    "volume_extension:quotas:show": [],
    "volume_extension:quotas:update_for_project": [["rule:admin_api"]],
    "volume_extension:quotas:update_for_user": [["rule:admin_or_projectadmin"]],
    "volume_extension:quota_classes": [],

    "volume_extension:volume_admin_actions:reset_status": [["rule:admin_api"]],
    "volume_extension:snapshot_admin_actions:reset_status": [["rule:admin_api"]],
    "volume_extension:volume_admin_actions:force_delete": [["rule:admin_api"]],
    "volume_extension:snapshot_admin_actions:force_delete": [["rule:admin_api"]],

    "volume_extension:volume_host_attribute": [["rule:admin_api"]],
    "volume_extension:volume_tenant_attribute": [["rule:admin_api"]],
    "volume_extension:hosts": [["rule:admin_api"]],
    "volume_extension:services": [["rule:admin_api"]],
    "volume:services": [["rule:admin_api"]]
}
@@ -1,27 +0,0 @@
# Configuration for cinder-rootwrap
# This file should be owned by (and only-writeable by) the root user

[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/cinder/rootwrap.d,/usr/share/cinder/rootwrap

# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin

# Enable logging to syslog
# Default value is False
use_syslog=False

# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, user0, user1...
# Default value is 'syslog'
syslog_log_facility=syslog

# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
@@ -1,55 +0,0 @@
# cinder-rootwrap command filters for volume nodes
# This file should be owned by (and only-writeable by) the root user

[Filters]
# cinder/volume/iscsi.py: iscsi_helper '--op' ...
ietadm: CommandFilter, /usr/sbin/ietadm, root
tgtadm: CommandFilter, /usr/sbin/tgtadm, root
tgt-admin: CommandFilter, /usr/sbin/tgt-admin, root

# cinder/volume/driver.py: 'vgs', '--noheadings', '-o', 'name'
vgs: CommandFilter, /sbin/vgs, root

# cinder/volume/driver.py: 'lvcreate', '-L', sizestr, '-n', volume_name,..
# cinder/volume/driver.py: 'lvcreate', '-L', ...
lvcreate: CommandFilter, /sbin/lvcreate, root

# cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,...
dd: CommandFilter, /bin/dd, root

# cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ...
lvremove: CommandFilter, /sbin/lvremove, root

# cinder/volume/driver.py: 'lvdisplay', '--noheading', '-C', '-o', 'Attr',..
lvdisplay: CommandFilter, /sbin/lvdisplay, root

# cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',...
# cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ...
iscsiadm: CommandFilter, /sbin/iscsiadm, root
iscsiadm_usr: CommandFilter, /usr/bin/iscsiadm, root

# cinder/volume/drivers/lvm.py: 'shred', '-n3'
# cinder/volume/drivers/lvm.py: 'shred', '-n0', '-z', '-s%dMiB'
shred: CommandFilter, /usr/bin/shred, root

#cinder/volume/.py: utils.temporary_chown(path, 0), ...
chown: CommandFilter, /bin/chown, root

# cinder/volume/driver.py
dmsetup: CommandFilter, /sbin/dmsetup, root
dmsetup_usr: CommandFilter, /usr/sbin/dmsetup, root
ln: CommandFilter, /bin/ln, root
qemu-img: CommandFilter, /usr/bin/qemu-img, root
env: CommandFilter, /usr/bin/env, root

# cinder/volume/driver.py: utils.read_file_as_root()
cat: CommandFilter, /bin/cat, root

# cinder/volume/nfs.py
stat: CommandFilter, /usr/bin/stat, root
mount: CommandFilter, /bin/mount, root
df: CommandFilter, /bin/df, root
truncate: CommandFilter, /usr/bin/truncate, root
chmod: CommandFilter, /bin/chmod, root
rm: CommandFilter, /bin/rm, root
lvs: CommandFilter, /sbin/lvs, root
@@ -1,2 +0,0 @@
ISCSITARGET_ENABLE=true

@@ -1 +0,0 @@
os-config-applier
@@ -1 +0,0 @@
Install cinder service from git.
@@ -1,4 +0,0 @@
os-svc-install
os-refresh-config
os-config-applier
cinder-config
@@ -1,16 +0,0 @@
#!/bin/bash
set -eux

install-packages lvm2
os-svc-install -n cinder -u cinder -r https://github.com/openstack/cinder.git -c cinder-all

os-svc-daemon cinder-api cinder cinder-api "--config-dir /etc/cinder"
os-svc-daemon cinder-volume cinder cinder-volume "--config-dir /etc/cinder"
os-svc-daemon cinder-scheduler cinder cinder-scheduler "--config-dir /etc/cinder"
mkdir -p /etc/tgt/conf.d
echo 'include /etc/tgt/conf.d/cinder_tgt.conf' > /etc/tgt/targets.conf
echo 'include /var/run/cinder/volumes/*' > /etc/tgt/conf.d/cinder_tgt.conf

echo "cinder ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/cinder
chmod 0440 /etc/sudoers.d/cinder
visudo -c
@@ -1,15 +0,0 @@
#!/bin/bash
set -eu

# TODO: resize volume group in response to config changes.
# TODO: is there a safe way to shrink a volume group?
vol_group=cinder-volumes
vol_file=/var/run/cinder/$vol_group-backing-file
size=$(os-config-applier --key cinder.volume_size_mb --type int)M

if ! vgs $vol_group; then
    [[ -f $vol_file ]] || truncate -s $size $vol_file
    dev=`sudo losetup -f --show $vol_file`
    if ! vgs $vol_group; then vgcreate $vol_group $dev; fi
    mkdir -p /var/run/cinder/volumes
fi
@@ -1,8 +0,0 @@
#!/bin/bash
set -eu

service iscsitarget restart
service open-iscsi restart
service cinder-api restart
service cinder-volume restart
service cinder-scheduler restart
@@ -1,8 +0,0 @@
#!/bin/bash
set -eu

# installation requires building a kernel module.
# - TODO: use generic 'install-packages' instead of apt-get once
#   it is available from first-boot scripts.
DEBIAN_FRONTEND=noninteractive apt-get install --yes linux-headers-`uname -r`
DEBIAN_FRONTEND=noninteractive apt-get install --yes iscsitarget iscsitarget-dkms openvswitch-datapath-dkms
@@ -1,5 +0,0 @@
Creates an image prepped to make a devstack baremetal cloud. See
incubator/scripts/demo within the built image.

Forces a 16GB image to allow room for Swift, Cinder and instance
disk images.
@@ -1,8 +0,0 @@
#!/bin/bash
# Force a 16GB minimum image size.

set -e

if (( '16' '>' $IMAGE_SIZE )); then
    echo IMAGE_SIZE=16
fi
@@ -1 +0,0 @@
stackuser
@@ -1,13 +0,0 @@
#!/bin/bash
# Install the image creation toolchain so folk can create their own images
# (also includes the bootstrap-from-devstack facilities needed until we have
# full image mastering of openstack).

set -e
set -o xtrace

install-packages git
if [ -n "$http_proxy" ]; then
    sudo -Hiu stack git config --global http.proxy $http_proxy
fi
sudo -Hiu stack git clone https://github.com/tripleo/incubator-bootstrap.git incubator
@@ -1,11 +0,0 @@
#!/bin/bash
# Initialize devstack in the bootstrap image

set -e
set -o xtrace

install-packages git
if [ -n "$http_proxy" ]; then
    sudo -Hiu stack git config --global http.proxy $http_proxy
fi
sudo -Hiu stack git clone https://github.com/openstack-dev/devstack.git
@@ -1,7 +0,0 @@
#!/bin/bash
# Install the haveged daemon so ssh config on startup isn't glacial.

set -e
set -o xtrace

install-packages haveged
@@ -1,26 +0,0 @@
#!/bin/bash
# Configure eth1, the baremetal network.

set -e
set -o xtrace

cat << EOF >> /etc/network/interfaces

auto eth1
iface eth1 inet static
    # This matches the localrc we have configured for demo environments.
    # It is unroutable and not suitable for production: it is a test network.
    address 192.0.2.1
    netmask 255.255.255.0
    # Expose the metadata service needed by the nodes as they boot.
    up iptables -t nat -A PREROUTING -d 169.254.169.254 -p tcp -m tcp --dport 80 -j REDIRECT --to-port 8775
    # Grant access to the rest of the world by routing via the bootstrap node
    # (libvirt rejects traffic from unknown ip addresses, meaning that using
    # the default libvirt nat environment requires the MASQUERADE for the bare
    # metal nodes unless you reconfigure libvirt as well). Alternatively you
    # can create a second bridge on your machine and attach eth0 to that
    # (with an appropriate static config or dhcp on the bridge).
    up iptables -t nat -A POSTROUTING -s 192.0.2.0/24 -o eth0 -j MASQUERADE
    # This matches the client range defined in localrc.
    up ip addr add 192.0.2.33/29 dev eth1
EOF
@@ -1,8 +0,0 @@
#!/bin/bash
# Regenerate host keys now. XXX: Really should be a cloud-init task, should get
# that working.

set -e
set -o xtrace

dpkg-reconfigure openssh-server
@@ -1 +0,0 @@
Installs glance service from git.
@@ -1,3 +0,0 @@
os-svc-install
os-refresh-config
os-config-applier
@@ -1,13 +0,0 @@
#!/bin/bash
set -eux

# TODO: use trunk instead of folsom
# trunk glance currently results in CRITICAL 'duplicate config entry log-format' errors:
# https://bugs.launchpad.net/ubuntu/+source/python-glanceclient/+bug/1131327
os-svc-install -n glance -u glance -r https://github.com/openstack/glance.git -b stable/folsom

mkdir -p /var/lib/glance/images && chown -R glance:glance /var/lib/glance/images
os-svc-daemon glance-api glance glance-api "--debug --log-config /etc/glance/logging.conf"
os-svc-daemon glance-reg glance glance-registry "--debug --log-config /etc/glance/logging.conf"

install -m 0755 -o glance -g glance -d /var/log/glance
@@ -1,68 +0,0 @@
## NB: Unpolished config file
## This config file was taken directly from the upstream repo, and tweaked just enough to work.
## It has not been audited to ensure that everything present is either Heat controlled or a mandatory as-is setting.
## Please submit patches for any setting that should be deleted or Heat-configurable.
## https://github.com/stackforge/diskimage-builder

# Use this pipeline for no auth or image caching - DEFAULT
[pipeline:glance-api]
pipeline = versionnegotiation unauthenticated-context rootapp

# Use this pipeline for image caching and no auth
[pipeline:glance-api-caching]
pipeline = versionnegotiation unauthenticated-context cache rootapp

# Use this pipeline for caching w/ management interface but no auth
[pipeline:glance-api-cachemanagement]
pipeline = versionnegotiation unauthenticated-context cache cachemanage rootapp

# Use this pipeline for keystone auth
[pipeline:glance-api-keystone]
pipeline = versionnegotiation authtoken context rootapp

# Use this pipeline for keystone auth with image caching
[pipeline:glance-api-keystone+caching]
pipeline = versionnegotiation authtoken context cache rootapp

# Use this pipeline for keystone auth with caching and cache management
[pipeline:glance-api-keystone+cachemanagement]
pipeline = versionnegotiation authtoken context cache cachemanage rootapp

[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/: apiversions
/v1: apiv1app
/v2: apiv2app

[app:apiversions]
paste.app_factory = glance.api.versions:create_resource

[app:apiv1app]
paste.app_factory = glance.api.v1.router:API.factory

[app:apiv2app]
paste.app_factory = glance.api.v2.router:API.factory

[filter:versionnegotiation]
paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory

[filter:cache]
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory

[filter:cachemanage]
paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory

[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory

[filter:unauthenticated-context]
paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory

[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
auth_host = {{keystone.host}}
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = {{service-password}}
@@ -1,83 +0,0 @@
## NB: Unpolished config file
## This config file was taken directly from the upstream repo, and tweaked just enough to work.
## It has not been audited to ensure that everything present is either Heat controlled or a mandatory as-is setting.
## Please submit patches for any setting that should be deleted or Heat-configurable.
## https://github.com/stackforge/diskimage-builder

[DEFAULT]
debug = True

default_store = file

bind_host = 0.0.0.0
bind_port = 9292

log_file = /var/log/glance/api.log

sql_connection = {{glance.db}}

workers = 1


registry_host = 0.0.0.0
registry_port = 9191
notifier_strategy = noop

rabbit_host = {{rabbit.host}}
rabbit_port = 5672
rabbit_use_ssl = false
rabbit_userid = guest
rabbit_password = {{rabbit.password}}
rabbit_virtual_host = /
rabbit_notification_exchange = glance
rabbit_notification_topic = notifications
rabbit_durable_queues = False

# Configuration options if sending notifications via Qpid (these are
# the defaults)
qpid_notification_exchange = glance
qpid_notification_topic = notifications
qpid_host = localhost
qpid_port = 5672
qpid_username =
qpid_password =
qpid_sasl_mechanisms =
qpid_reconnect_timeout = 0
qpid_reconnect_limit = 0
qpid_reconnect_interval_min = 0
qpid_reconnect_interval_max = 0
qpid_reconnect_interval = 0
qpid_heartbeat = 5
# Set to 'ssl' to enable SSL
qpid_protocol = tcp
qpid_tcp_nodelay = True

filesystem_store_datadir = /var/lib/glance/images/

swift_store_auth_version = 2
swift_store_auth_address = {{keystone.host}}:5000/v2.0/

swift_store_user = {{swift.store_user}}
swift_store_key = {{swift.store_key}}
swift_store_container = glance
swift_store_create_container_on_put = False
swift_store_large_object_size = 5120
swift_store_large_object_chunk_size = 200
swift_enable_snet = False

delayed_delete = False
scrub_time = 43200
scrubber_datadir = /var/lib/glance/scrubber

image_cache_dir = /var/lib/glance/image-cache/

[keystone_authtoken]
auth_host = {{keystone.host}}
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = {{service-password}}

[paste_deploy]
flavor = keystone
@@ -1,32 +0,0 @@
## NB: Unpolished config file
## This config file was taken directly from the upstream repo, and tweaked just enough to work.
## It has not been audited to ensure that everything present is either Heat controlled or a mandatory as-is setting.
## Please submit patches for any setting that should be deleted or Heat-configurable.
## https://github.com/stackforge/diskimage-builder

[DEFAULT]
log_file = /var/log/glance/image-cache.log
image_cache_dir = /var/lib/glance/image-cache/
image_cache_stall_time = 86400
image_cache_invalid_entry_grace_period = 3600
image_cache_max_size = 10737418240

registry_host = 0.0.0.0
registry_port = 9191

filesystem_store_datadir = /var/lib/glance/images/
swift_store_auth_version = 2
swift_store_auth_address = 127.0.0.1:5000/v2.0/
swift_store_user = jdoe:jdoe
swift_store_key = a86850deb2742ec3cb41518e26aa2d89
swift_store_container = glance
swift_store_create_container_on_put = False
swift_store_large_object_size = 5120
swift_store_large_object_chunk_size = 200
swift_enable_snet = False

s3_store_host = 127.0.0.1:8080/v1.0/
s3_store_access_key = <20-char AWS access key>
s3_store_secret_key = <40-char AWS secret key>
s3_store_bucket = <lowercased 20-char aws access key>glance
s3_store_create_bucket_on_put = False
@@ -1,31 +0,0 @@
## NB: Unpolished config file
## This config file was taken directly from the upstream repo, and tweaked just enough to work.
## It has not been audited to ensure that everything present is either Heat controlled or a mandatory as-is setting.
## Please submit patches for any setting that should be deleted or Heat-configurable.
## https://github.com/stackforge/diskimage-builder

# Use this pipeline for no auth - DEFAULT
[pipeline:glance-registry]
pipeline = unauthenticated-context registryapp

# Use this pipeline for keystone auth
[pipeline:glance-registry-keystone]
pipeline = authtoken context registryapp

[app:registryapp]
paste.app_factory = glance.registry.api.v1:API.factory

[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory

[filter:unauthenticated-context]
paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory

[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
auth_host = {{keystone.host}}
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = {{service-password}}
@@ -1,29 +0,0 @@
## NB: Unpolished config file
## This config file was taken directly from the upstream repo, and tweaked just enough to work.
## It has not been audited to ensure that everything present is either Heat controlled or a mandatory as-is setting.
## Please submit patches for any setting that should be deleted or Heat-configurable.
## https://github.com/stackforge/diskimage-builder

[DEFAULT]
bind_host = 0.0.0.0
bind_port = 9191

log_file = /var/log/glance/registry.log
backlog = 4096

sql_connection = {{glance.db}}
sql_idle_timeout = 3600

api_limit_max = 1000
limit_param_default = 25

[keystone_authtoken]
auth_host = {{keystone.host}}
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = {{service-password}}

[paste_deploy]
flavor = keystone
@@ -1,41 +0,0 @@
## NB: Unpolished config file
## This config file was taken directly from the upstream repo, and tweaked just enough to work.
## It has not been audited to ensure that everything present is either Heat controlled or a mandatory as-is setting.
## Please submit patches for any setting that should be deleted or Heat-configurable.
## https://github.com/stackforge/diskimage-builder

[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False

# Show debugging output in logs (sets DEBUG log level output)
#debug = False

# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = /var/log/glance/scrubber.log

# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
#use_syslog = False

# Should we run our own loop or rely on cron/scheduler to run us
daemon = False

# Loop time between checking for new items to schedule for delete
wakeup_time = 300

# Directory that the scrubber will use to remind itself of what to delete
# Make sure this is also set in glance-api.conf
scrubber_datadir = /var/lib/glance/scrubber

# Only one server in your deployment should be designated the cleanup host
cleanup_scrubber = False

# pending_delete items older than this time are candidates for cleanup
cleanup_scrubber_time = 86400

# Address to find the registry server for cleanups
registry_host = 0.0.0.0

# Port the registry server is listening on
registry_port = 9191
@@ -1,58 +0,0 @@
## NB: Unpolished config file
## This config file was taken directly from the upstream repo, and tweaked just enough to work.
## It has not been audited to ensure that everything present is either Heat controlled or a mandatory as-is setting.
## Please submit patches for any setting that should be deleted or Heat-configurable.
## https://github.com/stackforge/diskimage-builder

[loggers]
keys=root,api,registry,combined

[formatters]
keys=normal,normal_with_name,debug

[handlers]
keys=production,devel

[logger_root]
level=NOTSET
handlers=devel

[logger_glance]
level=INFO
handlers=devel

[logger_api]
level=DEBUG
handlers=devel
qualname=api

[logger_registry]
level=DEBUG
handlers=devel
qualname=glance-registry

[logger_combined]
level=DEBUG
handlers=devel
qualname=glance-combined

[handler_production]
class=handlers.SysLogHandler
level=ERROR
formatter=normal_with_name
args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)

[handler_devel]
class=StreamHandler
level=NOTSET
formatter=debug
args=(sys.stdout,)

[formatter_normal]
format=%(asctime)s %(levelname)s %(message)s

[formatter_normal_with_name]
format=(%(name)s): %(asctime)s %(levelname)s %(message)s

[formatter_debug]
format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
@@ -1,4 +0,0 @@
{
    "default": "",
    "manage_image_cache": "role:admin"
}
@@ -1,16 +0,0 @@
{
    "kernel_id": {
        "type": "string",
        "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
        "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
    },
    "ramdisk_id": {
        "type": "string",
        "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
        "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
    },
    "instance_uuid": {
        "type": "string",
        "description": "ID of instance used to create this image."
    }
}
@@ -1,5 +0,0 @@
#!/bin/bash
set -eu

service glance-api restart
service glance-reg restart
@@ -1,3 +0,0 @@
Install the Heat cfntools (for CloudFormation) to enable HEAT
templates to make use of advanced features of HEAT such as watches and
AWS::CloudFormation::Init
@@ -1,2 +0,0 @@
os-config-applier
os-refresh-config
@@ -1,11 +0,0 @@
#!/bin/sh

set -uex

install-packages \
    python-pip python-psutil

# Boto in Ubuntu 12.10 is too old. Newer boto's aren't
# supported by heat-api-cfn. Bug ref: http://pad.lv/1122472
pip install 'boto==2.5.2' heat-cfntools
cfn-create-aws-symlinks --source /usr/local/bin
@@ -1,33 +0,0 @@
#!/bin/bash
# Until http://pad.lv/1101347 is fixed, we need this user to be able to
# Log on to heat booted machines using the given SSH keypair

set -uex

# This is a specific workaround for Ubuntu
distro=$(lsb_release -is || :)
if [ "$distro" != "Ubuntu" ] ; then
    exit 0
fi

# Setup ec2-user as expected by HEAT
if ! getent passwd ec2-user ; then
    useradd -m -G admin ec2-user -s /bin/bash
fi
if ! [ -e /etc/sudoers.d/ec2-user ] ; then
    echo "ec2-user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/ec2-user
    chmod 0440 /etc/sudoers.d/ec2-user
    visudo -c
fi

# We must pin to this fixed cloud-init from the demo ppa to ensure keys
# are installed. This can be removed if http://pad.lv/1100920 is ever
# fixed in the distro
cat > /etc/apt/preferences.d/cloud-init-from-ppa <<EOF
Package: cloud-init
Pin: release o=LP-PPA-tripleo-demo
Pin-Priority: 900
EOF
# Should pull from demo PPA if cloud-init exists there.
# Since this is Ubuntu only, the --force-yes is OK
install-packages --force-yes cloud-init
@@ -1,2 +0,0 @@
AWSAccessKeyId={{heat.access_key_id}}
AWSSecretKey={{heat.secret_key}}
@@ -1,5 +0,0 @@
[main]
stack={{heat.stack.name}}
credential-file=/etc/cfn/cfn-credentials
region={{heat.stack.region}}
interval=10
@@ -1,8 +0,0 @@
{{#heat.refresh}}
[os-refresh-config-{{resource}}]
triggers=post.add,post.delete.post.update
path=Resources.{{resource}}.Metadata
action=os-refresh-config
runas=root

{{/heat.refresh}}
@@ -1,2 +0,0 @@
Install Icinga's core from the distribution repository.

@@ -1,8 +0,0 @@
#!/bin/sh

# install icinga-core

set -e
set -o xtrace

install-packages icinga-core
@@ -1,2 +0,0 @@
Install Icinga's web interface from the distribution repository.

@@ -1,9 +0,0 @@
#!/bin/sh

# install icinga-web

set -e
set -o xtrace

install-packages icinga-web icinga-cgi

@@ -1,25 +0,0 @@
Provisions a jenkins for doing tests of openstack cloud images
==============================================================

After deploying the image, jenkins should be available on port 8080.

*The following is fiction*

To use this, add a new application at
`https://github.com/organizations/$ORGANISATION/settings/applications` and grab
the client id and secret it provides.

Config options
--------------

XXX: These should be passed in via cloud-init or salt, not on image build. For
now, export before building the image.

* export `GITHUB_ORGANISATION` to set which organisation to look for github
  committers from.

* export `GITHUB_ADMINS` to set a list of github users to be jenkins admins.

* export `GITHUB_CLIENT_ID` to set the github OAuth client id.

* export `GITHUB_SECRET` to set the github OAuth secret.
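A hedged sketch of that export step (the variable names come from the list
above; the values here are placeholders, not real defaults):

    # Hypothetical values -- set these before building the image.
    export GITHUB_ORGANISATION=example-org
    export GITHUB_ADMINS="alice bob"
    export GITHUB_CLIENT_ID=0123456789abcdef
    export GITHUB_SECRET=changeme
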
@@ -1,10 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

# jenkins installs into /var/lib/jenkins which is rather restrictive.
mv /var/lib/jenkins /mnt/
ln -s /mnt/jenkins /var/lib/jenkins


@@ -1,10 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

install-packages jenkins

# jenkins installs into /var/lib/jenkins which is rather restrictive.
mv /var/lib/jenkins /mnt/
ln -s /mnt/jenkins /var/lib/jenkins
@@ -1,13 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

source $(dirname $0)/defaults

if [ -n "${JENKINS_PLUGINS}" ];then
    for plugin in ${JENKINS_PLUGINS//,/ }; do
        wget -q http://updates.jenkins-ci.org/latest/${plugin}.hpi -O /var/lib/jenkins/plugins/${plugin}.hpi
    done
    chown jenkins:nogroup /var/lib/jenkins/plugins/*
fi
@@ -1,5 +0,0 @@
set -e

#List of plugins to install, comma separated list
#full list of plugins available at: http://updates.jenkins-ci.org/download/plugins/
#JENKINS_PLUGINS="github-oauth,ircbot,debian-package-builder"
@@ -1,8 +0,0 @@
#!/bin/bash
# Add the Jenkins package archive

set -e
set -o xtrace

wget -q -O - http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key | apt-key add -
echo deb http://pkg.jenkins-ci.org/debian binary/ > /etc/apt/sources.list.d/jenkins.list
@@ -1 +0,0 @@
os-config-applier
@@ -1,124 +0,0 @@
[DEFAULT]
admin_token = {{admin-token}}

debug = True

[sql]
connection = {{keystone.db}}

[identity]
driver = keystone.identity.backends.sql.Identity

[catalog]
driver = keystone.catalog.backends.sql.Catalog

[token]
driver = keystone.token.backends.kvs.Token

# Amount of time a token should remain valid (in seconds)
# expiration = 86400

[policy]
# driver = keystone.policy.backends.sql.Policy

[ec2]
# driver = keystone.contrib.ec2.backends.kvs.Ec2

[ssl]
#enable = True
#certfile = /etc/keystone/ssl/certs/keystone.pem
#keyfile = /etc/keystone/ssl/private/keystonekey.pem
#ca_certs = /etc/keystone/ssl/certs/ca.pem
#cert_required = True

[signing]
#token_format = PKI
#certfile = /etc/keystone/ssl/certs/signing_cert.pem
#keyfile = /etc/keystone/ssl/private/signing_key.pem
#ca_certs = /etc/keystone/ssl/certs/ca.pem
#key_size = 1024
#valid_days = 3650
#ca_password = None

[ldap]

[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory

[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory

[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory

[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory

[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory

[filter:user_crud_extension]
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory

[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory

[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory

[filter:s3_extension]
paste.filter_factory = keystone.contrib.s3:S3Extension.factory

[filter:url_normalize]
paste.filter_factory = keystone.middleware:NormalizingFilter.factory

[filter:sizelimit]
paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory

[filter:stats_monitoring]
paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory

[filter:stats_reporting]
paste.filter_factory = keystone.contrib.stats:StatsExtension.factory

[app:public_service]
paste.app_factory = keystone.service:public_app_factory

[app:service_v3]
paste.app_factory = keystone.service:v3_app_factory

[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory

[pipeline:public_api]
pipeline = sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service

[pipeline:admin_api]
pipeline = sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service

[pipeline:api_v3]
pipeline = sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3

[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory

[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory

[pipeline:public_version_api]
pipeline = sizelimit stats_monitoring url_normalize xml_body public_version_service

[pipeline:admin_version_api]
pipeline = sizelimit stats_monitoring url_normalize xml_body admin_version_service

[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/v3 = api_v3
/ = public_version_api

[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/v3 = api_v3
/ = admin_version_api
@@ -1,33 +0,0 @@
[loggers]
keys=root

[formatters]
keys=normal,normal_with_name,debug

[handlers]
keys=production,devel

[logger_root]
level=WARNING
handlers=

[handler_production]
class=handlers.SysLogHandler
level=ERROR
formatter=normal_with_name
args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)

[handler_devel]
class=StreamHandler
level=NOTSET
formatter=debug
args=(sys.stdout,)

[formatter_normal]
format=%(asctime)s %(levelname)s %(message)s

[formatter_normal_with_name]
format=(%(name)s): %(asctime)s %(levelname)s %(message)s

[formatter_debug]
format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
@@ -1,57 +0,0 @@
{
    "admin_required": [["role:admin"], ["is_admin:1"]],

    "identity:get_service": [["rule:admin_required"]],
    "identity:list_services": [["rule:admin_required"]],
    "identity:create_service": [["rule:admin_required"]],
    "identity:update_service": [["rule:admin_required"]],
    "identity:delete_service": [["rule:admin_required"]],

    "identity:get_endpoint": [["rule:admin_required"]],
    "identity:list_endpoints": [["rule:admin_required"]],
    "identity:create_endpoint": [["rule:admin_required"]],
    "identity:update_endpoint": [["rule:admin_required"]],
    "identity:delete_endpoint": [["rule:admin_required"]],

    "identity:get_domain": [["rule:admin_required"]],
    "identity:list_domains": [["rule:admin_required"]],
    "identity:create_domain": [["rule:admin_required"]],
    "identity:update_domain": [["rule:admin_required"]],
    "identity:delete_domain": [["rule:admin_required"]],

    "identity:get_project": [["rule:admin_required"]],
    "identity:list_projects": [["rule:admin_required"]],
    "identity:list_user_projects": [["rule:admin_required"], ["user_id:%(user_id)s"]],
    "identity:create_project": [["rule:admin_required"]],
    "identity:update_project": [["rule:admin_required"]],
    "identity:delete_project": [["rule:admin_required"]],

    "identity:get_user": [["rule:admin_required"]],
    "identity:list_users": [["rule:admin_required"]],
    "identity:create_user": [["rule:admin_required"]],
    "identity:update_user": [["rule:admin_required"]],
    "identity:delete_user": [["rule:admin_required"]],

    "identity:get_credential": [["rule:admin_required"]],
    "identity:list_credentials": [["rule:admin_required"]],
    "identity:create_credential": [["rule:admin_required"]],
    "identity:update_credential": [["rule:admin_required"]],
    "identity:delete_credential": [["rule:admin_required"]],

    "identity:get_role": [["rule:admin_required"]],
    "identity:list_roles": [["rule:admin_required"]],
    "identity:create_role": [["rule:admin_required"]],
    "identity:update_roles": [["rule:admin_required"]],
    "identity:delete_roles": [["rule:admin_required"]],

    "identity:check_grant": [["rule:admin_required"]],
    "identity:list_grants": [["rule:admin_required"]],
    "identity:create_grant": [["rule:admin_required"]],
    "identity:revoke_grant": [["rule:admin_required"]],

    "identity:get_policy": [["rule:admin_required"]],
    "identity:list_policies": [["rule:admin_required"]],
    "identity:create_policy": [["rule:admin_required"]],
    "identity:update_policy": [["rule:admin_required"]],
    "identity:delete_policy": [["rule:admin_required"]]
}
@@ -1,4 +0,0 @@
This element installs the Keystone service from git.

Upon booting the image, the service should be running on port 5000.

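A quick smoke test, assuming the booted image behaves as described above
(the root path of a Keystone endpoint returns its version document):

    # Should return the Keystone version document as JSON.
    curl -s http://localhost:5000/
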
@@ -1,3 +0,0 @@
keystone-config
os-refresh-config
os-svc-install
@@ -1,3 +0,0 @@
#!/bin/bash
set -eux
os-svc-install -n keystone -u keystone -r https://github.com/openstack/keystone.git -c "/opt/stack/keystone/bin/keystone-all"
@@ -1,9 +0,0 @@
#!/bin/bash
set -eux

os-svc-install -n keystone -u keystone -r https://github.com/openstack/keystone.git
os-svc-daemon keystone keystone keystone-all "--config-dir /etc/keystone --log-config /etc/keystone/logging.conf"
install -m 0755 -o keystone -g keystone -d /etc/keystone/ssl

install -m 0755 -o root -g root $(dirname $0)/../post-configure \
    $(os-refresh-config --print-base)/post-configure.d/70-keystone
@@ -1,3 +0,0 @@
#!/bin/bash
set -eu
service keystone restart
@@ -1,41 +0,0 @@
Migrate data from another MySQL server into the local one using
os-config-applier and os-refresh-config.

Please note the migration process is *destructive* to any data currently
in the MySQL database running on the target host. Safeguards are in
place to ensure the process only happens once on any machine.

Configuration
-------------

Pass in Heat Metadata with the following structure in the
OpenStack::Config sub-key.

    mysql:
      users:
        root:
          username: rootuser
          password: XXXXXXX
        dump:
          username: dumpuser
          password: XXXXXXX
    mysql-migration:
      bootstrap_host: x.y.z
      slave_user: slave-bot1
      slave_password: XXXXXXXX

The migration process assumes `dump` and `root` exist on the
`bootstrap_host` and have access from this host.

The `dump` user will be used to dump data from `bootstrap_host`. The
`root` user will be used for localhost access after the database is
migrated. If `slave_user` and `slave_password` are set to non-empty
strings, replication will be set up against the `bootstrap_host` using
this user/password combination.

Special /root/.my.cnf
---------------------

As a convenience, we copy the given `dump` and `root` user names and
passwords to /root/.my.cnf after migration. If this file is overwritten,
they will also be available as /root/metadata.my.cnf
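A minimal sanity check after migration, assuming replication was configured
with `slave_user`/`slave_password` as described above (SHOW SLAVE STATUS is
standard MySQL; the defaults file is the one this element writes):

    # Slave_IO_Running and Slave_SQL_Running should both report Yes.
    mysql --defaults-extra-file=/root/.my.cnf -e 'SHOW SLAVE STATUS\G'
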
@@ -1,3 +0,0 @@
mysql
os-config-applier
os-refresh-config
@@ -1,4 +0,0 @@
MIGRATION_HOST={{mysql-migration.bootstrap_host}}
MIGRATION_USER={{mysql-migration.slave_user}}
MIGRATION_PASSWORD={{mysql-migration.slave_password}}
MIGRATION_DUMP_USER={{mysql-migration.users.dump.username}}
@@ -1,10 +0,0 @@
{{#mysql-migration.users.root}}
[client]
user={{username}}
password={{password}}
{{/mysql-migration.users.root}}
{{#mysql-migration.users.dump}}
[mysqldump]
user={{username}}
password={{password}}
{{/mysql-migration.users.dump}}
@@ -1,78 +0,0 @@
#!/bin/bash
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

set -eux
set -o pipefail

# Quietly go away unless a migration has been asked for
DEFAULTS=/etc/mysql/migration_default
[ -e $DEFAULTS ] || exit 0
source $DEFAULTS

DONE_FILE=/etc/mysql/migration_done
if [ -e $DONE_FILE ] ; then
    echo migration from [$MIGRATION_HOST] already completed.
    ls -l $DONE_FILE
    exit 0
fi

# MySQL may be stopped pre-configuration, so try to start it
if [ -d /etc/init ] ; then
    # Upstart: During initial boot, mysql will start in parallel with os-refresh-config
    # wait-for-state is a job that allows blocking until a particular state is reached.
    start wait-for-state WAIT_FOR=mysql WAITER=$(basename $0) WAIT_FOREVER=Y TARGET_GOAL=start WAIT_STATE=running
else
    service mysql start || :
fi

local_mysql() {
    if [ -e /root/.my.cnf ] ; then
        mysql --defaults-extra-file=/root/.my.cnf "$@"
    else
        mysql "$@"
    fi
}

local_mysql -e 'SHOW GRANTS'
# This runs as root. We assume root has a .my.cnf or access
# via localhost.
if [ -n "$MIGRATION_HOST" ] ; then
    local_mysql -e 'STOP SLAVE' || :
    # If we are planning on setting up a full slave
    SLAVE_OPTS=""
    if [ -n "$MIGRATION_USER" ] && [ -n "$MIGRATION_PASSWORD" ] ; then
        local_mysql -e "CHANGE MASTER TO master_host='${MIGRATION_HOST}', master_user='${MIGRATION_USER}', master_password='${MIGRATION_PASSWORD}'"
        SLAVE_OPTS="--master-data"
    fi
    mysqldump --defaults-extra-file=/root/metadata.my.cnf \
        -u $MIGRATION_DUMP_USER \
        --single-transaction \
        --all-databases \
        $SLAVE_OPTS -h $MIGRATION_HOST | local_mysql

    # After this following command, our ~/.my.cnf may stop working as its
    # password may change due to the dump loaded above.
    local_mysql -e 'FLUSH PRIVILEGES'

    # Now that database has been loaded, use creds that should match
    cp -f /root/metadata.my.cnf /root/.my.cnf
    # Now get the slave going if creds were provided
    if [ -n "$MIGRATION_USER" ] && [ -n "$MIGRATION_PASSWORD" ] ; then
        local_mysql -e "START SLAVE"
    fi
    touch $DONE_FILE
fi
@@ -1,3 +0,0 @@
Sets up a MySQL server install in the image.

TODO: auto-tune settings based on host resources or metadata service.
@@ -1,2 +0,0 @@
os-config-applier
os-refresh-config
@@ -1,11 +0,0 @@
#!/bin/sh

# Install controller base required packages

set -e
set -o xtrace

install -D -m 0644 -o root -g root $(dirname $0)/my.cnf /etc/mysql/my.cnf
install $(dirname $0)/mysql-set-server-id.upstart /etc/init/mysql-set-server-id.conf

install-packages sysstat mytop percona-toolkit mysql-server-5.5 mysql-client-5.5 python-mysqldb
@@ -1,7 +0,0 @@
#!/bin/bash
# Install the haveged daemon so ssh config on startup isn't glacial.

set -e
set -o xtrace

install-packages haveged
@@ -1,10 +0,0 @@
#!/bin/sh

# Build iscsi modules with installed kernel

set -e
set -o xtrace

apt-get clean


@@ -1,64 +0,0 @@
[mysql]
port = 3306
socket = /var/run/mysqld/mysqld.sock

[mysqld]

# GENERAL #
user = mysql
default_storage_engine = InnoDB
socket = /var/run/mysqld/mysqld.sock
pid_file = /var/lib/mysql/mysql.pid
bind-address = 0.0.0.0

# MyISAM #
key_buffer_size = 32M
myisam_recover = FORCE,BACKUP

# SAFETY #
max_allowed_packet = 16M
max_connect_errors = 1000000
skip_name_resolve
sysdate_is_now = 1
innodb = FORCE
innodb_strict_mode = 1

# DATA STORAGE #
datadir = /var/lib/mysql/

# CACHES AND LIMITS #
tmp_table_size = 32M
max_heap_table_size = 32M
query_cache_type = 0
query_cache_size = 0
max_connections = 500
thread_cache_size = 50
open_files_limit = 65535
table_definition_cache = 4096
table_open_cache = 4096

# INNODB #
innodb_flush_method = O_DIRECT
innodb_log_files_in_group = 2
innodb_log_file_size = 64M
innodb_flush_log_at_trx_commit = 2
innodb_file_per_table = 1
innodb_buffer_pool_size = 592M
# TODO
# innodb_read_io_threads
# innodb_write_io_threads


# LOGGING #
log_error = /var/log/mysql/error.log
log_queries_not_using_indexes = 1
slow_query_log = 1
slow_query_log_file = /var/log/mysql/mysql-slow.log

# server_id set in /etc/mysql/conf.d/server_id.cnf
# server_id = 1
log_bin = /var/lib/mysql/mysql-bin
expire_logs_days = 7
max_binlog_size = 100M
binlog_format = ROW
!includedir /etc/mysql/conf.d/
@@ -1,27 +0,0 @@
# vim: syntax=upstart
description "Set mysql server_id based on instance-id"

start on starting mysql
task

env INSTANCE_ID="/var/lib/cloud/data/instance-id"
env CONF_TARGET="/etc/mysql/conf.d/server_id.cnf"

pre-start script
    if ! [ -e $INSTANCE_ID ] ; then
        stop
        exit 0
    fi
end script

script
    instance=$(cat $INSTANCE_ID)
    server_id=$(python -c "print 0x${instance##i-}")
    cat > $CONF_TARGET.new <<EOF
# Generated by mysql-set-server-id upstart job $(date)
# From $INSTANCE_ID ${instance}
[mysqld]
server_id = $server_id
EOF
    mv -f $CONF_TARGET.new $CONF_TARGET
end script
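For illustration, a worked example of the server_id derivation in the script
block above, using a hypothetical EC2-style instance id (`${instance##i-}`
strips the "i-" prefix and the remainder is interpreted as hex):

    instance=i-0000001a
    python -c "print 0x${instance##i-}"    # prints 26 (0x1a)
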
@@ -1 +0,0 @@
{{mysql.create-users}}
@@ -1 +0,0 @@
[{"username": "root"}, {"username": "debian-sys-maint"}]
@ -1,87 +0,0 @@
#!/usr/bin/python
#
# Assert users that came from metadata config
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import subprocess
import MySQLdb
import logging
import argparse
import os
import json
from base64 import b64encode

logging.basicConfig()
logger = logging.getLogger('mysql-users')

# Root should have a my.cnf setup
conn = MySQLdb.Connect(read_default_file=os.path.expanduser('~/.my.cnf'))
cursor = conn.cursor()
rows = cursor.execute("SELECT DISTINCT User FROM mysql.user WHERE user != ''")
existing = set([x[0] for x in cursor.fetchmany(size=rows)])
cursor.close()
should_exist = set()
by_user = {}


def load_userfile(path):
    global should_exist
    global by_user
    if os.path.exists(path):
        with open(path) as dbusers_file:
            db_users = json.load(dbusers_file)
            if not isinstance(db_users, list):
                raise ValueError('%s must be a list' % (path))
            for dbvalues in db_users:
                username = dbvalues['username']
                should_exist.add(username)
                by_user[username] = dbvalues

parser = argparse.ArgumentParser()
parser.add_argument('--noop', '-n', default=False, action='store_true')

opts = parser.parse_args()

load_userfile('/etc/mysql/static-dbusers.json')
load_userfile('/etc/mysql/dbusers.json')

to_delete = existing - should_exist
to_create = should_exist - existing

for createuser in to_create:
    dbvalue = by_user[createuser]
    with open('/dev/urandom', 'rb') as urandom:
        password = b64encode(urandom.read(30))
    cmd = "GRANT ALL PRIVILEGES ON `%s`.* TO `%s`@'%%' IDENTIFIED BY '%s'" % (
        dbvalue['database'], dbvalue['username'], password)
    if opts.noop:
        print "%s" % (cmd)
    else:
        cursor = conn.cursor()
        cursor.execute(cmd)
        cursor.close()
    # Inform Heat of new password for this user
    cmd = ['/opt/aws/bin/cfn-signal', '-i', dbvalue['username'],
           '-s', 'true', '--data', password, dbvalue['userhandle']]
    if opts.noop:
        print cmd
    else:
        subprocess.check_call(cmd)

if to_delete:
    logger.warn('The following users are not accounted for: %s' % to_delete)
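A dry run prints the GRANT statements and cfn-signal invocations without touching anything; assuming the script above is installed on the PATH as mysql-users:

    sudo mysql-users --noop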
@ -1,16 +0,0 @@
#!/bin/sh

set -e
set -o xtrace

# add Percona GPG key
#gpg --keyserver hkp://keys.gnupg.net --recv-keys 1C4CBDCDCD2EFD2A
#gpg -a --export CD2EFD2A | apt-key add -

# add Percona repo
# XXX: autodetect distribution when Percona supports Quantal
#VER='precise'
#cat <<EOL > /etc/apt/sources.list.d/percona
#deb http://repo.percona.com/apt $VER main
#deb-src http://repo.percona.com/apt $VER main
#EOL
@ -1,2 +0,0 @@
Installs Nova API service from github.
@ -1 +0,0 @@
os-svc-install
@ -1,5 +0,0 @@
#!/bin/sh
set -eux

os-svc-install -n nova-api -u nova -r https://github.com/openstack/nova.git -c "/opt/stack/nova/bin/nova-api"
@ -1 +0,0 @@
Sets up a nova (kvm) install in the image.
@ -1,5 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
sed -i -r 's/^\s*#(net\.ipv4\.ip_forward=1.*)/\1/' /etc/sysctl.conf
echo 1 > /proc/sys/net/ipv4/ip_forward
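To verify forwarding is enabled after this runs:

    sysctl net.ipv4.ip_forward   # expected output: net.ipv4.ip_forward = 1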
@ -1,11 +0,0 @@
#!/bin/bash
ntpfile=`mktemp`
cat << EOF > $ntpfile
server ntp.ubuntu.com iburst
server 127.127.1.0
fudge 127.127.1.0 stratum 10
EOF

mv /etc/ntp.conf /etc/ntp.conf.orig
mv $ntpfile /etc/ntp.conf
service ntp restart
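Once ntp has restarted, peer state can be inspected with:

    ntpq -p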
@ -1,44 +0,0 @@
#!/bin/bash
set -e
set -o xtrace

source $(dirname $0)/defaults
#MYSQL_ADMPASS
#MYSQL_NOVAPASS
#MYSQL_GLANCEPASS
#MYSQL_KEYSTONEPASS
#MYSQL_CINDERPASS


service mysql stop || true
MYSQL_BOOTSTRAP="/usr/sbin/mysqld --bootstrap --user=mysql --skip-grant-tables"
sqltfile=`mktemp`
cat <<EOF > $sqltfile
USE mysql;
UPDATE user SET password=PASSWORD("$MYSQL_ADMPASS") WHERE user='root';
EOF
$MYSQL_BOOTSTRAP < $sqltfile
rm -f $sqltfile
sed -i 's/^bind-address/#bind-address/' /etc/mysql/my.cnf
service mysql start

sqltfile=`mktemp`
cat <<EOF > $sqltfile
CREATE DATABASE IF NOT EXISTS nova;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '${MYSQL_NOVAPASS}';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '${MYSQL_NOVAPASS}';

CREATE DATABASE IF NOT EXISTS cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '${MYSQL_CINDERPASS}';

CREATE DATABASE IF NOT EXISTS keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '${MYSQL_KEYSTONEPASS}';

CREATE DATABASE IF NOT EXISTS glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '${MYSQL_GLANCEPASS}';

FLUSH PRIVILEGES;
EOF

mysql -uroot --password=$MYSQL_ADMPASS < $sqltfile
rm -f $sqltfile
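A reasonable smoke test after this script runs, confirming the root password took and the four databases exist:

    mysql -uroot --password=$MYSQL_ADMPASS -e 'SHOW DATABASES;'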
@ -1,8 +0,0 @@
#!/bin/bash
set -e
set -o xtrace

source $(dirname $0)/defaults
#RABBIT_PASS

rabbitmqctl change_password guest ${RABBIT_PASS}
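To confirm the guest account is still present (the new password itself can only be verified by connecting with it):

    rabbitmqctl list_users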
@ -1,19 +0,0 @@
#!/bin/bash
set -e
set -o xtrace

source $(dirname $0)/defaults
#AUTH_TOKEN
#MYSQL_KEYSTONEPASS
#ADMIN_PASSWORD
#HOST_IP

KEYSTONE_FILE="/etc/keystone/keystone.conf"

SQL_CONNECTION="mysql://keystone:${MYSQL_KEYSTONEPASS}@localhost:3306/keystone"

sed -e "s,^connection\s*=\s*.\+$,connection = $SQL_CONNECTION," -i ${KEYSTONE_FILE}
sed -e 's|^[#]*[ \t]*admin_token[ \t]*=.*|admin_token = '${AUTH_TOKEN}'|' -i ${KEYSTONE_FILE}

service keystone restart
keystone-manage db_sync
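With the admin_token in place, a token-authenticated request makes a quick smoke test (flag names as supported by the keystone client of this era):

    keystone --token ${AUTH_TOKEN} --endpoint http://localhost:35357/v2.0 tenant-list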
@ -1,72 +0,0 @@
#!/bin/bash
#
# Keystone Data
#
# Description: Fill Keystone with data.

# Mainly inspired by http://www.hastexo.com/resources/docs/installing-openstack-essex-20121-ubuntu-1204-precise-pangolin
# Written by Martin Gerhard Loschwitz / Hastexo
# Modified by Emilien Macchi / StackOps
#
# Support: openstack@lists.launchpad.net
# License: Apache Software License (ASL) 2.0
#
source $(dirname $0)/defaults

export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=${ADMIN_PASSWORD}
export OS_AUTH_URL="http://localhost:5000/v2.0/"
export SERVICE_ENDPOINT="http://localhost:35357/v2.0"
SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
export SERVICE_TOKEN=${SERVICE_TOKEN}

get_id () {
    echo `$@ | awk '/ id / { print $4 }'`
}

# Tenants
ADMIN_TENANT=$(get_id keystone tenant-create --name=admin)
SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME)
DEMO_TENANT=$(get_id keystone tenant-create --name=demo)
INVIS_TENANT=$(get_id keystone tenant-create --name=invisible_to_admin)

# Users
ADMIN_USER=$(get_id keystone user-create --name=admin --pass="$ADMIN_PASSWORD" --email=admin@domain.com)
#DEMO_USER=$(get_id keystone user-create --name=demo --pass="$ADMIN_PASSWORD" --email=demo@domain.com)

# Roles
ADMIN_ROLE=$(get_id keystone role-create --name=admin)
KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin)
KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin)

# Add Roles to Users in Tenants
keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $ADMIN_TENANT
#keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $DEMO_TENANT
keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONEADMIN_ROLE --tenant-id $ADMIN_TENANT
keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONESERVICE_ROLE --tenant-id $ADMIN_TENANT

# The Member role is used by Horizon and Swift
MEMBER_ROLE=$(get_id keystone role-create --name=Member)
#keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $INVIS_TENANT
#keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $DEMO_TENANT
keystone user-role-add --user-id $ADMIN_USER --role-id $MEMBER_ROLE --tenant-id $ADMIN_TENANT

# Configure service users/roles
NOVA_USER=$(get_id keystone user-create --name=nova --pass="$NOVA_PASS" --tenant-id $SERVICE_TENANT --email=nova@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NOVA_USER --role-id $ADMIN_ROLE

GLANCE_USER=$(get_id keystone user-create --name=glance --pass="$GLANCE_PASSWORD" --tenant-id $SERVICE_TENANT --email=glance@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $GLANCE_USER --role-id $ADMIN_ROLE

SWIFT_USER=$(get_id keystone user-create --name=swift --pass="$SWIFT_PASSWORD" --tenant-id $SERVICE_TENANT --email=swift@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE

RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NOVA_USER --role-id $RESELLER_ROLE

QUANTUM_USER=$(get_id keystone user-create --name=quantum --pass="$QUANTUM_PASSWORD" --tenant-id $SERVICE_TENANT --email=quantum@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $QUANTUM_USER --role-id $ADMIN_ROLE

CINDER_USER=$(get_id keystone user-create --name=cinder --pass="$CINDER_PASSWORD" --tenant-id $SERVICE_TENANT --email=cinder@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $CINDER_USER --role-id $ADMIN_ROLE
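To review what was created, in the same shell (so the OS_* exports and $ADMIN_USER/$ADMIN_TENANT are still set):

    keystone user-list
    keystone user-role-list --user-id $ADMIN_USER --tenant-id $ADMIN_TENANT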
@ -1,67 +0,0 @@
#!/bin/bash
#
# Keystone Endpoints
#
# Description: Create service endpoints.

# Mainly inspired by http://www.hastexo.com/resources/docs/installing-openstack-essex-20121-ubuntu-1204-precise-pangolin
# Written by Martin Gerhard Loschwitz / Hastexo
# Modified by Emilien Macchi / StackOps
#
# Support: openstack@lists.launchpad.net
# License: Apache Software License (ASL) 2.0
#
source $(dirname $0)/defaults

# MySQL definitions
MYSQL_USER=keystone
MYSQL_DATABASE=keystone
MYSQL_HOST=localhost

# Keystone definitions
KEYSTONE_REGION=RegionOne
export SERVICE_TOKEN=password
export SERVICE_ENDPOINT="http://localhost:35357/v2.0"

keystone service-create --name nova --type compute --description 'OpenStack Compute Service'
keystone service-create --name cinder --type volume --description 'OpenStack Volume Service'
keystone service-create --name glance --type image --description 'OpenStack Image Service'
keystone service-create --name swift --type object-store --description 'OpenStack Storage Service'
keystone service-create --name keystone --type identity --description 'OpenStack Identity'
keystone service-create --name ec2 --type ec2 --description 'OpenStack EC2 service'
keystone service-create --name quantum --type network --description 'OpenStack Networking service'

create_endpoint () {
    case $1 in
    compute)
        keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"${HOST_IP}"':8774/v2/$(tenant_id)s' --adminurl 'http://'"$HOST_IP"':8774/v2/$(tenant_id)s' --internalurl 'http://'"$HOST_IP"':8774/v2/$(tenant_id)s'
        ;;
    volume)
        keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$HOST_IP"':8776/v1/$(tenant_id)s' --adminurl 'http://'"$HOST_IP"':8776/v1/$(tenant_id)s' --internalurl 'http://'"$HOST_IP"':8776/v1/$(tenant_id)s'
        ;;
    image)
        keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$HOST_IP"':9292/v2' --adminurl 'http://'"$HOST_IP"':9292/v2' --internalurl 'http://'"$HOST_IP"':9292/v2'
        ;;
    object-store)
        if [ $SWIFT_HOST_IP ]; then
            keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$SWIFT_HOST_IP"':8080/v1/AUTH_$(tenant_id)s' --adminurl 'http://'"$SWIFT_HOST_IP"':8080/v1' --internalurl 'http://'"$SWIFT_HOST_IP"':8080/v1/AUTH_$(tenant_id)s'
        else
            keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$HOST_IP"':8080/v1/AUTH_$(tenant_id)s' --adminurl 'http://'"$HOST_IP"':8080/v1' --internalurl 'http://'"$HOST_IP"':8080/v1/AUTH_$(tenant_id)s'
        fi
        ;;
    identity)
        keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$HOST_IP"':5000/v2.0' --adminurl 'http://'"$HOST_IP"':35357/v2.0' --internalurl 'http://'"$HOST_IP"':5000/v2.0'
        ;;
    ec2)
        keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$HOST_IP"':8773/services/Cloud' --adminurl 'http://'"$HOST_IP"':8773/services/Admin' --internalurl 'http://'"$HOST_IP"':8773/services/Cloud'
        ;;
    network)
        keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$HOST_IP"':9696/' --adminurl 'http://'"$HOST_IP"':9696/' --internalurl 'http://'"$HOST_IP"':9696/'
        ;;
    esac
}

for i in compute volume image object-store identity ec2 network; do
    id=`mysql -h "$MYSQL_HOST" -u "$MYSQL_USER" -p"$MYSQL_KEYSTONEPASS" "$MYSQL_DATABASE" -ss -e "SELECT id FROM service WHERE type='"$i"';"` || exit 1
    create_endpoint $i $id
done
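The resulting endpoints can be inspected afterwards with:

    keystone endpoint-list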
@ -1,35 +0,0 @@
#!/bin/bash
set -e
set -o xtrace

source $(dirname $0)/defaults
#MYSQL_GLANCEPASS
#GLANCE_PASS
#RABBIT_PASS

GLANCE_DIR="/etc/glance/"

SQL_CONNECTION="mysql://glance:${MYSQL_GLANCEPASS}@localhost:3306/glance"

sed -e "s,^sql_connection\s*=\s*.\+$,sql_connection = $SQL_CONNECTION," -i ${GLANCE_DIR}/glance-api.conf
sed -e "s,^sql_connection\s*=\s*.\+$,sql_connection = $SQL_CONNECTION," -i ${GLANCE_DIR}/glance-registry.conf

sed -e "s,^admin_tenant_name\s*=\s*.\+$,admin_tenant_name = service," -i ${GLANCE_DIR}/glance-api.conf
sed -e "s,^admin_tenant_name\s*=\s*.\+$,admin_tenant_name = service," -i ${GLANCE_DIR}/glance-registry.conf

sed -e "s,^admin_user\s*=\s*.\+$,admin_user = glance," -i ${GLANCE_DIR}/glance-api.conf
sed -e "s,^admin_user\s*=\s*.\+$,admin_user = glance," -i ${GLANCE_DIR}/glance-registry.conf

sed -e "s,^admin_password\s*=\s*.\+$,admin_password = ${GLANCE_PASS}," -i ${GLANCE_DIR}/glance-api.conf
sed -e "s,^admin_password\s*=\s*.\+$,admin_password = ${GLANCE_PASS}," -i ${GLANCE_DIR}/glance-registry.conf

sed -e "s,^notifier_strategy\s*=\s*.\+$,notifier_strategy = rabbit," -i ${GLANCE_DIR}/glance-api.conf
sed -e "s,^rabbit_password\s*=\s*.\+$,rabbit_password = ${RABBIT_PASS}," -i ${GLANCE_DIR}/glance-api.conf

service glance-api restart
service glance-registry restart
glance-manage db_sync

#wget http://uec-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz
#tar xzvf ubuntu-12.04-server-cloudimg-amd64.tar.gz
#glance image-create --name="Ubuntu 12.04 UEC" --public --container-format=ovf --disk-format=qcow2 < precise-server-cloudimg-amd64.img
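The commented-out image upload above doubles as a usage example; a lighter check that the API and registry are wired up, given OS_* credentials in the environment:

    glance image-list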
@ -1,84 +0,0 @@
#!/bin/bash
set -e
set -o xtrace

source $(dirname $0)/defaults
#HOST_IP

NOVA_DIR="/etc/nova/"

SQL_CONNECTION="mysql://nova:${MYSQL_NOVAPASS}@localhost:3306/nova"

sed -e "s,^admin_tenant_name\s*=\s*.\+$,admin_tenant_name = service," -i ${NOVA_DIR}/api-paste.ini
sed -e "s,^admin_user\s*=\s*.\+$,admin_user = nova," -i ${NOVA_DIR}/api-paste.ini
sed -e "s,^admin_password\s*=\s*.\+$,admin_password = ${NOVA_PASS}," -i ${NOVA_DIR}/api-paste.ini

sed -i '/volume/d' /etc/nova/api-paste.ini

cat << EOF > ${NOVA_DIR}/nova.conf
[DEFAULT]
# MySQL Connection #
sql_connection=${SQL_CONNECTION}

# nova-scheduler #
rabbit_password=${RABBIT_PASS}
scheduler_driver=nova.scheduler.simple.SimpleScheduler

# nova-api #
cc_host=${HOST_IP}
auth_strategy=keystone
s3_host=${HOST_IP}
ec2_host=${HOST_IP}
nova_url=http://${HOST_IP}:8774/v1.1/
ec2_url=http://${HOST_IP}:8773/services/Cloud
keystone_ec2_url=http://${HOST_IP}:5000/v2.0/ec2tokens
api_paste_config=/etc/nova/api-paste.ini
allow_admin_api=true
use_deprecated_auth=false
ec2_private_dns_show_ip=True
dmz_cidr=169.254.169.254/32
ec2_dmz_host=${HOST_IP}
metadata_host=${HOST_IP}
metadata_listen=0.0.0.0
enabled_apis=ec2,osapi_compute,metadata

# Networking #
#network_api_class=nova.network.quantumv2.api.API
#quantum_url=http://${HOST_IP}:9696
#quantum_auth_strategy=keystone
#quantum_admin_tenant_name=service
#quantum_admin_username=quantum
#quantum_admin_password=password
#quantum_admin_auth_url=http://${HOST_IP}:35357/v2.0
#libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
#linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
#firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver

# Cinder #
volume_api_class=nova.volume.cinder.API

# Glance #
glance_api_servers=${HOST_IP}:9292
image_service=nova.image.glance.GlanceImageService

# novnc #
novnc_enable=true
novncproxy_base_url=http://${HOST_IP}:6080/vnc_auto.html
vncserver_proxyclient_address=127.0.0.1
vncserver_listen=0.0.0.0

# Misc #
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=true
EOF

nova-manage db sync
service nova-api restart
service nova-cert restart
service nova-consoleauth restart
service nova-scheduler restart
service novnc restart
service nova-network restart
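The state of the restarted services can then be checked with:

    nova-manage service list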
@ -1,27 +0,0 @@
set -e

DEFAULT_PASSWORD=${DEFAULT_PASSWORD:-password}

# HOST_IP
# Find the interface used for the default route
HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }')}
# Search for an IP unless one is set explicitly via the HOST_IP environment variable
HOST_IP=${HOST_IP:-$(LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/"); print parts[1]}')}

# Mysql Passwords
MYSQL_ADMPASS=${MYSQL_ADMPASS:-${DEFAULT_PASSWORD}}
MYSQL_NOVAPASS=${MYSQL_NOVAPASS:-${MYSQL_ADMPASS}}
MYSQL_GLANCEPASS=${MYSQL_GLANCEPASS:-${MYSQL_ADMPASS}}
MYSQL_KEYSTONEPASS=${MYSQL_KEYSTONEPASS:-${MYSQL_ADMPASS}}
MYSQL_CINDERPASS=${MYSQL_CINDERPASS:-${MYSQL_ADMPASS}}

# Rabbitmq Passwords
RABBIT_PASS=${RABBIT_PASS:-${DEFAULT_PASSWORD}}

# Keystone
AUTH_TOKEN=${AUTH_TOKEN:-${DEFAULT_PASSWORD}}
SERVICE_TOKEN=${AUTH_TOKEN}
ADMIN_PASSWORD=${ADMIN_PASSWORD:-${DEFAULT_PASSWORD}}

# Glance
GLANCE_PASS=${GLANCE_PASS:-${DEFAULT_PASSWORD}}
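Any of these values can be overridden from the environment before invoking a script that sources this file, e.g. (script name hypothetical):

    DEFAULT_PASSWORD=secret HOST_IP_IFACE=eth1 ./20-configure-mysql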
@ -1,8 +0,0 @@
#!/bin/sh

# Install controller base required packages

set -e
set -o xtrace

install-packages haproxy rabbitmq-server mysql-server ntp dkms
@ -1,9 +0,0 @@
#!/bin/sh

# Install controller openstack packages

set -e
set -o xtrace

install-packages keystone glance nova-api nova-cert nova-common nova-scheduler python-nova python-novaclient nova-consoleauth novnc nova-novncproxy cinder-api cinder-scheduler cinder-volume iscsitarget open-iscsi iscsitarget-dkms python-cinderclient nova-network
@ -1,27 +0,0 @@
#!/bin/sh

# Build iscsi modules with installed kernel

set -e
set -o xtrace

package=iscsitarget-dkms
name=iscsitarget

kernel_version=`ls /boot/vmlinuz-* -r -1| head -n1 | xargs basename |sed 's/vmlinuz-//'`

version=`dpkg-query -W -f='${Version}' "$package" \
    |rev|cut -d- -f2-|rev|cut -d':' -f2|tr -d "\n"`

isadded=`dkms status -m "$name" -v "$version"`

if [ "x${isadded}" = "x" ] ; then
    dkms add -m "$name" -v "$version" -k "$kernel_version" || true
fi

dkms build -m "$name" -v "$version" -k "$kernel_version" && dkms install -m "$name" -v "$version" -k "$kernel_version" || true
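Whether the module was actually built and installed for the running kernel can be checked with:

    dkms status -m iscsitarget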
@ -1,10 +0,0 @@
#!/bin/sh

# Clean the apt package cache

set -e
set -o xtrace

apt-get clean
@ -1,2 +0,0 @@
Installs nova-baremetal service from git.
@ -1,3 +0,0 @@
os-svc-install
os-config-applier
os-refresh-config
@ -1,55 +0,0 @@
#!/bin/bash
set -eux

function install_dnsmasq_upstart {
cat > /etc/init/nova-bm-dnsmasq.conf << eof
start on runlevel [2345]
stop on runlevel [016]
pre-start script
    mkdir -p /tftpboot
    chown -R nova:nova /tftpboot
    killall -9 dnsmasq || echo 'no dnsmasq running'
end script
task

script
    exec dnsmasq --conf-file= \\
        --port=0 \\
        --enable-tftp \\
        --tftp-root=/tftpboot \\
        --dhcp-boot=pxelinux.0 \\
        --bind-interfaces \\
        --pid-file=/var/run/dnsmasq.pid \\
        --interface=eth0 \\
        --dhcp-range=10.8.53.201,10.8.53.206,29
end script
eof
}

install-packages dnsmasq novnc dnsmasq-utils ebtables

os-svc-install -n nova -u nova -r https://github.com/openstack/nova.git

# for libvirt clouds only
install-packages libvirt-bin python-libvirt kvm pm-utils syslinux
usermod -a -G libvirtd nova

mkdir -p /var/run/nova/keys && chown -R nova:nova /var/run/nova/keys
os-svc-daemon nova-api nova nova-api "--config-dir /etc/nova"
os-svc-daemon nova-rpc-zmq-receiver nova nova-rpc-zmq-receiver "--config-dir /etc/nova"
os-svc-daemon nova-cert nova nova-cert "--config-dir /etc/nova"
os-svc-daemon nova-scheduler nova nova-scheduler "--config-dir /etc/nova --debug"
os-svc-daemon nova-consoleauth nova nova-consoleauth "--config-dir /etc/nova"
os-svc-daemon nova-conductor nova nova-conductor "--config-dir /etc/nova"
os-svc-daemon nova-compute nova nova-compute "--config-dir /etc/nova"
os-svc-daemon nova-baremetal-deploy-helper \
    nova nova-baremetal-deploy-helper "--config-dir /etc/nova"
install_dnsmasq_upstart

mkdir -p /tftpboot/pxelinux.cfg/
cp /usr/lib/syslinux/pxelinux.0 /tftpboot/
chown -R nova:nova /var/lib/misc/

echo "nova ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/nova
chmod 0440 /etc/sudoers.d/nova
visudo -c
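After boot, the generated job can be exercised by hand (upstart syntax), and the tftp root checked:

    start nova-bm-dnsmasq
    ls /tftpboot   # expect pxelinux.0 and pxelinux.cfg/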
@ -1,113 +0,0 @@
## NB: Unpolished config file
## This config file was taken directly from the upstream repo, and tweaked just enough to work.
## It has not been audited to ensure that everything present is either Heat controlled or a mandatory as-is setting.
## Please submit patches for any setting that should be deleted or Heat-configurable.
## https://github.com/stackforge/diskimage-builder

############
# Metadata #
############
[composite:metadata]
use = egg:Paste#urlmap
/: meta

[pipeline:meta]
pipeline = ec2faultwrap logrequest metaapp

[app:metaapp]
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory

#######
# EC2 #
#######

[composite:ec2]
use = egg:Paste#urlmap
/services/Cloud: ec2cloud

[composite:ec2cloud]
use = call:nova.api.auth:pipeline_factory
noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor

[filter:ec2faultwrap]
paste.filter_factory = nova.api.ec2:FaultWrapper.factory

[filter:logrequest]
paste.filter_factory = nova.api.ec2:RequestLogging.factory

[filter:ec2lockout]
paste.filter_factory = nova.api.ec2:Lockout.factory

[filter:ec2keystoneauth]
paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory

[filter:ec2noauth]
paste.filter_factory = nova.api.ec2:NoAuth.factory

[filter:cloudrequest]
controller = nova.api.ec2.cloud.CloudController
paste.filter_factory = nova.api.ec2:Requestify.factory

[filter:authorizer]
paste.filter_factory = nova.api.ec2:Authorizer.factory

[filter:validator]
paste.filter_factory = nova.api.ec2:Validator.factory

[app:ec2executor]
paste.app_factory = nova.api.ec2:Executor.factory

#############
# Openstack #
#############

[composite:osapi_compute]
use = call:nova.api.openstack.urlmap:urlmap_factory
/: oscomputeversions
/v1.1: openstack_compute_api_v2
/v2: openstack_compute_api_v2

[composite:openstack_compute_api_v2]
use = call:nova.api.auth:pipeline_factory
noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2

[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory

[filter:noauth]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory

[filter:ratelimit]
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory

[filter:sizelimit]
paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory

[app:osapi_compute_app_v2]
paste.app_factory = nova.api.openstack.compute:APIRouter.factory

[pipeline:oscomputeversions]
pipeline = faultwrap oscomputeversionapp

[app:oscomputeversionapp]
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory

##########
# Shared #
##########

[filter:keystonecontext]
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory

[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = {{keystone.host}}
auth_port = 35357
admin_password = {{service-password}}
auth_protocol = http
admin_tenant_name = service
admin_user = nova
signing_dir = /tmp/keystone-signing-nova
Some files were not shown because too many files have changed in this diff.