36b59c001c
There is a wide variety of tracing options through the various shell
scripts. Some use "set -eux", others explicitly set xtrace and others
do nothing. There is a "-x" option to bin/disk-image-create, but it
doesn't flow down to the many scripts it calls.

This adds a global integer variable, DIB_DEBUG_TRACE, set by
disk-image-create. All scripts have a stanza added to detect this and
turn on tracing; any other tracing methods are rolled into this. The
standard header is:

---
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail
---

Multiple -x options can be specified to disk-image-create, which
increases the value of DIB_DEBUG_TRACE. If script authors feel their
script should only trace at higher levels, they should modify the
"-gt" value. If they feel it should trace by default, they can modify
the default value as well.

Changes in patch set 16: scripts which currently trace themselves by
default have retained this behaviour, with DIB_DEBUG_TRACE defaulting
to "1". This was done by running [1] on patch set 15. See the thread
beginning at [2].

dib-lint is also updated to look for the variable being matched.

[1] https://gist.github.com/ianw/71bbda9e6acc74ccd0fd
[2] http://lists.openstack.org/pipermail/openstack-dev/2014-November/051575.html

Change-Id: I6c5a962260741dcf6f89da9a33b96372a719b7b0
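For illustration only, a sketch of how the trace level is meant to be
used (the output name and elements below are hypothetical; the -x
accumulation and the "-gt" threshold are as described above):

---
# Each -x raises DIB_DEBUG_TRACE by one, so this build runs every
# hook script with DIB_DEBUG_TRACE=2.
bin/disk-image-create -x -x -o my-image vm rhel

# A script that should stay quiet until level 2 would raise its
# threshold in the standard header:
if [ "${DIB_DEBUG_TRACE:-0}" -gt 1 ]; then
    set -x
fi
---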
65 lines
2.5 KiB
Bash
Executable File
#!/bin/bash
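
# Standard tracing stanza: disk-image-create sets DIB_DEBUG_TRACE from
# its -x options; any value above the "-gt" threshold turns on xtrace
# for this script.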
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

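# ARCH and TARGET_ROOT must be provided by the calling environment;
# with set -eu in effect, a missing or empty value aborts the script
# at these tests.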
[ -n "$ARCH" ]
|
|
[ -n "$TARGET_ROOT" ]
|
|
|
|
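# Normalize the Debian-style "amd64" to the "x86_64" used in Red Hat
# artifact names.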
if [ 'amd64' = "$ARCH" ] ; then
    ARCH="x86_64"
fi

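# Default values; each can be overridden from the environment before
# calling disk-image-create.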
DIB_RELEASE=${DIB_RELEASE:-"6.5-20140603.0"}
DIB_CLOUD_IMAGES=${DIB_CLOUD_IMAGES:-http://rhn.redhat.com}
BASE_IMAGE_FILE=${BASE_IMAGE_FILE:-rhel-guest-image-$DIB_RELEASE.x86_64.qcow2}
BASE_IMAGE_TAR=$DIB_RELEASE-rhel-server-$ARCH-latest.tgz
CACHED_TAR=$DIB_IMAGE_CACHE/$BASE_IMAGE_TAR

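# When DIB_OFFLINE is set and a cached tarball already exists, skip
# the download path entirely; otherwise the cache-url helper fetches
# $BASE_IMAGE_FILE into $DIB_IMAGE_CACHE.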
if [ -n "$DIB_OFFLINE" -a -f "$CACHED_TAR" ] ; then
|
|
echo "Not checking freshness of cached $CACHED_TAR."
|
|
else
|
|
echo "Fetching Base Image"
|
|
$TMP_HOOKS_PATH/bin/cache-url $DIB_CLOUD_IMAGES/$BASE_IMAGE_FILE $DIB_IMAGE_CACHE/$BASE_IMAGE_FILE
|
|
|
|
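    # Repack only if no tarball exists yet or the fetched qcow2 is
    # newer than the cached tarball.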
    if [ ! -f $CACHED_TAR -o \
        $DIB_IMAGE_CACHE/$BASE_IMAGE_FILE -nt $CACHED_TAR ] ; then
        echo "Repacking base image as tarball."
        WORKING=$(mktemp -d)
        EACTION="rm -r $WORKING"
        trap "$EACTION" EXIT
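        # kpartx works on raw images, so convert the qcow2 first;
        # MAGIC_BIT ("p1") selects the first partition of the image.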
        RAW_FILE=$(basename $BASE_IMAGE_FILE)
        RAW_FILE=${RAW_FILE%.qcow2}.raw
        qemu-img convert -f qcow2 -O raw $DIB_IMAGE_CACHE/$BASE_IMAGE_FILE $WORKING/$RAW_FILE
        MAGIC_BIT=p1
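        # kpartx -av prints lines like "add map loop0p1 ..."; field 3
        # is the device-mapper name, which appears under /dev/mapper.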
        # NOTE: On RHEL, partprobe of /dev/loop0 does not create /dev/loop0p2,
        # while kpartx at least creates /dev/mapper/loop0p2.
        LOOPDEV=$(sudo kpartx -av $WORKING/$RAW_FILE | awk "/loop[0-9]+$MAGIC_BIT/ {print \$3}")
        # If running inside Docker, make our nodes manually, because
        # udev will not be working.
        if [ -f /.dockerenv ]; then
            sudo dmsetup --noudevsync mknodes
        fi
        export LOOPDEV=$LOOPDEV
        echo "Loop device is set to: $LOOPDEV"
        if ! timeout 5 sh -c "while ! [ -e /dev/mapper/$LOOPDEV ]; do sleep 1; done"; then
            echo "Error: Could not find /dev/mapper/$LOOPDEV"
            exit 1
        fi
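        # Each cleanup action is prepended to EACTION and the EXIT
        # trap re-armed, so teardown runs in reverse order of setup.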
EACTION="sudo kpartx -d $WORKING/$RAW_FILE;$EACTION"
|
|
trap "$EACTION" EXIT
|
|
mkdir $WORKING/mnt
|
|
sudo mount /dev/mapper/$LOOPDEV $WORKING/mnt
|
|
EACTION="sudo umount -f $WORKING/mnt;$EACTION"
|
|
trap "$EACTION" EXIT
|
|
# Chroot in so that we get the correct uid/gid
|
|
sudo chroot $WORKING/mnt bin/tar -cz . > $WORKING/tmp.tar
|
|
mv $WORKING/tmp.tar $DIB_IMAGE_CACHE/$BASE_IMAGE_TAR
|
|
fi
|
|
fi
|
|
# Extract the base image (use --numeric-owner to avoid UID/GID mismatch
# between the image tarball and the host OS, e.g. when building a RHEL
# image on an openSUSE host).
sudo tar -C $TARGET_ROOT --numeric-owner -xzf $DIB_IMAGE_CACHE/$BASE_IMAGE_TAR