Currently we keep all our elements and library files in a top-level directory and install them into <root>/share/diskimage-builder/[elements|lib] (where root is either / or the root of a virtualenv). The problem with this is that editable/development installs (pip install -e) do *not* install data_files, so we have no canonical location to look for elements. That leads to the various odd workarounds we carry, such as the pile of guessing at the top of disk-image-create and the special test loader in tests/test_elements.py that lets us run the Python unit tests of elements that have them.

data_files is really the wrong mechanism for what are essentially assets of the program; it works well for things like config files, init.d files or documentation.

By moving the elements under the diskimage_builder package, we always know where they are relative to the code we import. In fact, pkg_resources has an API for exactly this, which we wrap in the new diskimage_builder/paths.py helper [1]. We use this helper in the couple of places that need the base-elements directory, and for the paths from which the library shell functions are sourced.

Elements such as svc-map and pkg-map include Python unit tests; tests/test_elements.py no longer needs to special-case loading them, because the normal subunit loader now finds them automatically.

A follow-on change (I69ca3d26fede0506a6353c077c69f735c8d84d28) moves disk-image-create to a regular Python entry point. Unfortunately, the files have to genuinely move for this to work with setuptools: you would think a symlink under diskimage_builder/[elements|lib] would do the job, but it does not.

[1] This API also handles things like extracting files from .zip archive modules, which we do not do; essentially, for us it returns a path relative to __file__.

Change-Id: I5e3e3c97f385b1a4ff2031a161a55b231895df5b
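As a concrete illustration of the pkg_resources approach described above, here is a minimal sketch of what a helper like diskimage_builder/paths.py might look like. The get_path() name and the hard-coded "elements"/"lib" subdirectory names are assumptions for the example, not necessarily the exact code in the change:

import os
import pkg_resources


def get_path(kind):
    # Resolve a directory that ships inside the diskimage_builder package,
    # e.g. get_path("elements") or get_path("lib"). This works for both
    # regular and editable (pip install -e) installs.
    if kind not in ("elements", "lib"):
        raise ValueError("unknown path request: %s" % kind)
    return os.path.abspath(
        pkg_resources.resource_filename("diskimage_builder", kind))

A shell entry point such as disk-image-create could then ask for these locations with a small python -c call instead of guessing where the files were installed.
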
echo "Starting HW Discovery"

function cpu_cores() {
    hwinfo --cpu | grep -c "Hardware Class: cpu"
}

function ram() {
    # XXX: /proc may not be the best place to get this from, but hwinfo
    # reports weird values (e.g. "1GB + 512MB" on a test VM of mine)
    _KB=$(grep MemTotal /proc/meminfo | awk '{ print $2 }')
    echo "$((_KB * 1024)) bytes"
}

function pxe_mac() {
    local bootif_re='BOOTIF=([^ ]+)' _mac
    if [[ $(cat /proc/cmdline) =~ $bootif_re ]]; then
        # If we were booted using pxelinux and its config file has the
        # IPAPPEND 2 stanza under the entry we booted from, then pxelinux
        # will have appended a BOOTIF argument to the kernel parameters telling
        # us what MAC address we are booting with. From that, we can derive the
        # boot interface with no problems.
        _mac="${BASH_REMATCH[1]//-/:}"
        _mac="${_mac#*:}"
    elif [[ -d /sys/firmware/efi ]] && which efibootmgr &>/dev/null; then
        # Likewise, if we booted via the network while running in UEFI mode, and
        # efibootmgr is installed, we can determine the MAC address of the nic we
        # booted from. It would be good to have code that can also do this using
        # efivars or parsing the stuff under /sys/firmware/efi/efivars directly,
        # but that is a trickier thing to do.
        local -A boot_entries
        local bootent_re='^Boot([0-9]{4})'
        local efimac_re='MAC\(([0-9a-f]+)'
        local k v current_bootent
        while read -r line; do
            k="${line%% *}"
            v="${line#* }"
            if [[ $k = BootCurrent:* ]]; then
                current_bootent="${line##BootCurrent: }"
            elif [[ $k =~ $bootent_re ]]; then
                boot_entries["${BASH_REMATCH[1]}"]="$v"
            fi
        done < <(efibootmgr -v)

        if [[ ${boot_entries["$current_bootent"]} =~ $efimac_re ]]; then
            # efibootmgr prints the MAC as a bare run of hex digits; re-insert
            # the colons two characters at a time.
            _mac=''
            for o in 0 2 4 6 8 10; do
                _mac+="${BASH_REMATCH[1]:$o:2}:"
            done
            _mac=${_mac%:}
        fi
    fi
    if [[ ! $_mac ]]; then
        # If none of the exact methods worked, fall back on the heuristic
        # method and just return the mac addresses of all the interfaces
        # that have a link. Hopefully whatever consumes this info is smarter
        # than we are.
        local _info1 _info2 _dev
        _info1=$(hwinfo --network | grep -B2 "Link detected: yes" | grep -C1 "HW Address:")
        _info2=$(echo "${_info1}" | awk '/Device File: (vlan*|br*)/{for(x=NR-2;x<=NR+2;x++)d[x];}{a[NR]=$0}END{for(i=1;i<=NR;i++)if(!(i in d))print a[i]}')
        _dev=$(echo "${_info1}" | grep "Device File:" | awk -F':' '{print $2}' | tr -d ' ')
        _mac=$(echo "${_info2}" | grep "HW Address:" | awk -F'ss:' '{print $2}' | tr -d ' ')
    fi
    echo $_mac
    export HW_DISCOVERY_BOOT_IFACE="$_mac"
}

function disk() {
    # XXX: This is returning only the first disk discovered, which is very
    # likely to be insufficient on machines that present us with multiple disks
    # XXX: This is likely reporting in TB, but the units are not guaranteed.
    # Maybe convert to bytes?
    lshw -C disk | grep size | awk -F'(' '{ print $2 }' | tr -d ')' | head -1
}

function raw_disk() {
    hwinfo --disk
}

function raw_network() {
    hwinfo --network
}

HW_DISCOVERY_OUTPUT=$(cat <<EOF
{
    "cpu cores" : "$(cpu_cores)",
    "disk size" : "$(disk)",
    "ram size" : "$(ram)",
    "pxe mac" : "$(pxe_mac)",
    "extra data" : {
        "raw disk" : "$(raw_disk | base64)",
        "raw network" : "$(raw_network | base64)",
        $_vendor_hwdiscovery_data
    }
}
EOF
)

# Print the resulting JSON for debugging purposes
echo $HW_DISCOVERY_OUTPUT

# Now submit the JSON
HW_DISCOVERY_DATA=$(echo ${HW_DISCOVERY_OUTPUT} | base64)
HW_DISCOVERY_URL=$(get_kernel_parameter HW_DISCOVERY_URL)
wget --post-data "hwdiscovery=true&hwdiscovery_data=${HW_DISCOVERY_DATA}" ${HW_DISCOVERY_URL}

sleep 30