Compare commits


9 commits

Author SHA1 Message Date
pedro 490a8a2e47 postpone color customization to indefinite future 2024-12-12 01:37:44 +01:00
pedro 6e709bc9ed refactor docker: remove install deps in deploy script, centralize them 2024-12-12 01:33:12 +01:00
pedro fde7173ea5 pxe/Makefile: avoid the clean method; move qemu-system to install dependencies 2024-12-11 19:49:33 +01:00
Thomas Nahuel Rusiecki 58ee9237c8 docker support for iso build added 2024-12-11 19:42:25 +01:00
Thomas Nahuel Rusiecki f69082dbc8 cabj colors to boot menu 2024-12-11 19:42:25 +01:00
Thomas Nahuel Rusiecki 51efdeb7ca deleted obsolet line 2024-12-11 19:42:25 +01:00
Thomas Nahuel Rusiecki 449209cf3a added clean pxe method to makefile 2024-12-11 19:42:25 +01:00
Thomas Nahuel Rusiecki 6fb8ed0d14 edge case: bootloader dependency added 2024-12-11 19:42:25 +01:00
pedro db63a9a747 pxe: change server_ip in .env.example 2024-12-11 19:41:54 +01:00
7 changed files with 152 additions and 165 deletions

Dockerfile (new file, 16 lines)

@ -0,0 +1,16 @@
FROM debian:bookworm-slim
# let install-dependencies.sh detect that it is running inside a Docker build
ENV DOCKER_BUILD true
# pre-install sudo, since install-dependencies.sh relies on it
RUN apt update && apt install -y sudo && rm -rf /var/lib/apt/lists/*
# Install dependencies
COPY ./install-dependencies.sh /
RUN /install-dependencies.sh \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /opt/workbench-script
ENTRYPOINT sh ./deploy-workbench.sh
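
A build-and-run sketch for this image (the tag workbench-iso-builder is a hypothetical name, not defined by this change); the privileged flag and the two bind mounts mirror what docker-compose.yaml below declares, and are needed because deploy-workbench.sh loop-mounts image files:

# build the builder image from the repository root
docker build -t workbench-iso-builder .
# run the ISO build; generated artifacts end up in ./iso on the host
docker run --rm --privileged \
  -v "$(pwd):/opt/workbench-script:ro" \
  -v "$(pwd)/iso:/opt/workbench-script/iso:rw" \
  workbench-iso-builder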

deploy-workbench.sh

@ -197,7 +197,15 @@ create_persistence_partition() {
tmp_rw_mount="/tmp/${rw_img_name}"
${SUDO} umount -f -l "${tmp_rw_mount}" >/dev/null 2>&1 || true
mkdir -p "${tmp_rw_mount}"
${SUDO} mount "$(pwd)/${rw_img_path}" "${tmp_rw_mount}"
# if rw_img_path is relative, prefix it with the current directory; otherwise use it as-is
# TODO solve this situation better
# thanks https://unix.stackexchange.com/questions/256434/check-if-shell-variable-contains-an-absolute-path
if [ "${rw_img_path}" = "${rw_img_path#/}" ]; then
mount_rw_img_path="$(pwd)/${rw_img_path}"
else
mount_rw_img_path="${rw_img_path}"
fi
${SUDO} mount "${mount_rw_img_path}" "${tmp_rw_mount}"
${SUDO} mkdir -p "${tmp_rw_mount}"
if [ ! -f "settings.ini" ]; then
${SUDO} cp -v settings.ini.example settings.ini
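
The test above uses plain POSIX parameter expansion: ${rw_img_path#/} strips one leading slash if present, so the string only changes for absolute paths. A standalone sketch of the same idiom:

# returns success only when the argument starts with '/'
path_is_absolute() {
    [ "${1}" != "${1#/}" ]
}
path_is_absolute "/srv/live.img" && echo "absolute"   # prints absolute
path_is_absolute "iso/live.img" || echo "relative"    # prints relative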
@ -324,13 +332,11 @@ END
echo 'Install requirements'
# Install debian requirements
# TODO converge more here with install-dependencies.sh
apt-get install -y --no-install-recommends \
sudo locales keyboard-configuration console-setup qrencode \
python-is-python3 python3 python3-dev python3-pip pipenv \
dmidecode smartmontools hwinfo pciutils lshw nfs-common inxi < /dev/null
# Install lshw B02.19 utility using backports (DEPRECATED in Debian 12)
#apt install -y -t ${VERSION_CODENAME}-backports lshw < /dev/null
dmidecode smartmontools hwinfo pciutils lshw nfs-common < /dev/null
echo 'Install sanitize requirements'
@ -432,8 +438,10 @@ if [ -z "${DEBUG:-}" ]; then
fi
# cleanup bash history
history -c
# https://stackoverflow.com/questions/3199893/howto-detect-bash-from-shell-script
if [ "\${BASH_VERSION}" ]; then
history -c
fi
CHROOT
}
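
The guard added above keeps history -c from failing when the chroot shell is dash, where history is not a builtin; only bash sets BASH_VERSION. The same pattern in isolation (a sketch, with a :- default so it also survives set -u):

if [ "${BASH_VERSION:-}" ]; then
    history -c    # clear shell history, only meaningful under bash
fi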
@ -474,31 +482,6 @@ prepare_chroot_env() {
prepare_app
}
# thanks https://willhaley.com/blog/custom-debian-live-environment/
install_requirements() {
# Install requirements
eval "${decide_if_update_str}" && decide_if_update
image_deps='debootstrap
squashfs-tools
xorriso
mtools
dosfstools'
# secureboot:
# -> extra src https://wiki.debian.org/SecureBoot/
# -> extra src https://wiki.debian.org/SecureBoot/VirtualMachine
# -> extra src https://wiki.debian.org/GrubEFIReinstall
bootloader_deps='isolinux
syslinux-efi
grub-pc-bin
grub-efi-amd64-bin
ovmf
grub-efi-amd64-signed'
${SUDO} apt-get install -y \
${image_deps} \
${bootloader_deps}
}
# thanks https://willhaley.com/blog/custom-debian-live-environment/
create_base_dirs() {
mkdir -p "${ISO_PATH}"
@ -523,7 +506,7 @@ detect_user() {
echo "ERROR: this script needs root or sudo permissions (current user is not part of sudo group)"
exit 1
# detect user with sudo or already on sudo src https://serverfault.com/questions/568627/can-a-program-tell-it-is-being-run-under-sudo/568628#568628
elif [ ! "\${userid}" = 0 ] || [ -n "\${SUDO_USER}" ]; then
elif [ ! "\${userid}" = 0 ] || [ -n "\${SUDO_USER:-}" ]; then
SUDO='sudo'
# jump to current dir where the script is so relative links work
cd "\$(dirname "\${0}")"
@ -532,7 +515,7 @@ detect_user() {
# detect pure root
elif [ "\${userid}" = 0 ]; then
SUDO=''
ISO_PATH="/opt/workbench"
ISO_PATH="/opt/workbench-script/iso"
fi
}
END
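
A reduced sketch of the branch detect_user takes (heredoc escaping dropped; userid is assumed to hold the output of id -u, and the :- default keeps an unset SUDO_USER from aborting a shell running with set -u):

if [ ! "${userid}" = 0 ] || [ -n "${SUDO_USER:-}" ]; then
    SUDO='sudo'    # unprivileged user in the sudo group, or root reached via sudo
elif [ "${userid}" = 0 ]; then
    SUDO=''        # pure root: no sudo prefix needed
fi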
@ -553,7 +536,7 @@ main() {
create_base_dirs
install_requirements
echo 'Assuming that you already executed ./install-dependencies.sh'
prepare_chroot_env

docker-compose.yaml (new file, 13 lines)

@ -0,0 +1,13 @@
services:
build-iso:
init: true
build: .
# this is needed to mount inside docker
privileged: true
# uncomment next two lines to test this
environment:
- DEBUG=true
volumes:
- .:/opt/workbench-script:ro
- ./iso:/opt/workbench-script/iso:rw
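
A minimal way to exercise this service (assuming Docker with the compose plugin is installed; build-iso and the ./iso output directory are defined in the file above):

# builds the image from the Dockerfile and runs the ISO build; artifacts land in ./iso
docker compose up --build build-iso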

install-dependencies.sh

@ -1,6 +1,6 @@
#!/bin/sh
# Copyright (c) 2024 Pedro <copyright@cas.cat>
# Copyright (c) 2024 pangea.org Associació Pangea - Coordinadora Comunicació per a la Cooperació
# SPDX-License-Identifier: AGPL-3.0-or-later
set -e
@ -9,7 +9,52 @@ set -u
set -x
main() {
sudo apt install qrencode smartmontools lshw hwinfo dmidecode inxi
sudo apt update
# system dependencies
host_deps='sudo'
# thanks https://stackoverflow.com/questions/23513045/how-to-check-if-a-process-is-running-inside-docker-container
if [ ! "${DOCKER_BUILD}" ]; then
host_deps="${host_deps} qemu-system"
fi
# workbench deploy/builder image dependencies
image_deps='debootstrap
squashfs-tools
xorriso
mtools
dosfstools'
# workbench deploy/builder bootloader dependencies
# thanks https://willhaley.com/blog/custom-debian-live-environment/
# secureboot:
# -> extra src https://wiki.debian.org/SecureBoot/
# -> extra src https://wiki.debian.org/SecureBoot/VirtualMachine
# -> extra src https://wiki.debian.org/GrubEFIReinstall
bootloader_deps='isolinux
syslinux-efi
syslinux-common
grub-pc-bin
grub-efi-amd64-bin
ovmf
shim-signed
grub-efi-amd64-signed'
# workbench-script client dependencies
client_deps='smartmontools
lshw
hwinfo
dmidecode
inxi
python3
pipenv'
# install all
sudo apt install --no-install-recommends -y \
${host_deps} \
${image_deps} \
${bootloader_deps} \
${client_deps}
}
main "${@}"

pxe/Makefile

@ -1,2 +1,8 @@
.PHONY: test_pxe
test_pxe:
qemu-system-x86_64 -m 1G -boot n -netdev user,id=mynet0,tftp=/srv/pxe-tftp,bootfile=pxelinux.0 -device virtio-net,netdev=mynet0
# TODO not very convinced on having this, but ok right now
.PHONY: install_pxe_debug
install_pxe_debug:
DEBUG=true ./install-pxe.sh
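
Typical use of the two targets (a sketch; /srv/pxe-tftp is the TFTP root hard-coded in the test_pxe recipe and presumably populated by install-pxe.sh):

# boot a throwaway VM that PXE-boots against the local TFTP root
make test_pxe
# re-run the PXE install script with debug output enabled
make install_pxe_debug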

settings.ini.example

@ -4,10 +4,6 @@ url = http://localhost:8000/api/v1/snapshot/
# sample token that works with default deployment such as the previous two urls
token = 5018dd65-9abd-4a62-8896-80f34ac66150
# Idhub
# wb_sign_token = "27de6ad7-cee2-4fe8-84d4-c7eea9c969c8"
# url_wallet = "http://localhost"
# path = /path/to/save
# device = your_device_name
# # erase = basic

workbench_script.py

@ -16,17 +16,21 @@ import logging
from datetime import datetime
SNAPSHOT_BASE = {
'timestamp': str(datetime.now()),
'type': 'Snapshot',
'uuid': str(uuid.uuid4()),
'software': "workbench-script",
'version': "0.0.1",
'operator_id': "",
'data': {},
'erase': []
}
## Legacy Functions ##
def convert_to_legacy_snapshot(snapshot):
snapshot["sid"] = str(uuid.uuid4()).split("-")[0]
snapshot["software"] = "workbench-script"
snapshot["version"] = "dev"
snapshot["schema_api"] = "1.0.0"
snapshot["settings_version"] = "No Settings Version (NaN)"
snapshot["timestamp"] = snapshot["timestamp"].replace(" ", "T")
snapshot["data"]["smart"] = snapshot["data"]["disks"]
snapshot["data"]["lshw"] = json.loads(snapshot["data"]["lshw"])
snapshot["data"].pop("disks")
snapshot.pop("erase")
## End Legacy Functions ##
## Utility Functions ##
@ -46,7 +50,6 @@ def exec_cmd(cmd):
logger.info(_('Running command `%s`'), cmd)
return os.popen(cmd).read()
@logs
def exec_cmd_erase(cmd):
logger.info(_('Running command `%s`'), cmd)
@ -56,33 +59,15 @@ def exec_cmd_erase(cmd):
## End Utility functions ##
## Legacy Functions ##
def convert_to_legacy_snapshot(snapshot):
snapshot["sid"] = str(uuid.uuid4()).split("-")[1]
snapshot["software"] = "workbench-script"
snapshot["version"] = "dev"
snapshot["schema_api"] = "1.0.0"
snapshot["settings_version"] = "No Settings Version (NaN)"
snapshot["timestamp"] = snapshot["timestamp"].replace(" ", "T")
snapshot["data"]["smart"] = json.loads(snapshot["data"]["smartctl"])
snapshot["data"].pop("smartctl")
snapshot["data"].pop("inxi")
snapshot.pop("operator_id")
snapshot.pop("erase")
lshw = 'sudo lshw -json'
hwinfo = 'sudo hwinfo --reallyall'
lspci = 'sudo lspci -vv'
data = {
'lshw': exec_cmd(lshw) or "{}",
'hwinfo': exec_cmd(hwinfo),
'lspci': exec_cmd(lspci)
}
snapshot['data'].update(data)
## End Legacy Functions ##
SNAPSHOT_BASE = {
'timestamp': str(datetime.now()),
'type': 'Snapshot',
'uuid': str(uuid.uuid4()),
'software': "workbench-script",
'version': "0.0.1",
'data': {},
'erase': []
}
## Command Functions ##
@ -252,7 +237,7 @@ def smartctl(all_disks, disk=None):
data = exec_smart(disk['name'])
data_list.append(data)
return json.dumps(data_list)
return data_list
## End Command Functions ##
@ -260,13 +245,16 @@ def smartctl(all_disks, disk=None):
# TODO allow selection
# TODO make it go faster
def get_data(all_disks):
lshw = 'sudo lshw -json'
hwinfo = 'sudo hwinfo --reallyall'
dmidecode = 'sudo dmidecode'
inxi = "sudo inxi -afmnGEMABD -x 3 --edid --output json --output-file print"
lspci = 'sudo lspci -vv'
data = {
'smartctl': smartctl(all_disks),
'lshw': exec_cmd(lshw) or "{}",
'disks': smartctl(all_disks),
'hwinfo': exec_cmd(hwinfo),
'dmidecode': exec_cmd(dmidecode),
'inxi': exec_cmd(inxi)
'lspci': exec_cmd(lspci)
}
return data
@ -278,20 +266,20 @@ def gen_snapshot(all_disks):
return snapshot
def save_snapshot_in_disk(snapshot, path, snap_uuid):
def save_snapshot_in_disk(snapshot, path):
snapshot_path = os.path.join(path, 'snapshots')
filename = "{}/{}_{}.json".format(
snapshot_path,
datetime.now().strftime("%Y%m%d-%H_%M_%S"),
snap_uuid)
snapshot['uuid'])
try:
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
logger.info(_("Created snapshots directory at '%s'"), snapshot_path)
with open(filename, "w") as f:
f.write(snapshot)
f.write(json.dumps(snapshot))
logger.info(_("Snapshot written in path '%s'"), filename)
except Exception as e:
try:
@ -299,59 +287,18 @@ def save_snapshot_in_disk(snapshot, path, snap_uuid):
fallback_filename = "{}/{}_{}.json".format(
path,
datetime.now().strftime("%Y%m%d-%H_%M_%S"),
snap_uuid)
snapshot['uuid'])
with open(fallback_filename, "w") as f:
f.write(snapshot)
f.write(json.dumps(snapshot))
logger.warning(_("Snapshot written in fallback path '%s'"), fallback_filename)
except Exception as e:
logger.error(_("Could not save snapshot locally. Reason: Failed to write in fallback path:\n %s"), e)
def send_to_sign_credential(snapshot, token, url):
headers = {
"Authorization": f"Bearer {token}",
"Content-Type": "application/json"
}
try:
cred = {
"type": "DeviceSnapshotV1",
"save": False,
"data": {
"operator_id": snapshot["operator_id"],
"dmidecode": snapshot["data"]["dmidecode"],
"inxi": snapshot["data"]["inxi"],
"smartctl": snapshot["data"]["smartctl"],
"uuid": snapshot["uuid"],
}
}
data = json.dumps(cred).encode('utf-8')
request = urllib.request.Request(url, data=data, headers=headers)
with urllib.request.urlopen(request) as response:
status_code = response.getcode()
response_text = response.read().decode('utf-8')
if 200 <= status_code < 300:
logger.info(_("Credential successfully signed"))
res = json.loads(response_text)
if res.get("status") == "success" and res.get("data"):
return res["data"]
return json.dumps(snapshot)
else:
logger.error(_("Credential cannot signed in '%s'"), url)
return json.dumps(snapshot)
except Exception as e:
logger.error(_("Credential not remotely builded to URL '%s'. Do you have internet? Is your server up & running? Is the url token authorized?\n %s"), url, e)
return json.dumps(snapshot)
# TODO sanitize url, if url is like this, it fails
# url = 'http://127.0.0.1:8000/api/snapshot/'
def send_snapshot_to_devicehub(snapshot, token, url, ev_uuid, legacy):
def send_snapshot_to_devicehub(snapshot, token, url, legacy):
url_components = urllib.parse.urlparse(url)
ev_path = f"evidence/{ev_uuid}"
ev_path = "evidence/{}".format(snapshot["uuid"])
components = (url_components.scheme, url_components.netloc, ev_path, '', '', '')
ev_url = urllib.parse.urlunparse(components)
# apt install qrencode
@ -361,7 +308,7 @@ def send_snapshot_to_devicehub(snapshot, token, url, ev_uuid, legacy):
"Content-Type": "application/json"
}
try:
data = snapshot.encode('utf-8')
data = json.dumps(snapshot).encode('utf-8')
request = urllib.request.Request(url, data=data, headers=headers)
with urllib.request.urlopen(request) as response:
status_code = response.getcode()
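
For manual testing, roughly the same POST can be reproduced with curl (a sketch; the URL and token are the sample values from settings.ini.example, and snapshot.json stands for a file written by save_snapshot_in_disk):

curl -X POST 'http://localhost:8000/api/v1/snapshot/' \
    -H 'Authorization: Bearer 5018dd65-9abd-4a62-8896-80f34ac66150' \
    -H 'Content-Type: application/json' \
    --data-binary @snapshot.json
# on success the script prints the evidence URL (http://localhost:8000/evidence/<uuid>)
# and renders it as a QR code with qrencode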
@ -388,10 +335,15 @@ def send_snapshot_to_devicehub(snapshot, token, url, ev_uuid, legacy):
print(exec_cmd(qr))
print(f"url: {ev_url}")
else:
logger.error(_("Snapshot %s not remotely sent to URL '%s'. Server responded with error:\n %s"), ev_uuid, url, response_text)
logger.error(_("Snapshot %s could not be sent to URL '%s'"), snapshot["uuid"], url)
# TODO review all the try-except thing here; maybe the try inside legacy does not make sense anymore
except urllib.error.HTTPError as e:
error_details = e.read().decode('utf-8') # Get the error response body
logger.error(_("Snapshot %s not remotely sent to URL '%s'. Server responded with error:\n %s"),
snapshot["uuid"], url, error_details)
except Exception as e:
logger.error(_("Snapshot not remotely sent to URL '%s'. Do you have internet? Is your server up & running? Is the url token authorized?\n %s"), url, e)
logger.error(_("Snapshot %s not remotely sent to URL '%s'. Do you have internet? Is your server up & running? Is the url token authorized?\n %s"), snapshot["uuid"], url, e)
def load_config(config_file="settings.ini"):
@ -413,12 +365,10 @@ def load_config(config_file="settings.ini"):
device = config.get('settings', 'device', fallback=None)
erase = config.get('settings', 'erase', fallback=None)
legacy = config.get('settings', 'legacy', fallback=None)
url_wallet = config.get('settings', 'url_wallet', fallback=None)
wb_sign_token = config.get('settings', 'wb_sign_token', fallback=None)
else:
logger.error(_("Config file '%s' not found. Using default values."), config_file)
path = os.path.join(os.getcwd())
url, token, device, erase, legacy, url_wallet, wb_sign_token = (None,)*7
url, token, device, erase, legacy = None, None, None, None, None
return {
'path': path,
@ -426,9 +376,7 @@ def load_config(config_file="settings.ini"):
'token': token,
'device': device,
'erase': erase,
'legacy': legacy,
'wb_sign_token': wb_sign_token,
'url_wallet': url_wallet
'legacy': legacy
}
def parse_args():
@ -491,39 +439,19 @@ def main():
all_disks = get_disks()
snapshot = gen_snapshot(all_disks)
snap_uuid = snapshot["uuid"]
if config['erase'] and config['device'] and not legacy:
if config['erase'] and config['device'] and not config.get("legacy"):
snapshot['erase'] = gen_erase(all_disks, config['erase'], user_disk=config['device'])
elif config['erase'] and not legacy:
elif config['erase'] and not config.get("legacy"):
snapshot['erase'] = gen_erase(all_disks, config['erase'])
if legacy:
convert_to_legacy_snapshot(snapshot)
snapshot = json.dumps(snapshot)
else:
url_wallet = config.get("url_wallet")
wb_sign_token = config.get("wb_sign_token")
if wb_sign_token:
tk = wb_sign_token.encode("utf8")
snapshot["operator_id"] = hashlib.sha3_256(tk).hexdigest()
if url_wallet and wb_sign_token:
snapshot = send_to_sign_credential(snapshot, wb_sign_token, url_wallet)
else:
snapshot = json.dumps(snapshot)
save_snapshot_in_disk(snapshot, config['path'], snap_uuid)
save_snapshot_in_disk(snapshot, config['path'])
if config['url']:
send_snapshot_to_devicehub(
snapshot,
config['token'],
config['url'],
snap_uuid,
legacy
)
send_snapshot_to_devicehub(snapshot, config['token'], config['url'], legacy)
logger.info(_("END"))