Compare commits

..

5 commits

Author SHA1 Message Date
Cayo Puigdefabregas 629a2a172e change EreuseWorkbench for workbench-script 2024-10-21 11:30:05 +02:00
pedro 9aa156628b prepare a first integration of usody-sanitize
in session with cayo we see this just works for Monday
2024-10-18 18:32:07 +02:00
Cayo Puigdefabregas efe028aea1 deactivate confirm 2024-10-18 13:05:44 +02:00
Cayo Puigdefabregas 731567b8ea use of sanitize in workbench-script 2024-10-18 12:16:00 +02:00
pedro be28ec2a1f move alternate erase functions to docs 2024-10-18 10:54:45 +02:00
13 changed files with 284 additions and 530 deletions

View file

@ -1,16 +0,0 @@
FROM debian:bookworm-slim
# detect DOCKER_BUILD condition/situation in install script
ENV DOCKER_BUILD true
# pre install sudo
RUN apt update && apt install sudo && rm -rf /var/lib/apt/lists/*
# Install dependencies
COPY ./install-dependencies.sh /
RUN /install-dependencies.sh \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /opt/workbench-script
ENTRYPOINT sh ./deploy-workbench.sh

View file

@ -44,14 +44,20 @@ boot_iso_uefi_secureboot:
-drive file=deploy/iso/workbench_debug.iso,cache=none,if=virtio,format=raw,index=0,media=disk \
-boot menu=on
# when you change something, you need to refresh it this way:
# rebuild the live ISO, then reinstall its artifacts into the PXE tree
regenerate_pxe_install:
	./deploy-workbench.sh
	pxe/install-pxe.sh
# Smoke-test usody-sanitize: create three scratch disk images and boot the
# debug ISO in QEMU with all three attached as virtio drives.
test_usody_sanitize:
	# TODO adapt settings accordingly for this test
	# ERASE=y ./deploy-workbench.sh
	# create 3 disks for testing
	qemu-img create -f raw test_sanitize_disk1.img 1G
	qemu-img create -f raw test_sanitize_disk2.img 1G
	qemu-img create -f raw test_sanitize_disk3.img 1G
	# sudo needed for KVM acceleration; user-mode networking on nic1
	sudo qemu-system-x86_64 \
		-enable-kvm -m 2G -vga qxl -netdev user,id=wan -device virtio-net,netdev=wan,id=nic1 \
		-drive format=raw,file=iso/workbench_debug.iso,cache=none,if=virtio \
		-drive format=raw,file=test_sanitize_disk1.img,cache=none,if=virtio \
		-drive format=raw,file=test_sanitize_disk2.img,cache=none,if=virtio \
		-drive format=raw,file=test_sanitize_disk3.img,cache=none,if=virtio
# Regenerate Spanish locale files: run the es_gen_po target, then es_gen_mo.
es_gen:
	$(MAKE) es_gen_po
	$(MAKE) es_gen_mo
es_gen_po:
cp locale/es/LC_MESSAGES/messages.po locale/es/LC_MESSAGES/messages.pot.bak

View file

@ -197,22 +197,14 @@ create_persistence_partition() {
tmp_rw_mount="/tmp/${rw_img_name}"
${SUDO} umount -f -l "${tmp_rw_mount}" >/dev/null 2>&1 || true
mkdir -p "${tmp_rw_mount}"
# detect relative path, else absolute path
# TODO solve this situation better
# thanks https://unix.stackexchange.com/questions/256434/check-if-shell-variable-contains-an-absolute-path
if [ "${rw_img_path}" = "${rw_img_path#/}" ]; then
mount_rw_img_path="$(pwd)/${rw_img_path}"
else
mount_rw_img_path="${rw_img_path}"
fi
${SUDO} mount "${mount_rw_img_path}" "${tmp_rw_mount}"
${SUDO} mount "$(pwd)/${rw_img_path}" "${tmp_rw_mount}"
${SUDO} mkdir -p "${tmp_rw_mount}"
if [ ! -f "settings.ini" ]; then
${SUDO} cp -v settings.ini.example settings.ini
echo "WARNING: settings.ini was not there, settings.ini.example was copied, this only happens once"
if [ -f "settings.ini" ]; then
${SUDO} cp -v settings.ini "${tmp_rw_mount}/settings.ini"
else
echo "ERROR: settings.ini does not exist yet, cannot read config from there. You can take inspiration with file settings.ini.example"
exit 1
fi
${SUDO} cp -v settings.ini "${tmp_rw_mount}/settings.ini"
${SUDO} umount "${tmp_rw_mount}"
uuid="$(blkid "${rw_img_path}" | awk '{ print $3; }')"
@ -261,27 +253,6 @@ END2
END
)"
# thanks https://wiki.debian.org/Keyboard
chroot_kbd_conf_str="$(cat<<END
chroot_kbd_conf() {
###################
# configure keyboard
cat > /etc/default/keyboard <<END2
# KEYBOARD CONFIGURATION FILE
# generated by deploy-workbench.sh
# Consult the keyboard(5) manual page.
XKBMODEL="pc105"
XKBLAYOUT="\${CUSTOM_LANG}"
BACKSPACE="guess"
END2
}
END
)"
prepare_app() {
# prepare app during prepare_chroot_env
workbench_dir="${ISO_PATH}/chroot/opt/workbench"
@ -293,6 +264,8 @@ prepare_app() {
# startup script execution
cat > "${ISO_PATH}/chroot/root/.profile" <<END
# pipx path for usody-sanitize
PATH="${PATH}:/root/.local/bin"
if [ -f /tmp/workbench_lock ]; then
return 0
else
@ -309,15 +282,13 @@ if [ "\${nfs_host}" ]; then
mount --bind /run/live/medium /mnt
# debian live nfs path is readonly, do a trick
# to make snapshots subdir readwrite
mount -v \${nfs_host}:/snapshots /run/live/medium/snapshots
mount \${nfs_host}:/snapshots /run/live/medium/snapshots
# reload mounts on systemd
systemctl daemon-reload
fi
# clearly specify the right working directory, used in the python script as os.getcwd()
cd /mnt
#pipenv run python /opt/workbench/workbench-script.py --config /mnt/settings.ini
# works meanwhile this project is vanilla python
python /opt/workbench/workbench-script.py --config /mnt/settings.ini
pipenv run python /opt/workbench/workbench-script.py --config /mnt/settings.ini
stty echo
set +x
@ -332,15 +303,19 @@ END
echo 'Install requirements'
# Install debian requirements
# TODO converge more here with install-dependencies.sh
apt-get install -y --no-install-recommends \
sudo locales keyboard-configuration console-setup qrencode \
sudo locales \
python-is-python3 python3 python3-dev python3-pip pipenv \
dmidecode smartmontools hwinfo pciutils lshw nfs-common inxi < /dev/null
dmidecode smartmontools hwinfo pciutils lshw nfs-common pipx < /dev/null
echo 'Install sanitize requirements'
pipx install usody-sanitize
# Install sanitize debian requirements
# Install lshw B02.19 utility using backports (DEPRECATED in Debian 12)
#apt install -y -t ${VERSION_CODENAME}-backports lshw < /dev/null
echo 'Install usody-sanitize requirements'
# Install usody-sanitize debian requirements
apt-get install -y --no-install-recommends \
hdparm nvme-cli < /dev/null
@ -389,15 +364,8 @@ ${install_app_str}
# thanks src https://serverfault.com/questions/362903/how-do-you-set-a-locale-non-interactively-on-debian-ubuntu
export LANG=${LANG}
export LC_ALL=${LANG}
echo "${MYLOCALE}" > /etc/locale.gen
# Generate the locale
locale-gen
# feeds /etc/default/locale for the shell env var
update-locale LANG=${LANG} LC_ALL=${LANG}
# this is a high level command that does locale-gen and update-locale altogether
# but it is too interactive
#dpkg-reconfigure --frontend=noninteractive locales
# DEBUG
dpkg-reconfigure --frontend=noninteractive locales
locale -a
# Autologin root user
@ -422,9 +390,6 @@ apt-get install -y --no-install-recommends \
< /dev/null
${chroot_netdns_conf_str}
CUSTOM_LANG=${CUSTOM_LANG}
${chroot_kbd_conf_str}
chroot_kbd_conf
# Set up root user
# this is the root password
@ -438,27 +403,13 @@ if [ -z "${DEBUG:-}" ]; then
fi
# cleanup bash history
# https://stackoverflow.com/questions/3199893/howto-detect-bash-from-shell-script
if [ "\${BASH_VERSION}" ]; then
history -c
fi
history -c
CHROOT
}
prepare_chroot_env() {
CUSTOM_LANG="${CUSTOM_LANG:-es}"
case "${CUSTOM_LANG}" in
es)
export LANG="es_ES.UTF-8"
export MYLOCALE="${LANG} UTF-8"
;;
en)
export LANG="en_US.UTF-8"
;;
*)
echo "ERROR: CUSTOM_LANG not supported. Available: es"
exit 1
esac
LANG="${CUSTOM_LANG:-es_ES.UTF-8}"
# version of debian the bootstrap is going to build
# if no VERSION_CODENAME is specified we assume that the bootstrap is going to
# be build with the same version of debian being executed because some files
@ -482,6 +433,30 @@ prepare_chroot_env() {
prepare_app
}
# thanks https://willhaley.com/blog/custom-debian-live-environment/
install_requirements() {
# Install requirements
eval "${decide_if_update_str}" && decide_if_update
image_deps='debootstrap
squashfs-tools
xorriso
mtools
dosfstools'
# secureboot:
# -> extra src https://wiki.debian.org/SecureBoot/
# -> extra src https://wiki.debian.org/SecureBoot/VirtualMachine
# -> extra src https://wiki.debian.org/GrubEFIReinstall
bootloader_deps='isolinux
syslinux-efi
grub-pc-bin
grub-efi-amd64-bin
ovmf
grub-efi-amd64-signed'
${SUDO} apt-get install -y \
${image_deps} \
${bootloader_deps}
}
# thanks https://willhaley.com/blog/custom-debian-live-environment/
create_base_dirs() {
mkdir -p "${ISO_PATH}"
@ -506,7 +481,7 @@ detect_user() {
echo "ERROR: this script needs root or sudo permissions (current user is not part of sudo group)"
exit 1
# detect user with sudo or already on sudo src https://serverfault.com/questions/568627/can-a-program-tell-it-is-being-run-under-sudo/568628#568628
elif [ ! "\${userid}" = 0 ] || [ -n "\${SUDO_USER:-}" ]; then
elif [ ! "\${userid}" = 0 ] || [ -n "\${SUDO_USER}" ]; then
SUDO='sudo'
# jump to current dir where the script is so relative links work
cd "\$(dirname "\${0}")"
@ -515,7 +490,7 @@ detect_user() {
# detect pure root
elif [ "\${userid}" = 0 ]; then
SUDO=''
ISO_PATH="/opt/workbench-script/iso"
ISO_PATH="/opt/workbench"
fi
}
END
@ -536,7 +511,7 @@ main() {
create_base_dirs
echo 'Assuming that you already executed ./install-dependencies.sh'
install_requirements
prepare_chroot_env

View file

@ -1,13 +0,0 @@
services:
build-iso:
init: true
build: .
# this is needed to mount inside docker
privileged: true
# uncomment next two lines to test this
environment:
- DEBUG=true
volumes:
- .:/opt/workbench-script:ro
- ./iso:/opt/workbench-script/iso:rw

112
docs/dev-es.md Normal file
View file

@ -0,0 +1,112 @@
## borrado minimalista
Un enfoque inicial que teníamos para el borrado de disco eran las siguientes funciones; lo hemos descartado para usar una herramienta más avanzada para el borrado: [usody-sanitize](https://github.com/usody/sanitize/).
```python
## Xavier Functions ##
def erase_basic(disk):
    """
    Basic Erasure
    https://tsapps.nist.gov/publication/get_pdf.cfm?pub_id=917935

    Settings for basic data erasure using the shred Linux command.
    A software-based, fast, non-100%-secure way of erasing data storage.

    Performs 1 pass overwriting one round using all zeros.
    Compliant with NIST SP 800-88.

    In settings appear:
    WB_ERASE = EraseBasic
    WB_ERASE_STEPS = 1
    WB_ERASE_LEADING_ZEROS = False
    """
    # Single verbose overwrite pass; wrapped in a list so all erase
    # functions return the same shape (list of command results).
    cmd = f'shred -vn 1 /dev/{disk}'
    return [exec_cmd_erase(cmd)]
def erase_baseline(disk):
    """
    Baseline Secure Erasure

    Settings for advanced data erasure using the badblocks Linux software.
    A secured way of erasing data storages, erases hidden areas,
    checking the erase sector by sector.

    Performs 1 pass overwriting each sector with zeros and a final
    verification. Compliant with HMG Infosec Standard 5 Baseline.

    In settings appear:
    WB_ERASE = EraseSectors
    WB_ERASE_STEPS = 1
    WB_ERASE_LEADING_ZEROS = True

    WB_ERASE_1_METHOD = EraseBasic
    WB_ERASE_1_STEP_TYPE = 0
    WB_ERASE_2_METHOD = EraseSectors
    WB_ERASE_2_STEP_TYPE = 1
    """
    result = []
    # Step 1: zero-fill pass (-z adds a final zeroing write, -n 0 skips
    # random passes).
    cmd = f'shred -zvn 0 /dev/{disk}'
    result.append(exec_cmd_erase(cmd))
    # Step 2: destructive write-mode badblocks run over every sector.
    cmd = f'badblocks -st random -w /dev/{disk}'
    result.append(exec_cmd_erase(cmd))
    return result
def erase_enhanced(disk):
    """
    Enhanced Secure Erasure

    Settings for advanced data erasure using the badblocks Linux software.
    A secured way of erasing data storages, erases hidden areas,
    checking the erase sector by sector.

    Performs 3 passes overwriting every sector with zeros and ones,
    and a final verification. Compliant with HMG Infosec Standard 5 Enhanced.

    In settings appear:
    WB_ERASE = EraseSectors
    WB_ERASE_LEADING_ZEROS = True

    WB_ERASE_1_METHOD = EraseBasic
    WB_ERASE_1_STEP_TYPE = 1
    WB_ERASE_2_METHOD = EraseBasic
    WB_ERASE_2_STEP_TYPE = 0
    WB_ERASE_3_METHOD = EraseSectors
    WB_ERASE_3_STEP_TYPE = 1
    """
    result = []
    # Pass 1: one random overwrite pass.
    cmd = f'shred -vn 1 /dev/{disk}'
    result.append(exec_cmd_erase(cmd))
    # Pass 2: zero-fill pass.
    cmd = f'shred -zvn 0 /dev/{disk}'
    result.append(exec_cmd_erase(cmd))
    ## I think it should really be like this (3 passes plus an extra one
    ## writing zeros):
    # shred -zvn 3 /dev/{disk}
    # I am also not sure that badblocks performs a verification step.
    cmd = f'badblocks -st random -w /dev/{disk}'
    result.append(exec_cmd_erase(cmd))
    return result
## End Xavier Functions ##
## Erase Functions ##
def ata_secure_erase_null(disk):
    # Firmware-level ATA Secure Erase with a NULL user password
    # (drive's built-in standard erase).
    cmd_baseline = f'hdparm --user-master u --security-erase NULL /dev/{disk}'
    return [exec_cmd_erase(cmd_baseline)]
def ata_secure_erase_enhanced(disk):
    # Firmware-level ATA Enhanced Secure Erase (also covers reallocated
    # sectors on supporting drives).
    cmd_enhanced = f'hdparm --user-master u --security-erase-enhanced /dev/{disk}'
    return [exec_cmd_erase(cmd_enhanced)]
def nvme_secure_erase(disk):
    # NVMe format with Secure Erase Setting 1 (user-data erase).
    cmd_encrypted = f'nvme format /dev/{disk} --ses=1'
    return [exec_cmd_erase(cmd_encrypted)]
## End Erase Functions ##
```

View file

@ -1,6 +1,6 @@
#!/bin/sh
# Copyright (c) 2024 pangea.org Associació Pangea - Coordinadora Comunicació per a la Cooperació
# Copyright (c) 2024 Pedro <copyright@cas.cat>
# SPDX-License-Identifier: AGPL-3.0-or-later
set -e
@ -9,53 +9,7 @@ set -u
set -x
main() {
sudo apt update
# system dependencies
host_deps='sudo'
# thanks https://stackoverflow.com/questions/23513045/how-to-check-if-a-process-is-running-inside-docker-container
if [ ! "${DOCKER_BUILD}" ]; then
host_deps="${host_deps} qemu-system"
fi
# workbench deploy/builder image dependencies
image_deps='debootstrap
squashfs-tools
xorriso
mtools
dosfstools'
# workbench deploy/builder bootloader dependencies
# thanks https://willhaley.com/blog/custom-debian-live-environment/
# secureboot:
# -> extra src https://wiki.debian.org/SecureBoot/
# -> extra src https://wiki.debian.org/SecureBoot/VirtualMachine
# -> extra src https://wiki.debian.org/GrubEFIReinstall
bootloader_deps='isolinux
syslinux-efi
syslinux-common
grub-pc-bin
grub-efi-amd64-bin
ovmf
shim-signed
grub-efi-amd64-signed'
# workbench-script client dependencies
client_deps='smartmontools
lshw
hwinfo
dmidecode
inxi
python3
pipenv
qrencode'
# install all
sudo apt install --no-install-recommends -y \
${host_deps} \
${image_deps} \
${bootloader_deps} \
${client_deps}
sudo apt install smartmontools lshw hwinfo dmidecode
}
main "${@}"

Binary file not shown.

View file

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2024-11-08 18:25+0100\n"
"POT-Creation-Date: 2024-10-15 21:15+0200\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@ -17,19 +17,19 @@ msgstr ""
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
#: workbench-script.py:49 workbench-script.py:54
#: workbench-script.py:48 workbench-script.py:53
msgid "Running command `%s`"
msgstr "Ejecutando comando `%s`"
#: workbench-script.py:279
#: workbench-script.py:284
msgid "Created snapshots directory at '%s'"
msgstr "Creado directorio de snapshots en '%s'"
#: workbench-script.py:282
#: workbench-script.py:287
msgid "Snapshot written in path '%s'"
msgstr "Snapshot escrito en ruta '%s'"
#: workbench-script.py:285
#: workbench-script.py:290
msgid ""
"Attempting to save file in actual path. Reason: Failed to write in snapshots "
"directory:\n"
@ -39,11 +39,11 @@ msgstr ""
"escribir en el directorio de snapshots:\n"
" %s."
#: workbench-script.py:292
#: workbench-script.py:297
msgid "Snapshot written in fallback path '%s'"
msgstr "Snapshot escrito en ruta alternativa '%s'"
#: workbench-script.py:294
#: workbench-script.py:299
msgid ""
"Could not save snapshot locally. Reason: Failed to write in fallback path:\n"
" %s"
@ -52,53 +52,49 @@ msgstr ""
"alternativa:\n"
" %s"
#: workbench-script.py:317
#: workbench-script.py:316
msgid "Snapshot successfully sent to '%s'"
msgstr "Snapshot enviado con éxito a '%s'"
#: workbench-script.py:335
msgid "Snapshot %s could not be sent to URL '%s'"
msgstr "Snapshot %s no se pudo enviar a la URL '%s'"
#: workbench-script.py:338
#: workbench-script.py:331
msgid ""
"Snapshot %s not remotely sent to URL '%s'. Do you have internet? Is your "
"server up & running? Is the url token authorized?\n"
"Snapshot not remotely sent to URL '%s'. Do you have internet? Is your server "
"up & running? Is the url token authorized?\n"
" %s"
msgstr ""
"Snapshot %s no enviado remotamente a la URL '%s'. Tienes internet? Está el "
"Snapshot no enviado remotamente a la URL '%s'. Tienes internet? Está el "
"servidor en marcha? Está autorizado el url token?\n"
" %s"
#: workbench-script.py:350
#: workbench-script.py:342
msgid "Found config file in path: %s."
msgstr "Encontrado fichero de configuración en ruta: %s."
#: workbench-script.py:361
#: workbench-script.py:353
msgid "Config file '%s' not found. Using default values."
msgstr ""
"Fichero de configuración '%s' no encontrado. Utilizando valores por defecto."
#: workbench-script.py:379
#: workbench-script.py:373
msgid "workbench-script.py [-h] [--config CONFIG]"
msgstr ""
#: workbench-script.py:380
#: workbench-script.py:374
msgid "Optional config loader for workbench."
msgstr "Cargador opcional de configuración para workbench"
#: workbench-script.py:383
#: workbench-script.py:377
msgid ""
"path to the config file. Defaults to 'settings.ini' in the current directory."
msgstr ""
"ruta al fichero de configuración. Por defecto es 'settings.ini' en el "
"directorio actual"
#: workbench-script.py:416
#: workbench-script.py:410
msgid "START"
msgstr "INICIO"
#: workbench-script.py:430
#: workbench-script.py:423
msgid ""
"This script must be run as root. Collected data will be incomplete or "
"unusable"
@ -106,6 +102,6 @@ msgstr ""
"Es conveniente que este script sea ejecutado como administrador (root). Los "
"datos recopilados serán incompletos o no usables."
#: workbench-script.py:448
#: workbench-script.py:441
msgid "END"
msgstr "FIN"

View file

@ -1,5 +1,4 @@
# assuming server_ip using qemu
server_ip=10.0.2.1
nfs_allowed_lan=10.0.2.0/24
server_ip=192.168.1.2
nfs_allowed_lan=192.168.1.0/24
tftp_path='/srv/pxe-tftp'
nfs_path='/srv/pxe-nfs'

View file

@ -1,8 +1,2 @@
.PHONY: test_pxe
test_pxe:
qemu-system-x86_64 -m 1G -boot n -netdev user,id=mynet0,tftp=/srv/pxe-tftp,bootfile=pxelinux.0 -device virtio-net,netdev=mynet0
# TODO not very convinced on having this, but ok right now
.PHONY: install_pxe_debug
install_pxe_debug:
DEBUG=true ./install-pxe.sh

View file

@ -1,6 +1,6 @@
#!/bin/sh
# Copyright (c) 2024 pangea.org Associació Pangea - Coordinadora Comunicació per a la Cooperació
# Copyright (c) 2024 Pedro <copyright@cas.cat>
# SPDX-License-Identifier: AGPL-3.0-or-later
set -e
@ -37,7 +37,7 @@ backup_file() {
if [ -f "${target}" ]; then
if ! grep -q 'we should do a backup' "${target}"; then
${SUDO} cp -v -a "${target}" "${target}-bak_${ts}"
${SUDO} cp -a "${target}" "${target}-bak_${ts}"
fi
fi
}
@ -69,14 +69,14 @@ END
# reload nfs exports
${SUDO} exportfs -vra
if [ ! -f ./settings.ini ]; then
cp -v ./settings.ini.example ./settings.ini
echo "WARNING: settings.ini was not there, settings.ini.example was copied, this only happens once"
fi
if [ ! -f "${nfs_path}/settings.ini" ]; then
${SUDO} cp -v settings.ini "${nfs_path}/settings.ini"
echo "WARNING: ${nfs_path}/settings.ini was not there, ./settings.ini was copied, this only happens once"
if [ -f "settings.ini" ]; then
${SUDO} cp settings.ini "${nfs_path}/settings.ini"
else
echo "ERROR: $(pwd)/settings.ini does not exist yet, cannot read config from there. You can take inspiration with file $(pwd)/settings.ini.example"
exit 1
fi
fi
}
@ -93,7 +93,6 @@ pxe-service=x86PC,"Network Boot",pxelinux
enable-tftp
tftp-root=${tftp_path}
END
sudo systemctl restart dnsmasq || true
}
install_netboot() {
@ -111,12 +110,8 @@ install_netboot() {
${SUDO} cp -fv "${PXE_DIR}/../iso/staging/live/vmlinuz" "${tftp_path}/"
${SUDO} cp -fv "${PXE_DIR}/../iso/staging/live/initrd" "${tftp_path}/"
${SUDO} cp -v /usr/lib/syslinux/memdisk "${tftp_path}/"
${SUDO} cp -v /usr/lib/syslinux/modules/bios/* "${tftp_path}/"
if [ ! -f ./pxe-menu.cfg ]; then
${SUDO} cp -v ./pxe-menu.cfg.example pxe-menu.cfg
echo "WARNING: pxe-menu.cfg was not there, pxe-menu.cfg.example was copied, this only happens once"
fi
${SUDO} cp /usr/lib/syslinux/memdisk "${tftp_path}/"
${SUDO} cp /usr/lib/syslinux/modules/bios/* "${tftp_path}/"
envsubst < ./pxe-menu.cfg | ${SUDO} tee "${tftp_path}/pxelinux.cfg/default"
fi
@ -133,11 +128,11 @@ init_config() {
PXE_DIR="$(pwd)"
if [ ! -f ./.env ]; then
cp -v ./.env.example ./.env
echo "WARNING: .env was not there, .env.example was copied, this only happens once"
if [ -f ./.env ]; then
. ./.env
else
echo "PXE: WARNING: $(pwd)/.env does not exist yet, cannot read config from there. You can take inspiration with file $(pwd)/.env.example"
fi
. ./.env
VERSION_CODENAME="${VERSION_CODENAME:-bookworm}"
tftp_path="${tftp_path:-/srv/pxe-tftp}"
# vars used in envsubst require to be exported:

View file

@ -1,17 +1,7 @@
[settings]
url = http://localhost:8000/api/v1/snapshot/
#url = https://demo.ereuse.org/api/v1/snapshot/
# sample token that works with default deployment such as the previous two urls
token = 5018dd65-9abd-4a62-8896-80f34ac66150
# Idhub
# wb_sign_token = 27de6ad7-cee2-4fe8-84d4-c7eea9c969c8
# url_wallet = http://localhost/webhook/sign/
# useful for development
# disable_qr = False
url = http://localhost:8000/api/snapshot/
token = '1234'
# path = /path/to/save
# device = your_device_name
# # erase = basic
# legacy = True
# legacy = true

View file

@ -1,15 +1,11 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2024 pangea.org Associació Pangea - Coordinadora Comunicació per a la Cooperació
# SPDX-License-Identifier: AGPL-3.0-or-later
import os
import json
import uuid
import hashlib
import argparse
import configparser
import urllib.parse
import urllib.request
import gettext
@ -19,17 +15,20 @@ import logging
from datetime import datetime
SNAPSHOT_BASE = {
'timestamp': str(datetime.now()),
'type': 'Snapshot',
'uuid': str(uuid.uuid4()),
'software': "workbench-script",
'version': "0.0.1",
'operator_id': "",
'data': {},
'erase': []
}
## Legacy Functions ##
def convert_to_legacy_snapshot(snapshot):
snapshot["sid"] = str(uuid.uuid4()).split("-")[0]
snapshot["software"] = "workbench-script"
snapshot["version"] = "dev"
snapshot["schema_api"] = "1.0.0"
snapshot["settings_version"] = "No Settings Version (NaN)"
snapshot["timestamp"] = snapshot["timestamp"].replace(" ", "T")
snapshot["data"]["smart"] = snapshot["data"]["disks"]
snapshot["data"].pop("disks")
snapshot.pop("code")
snapshot.pop("erase")
## End Legacy Functions ##
## Utility Functions ##
@ -49,151 +48,32 @@ def exec_cmd(cmd):
logger.info(_('Running command `%s`'), cmd)
return os.popen(cmd).read()
@logs
def exec_cmd_erase(cmd):
    # Log the erase command but do NOT run it — the actual execution is
    # deliberately commented out as a dry-run safeguard against wiping
    # disks during development. Returns an empty string in its place.
    logger.info(_('Running command `%s`'), cmd)
    return ''
    # return os.popen(cmd).read()
def gen_code():
    """Return a short random identifier: 6 hex chars from a UUID4.

    The UUID string is hashed with SHAKE-256 and truncated to 3 bytes
    (hexdigest(3) -> 6 hexadecimal characters).
    """
    seed = str(uuid.uuid4()).encode('utf-8')
    digest = hashlib.shake_256(seed)
    return digest.hexdigest(3)
## End Utility functions ##
## Legacy Functions ##
def convert_to_legacy_snapshot(snapshot):
snapshot["sid"] = str(uuid.uuid4()).split("-")[1]
snapshot["software"] = "workbench-script"
snapshot["version"] = "dev"
snapshot["schema_api"] = "1.0.0"
snapshot["settings_version"] = "No Settings Version (NaN)"
snapshot["timestamp"] = snapshot["timestamp"].replace(" ", "T")
snapshot["data"]["smart"] = json.loads(snapshot["data"]["smartctl"])
snapshot["data"].pop("smartctl")
snapshot["data"].pop("inxi")
snapshot.pop("operator_id")
snapshot.pop("erase")
lshw = 'sudo lshw -json'
hwinfo = 'sudo hwinfo --reallyall'
lspci = 'sudo lspci -vv'
data = {
'lshw': exec_cmd(lshw) or "{}",
'hwinfo': exec_cmd(hwinfo),
'lspci': exec_cmd(lspci)
}
snapshot['data'].update(data)
## End Legacy Functions ##
SNAPSHOT_BASE = {
'timestamp': str(datetime.now()),
'type': 'Snapshot',
'uuid': str(uuid.uuid4()),
'code': gen_code(),
'software': "workbench-script",
'version': "0.0.1",
'data': {},
'erase': []
}
## Command Functions ##
## Erase Functions ##
## Xavier Functions ##
def erase_basic(disk):
"""
Basic Erasure
https://tsapps.nist.gov/publication/get_pdf.cfm?pub_id=917935
Settings for basic data erasure using shred Linux command.
A software-based fast non-100%-secured way of erasing data storage.
Performs 1 pass overwriting one round using all zeros.
Compliant with NIST SP-800-8y8.
In settings appear:
WB_ERASE = EraseBasic
WB_ERASE_STEPS = 1
WB_ERASE_LEADING_ZEROS = False
"""
cmd = f'shred -vn 1 /dev/{disk}'
return [exec_cmd_erase(cmd)]
def erase_baseline(disk):
"""
Baseline Secure Erasure
Settings for advanced data erasure using badblocks Linux software.
A secured-way of erasing data storages, erase hidden areas,
checking the erase sector by sector.
Performs 1 pass overwriting each sector with zeros and a final verification.
Compliant with HMG Infosec Standard 5 Baseline.
In settings appear:
WB_ERASE = EraseSectors
WB_ERASE_STEPS = 1
WB_ERASE_LEADING_ZEROS = True
WB_ERASE_1_METHOD = EraseBasic
WB_ERASE_1_STEP_TYPE = 0
WB_ERASE_2_METHOD = EraseSectors
WB_ERASE_2_STEP_TYPE = 1
"""
result = []
cmd = f'shred -zvn 0 /dev/{disk}'
result.append(exec_cmd_erase(cmd))
cmd = f'badblocks -st random -w /dev/{disk}'
result.append(exec_cmd_erase(cmd))
return result
def erase_enhanced(disk):
"""
Enhanced Secure Erasure
Settings for advanced data erasure using badblocks Linux software.
A secured-way of erasing data storages, erase hidden areas,
checking the erase sector by sector.
Performs 3 passes overwriting every sector with zeros and ones,
and final verification. Compliant with HMG Infosec Standard 5 Enhanced.
In settings appear:
WB_ERASE = EraseSectors
WB_ERASE_LEADING_ZEROS = True
WB_ERASE_1_METHOD = EraseBasic
WB_ERASE_1_STEP_TYPE = 1
WB_ERASE_2_METHOD = EraseBasic
WB_ERASE_2_STEP_TYPE = 0
WB_ERASE_3_METHOD = EraseSectors
WB_ERASE_3_STEP_TYPE = 1
"""
result = []
cmd = f'shred -vn 1 /dev/{disk}'
result.append(exec_cmd_erase(cmd))
cmd = f'shred -zvn 0 /dev/{disk}'
result.append(exec_cmd_erase(cmd))
## creo que realmente seria asi (3 pases y una extra poniendo a ceros):
# shred -zvn 3 /def/{disk}
# tampoco estoy seguro que el badblocks haga un proceso de verificacion.
cmd = f'badblocks -st random -w /dev/{disk}'
result.append(exec_cmd_erase(cmd))
return result
## End Xavier Functions ##
def ata_secure_erase_null(disk):
cmd_baseline = f'hdparm --user-master u --security-erase NULL /dev/{disk}'
return [exec_cmd_erase(cmd_baseline)]
def ata_secure_erase_enhanced(disk):
cmd_enhanced = f'hdparm --user-master u --security-erase-enhanced /dev/{disk}'
return [exec_cmd_erase(cmd_enhanced)]
def nvme_secure_erase(disk):
cmd_encrypted = f'nvme format /dev/{disk} --ses=1'
return [exec_cmd_erase(cmd_encrypted)]
## End Erase Functions ##
@logs
def get_disks():
@ -202,39 +82,13 @@ def get_disks():
)
return disks.get('blockdevices', [])
@logs
def gen_erase(all_disks, type_erase, user_disk=None):
erase = []
for disk in all_disks:
if user_disk and disk['name'] not in user_disk:
continue
if disk['type'] != 'disk':
continue
if 'boot' in disk['mountpoints']:
continue
if not disk['rota']:
# if soport nvme erase
erase.append(nvme_secure_erase(disk['name']))
elif disk['tran'] in ['ata', 'sata']:
# if soport ata erase
if type_erase == 'basic':
erase.append(ata_secure_erase_null(disk['name']))
elif type_erase == 'baseline':
erase.append(ata_secure_erase_null(disk['name']))
elif type_erase == 'enhanced':
erase.append(ata_secure_erase_enhanced(disk['name']))
else:
# For old disks
if type_erase == 'basic':
erase.append(erase_basic(disk['name']))
elif type_erase == 'baseline':
erase.append(erase_baseline(disk['name']))
elif type_erase == 'enhanced':
erase.append(erase_enhanced(disk['name']))
return erase
def gen_erase(type_erase, user_disk=None):
    # Delegate disk sanitization to the external `sanitize` CLI
    # (usody-sanitize). With a specific disk (-d) only that disk is
    # erased; otherwise all detected disks (-a), using method -m.
    if user_disk:
        return exec_cmd(f"sanitize -d {user_disk} -m {type_erase}")
    return exec_cmd(f"sanitize -a -m {type_erase}")
    # return exec_cmd(f"sanitize -a -m {type_erase} --confirm")
@logs
@ -255,7 +109,7 @@ def smartctl(all_disks, disk=None):
data = exec_smart(disk['name'])
data_list.append(data)
return json.dumps(data_list)
return data_list
## End Command Functions ##
@ -263,13 +117,16 @@ def smartctl(all_disks, disk=None):
# TODO permitir selección
# TODO permitir que vaya más rápido
def get_data(all_disks):
lshw = 'sudo lshw -json'
hwinfo = 'sudo hwinfo --reallyall'
dmidecode = 'sudo dmidecode'
inxi = "sudo inxi -afmnGEMABD -x 3 --edid --output json --output-file print"
lspci = 'sudo lspci -vv'
data = {
'smartctl': smartctl(all_disks),
'lshw': exec_cmd(lshw) or "{}",
'disks': smartctl(all_disks),
'hwinfo': exec_cmd(hwinfo),
'dmidecode': exec_cmd(dmidecode),
'inxi': exec_cmd(inxi)
'lspci': exec_cmd(lspci)
}
return data
@ -281,20 +138,20 @@ def gen_snapshot(all_disks):
return snapshot
def save_snapshot_in_disk(snapshot, path, snap_uuid):
def save_snapshot_in_disk(snapshot, path):
snapshot_path = os.path.join(path, 'snapshots')
filename = "{}/{}_{}.json".format(
snapshot_path,
datetime.now().strftime("%Y%m%d-%H_%M_%S"),
snap_uuid)
snapshot['uuid'])
try:
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
logger.info(_("Created snapshots directory at '%s'"), snapshot_path)
with open(filename, "w") as f:
f.write(snapshot)
f.write(json.dumps(snapshot))
logger.info(_("Snapshot written in path '%s'"), filename)
except Exception as e:
try:
@ -302,80 +159,22 @@ def save_snapshot_in_disk(snapshot, path, snap_uuid):
fallback_filename = "{}/{}_{}.json".format(
path,
datetime.now().strftime("%Y%m%d-%H_%M_%S"),
snap_uuid)
snapshot['uuid'])
with open(fallback_filename, "w") as f:
f.write(snapshot)
f.write(json.dumps(snapshot))
logger.warning(_("Snapshot written in fallback path '%s'"), fallback_filename)
except Exception as e:
logger.error(_("Could not save snapshot locally. Reason: Failed to write in fallback path:\n %s"), e)
def send_to_sign_credential(snapshot, token, url):
headers = {
"Authorization": f"Bearer {token}",
"Content-Type": "application/json"
}
try:
cred = {
"type": "DeviceSnapshotV1",
"save": False,
"data": {
"operator_id": snapshot["operator_id"],
"dmidecode": snapshot["data"]["dmidecode"],
"inxi": snapshot["data"]["inxi"],
"smartctl": snapshot["data"]["smartctl"],
"uuid": snapshot["uuid"],
}
}
data = json.dumps(cred).encode('utf-8')
## TODO better debug
#with open('/tmp/pre-vc-test.json', "wb") as f:
# f.write(data)
request = urllib.request.Request(url, data=data, headers=headers)
with urllib.request.urlopen(request) as response:
status_code = response.getcode()
response_text = response.read().decode('utf-8')
if 200 <= status_code < 300:
logger.info(_("Credential successfully signed"))
res = json.loads(response_text)
if res.get("status") == "success" and res.get("data"):
return res["data"]
return json.dumps(snapshot)
else:
logger.error(_("Credential cannot signed in '%s'"), url)
return json.dumps(snapshot)
except Exception as e:
logger.error(_("Credential not remotely builded to URL '%s'. Do you have internet? Is your server up & running? Is the url token authorized?\n %s"), url, e)
return json.dumps(snapshot)
# apt install qrencode
def generate_qr_code(url, disable_qr):
"""Generate and print QR code for the given URL."""
if disable_qr:
return
qr_command = "echo {} | qrencode -t ANSI".format(url)
print(exec_cmd(qr_command))
# TODO sanitize url, if url is like this, it fails
# url = 'http://127.0.0.1:8000/api/snapshot/'
def send_snapshot_to_devicehub(snapshot, token, url, ev_uuid, legacy, disable_qr):
url_components = urllib.parse.urlparse(url)
ev_path = f"evidence/{ev_uuid}"
components = (url_components.scheme, url_components.netloc, ev_path, '', '', '')
ev_url = urllib.parse.urlunparse(components)
def send_snapshot_to_devicehub(snapshot, token, url):
headers = {
"Authorization": f"Bearer {token}",
"Content-Type": "application/json"
}
try:
data = snapshot.encode('utf-8')
data = json.dumps(snapshot).encode('utf-8')
request = urllib.request.Request(url, data=data, headers=headers)
with urllib.request.urlopen(request) as response:
status_code = response.getcode()
@ -383,28 +182,22 @@ def send_snapshot_to_devicehub(snapshot, token, url, ev_uuid, legacy, disable_qr
if 200 <= status_code < 300:
logger.info(_("Snapshot successfully sent to '%s'"), url)
if legacy:
try:
response = json.loads(response_text)
public_url = response.get('public_url')
dhid = response.get('dhid')
if public_url:
generate_qr_code(public_url, disable_qr)
print("url: {}".format(public_url))
if dhid:
print("dhid: {}".format(dhid))
except Exception:
logger.error(response_text)
else:
generate_qr_code(ev_url, disable_qr)
print("url: {}".format(ev_url))
else:
logger.error(_("Snapshot %s not remotely sent to URL '%s'. Server responded with error:\n %s"), ev_uuid, url, response_text)
try:
response = json.loads(response_text)
if response.get('url'):
# apt install qrencode
qr = "echo {} | qrencode -t ANSI".format(response['url'])
print(exec_cmd(qr))
print("url: {}".format(response['url']))
if response.get("dhid"):
print("dhid: {}".format(response['dhid']))
except Exception:
logger.error(response_text)
except Exception as e:
logger.error(_("Snapshot not remotely sent to URL '%s'. Do you have internet? Is your server up & running? Is the url token authorized?\n %s"), url, e)
def load_config(config_file="settings.ini"):
"""
Tries to load configuration from a config file.
@ -424,13 +217,10 @@ def load_config(config_file="settings.ini"):
device = config.get('settings', 'device', fallback=None)
erase = config.get('settings', 'erase', fallback=None)
legacy = config.get('settings', 'legacy', fallback=None)
url_wallet = config.get('settings', 'url_wallet', fallback=None)
wb_sign_token = config.get('settings', 'wb_sign_token', fallback=None)
disable_qr = config.get('settings', 'disable_qr', fallback=None)
else:
logger.error(_("Config file '%s' not found. Using default values."), config_file)
path = os.path.join(os.getcwd())
url, token, device, erase, legacy, url_wallet, wb_sign_token, disable_qr = (None,)*8
url, token, device, erase, legacy = None, None, None, None, None
return {
'path': path,
@ -438,10 +228,7 @@ def load_config(config_file="settings.ini"):
'token': token,
'device': device,
'erase': erase,
'legacy': legacy,
'wb_sign_token': wb_sign_token,
'url_wallet': url_wallet,
'disable_qr': disable_qr
'legacy': legacy
}
def parse_args():
@ -495,7 +282,6 @@ def main():
config_file = args.config
config = load_config(config_file)
legacy = config.get("legacy")
# TODO show warning if non root, means data is not complete
# if annotate as potentially invalid snapshot (pending the new API to be done)
@ -504,40 +290,16 @@ def main():
all_disks = get_disks()
snapshot = gen_snapshot(all_disks)
snap_uuid = snapshot["uuid"]
if config['erase'] and config['device'] and not legacy:
snapshot['erase'] = gen_erase(all_disks, config['erase'], user_disk=config['device'])
elif config['erase'] and not legacy:
snapshot['erase'] = gen_erase(all_disks, config['erase'])
if legacy:
if config.get("legacy"):
convert_to_legacy_snapshot(snapshot)
snapshot = json.dumps(snapshot)
else:
url_wallet = config.get("url_wallet")
wb_sign_token = config.get("wb_sign_token")
snapshot['erase'] = gen_erase(config['erase'], user_disk=config['device'])
if wb_sign_token:
tk = wb_sign_token.encode("utf8")
snapshot["operator_id"] = hashlib.sha3_256(tk).hexdigest()
if url_wallet and wb_sign_token:
snapshot = send_to_sign_credential(snapshot, wb_sign_token, url_wallet)
else:
snapshot = json.dumps(snapshot)
save_snapshot_in_disk(snapshot, config['path'], snap_uuid)
save_snapshot_in_disk(snapshot, config['path'])
if config['url']:
send_snapshot_to_devicehub(
snapshot,
config['token'],
config['url'],
snap_uuid,
legacy,
config['disable_qr']
)
send_snapshot_to_devicehub(snapshot, config['token'], config['url'])
logger.info(_("END"))