Merge remote-tracking branch 'origin/master' into reports

# Conflicts:
#	ereuse_devicehub/resources/device/views.py

commit ee231aecb9

README.md (20 lines changed)
@@ -1,7 +1,8 @@
# Devicehub

Devicehub is an IT Asset Management System focused in reusing devices,
created under the project [eReuse.org](https://www.ereuse.org).
Devicehub is a distributed IT Asset Management System focused in
reusing devices, created under the project
[eReuse.org](https://www.ereuse.org).

Our main objectives are:

@@ -35,10 +36,12 @@ call the new file ``app.py``.
Create a PostgreSQL database called *devicehub* by running
[create-db](examples/create-db.sh):

- In a Debian 9 terminal, execute the following two commands:
- In a Debian 9 bash terminal, execute the following two commands:
  1. `sudo su - postgres`.
  2. `bash examples/create-db.sh devicehub`.
- In MacOS: `examples/create-db.sh devicehub`.
  2. `bash examples/create-db.sh devicehub dhub`,
     and password `ereuse`.
- In MacOS: `bash examples/create-db.sh devicehub dhub`,
  and password `ereuse`.

Create the tables in the database by executing in the same directory
where `app.py` is:

@@ -85,7 +88,8 @@ To run the tests you will need to:
1. `git clone` this project.
2. Create a database for testing executing `create-db.sh` like
   the normal installation but changing the first parameter
   from `devicehub` to `dh_test`: `create-db.sh dh_test`.
   from `devicehub` to `dh_test`: `create-db.sh dh_test dhub` and
   password `ereuse`.
3. Execute at the root folder of the project `python3 setup.py test`.

## Generating the docs

@@ -94,3 +98,7 @@ To run the tests you will need to:
3. Execute `pip3 install -e .[docs]` in the project root folder.
3. Go to `<project root folder>/docs` and execute `make html`.
   Repeat this step to generate new docs.

To auto-generate the docs do `pip3 install -e .[docs-auto]`, then
execute, in the root folder of the project
`sphinx-autobuild docs docs/_build/html`.
docs/actions.rst (335 lines changed)
@@ -38,43 +38,27 @@ to the `Swagger docs

Physical Actions
****************
The following actions describe and react on the physical condition
The following actions describe and react on the
:class:`ereuse_devicehub.resources.device.states.Physical` condition
of the devices.

ToPrepare, Prepare
==================
Work has been performed on the device up to a defined point of
acceptance. Users using this event have to agree on what this point
of acceptance is; for some it is when the device just works, for others
when some testing has been performed.

**Prepare** dictates that the device has been prepared, whereas
**ToPrepare** that the device has been selected to be prepared.

Usually **ToPrepare** is the next event done after registering the
device.
.. autoclass:: ereuse_devicehub.resources.event.models.Prepare
.. autoclass:: ereuse_devicehub.resources.event.models.ToPrepare

ToRepair, Repair
================
ToRepair is the act of selecting a device to be repaired, and
Repair the act of performing the actual reparations. If a repair
without an error is performed, it represents that the reparation
has been successful.
.. autoclass:: ereuse_devicehub.resources.event.models.Repair
.. autoclass:: ereuse_devicehub.resources.event.models.ToRepair

ReadyToUse
==========
The device is ready to be used. This involves greater preparation
than the ``Prepare`` event, and users should only use a device
after this event is performed.

Users usually require devices with this event before shipping them
to customers.
.. autoclass:: ereuse_devicehub.resources.event.models.ReadyToUse

Live
====
A keep-alive from a device connected to the Internet with information
about its state (in the form of a ``Snapshot`` event) and usage
statistics.
.. autoclass:: ereuse_devicehub.resources.event.models.Live

DisposeWaste, Recover
=====================

@@ -86,6 +70,8 @@ DisposeWaste, Recover

See `ToDisposeProduct, DisposeProduct`_.

.. todo:: Events not developed yet.

Association actions
*******************
Actions that change the associations users have with devices;

@@ -99,43 +85,29 @@ and **organize** actions.

Trade actions
=============
Trade actions log the political exchange of devices between users,
stating **owner** xor **usufructuaree**. Every time a trade event
is performed, the old user loses its political possession in favor
of another one.
Not fully developed.
.. autoclass:: ereuse_devicehub.resources.event.models.Trade

Sell
----
The act of taking money from a buyer in exchange of a device.
.. autoclass:: ereuse_devicehub.resources.event.models.Sell

Donate
------
The act of giving devices without compensation.
.. autoclass:: ereuse_devicehub.resources.event.models.Donate

Rent
----
The act of giving money in return for temporary use, but not
ownership, of a device.
.. autoclass:: ereuse_devicehub.resources.event.models.Rent

CancelTrade
-----------
The act of cancelling a `Sell`_, `Donate`_ or `Rent`_.
.. autoclass:: ereuse_devicehub.resources.event.models.CancelTrade

ToDisposeProduct, DisposeProduct
-------------------------
``ToDispose`` and ``DisposeProduct`` manage the process of getting
rid of devices by giving them (selling, donating) to another organization
like a waste manager.

``ToDispose`` marks a device for being disposed, and
``DisposeProduct`` dictates that the device has been disposed.

See `DisposeWaste, Recover`_ events for disposing without trading
the device.

.. note:: For usability purposes, users might not directly perform
   ``Dispose``, but this could automatically be done when
   performing ``ToDispose`` + ``Receive`` to a ``RecyclingCenter``.
--------------------------------
.. autoclass:: ereuse_devicehub.resources.event.models.DisposeProduct
.. autoclass:: ereuse_devicehub.resources.event.models.ToDisposeProduct

Transfer actions
================
@@ -143,34 +115,27 @@ The act of transferring/moving devices from one place to another.

Receive
-------
The act of physically taking delivery of a device. The receiver
confirms that the devices have arrived, and thus, they
**physically possess** them. Note that
there can only be one **physical possessor** per device, and
``Receive`` changes it.

The receiver can optionally take a role in the reception, giving
it meaning; a user that takes the ``FinalUser`` role in the
reception expresses that they will use the device, whereas the role
``Transporter`` is used by intermediaries in shipping.

.. todo:: how do we ensure users specify type of reception?
.. autoclass:: ereuse_devicehub.resources.event.models.Receive
.. autoclass:: ereuse_devicehub.resources.enums.ReceiverRole
   :members:
   :undoc-members:
.. autoattribute:: ereuse_devicehub.resources.device.models.Device.physical_possessor

Organize actions
================
The act of manipulating/administering/supervising/controlling one or
more devices.
.. autoclass:: ereuse_devicehub.resources.event.models.Organize

Reserve, CancelReservation
--------------------------
The act of reserving devices and cancelling them.
-------------------------
Not fully developed.

After this event is performed, the user is the **reservee** of the
devices. There can only be one non-cancelled reservation for
a device, and a reservation can only have one reservee.
.. autoclass:: ereuse_devicehub.resources.event.models.Reserve
.. autoclass:: ereuse_devicehub.resources.event.models.CancelReservation

Assign, Accept, Reject
----------------------
Not developed.

``Assign`` allocates devices to a user. The purpose or meaning
of the association is defined by the users.

@@ -179,9 +144,7 @@ assignments.

.. todo:: shall we add ``Deassign`` or make ``Assign``
   always define all active users?

.. todo:: Assign won't be developed until further notice.

Assign won't be developed until further notice.

Internal state actions
**********************

@@ -190,254 +153,88 @@ their state.

Snapshot
========
The Snapshot sets the physical information of the device (S/N, model...)
and updates it with erasures, benchmarks, ratings, and tests; updates the
composition of its components (adding / removing them), and links tags
to the device.
.. autoclass:: ereuse_devicehub.resources.event.models.Snapshot

When receiving a Snapshot, the DeviceHub creates, adds and removes
components to match the Snapshot. For example, if a Snapshot of a computer
contains a new component, the system searches for the component in its
database and, if not found, creates it; finally linking it to the
computer.

A Snapshot is used with Remove to represent changes in components for
a device:

1. ``Snapshot`` creates a device if it does not exist, and the same
   for its components. This is all done in one ``Snapshot``.
2. If the device exists, it updates its component composition by
   *adding* and *removing* them. If,
   for example, this new Snapshot doesn't have a component, it means that
   this component is not present anymore in the device, thus removing it
   from it. Then we have that:

   - Components that are added to the device: snapshot2.components -
     snapshot1.components
   - Components that are removed from the device: snapshot1.components -
     snapshot2.components

When adding a component, there may be the case this component existed
before and it was inside another device. In such case, DeviceHub will
perform ``Remove`` on the old parent.
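A minimal sketch of that set arithmetic, with plain Python sets standing in for the component collections (this is illustrative only, not the actual Devicehub synchronization code):

```python
# Hypothetical component identifiers; Devicehub compares real Component records.
old = {"cpu-1", "ram-1", "hdd-1"}   # components known from snapshot1
new = {"cpu-1", "ram-1", "ram-2"}   # components reported by snapshot2

added = new - old      # {"ram-2"}: added to the device (and Removed from any old parent)
removed = old - new    # {"hdd-1"}: no longer present, so a Remove is performed
```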

Snapshots from Workbench
------------------------
When processing a device from the Workbench, this one performs a Snapshot
and then performs more events (like testings, benchmarking...).

There are two ways of sending this information: in an async way,
that is, submitting events as soon as Workbench performs them, or
submitting only one Snapshot event with all the other events embedded.

Asynced
^^^^^^^
The use case, which is represented in ``test_workbench_phases``,
is as follows:

1. In **T1**, WorkbenchServer (as the middleware between Workbench and
   Devicehub) submits:

   - A ``Snapshot`` event with the required information to **synchronize**
     and **rate** the device. This is:

     - Identification information about the device and components
       (S/N, model, physical characteristics...)
     - ``Tags`` in a ``tags`` property in the ``device``.
     - ``Rate`` in an ``events`` property in the ``device``.
     - ``Benchmarks`` in an ``events`` property in each ``component``
       or ``device``.
     - ``TestDataStorage`` as in ``Benchmarks``.
   - An ordered set of **expected events**, defining which are the next
     events that Workbench will perform to the device in ideal
     conditions (device doesn't fail, no Internet drop...).

   Devicehub **syncs** the device with the database and performs the
   ``Benchmark``, the ``TestDataStorage``, and finally the ``Rate``.
   This leaves the Snapshot **open** to wait for the next events
   to come.
2. Assuming that we expect all events, in **T2**, WorkbenchServer
   submits a ``StressTest`` with a ``snapshot`` field containing the
   ID of the Snapshot in 1, and Devicehub links the event with such
   ``Snapshot``.
3. In **T3**, WorkbenchServer submits the ``Erase`` with the ``Snapshot``
   and ``component`` IDs from 1, linking it to them. It repeats
   this for all the erased data storage devices, finishing in **T3+Tn**,
   where *n* is the number of erased data storage devices.
4. WorkbenchServer does like in 3. but for the event ``Install``,
   finishing in **T3+Tn+Tx**, where *x* is the number of data storage
   devices with an OS installed.
5. In **T3+Tn+Tx**, when all *expected events* have been performed,
   Devicehub **closes** the ``Snapshot`` from 1.
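For illustration, the T2 submission could be shaped like the sketch below. The ``type``, ``severity`` and ``elapsed`` fields mirror the Workbench fixtures elsewhere in this commit; exactly how the ``snapshot`` reference is encoded (UUID vs. a numeric id) is an assumption here:

```python
# Hypothetical follow-up event sent by WorkbenchServer in T2,
# linking a StressTest to the still-open Snapshot from T1.
stress_test = {
    "type": "StressTest",
    "severity": "Info",
    "elapsed": 60,  # minutes under stress, as in the fixtures
    "snapshot": "de4f495e-c58b-40e1-a33e-46ab5e84767e",  # ID of the Snapshot from step 1
}
```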

Synced
^^^^^^
Optionally, Devicehub understands receiving a ``Snapshot`` with all
the events in an ``events`` property inside each affected ``component``
or ``device``.
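A skeleton of such a synced ``Snapshot``, reduced from the Workbench fixture added later in this commit (most fields omitted):

```python
# Reduced from the Snapshot test fixture in this commit; illustrative only.
snapshot = {
    "type": "Snapshot",
    "software": "Workbench",
    "version": "11.0a6",
    "closed": True,
    "device": {
        "type": "Desktop",
        "chassis": "Tower",
        "events": [{"type": "StressTest", "severity": "Info", "elapsed": 60}],
    },
    "components": [
        {
            "type": "HardDrive",
            "serialNumber": "S246J90Z406422",
            "events": [{"type": "BenchmarkDataStorage",
                        "readSpeed": 136.0, "writeSpeed": 35.0, "elapsed": 9}],
        },
    ],
}
```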

Add, Remove
===========
The act of adding and removing components of and from a device.

These are usually used internally from `Snapshot`_, or manually, for
example, when removing a component (like a ``DataStorage`` unit) from
a broken computer.
.. autoclass:: ereuse_devicehub.resources.event.models.Add
.. autoclass:: ereuse_devicehub.resources.event.models.Remove

EraseBasic, EraseSectors
========================
An erasure attempt to a ``DataStorage``. The event contains
information about success and nature of the erasure.

``EraseBasic`` is a fast non-secured way of erasing data storage, and
``EraseSectors`` is a slower secured, sector-by-sector, erasure
method.

Users can generate erasure certificates from successful erasures.

Erasures are an accumulation of **erasure steps**, which are performed
as separate actions: ``StepRandom``, for an erasure step
that has overwritten data with random bits, and ``StepZero``,
for an erasure step that has overwritten data with zeros.
.. autoclass:: ereuse_devicehub.resources.event.models.EraseBasic
.. autoclass:: ereuse_devicehub.resources.event.models.EraseSectors
.. autoclass:: ereuse_devicehub.resources.event.models.ErasePhysical
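For reference, this is the shape an ``EraseBasic`` with one ``StepRandom`` takes in the test fixtures touched by this commit (trimmed; timestamps shortened):

```python
# Shape of an erasure event as seen in the fixtures; illustrative only.
erase = {
    "type": "EraseBasic",
    "severity": "Info",
    "zeros": False,
    "startTime": "2018-07-11T11:20:01",
    "endTime": "2018-07-11T11:42:12",
    "steps": [
        {"type": "StepRandom", "severity": "Info",
         "startTime": "2018-07-11T11:20:01", "endTime": "2018-07-11T11:42:12"},
    ],
}
```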

Install
=======
The act of installing an operating system on a data storage unit.
.. autoclass:: ereuse_devicehub.resources.event.models.Install

Test
====
The act of testing the physical condition of a device and its
components.
.. autoclass:: ereuse_devicehub.resources.event.models.Test

TestDataStorage
---------------
The act of testing the data storage.

Testing is done using the `S.M.A.R.T self test
<https://en.wikipedia.org/wiki/S.M.A.R.T.#Self-tests>`_. Note
that some data storage units, especially some new PCIe ones, do not
support SMART testing.

The test also takes other SMART values that indicate the overall health
of the data storage.
.. autoclass:: ereuse_devicehub.resources.event.models.TestDataStorage

StressTest
----------
The act of stressing (putting to the maximum capacity)
a device for an amount of minutes. A device that is not in good
condition will probably not survive such a test.
.. autoclass:: ereuse_devicehub.resources.event.models.StressTest

Benchmark
=========
The act of gauging the performance of a device.
.. autoclass:: ereuse_devicehub.resources.event.models.Benchmark


BenchmarkDataStorage
--------------------
Benchmarks the data storage unit reading and writing speeds.
.. autoclass:: ereuse_devicehub.resources.event.models.BenchmarkDataStorage


BenchmarkWithRate
-----------------
The act of benchmarking a device with a single rate.
.. autoclass:: ereuse_devicehub.resources.event.models.BenchmarkWithRate


BenchmarkProcessor
------------------
Benchmarks a processor by executing `BogoMips
<https://en.wikipedia.org/wiki/BogoMips>`_. Note that this is not
a reliable way of rating processors and we keep it for compatibility
purposes.
.. autoclass:: ereuse_devicehub.resources.event.models.BenchmarkProcessor


BenchmarkProcessorSysbench
--------------------------
Benchmarks a processor by using the processor benchmarking utility of
`sysbench <https://github.com/akopytov/sysbench>`_.
.. autoclass:: ereuse_devicehub.resources.event.models.BenchmarkProcessorSysbench


BenchmarkRamSysbench
--------------------
.. autoclass:: ereuse_devicehub.resources.event.models.BenchmarkRamSysbench

Rate
====
Devicehub generates a rating for a device taking into consideration its
visual, functional, and performance aspects.

A workflow is as follows:

1. An agent generates feedback from the device in the form of benchmark,
   visual, and functional information, which is filled in a ``Rate``
   event. This is done through a **software**, defining the type
   of ``Rate`` event. At the moment we have two rates: ``WorkbenchRate``
   and ``PhotoboxRate``.
2. Devicehub gathers this information and computes a score that updates
   the ``Rate`` event.
3. Devicehub aggregates different rates and computes a final score for
   the device by performing a new ``AggregateRating`` event.

There are three **types** of ``Rate``: ``WorkbenchRate``,
``AppRate``, and ``PhotoboxRate``. ``WorkbenchRate`` can have different
**software** algorithms, and each software algorithm can have several
**versions**. So, we have 3 dimensions for ``WorkbenchRate``:
type, software, version.

Devicehub generates a rate event for each software and version. So,
if an agent fulfills a ``WorkbenchRate`` and there are 2 software
algorithms and each has two versions, Devicehub will generate 4 rates.
Devicehub understands that only one software and version are the
**official** (set in the settings of each inventory),
and it will generate an ``AggregateRating`` for only the official
versions. At the same time, ``Price`` only computes the price of
the **official** version.

The technical workflow in Devicehub is as follows:

1. In **T1**, the user performs a ``Snapshot`` by processing the device
   through the Workbench. From the benchmarks and the visual and
   functional ratings the user does in the device, the system generates
   many ``WorkbenchRate`` (as many as software and versions defined).
   With only this information, the system generates an ``AggregateRating``,
   which is the event that the user will see in the web.
2. In **T2**, the user takes pictures of the device through the
   Photobox, and DeviceHub creates an ``ImageSet`` with multiple
   ``Image`` with information from the photobox.
3. In **T3**, an agent (user or AI) rates the pictures, creating a
   ``PhotoboxRate`` **for each** picture. When Devicehub receives the
   first ``PhotoboxRate`` it creates an ``AggregateRating`` linked
   to such ``PhotoboxRate``. So, the agent will perform as many
   ``PhotoboxRate`` as pictures are in the ``ImageSet``, and Devicehub
   will link each ``PhotoboxRate`` to the same ``AggregateRating``.
   This will end in **T3+Tn**, where *n* is the number of photos to rate.
4. In **T3+Tn**, after the last photo is rated, Devicehub will generate
   a new rate for the device: it takes the ``AggregateRating`` from 3.
   and computes a rate from all the linked ``PhotoboxRate`` plus the
   last available ``WorkbenchRate`` for that device.

If the agent in 3. is a user, Devicehub creates ``PhotoboxUserRate``,
and if it is an AI it creates ``PhotoboxAIRate``.

The same ``ImageSet`` can be rated multiple times, generating a new
``AggregateRating`` each time.
.. autoclass:: ereuse_devicehub.resources.event.models.Rate

Price
=====
Price states a selling price for the device, but not necessarily the
final price it was sold for (which is set in the Sell event).

Devicehub automatically computes a price from ``AggregateRating``
events. As in a **Rate**, price can have **software** and **version**,
and there is an **official** price that is used to automatically
compute the price from an ``AggregateRating``. Only the official price
is computed from an ``AggregateRating``.
.. autoclass:: ereuse_devicehub.resources.event.models.Price

Migrate
=======
Moves the devices to a new database/inventory. Devices cannot be
modified anymore at the previous database.

Donation
========
.. todo:: nextcloud/eReuse/99. Tasks/224. Definir datos necesarios
   configuración licencia
Not done.

.. autoclass:: ereuse_devicehub.resources.event.models.Migrate

States
******
.. todo:: work on September.
.. autoclass:: ereuse_devicehub.resources.device.states.State

.. uml:: states.puml

.. autoclass:: ereuse_devicehub.resources.device.states.Trading
   :members:
   :undoc-members:
.. autoclass:: ereuse_devicehub.resources.device.states.Physical
   :members:
   :undoc-members:

@@ -14,7 +14,7 @@
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('..'))


# -- Project information -----------------------------------------------------

@@ -42,7 +42,8 @@ extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinxcontrib.plantuml',
    'sphinx.ext.autosectionlabel'
    'sphinx.ext.autosectionlabel',
    'sphinx.ext.autodoc'
]

# Add any paths that contain templates here, relative to this directory.

@@ -159,7 +160,7 @@ texinfo_documents = [
# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}

# -- Options for todo extension ----------------------------------------------

@@ -174,3 +175,4 @@ html_favicon = 'img/favicon.ico'

# autosectionlabel
autosectionlabel_prefix_document = True
autodoc_member_order = 'bysource'

@@ -1,16 +1,14 @@
Inventory
Devices
#########

Devicehub uses the same path to get devices and lots.
You can retrieve devices using ``GET /devices/``, or a specific
device by ``GET /devices/24``.

To get all devices and groups: ``GET /inventory`` or the devices of a
specific group: ``GET /inventory/24``.

You can **filter** devices ``GET /inventory/24?filter={"type": "Computer"}``,
and **sort** them ``GET /inventory?sort={"created": 1}``, and of course
you can combine both in the same query. You only get the groups that
contain the devices that pass the filters. So, if a group contains
only one device that is filtered, you don't get that group either.
You can **filter** devices ``GET /devices/?filter={"type": "Computer"}``,
**sort** them ``GET /devices/?sort={"created": 1}``, and perform
natural search with ``GET /devices/?search=foo bar``. Of course
you can combine them in the same query, returning devices that
only pass all conditions.

Results are **paginated**; you get up to 30 devices and up to 30
groups in a page. Select the actual page by ``GET /inventory?page=3``.
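For instance, querying this endpoint from Python could look like the sketch below. The host and authentication header are hypothetical; the ``filter``, ``sort``, ``search`` and ``page`` parameters, and the ``items``/``total`` response fields, are the ones described in this page and used by the project's own dummy data script:

```python
import requests

# Hypothetical Devicehub instance and credentials; adjust to your deployment.
base = 'https://devicehub.example.com'
headers = {'Authorization': 'Basic <token>'}

params = {
    'filter': '{"type": "Computer"}',   # JSON-encoded, as described below
    'sort': '{"created": 1}',
    'search': 'intel',                  # plain full-text string
    'page': 2,
}
r = requests.get(base + '/devices/', params=params, headers=headers)
r.raise_for_status()
result = r.json()
print(result['total'], 'devices matched;', len(result['items']), 'in this page')
```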

@@ -21,21 +19,10 @@ Query
The query consists of 4 optional params:

- **search**: Filters devices by performing a full-text search over their
  physical properties, events, tags, and groups they are in:

  - Device.type
  - Device.serial_number
  - Device.model
  - Device.manufacturer
  - Device.color
  - Tag.id
  - Tag.org
  - Group.name

  Search is a string.
  physical properties, events, and tags. Search is a string.
- **filter**: Filters devices field-by-field. Each field can be
  filtered in different ways, see them in
  :class:`ereuse_devicehub.resources.inventory.Filters`. Filter is
  :class:`ereuse_devicehub.resources.devices.Filters`. Filter is
  a JSON-encoded object whose keys are the filters. By default
  it is empty (no filter applied).
- **sort**: Sorts the devices. You can specify multiple sort clauses

@@ -59,3 +46,10 @@ The result is a JSON object with the following fields:
  or ``1``.
- **perPage**: How many devices are in every page, fixed to ``30``.
- **total**: How many total devices passed the filters.

Models
******

.. automodule:: ereuse_devicehub.resources.device.models
   :members:
   :member-order: bysource

@@ -14,7 +14,7 @@ This is the documentation and API of the `eReuse.org Devicehub

   actions
   agents
   inventory
   devices
   tags
   lots

@@ -6,13 +6,15 @@ skinparam ranksep 1
[*] -> Registered

state Attributes {

state Broken : cannot turn on
state Owners
state Usufructuarees
state Reservees
state "Physical\nPossessor"
state "Waste\n\Product"
state problems : List of current events \nwith Warn/Error
state privacy : Set of\ncurrent erasures
state working : List of current events\naffecting working
}

state Physical {

@@ -44,10 +46,4 @@ state Trading {
Renting --> Cancelled : Cancel
}

state DataStoragePrivacyCompliance {
state Erased
state Destroyed
}


@enduml

@@ -1,4 +0,0 @@
from distutils.version import StrictVersion

__version__ = '0.2.0a13'
version = StrictVersion(__version__)

@@ -1,4 +1,7 @@
from sqlalchemy import event
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import expression
from sqlalchemy_utils import view
from teal.db import SchemaSQLAlchemy


@@ -16,4 +19,24 @@ class SQLAlchemy(SchemaSQLAlchemy):
        self.drop_schema(schema='common')


def create_view(name, selectable):
    """Creates a view.

    This is an adaptation from sqlalchemy_utils.view. See
    `the test on sqlalchemy-utils <https://github.com/kvesteri/
    sqlalchemy-utils/blob/master/tests/test_views.py>`_ for an
    example on how to use.
    """
    table = view.create_table_from_selectable(name, selectable)

    # We need to ensure views are created / destroyed before / after
    # SchemaSQLAlchemy's listeners execute.
    # That is why insert=True in 'after_create'.
    event.listen(db.metadata, 'after_create', view.CreateView(name, selectable), insert=True)
    event.listen(db.metadata, 'before_drop', view.DropView(name))
    return table


db = SQLAlchemy(session_options={"autoflush": False})
f = db.func
exp = expression
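A hedged usage sketch for ``create_view``: the ``Gadget`` model and its columns are placeholders, and the pattern of passing a name plus a ``select()`` follows the sqlalchemy-utils test the docstring points to, not code from this repository:

```python
# Hypothetical usage of create_view(); 'Gadget' is a stand-in model.
from sqlalchemy import select

from ereuse_devicehub.db import create_view, db, f


class Gadget(db.Model):
    id = db.Column(db.BigInteger, primary_key=True)
    type = db.Column(db.Unicode, nullable=False)


# A database view counting gadgets per type. CreateView runs right after the
# tables are created and DropView right before they are dropped, as wired
# inside create_view().
gadget_counts = create_view(
    'gadget_counts',
    select([Gadget.type, f.count(Gadget.id).label('amount')]).group_by(Gadget.type)
)
```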

@@ -35,6 +35,7 @@ class Devicehub(Teal):
                         instance_relative_config, root_path, Auth)
        self.dummy = Dummy(self)
        self.before_request(self.register_db_events_listeners)
        self.cli.command('regenerate-search')(self.regenerate_search)

    def register_db_events_listeners(self):
        """Registers the SQLAlchemy event listeners."""

@@ -44,3 +45,9 @@ class Devicehub(Teal):
    def _init_db(self):
        super()._init_db()
        DeviceSearch.set_all_devices_tokens_if_empty(self.db.session)

    def regenerate_search(self):
        """Re-creates from 0 all the search tables."""
        DeviceSearch.regenerate_search_table(self.db.session)
        db.session.commit()
        print('Done.')

@@ -28,6 +28,7 @@ class Dummy:
    ET = (
        ('A0000000000001', 'DT-AAAAA'),
        ('A0000000000002', 'DT-BBBBB'),
        ('A0000000000003', 'DT-CCCCC'),
    )
    """eTags to create."""
    ORG = 'eReuse.org CAT', '-t', 'G-60437761', '-c', 'ES'

@@ -118,9 +119,9 @@ class Dummy:
        assert len(inventory['items'])

        i, _ = user.get(res=Device, query=[('search', 'intel')])
        assert len(i['items']) == 11
        i, _ = user.get(res=Device, query=[('search', 'pc')])
        assert len(i['items']) == 12
        i, _ = user.get(res=Device, query=[('search', 'pc')])
        assert len(i['items']) == 13

        # Let's create a set of events for the pc device
        # Make device Ready

@@ -9,7 +9,7 @@
"elapsed": 2
},
{
"error": false,
"severity": "Info",
"type": "StressTest",
"elapsed": 60
},

@@ -91,14 +91,14 @@
{
"steps": [
{
"error": false,
"severity": "Info",
"type": "StepRandom",
"startTime": "2018-07-11T11:20:01.005336",
"endTime": "2018-07-11T11:42:12.971177"
}
],
"zeros": false,
"error": false,
"severity": "Info",
"type": "EraseBasic",
"endTime": "2018-07-11T11:42:12.975358",
"startTime": "2018-07-11T11:20:01.004892"

@@ -111,7 +111,7 @@
},
{
"length": "Short",
"error": true,
"severity": "Error",
"type": "TestDataStorage",
"status": "Unspecified Error. Self-test not started.",
"elapsed": 0


@@ -74,13 +74,13 @@
"type": "EraseBasic",
"zeros": false,
"endTime": "2018-07-11T11:56:52.390306",
"error": false,
"severity": "Info",
"startTime": "2018-07-11T10:49:31.998217",
"steps": [
{
"type": "StepRandom",
"endTime": "2018-07-11T11:56:52.386505",
"error": false,
"severity": "Info",
"startTime": "2018-07-11T10:49:31.998609"
}
]

@@ -89,7 +89,7 @@
"length": "Short",
"type": "TestDataStorage",
"status": "Unspecified Error. Self-test not started.",
"error": true,
"severity": "Error",
"elapsed": 0
},
{

@@ -130,7 +130,7 @@
},
{
"type": "StressTest",
"error": false,
"severity": "Info",
"elapsed": 60
}
],


@@ -69,7 +69,7 @@
{
"elapsed": 1,
"type": "TestDataStorage",
"error": true,
"severity": "Error",
"status": "Unspecified Error. Self-test not started.",
"length": "Short"
},

@@ -83,13 +83,13 @@
"startTime": "2018-07-11T10:32:14.445306",
"zeros": false,
"type": "EraseBasic",
"error": false,
"severity": "Info",
"endTime": "2018-07-11T10:53:46.442123",
"steps": [
{
"startTime": "2018-07-11T10:32:14.445496",
"type": "StepRandom",
"error": false,
"severity": "Info",
"endTime": "2018-07-11T10:53:46.438901"
}
]

@@ -107,7 +107,7 @@
{
"elapsed": 0,
"type": "TestDataStorage",
"error": true,
"severity": "Error",
"status": "Unspecified Error. Self-test not started.",
"length": "Short"
},

@@ -115,13 +115,13 @@
"startTime": "2018-07-11T10:53:46.442187",
"zeros": false,
"type": "EraseBasic",
"error": false,
"severity": "Info",
"endTime": "2018-07-11T11:16:28.469899",
"steps": [
{
"startTime": "2018-07-11T10:53:46.442343",
"type": "StepRandom",
"error": false,
"severity": "Info",
"endTime": "2018-07-11T11:16:28.463789"
}
]

@@ -157,7 +157,7 @@
"chassis": "Tower",
"events": [
{
"error": false,
"severity": "Info",
"elapsed": 60,
"type": "StressTest"
},


@@ -10,7 +10,7 @@
{
"elapsed": 60,
"type": "StressTest",
"error": false
"severity": "Info"
},
{
"elapsed": 1,

@@ -92,7 +92,7 @@
"elapsed": 15
},
{
"error": true,
"severity": "Error",
"type": "TestDataStorage",
"elapsed": 0,
"length": "Short",

@@ -102,13 +102,13 @@
"startTime": "2018-07-11T13:28:07.319948",
"type": "EraseBasic",
"endTime": "2018-07-11T14:04:04.864425",
"error": false,
"severity": "Info",
"steps": [
{
"startTime": "2018-07-11T13:28:07.320244",
"type": "StepRandom",
"endTime": "2018-07-11T14:04:04.861590",
"error": false
"severity": "Info"
}
],
"zeros": false


@@ -6,7 +6,7 @@
"manufacturer": "NEC Computers SAS",
"events": [
{
"error": false,
"severity": "Info",
"elapsed": 60,
"type": "StressTest"
},

@@ -101,7 +101,7 @@
"size": 305245,
"events": [
{
"error": false,
"severity": "Info",
"endTime": "2018-07-11T11:33:41.531918",
"startTime": "2018-07-11T10:30:35.643855",
"zeros": false,

@@ -111,7 +111,7 @@
"type": "StepRandom",
"endTime": "2018-07-11T11:33:41.529224",
"startTime": "2018-07-11T10:30:35.644043",
"error": false
"severity": "Info"
}
]
},

@@ -125,7 +125,7 @@
"type": "TestDataStorage",
"length": "Short",
"elapsed": 1,
"error": true,
"severity": "Error",
"status": "Unspecified Error. Self-test not started."
}
],

@@ -0,0 +1,169 @@
{
"uuid": "de4f495e-c58b-40e1-a33e-46ab5e84767e",
"endTime": "2018-10-24T11:03:36.113006+00:00",
"components": [
{
"speed": 1000,
"manufacturer": "Realtek Semiconductor Co., Ltd.",
"wireless": false,
"model": "RTL8111/8168/8411 PCI Express Gigabit Ethernet Controller",
"serialNumber": "00:26:18:96:dc:af",
"type": "NetworkAdapter",
"events": []
},
{
"speed": 1333.0,
"manufacturer": null,
"interface": "DDR",
"model": null,
"serialNumber": null,
"type": "RamModule",
"format": "DIMM",
"size": 2048,
"events": []
},
{
"speed": 1333.0,
"manufacturer": null,
"interface": "DDR",
"model": null,
"serialNumber": null,
"type": "RamModule",
"format": "DIMM",
"size": 2048,
"events": []
},
{
"speed": 1333.0,
"manufacturer": null,
"interface": "DDR",
"model": null,
"serialNumber": null,
"type": "RamModule",
"format": "DIMM",
"size": 2048,
"events": []
},
{
"speed": 1333.0,
"manufacturer": null,
"interface": "DDR",
"model": null,
"serialNumber": null,
"type": "RamModule",
"format": "DIMM",
"size": 2048,
"events": []
},
{
"manufacturer": "Intel Corporation",
"model": "5 Series/3400 Series Chipset High Definition Audio",
"serialNumber": null,
"type": "SoundCard",
"events": []
},
{
"speed": 2.5330000000000004,
"manufacturer": "Intel Corp.",
"cores": 4,
"address": 64,
"model": "Intel Core i7 CPU 860 @ 2.80GHz",
"serialNumber": null,
"type": "Processor",
"threads": 8,
"events": [
{
"elapsed": 9,
"type": "BenchmarkProcessorSysbench",
"rate": 8.7418
},
{
"elapsed": 0,
"type": "BenchmarkProcessor",
"rate": 44937.520000000004
}
]
},
{
"manufacturer": null,
"interface": "ATA",
"model": "SAMSUNG HD103SJ",
"serialNumber": "S246J90Z406422",
"type": "HardDrive",
"size": 953869,
"events": [
{
"elapsed": 120,
"lifetime": 14298,
"currentPendingSectorCount": 0,
"type": "TestDataStorage",
"status": "Completed without error",
"powerCycleCount": 693,
"assessment": true,
"offlineUncorrectable": 0,
"severity": "Info",
"length": "Short",
"reallocatedSectorCount": 0
},
{
"readSpeed": 136.0,
"elapsed": 9,
"type": "BenchmarkDataStorage",
"writeSpeed": 35.0
}
]
},
{
"manufacturer": "NVIDIA Corporation",
"model": "G84 GeForce 8600 GT",
"serialNumber": null,
"type": "GraphicCard",
"memory": 256.0,
"events": []
},
{
"firewire": 1,
"manufacturer": "ASUSTeK Computer INC.",
"usb": 2,
"model": "P7P55D",
"serialNumber": "101005570001137",
"type": "Motherboard",
"pcmcia": 0,
"slots": 4,
"serial": 1,
"events": []
}
],
"elapsed": 203,
"device": {
"manufacturer": null,
"model": null,
"chassis": "Tower",
"type": "Desktop",
"serialNumber": null,
"events": [
{
"elapsed": 60,
"type": "StressTest",
"severity": "Info"
},
{
"elapsed": 1,
"type": "BenchmarkRamSysbench",
"rate": 0.8315
}
],
"tags": [
{"id": "A0000000000003", "type": "Tag"}
]
},
"version": "11.0a6",
"expectedEvents": [
"Benchmark",
"TestDataStorage",
"StressTest"
],
"type": "Snapshot",
"closed": true,
"software": "Workbench"
}
@@ -62,7 +62,7 @@
"assessment": true,
"currentPendingSectorCount": 0,
"elapsed": 134,
"error": false,
"severity": "Info",
"length": "Short",
"lifetime": 19549,
"offlineUncorrectable": 0,

@@ -106,7 +106,7 @@
"events": [
{
"elapsed": 60,
"error": false,
"severity": "Info",
"type": "StressTest"
},
{


@@ -90,7 +90,7 @@
"type": "TestDataStorage",
"length": "Short",
"elapsed": 2,
"error": true,
"severity": "Error",
"status": "Unspecified Error. Self-test not started."
},
{

@@ -99,12 +99,12 @@
{
"type": "StepRandom",
"startTime": "2018-07-03T09:15:22.257059",
"error": false,
"severity": "Info",
"endTime": "2018-07-03T10:32:11.843190"
}
],
"startTime": "2018-07-03T09:15:22.256074",
"error": false,
"severity": "Info",
"zeros": false,
"endTime": "2018-07-03T10:32:11.848455"
}

@@ -143,7 +143,7 @@
},
{
"type": "StressTest",
"error": false,
"severity": "Info",
"elapsed": 60
},
{


@@ -83,7 +83,7 @@
"elapsed": 0,
"type": "TestDataStorage",
"status": "Unspecified Error. Self-test not started.",
"error": true,
"severity": "Error",
"length": "Short"
}
]


@@ -88,7 +88,7 @@
},
{
"status": "Unspecified Error. Self-test not started.",
"error": true,
"severity": "Error",
"type": "TestDataStorage",
"elapsed": 1,
"length": Short

@@ -142,7 +142,7 @@
{
"type": "StressTest",
"elapsed": 60,
"error": false
"severity": "Info"
},
{
"rate": 0.9759,


@@ -96,7 +96,7 @@
"status": "Unspecified Error. Self-test not started.",
"type": "TestDataStorage",
"length": Short,
"error": true
"severity": "Error"
}
],
"type": "HardDrive",

@@ -122,7 +122,7 @@
"events": [
{
"type": "StressTest",
"error": false,
"severity": "Info",
"elapsed": 120
},
{


@@ -15,7 +15,7 @@
{
"type": "StressTest",
"elapsed": 300,
"error": false
"severity": "Info"
}
],
"serialNumber": "CZC0408YJG",

@@ -125,7 +125,7 @@
"offlineUncorrectable": 1,
"powerCycleCount": 1838,
"assessment": true,
"error": false,
"severity": "Info",
"type": "TestDataStorage",
"lifetime": 10546,
"reallocatedSectorCount": 0,
@@ -64,7 +64,7 @@ components:
    elapsed: 21
  - type: TestDataStorage
    elapsed: 233
    error: False
    severity: Info
    status: Completed without error
    length: Short
    lifetime: 99
@@ -0,0 +1,12 @@
from teal.query import NestedQueryFlaskParser
from webargs.flaskparser import FlaskParser


class SearchQueryParser(NestedQueryFlaskParser):

    def parse_querystring(self, req, name, field):
        # 'search' is a plain full-text string, so parse it as a regular
        # querystring value; every other param goes through teal's
        # NestedQueryFlaskParser (which handles the JSON-encoded params).
        if name == 'search':
            v = FlaskParser.parse_querystring(self, req, name, field)
        else:
            v = super().parse_querystring(req, name, field)
        return v
@@ -27,7 +27,7 @@ class JoinedTableMixin:

class Agent(Thing):
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
    type = Column(Unicode, nullable=False)
    type = Column(Unicode, nullable=False, index=True)
    name = Column(CIText())
    name.comment = """
        The name of the organization or person.
@@ -276,6 +276,22 @@ class VideoconferenceDef(VideoDef):
    SCHEMA = schemas.Videoconference


class CookingDef(DeviceDef):
    VIEW = None
    SCHEMA = schemas.Cooking

    def __init__(self, app, import_name=__name__, static_folder=None, static_url_path=None,
                 template_folder=None, url_prefix=None, subdomain=None, url_defaults=None,
                 root_path=None, cli_commands: Iterable[Tuple[Callable, str or None]] = tuple()):
        super().__init__(app, import_name, static_folder, static_url_path, template_folder,
                         url_prefix, subdomain, url_defaults, root_path, cli_commands)


class Mixer(CookingDef):
    VIEW = None
    SCHEMA = schemas.Mixer


class ManufacturerDef(Resource):
    VIEW = ManufacturerView
    SCHEMA = schemas.Manufacturer
@@ -8,6 +8,7 @@ from typing import Dict, List, Set
from boltons import urlutils
from citext import CIText
from ereuse_utils.naming import Naming
from more_itertools import unique_everseen
from sqlalchemy import BigInteger, Boolean, Column, Enum as DBEnum, Float, ForeignKey, Integer, \
    Sequence, SmallInteger, Unicode, inspect, text
from sqlalchemy.ext.declarative import declared_attr

@@ -15,15 +16,15 @@ from sqlalchemy.orm import ColumnProperty, backref, relationship, validates
from sqlalchemy.util import OrderedSet
from sqlalchemy_utils import ColorType
from stdnum import imei, meid
from teal.db import CASCADE, POLYMORPHIC_ID, POLYMORPHIC_ON, ResourceNotFound, URL, check_lower, \
    check_range
from teal.db import CASCADE_DEL, POLYMORPHIC_ID, POLYMORPHIC_ON, ResourceNotFound, URL, \
    check_lower, check_range
from teal.enums import Layouts
from teal.marshmallow import ValidationError
from teal.resource import url_for_resource

from ereuse_devicehub.db import db
from ereuse_devicehub.resources.enums import ComputerChassis, DataStorageInterface, \
    DataStoragePrivacyCompliance, DisplayTech, PrinterTechnology, RamFormat, RamInterface
from ereuse_devicehub.resources.enums import ComputerChassis, DataStorageInterface, DisplayTech, \
    PrinterTechnology, RamFormat, RamInterface, Severity
from ereuse_devicehub.resources.models import STR_SM_SIZE, Thing

@@ -31,12 +32,13 @@ class Device(Thing):
    """
    Base class for any type of physical object that can be identified.
    """
    EVENT_SORT_KEY = attrgetter('created')

    id = Column(BigInteger, Sequence('device_seq'), primary_key=True)
    id.comment = """
        The identifier of the device for this database.
    """
    type = Column(Unicode(STR_SM_SIZE), nullable=False)
    type = Column(Unicode(STR_SM_SIZE), nullable=False, index=True)
    hid = Column(Unicode(), check_lower('hid'), unique=True)
    hid.comment = """
        The Hardware ID (HID) is the unique ID traceability systems

@@ -77,6 +79,11 @@ class Device(Thing):
        'color'
    }

    def __init__(self, **kw) -> None:
        super().__init__(**kw)
        with suppress(TypeError):
            self.hid = Naming.hid(self.manufacturer, self.serial_number, self.model)

    @property
    def events(self) -> list:
        """

@@ -86,12 +93,25 @@ class Device(Thing):

        Events are returned by ascending creation time.
        """
        return sorted(chain(self.events_multiple, self.events_one), key=attrgetter('created'))
        return sorted(chain(self.events_multiple, self.events_one), key=self.EVENT_SORT_KEY)

    def __init__(self, **kw) -> None:
        super().__init__(**kw)
        with suppress(TypeError):
            self.hid = Naming.hid(self.manufacturer, self.serial_number, self.model)
    @property
    def problems(self):
        """Current events with severity.Warning or higher.

        There can be up to 3 events: current Snapshot,
        current Physical event, current Trading event.
        """
        from ereuse_devicehub.resources.device import states
        from ereuse_devicehub.resources.event.models import Snapshot
        events = set()
        with suppress(LookupError, ValueError):
            events.add(self.last_event_of(Snapshot))
        with suppress(LookupError, ValueError):
            events.add(self.last_event_of(*states.Physical.events()))
        with suppress(LookupError, ValueError):
            events.add(self.last_event_of(*states.Trading.events()))
        return self._warning_events(events)

    @property
    def physical_properties(self) -> Dict[str, object or None]:

@@ -158,12 +178,30 @@ class Device(Thing):
        that has it physically. As an example, a transporter could
        be a physical possessor of a device although it does not
        own it legally.

        Note that there can only be one physical possessor per device,
        and :class:`ereuse_devicehub.resources.event.models.Receive`
        changes it.
        """
        from ereuse_devicehub.resources.event.models import Receive
        with suppress(LookupError):
            event = self.last_event_of(Receive)
            return event.agent

    @property
    def working(self):
        """A list of the current tests with warning or errors. A
        device is working if the list is empty.

        This property returns, for the last test performed of each type,
        the one with the worst severity of them, or `None` if no
        test has been executed.
        """
        from ereuse_devicehub.resources.event.models import Test
        current_tests = unique_everseen((e for e in reversed(self.events) if isinstance(e, Test)),
                                        key=attrgetter('type'))  # last test of each type
        return self._warning_events(current_tests)

    @declared_attr
    def __mapper_args__(cls):
        """

@@ -188,6 +226,10 @@ class Device(Thing):
        except StopIteration:
            raise LookupError('{!r} does not contain events of types {}.'.format(self, types))

    def _warning_events(self, events):
        return sorted((ev for ev in events if ev.severity >= Severity.Warning),
                      key=self.EVENT_SORT_KEY)

    def __lt__(self, other):
        return self.id < other.id

@@ -255,7 +297,7 @@ class Computer(Device):

    @property
    def events(self) -> list:
        return sorted(chain(super().events, self.events_parent), key=attrgetter('created'))
        return sorted(chain(super().events, self.events_parent), key=self.EVENT_SORT_KEY)

    @property
    def ram_size(self) -> int:

@@ -294,6 +336,17 @@ class Computer(Device):
            speeds[net.wireless] = max(net.speed or 0, speeds[net.wireless] or 0)
        return speeds

    @property
    def privacy(self):
        """Returns the privacy of all DataStorage components when
        it is None.
        """
        return set(
            privacy for privacy in
            (hdd.privacy for hdd in self.components if isinstance(hdd, DataStorage))
            if privacy
        )

    def __format__(self, format_spec):
        if not format_spec:
            return super().__format__(format_spec)

@@ -375,11 +428,11 @@ class Cellphone(Mobile):
class Component(Device):
    id = Column(BigInteger, ForeignKey(Device.id), primary_key=True)

    parent_id = Column(BigInteger, ForeignKey(Computer.id))
    parent_id = Column(BigInteger, ForeignKey(Computer.id), index=True)
    parent = relationship(Computer,
                          backref=backref('components',
                                          lazy=True,
                                          cascade=CASCADE,
                                          cascade=CASCADE_DEL,
                                          order_by=lambda: Component.id,
                                          collection_class=OrderedSet),
                          primaryjoin=parent_id == Computer.id)

@@ -405,7 +458,7 @@ class Component(Device):

    @property
    def events(self) -> list:
        return sorted(chain(super().events, self.events_components), key=attrgetter('created'))
        return sorted(chain(super().events, self.events_components), key=self.EVENT_SORT_KEY)


class JoinedComponentTableMixin:

@@ -431,11 +484,12 @@ class DataStorage(JoinedComponentTableMixin, Component):
    @property
    def privacy(self):
        """Returns the privacy compliance state of the data storage."""
        # todo add physical destruction event
        from ereuse_devicehub.resources.event.models import EraseBasic
        with suppress(LookupError):
            erase = self.last_event_of(EraseBasic)
            return DataStoragePrivacyCompliance.from_erase(erase)
        try:
            ev = self.last_event_of(EraseBasic)
        except LookupError:
            ev = None
        return ev

    def __format__(self, format_spec):
        v = super().__format__(format_spec)

@@ -589,6 +643,14 @@ class Videoconference(Video):
    pass


class Cooking(Device):
    pass


class Mixer(Cooking):
    pass


class Manufacturer(db.Model):
    __table_args__ = {'schema': 'common'}
    CSV_DELIMITER = csv.get_dialect('excel').delimiter
@@ -1,5 +1,6 @@
from datetime import datetime
from typing import Dict, List, Set, Type, Union
from operator import attrgetter
from typing import Dict, Generator, Iterable, List, Optional, Set, Type

from boltons import urlutils
from boltons.urlutils import URL

@@ -11,8 +12,8 @@ from teal.enums import Layouts

from ereuse_devicehub.resources.agent.models import Agent
from ereuse_devicehub.resources.device import states
from ereuse_devicehub.resources.enums import ComputerChassis, DataStorageInterface, \
    DataStoragePrivacyCompliance, DisplayTech, PrinterTechnology, RamFormat, RamInterface
from ereuse_devicehub.resources.enums import ComputerChassis, DataStorageInterface, DisplayTech, \
    PrinterTechnology, RamFormat, RamInterface
from ereuse_devicehub.resources.event import models as e
from ereuse_devicehub.resources.image.models import ImageList
from ereuse_devicehub.resources.lot.models import Lot

@@ -21,6 +22,8 @@ from ereuse_devicehub.resources.tag import Tag


class Device(Thing):
    EVENT_SORT_KEY = attrgetter('created')

    id = ...  # type: Column
    type = ...  # type: Column
    hid = ...  # type: Column

@@ -48,7 +51,6 @@ class Device(Thing):
        self.height = ...  # type: float
        self.depth = ...  # type: float
        self.color = ...  # type: Color
        self.events = ...  # type: List[e.Event]
        self.physical_properties = ...  # type: Dict[str, object or None]
        self.events_multiple = ...  # type: Set[e.EventWithMultipleDevices]
        self.events_one = ...  # type: Set[e.EventWithOneDevice]

@@ -57,33 +59,48 @@ class Device(Thing):
        self.lots = ...  # type: Set[Lot]
        self.production_date = ...  # type: datetime

    @property
    def events(self) -> List[e.Event]:
        pass

    @property
    def problems(self) -> List[e.Event]:
        pass

    @property
    def url(self) -> urlutils.URL:
        pass

    @property
    def rate(self) -> Union[e.AggregateRate, None]:
    def rate(self) -> Optional[e.AggregateRate]:
        pass

    @property
    def price(self) -> Union[e.Price, None]:
    def price(self) -> Optional[e.Price]:
        pass

    @property
    def trading(self) -> Union[states.Trading, None]:
    def trading(self) -> Optional[states.Trading]:
        pass

    @property
    def physical(self) -> Union[states.Physical, None]:
    def physical(self) -> Optional[states.Physical]:
        pass

    @property
    def physical_possessor(self) -> Union[Agent, None]:
    def physical_possessor(self) -> Optional[Agent]:
        pass

    @property
    def working(self) -> List[e.Test]:
        pass

    def last_event_of(self, *types: Type[e.Event]) -> e.Event:
        pass

    def _warning_events(self, events: Iterable[e.Event]) -> Generator[e.Event]:
        pass


class DisplayMixin:
    technology = ...  # type: Column

@@ -139,6 +156,10 @@ class Computer(DisplayMixin, Device):
    def network_speeds(self) -> List[int]:
        pass

    @property
    def privacy(self) -> Set[e.EraseBasic]:
        pass


class Desktop(Computer):
    pass

@@ -219,7 +240,7 @@ class DataStorage(Component):
        self.interface = ...  # type: DataStorageInterface

    @property
    def privacy(self) -> DataStoragePrivacyCompliance:
    def privacy(self) -> Optional[e.EraseBasic]:
        pass


@@ -373,6 +394,14 @@ class Videoconference(Video):
    pass


class Cooking(Device):
    pass


class Mixer(Cooking):
    pass


class Manufacturer(Model):
    CUSTOM_MANUFACTURERS = ...  # type: set
    name = ...  # type: Column
@@ -8,9 +8,8 @@ from teal.marshmallow import EnumField, SanitizedStr, URL, ValidationError
from teal.resource import Schema

from ereuse_devicehub.marshmallow import NestedOn
from ereuse_devicehub.resources import enums
from ereuse_devicehub.resources.device import models as m, states
from ereuse_devicehub.resources.enums import ComputerChassis, DataStorageInterface, \
    DataStoragePrivacyCompliance, DisplayTech, PrinterTechnology, RamFormat, RamInterface
from ereuse_devicehub.resources.models import STR_BIG_SIZE, STR_SIZE
from ereuse_devicehub.resources.schemas import Thing, UnitCodes

@@ -31,6 +30,7 @@ class Device(Thing):
    depth = Float(validate=Range(0.1, 5), unit=UnitCodes.m, description=m.Device.depth.comment)
    events = NestedOn('Event', many=True, dump_only=True, description=m.Device.events.__doc__)
    events_one = NestedOn('Event', many=True, load_only=True, collection_class=OrderedSet)
    problems = NestedOn('Event', many=True, dump_only=True, description=m.Device.problems.__doc__)
    url = URL(dump_only=True, description=m.Device.url.__doc__)
    lots = NestedOn('Lot',
                    many=True,

@@ -44,6 +44,10 @@ class Device(Thing):
    production_date = DateTime('iso',
                               description=m.Device.updated.comment,
                               data_key='productionDate')
    working = NestedOn('Event',
                       many=True,
                       dump_only=True,
                       description=m.Device.working.__doc__)

    @pre_load
    def from_events_to_events_one(self, data: dict):

@@ -72,12 +76,13 @@ class Device(Thing):

class Computer(Device):
    components = NestedOn('Component', many=True, dump_only=True, collection_class=OrderedSet)
    chassis = EnumField(ComputerChassis, required=True)
    chassis = EnumField(enums.ComputerChassis, required=True)
    ram_size = Integer(dump_only=True, data_key='ramSize')
    data_storage_size = Integer(dump_only=True, data_key='dataStorageSize')
    processor_model = Str(dump_only=True, data_key='processorModel')
    graphic_card_model = Str(dump_only=True, data_key='graphicCardModel')
    network_speeds = List(Integer(dump_only=True), dump_only=True, data_key='networkSpeeds')
    privacy = NestedOn('Event', many=True, dump_only=True, collection_class=set)


class Desktop(Computer):

@@ -94,7 +99,7 @@ class Server(Computer):

class DisplayMixin:
    size = Float(description=m.DisplayMixin.size.comment, validate=Range(2, 150))
    technology = EnumField(DisplayTech,
    technology = EnumField(enums.DisplayTech,
                           description=m.DisplayMixin.technology.comment)
    resolution_width = Integer(data_key='resolutionWidth',
                               validate=Range(10, 20000),

@@ -168,8 +173,8 @@ class DataStorage(Component):
    size = Integer(validate=Range(0, 10 ** 8),
                   unit=UnitCodes.mbyte,
                   description=m.DataStorage.size.comment)
    interface = EnumField(DataStorageInterface)
    privacy = EnumField(DataStoragePrivacyCompliance, dump_only=True)
    interface = EnumField(enums.DataStorageInterface)
    privacy = NestedOn('Event', dump_only=True)


class HardDrive(DataStorage):

@@ -203,8 +208,8 @@ class Processor(Component):
class RamModule(Component):
    size = Integer(validate=Range(min=128, max=17000), unit=UnitCodes.mbyte)
    speed = Integer(validate=Range(min=100, max=10000), unit=UnitCodes.mhz)
    interface = EnumField(RamInterface)
    format = EnumField(RamFormat)
    interface = EnumField(enums.RamInterface)
    format = EnumField(enums.RamFormat)


class SoundCard(Component):

@@ -264,7 +269,7 @@ class WirelessAccessPoint(Networking):
class Printer(Device):
    wireless = Boolean(required=True, missing=False)
    scanning = Boolean(required=True, missing=False)
    technology = EnumField(PrinterTechnology, required=True)
    technology = EnumField(enums.PrinterTechnology, required=True)
|
||||
monochrome = Boolean(required=True, missing=True)
|
||||
|
||||
|
||||
|
@ -290,3 +295,11 @@ class VideoScaler(Video):
|
|||
|
||||
class Videoconference(Video):
|
||||
pass
|
||||
|
||||
|
||||
class Cooking(Device):
|
||||
pass
|
||||
|
||||
|
||||
class Mixer(Cooking):
|
||||
pass
|
||||
|
|
|
@ -73,9 +73,15 @@ class DeviceSearch(db.Model):
|
|||
it deletes unlogged tables as ours.
|
||||
"""
|
||||
if not DeviceSearch.query.first():
|
||||
for device in Device.query:
|
||||
if not isinstance(device, Component):
|
||||
cls.set_device_tokens(session, device)
|
||||
cls.regenerate_search_table(session)
|
||||
|
||||
@classmethod
|
||||
def regenerate_search_table(cls, session: db.Session):
|
||||
"""Deletes and re-computes all the search table."""
|
||||
DeviceSearch.query.delete()
|
||||
for device in Device.query:
|
||||
if not isinstance(device, Component):
|
||||
cls.set_device_tokens(session, device)
|
||||
|
||||
@classmethod
|
||||
def set_device_tokens(cls, session: db.Session, device: Device):
|
||||
|
@ -83,14 +89,29 @@ class DeviceSearch(db.Model):
|
|||
assert not isinstance(device, Component)
|
||||
|
||||
tokens = [
|
||||
(str(device.id), search.Weight.A),
|
||||
(inflection.humanize(device.type), search.Weight.B),
|
||||
(Device.model, search.Weight.B),
|
||||
(Device.manufacturer, search.Weight.C),
|
||||
(Device.serial_number, search.Weight.A)
|
||||
]
|
||||
|
||||
if device.manufacturer:
|
||||
# todo this has to be done using a dictionary
|
||||
manufacturer = device.manufacturer.lower()
|
||||
if 'asus' in manufacturer:
|
||||
tokens.append(('asus', search.Weight.B))
|
||||
if 'hewlett' in manufacturer or 'hp' in manufacturer or 'h.p' in manufacturer:
|
||||
tokens.append(('hp', search.Weight.B))
|
||||
tokens.append(('h.p', search.Weight.C))
|
||||
tokens.append(('hewlett', search.Weight.C))
|
||||
tokens.append(('packard', search.Weight.C))
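# A minimal, hypothetical sketch of the dictionary-based aliasing the todo above
# suggests; the names below (MANUFACTURER_ALIASES, alias_tokens) are assumptions
# and not part of this diff. Weights reuse ereuse_devicehub.resources.search.Weight.
MANUFACTURER_ALIASES = {
    'asus': (('asus', search.Weight.B),),
    'hewlett': (('hp', search.Weight.B), ('h.p', search.Weight.C),
                ('hewlett', search.Weight.C), ('packard', search.Weight.C)),
}

def alias_tokens(manufacturer: str):
    """Yields extra (token, weight) pairs for a manufacturer string."""
    manufacturer = manufacturer.lower()
    for key, extra in MANUFACTURER_ALIASES.items():
        if key in manufacturer:
            yield from extra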
|
||||
|
||||
if isinstance(device, Computer):
|
||||
# Aggregate the values of all the components of pc
|
||||
Comp = aliased(Component)
|
||||
tokens.extend((
|
||||
(db.func.string_agg(db.cast(Comp.id, db.TEXT), ' '), search.Weight.D),
|
||||
(db.func.string_agg(Comp.model, ' '), search.Weight.C),
|
||||
(db.func.string_agg(Comp.manufacturer, ' '), search.Weight.D),
|
||||
(db.func.string_agg(Comp.serial_number, ' '), search.Weight.B),
|
||||
|
|
|
@ -1,11 +1,7 @@
|
|||
import csv
|
||||
import datetime
|
||||
|
||||
from io import StringIO
|
||||
from collections import OrderedDict
|
||||
|
||||
import marshmallow
|
||||
from flask import current_app as app, render_template, request, make_response
|
||||
from flask import current_app as app, render_template, request
|
||||
from flask.json import jsonify
|
||||
from flask_sqlalchemy import Pagination
|
||||
from marshmallow import fields, fields as f, validate as v
|
||||
|
@ -16,13 +12,12 @@ from teal.resource import View
|
|||
|
||||
from ereuse_devicehub import auth
|
||||
from ereuse_devicehub.db import db
|
||||
from ereuse_devicehub.query import SearchQueryParser
|
||||
from ereuse_devicehub.resources import search
|
||||
# from ereuse_devicehub.resources.device.definitions import ComponentDef
|
||||
from ereuse_devicehub.resources.device.models import Component, Computer, Device, Manufacturer, \
|
||||
RamModule, Processor, DataStorage, GraphicCard, Motherboard, Display, NetworkAdapter, SoundCard
|
||||
from ereuse_devicehub.resources.device.models import Component, Computer, Device, Manufacturer
|
||||
from ereuse_devicehub.resources.device.search import DeviceSearch
|
||||
from ereuse_devicehub.resources.event.models import Rate, Event
|
||||
from ereuse_devicehub.resources.lot.models import Lot, LotDevice
|
||||
from ereuse_devicehub.resources.event.models import Rate
|
||||
from ereuse_devicehub.resources.lot.models import LotDeviceDescendants
|
||||
from ereuse_devicehub.resources.tag.model import Tag
|
||||
|
||||
|
||||
|
@ -48,24 +43,20 @@ class TagQ(query.Query):
|
|||
|
||||
|
||||
class LotQ(query.Query):
|
||||
id = query.Or(query.QueryField(Lot.descendantsq, fields.UUID()))
|
||||
id = query.Or(query.Equal(LotDeviceDescendants.ancestor_lot_id, fields.UUID()))
|
||||
|
||||
|
||||
class Filters(query.Query):
|
||||
_parent = aliased(Computer)
|
||||
_device_inside_lot = (Device.id == LotDevice.device_id) & (Lot.id == LotDevice.lot_id)
|
||||
_component_inside_lot_through_parent = (Device.id == Component.id) \
|
||||
& (Component.parent_id == _parent.id) \
|
||||
& (_parent.id == LotDevice.device_id) \
|
||||
& (Lot.id == LotDevice.lot_id)
|
||||
|
||||
type = query.Or(OfType(Device.type))
|
||||
model = query.ILike(Device.model)
|
||||
manufacturer = query.ILike(Device.manufacturer)
|
||||
serialNumber = query.ILike(Device.serial_number)
|
||||
rating = query.Join(Device.id == Rate.device_id, RateQ)
|
||||
tag = query.Join(Device.id == Tag.device_id, TagQ)
|
||||
lot = query.Join(_device_inside_lot | _component_inside_lot_through_parent, LotQ)
|
||||
# todo This part of the query is really slow
|
||||
# And forces usage of distinct, as it returns many rows
|
||||
# due to having multiple paths to the same
|
||||
lot = query.Join(Device.id == LotDeviceDescendants.device_id, LotQ)
|
||||
|
||||
|
||||
class Sorting(query.Sort):
|
||||
|
@ -74,6 +65,8 @@ class Sorting(query.Sort):
|
|||
|
||||
|
||||
class DeviceView(View):
|
||||
QUERY_PARSER = SearchQueryParser()
|
||||
|
||||
class FindArgs(marshmallow.Schema):
|
||||
search = f.Str()
|
||||
filter = f.Nested(Filters, missing=[])
|
||||
|
@ -94,7 +87,15 @@ class DeviceView(View):
|
|||
200:
|
||||
description: The device or devices.
|
||||
"""
|
||||
return super().get(id)
|
||||
# Majority of code is from teal
|
||||
if id:
|
||||
response = self.one(id)
|
||||
else:
|
||||
args = self.QUERY_PARSER.parse(self.find_args,
|
||||
request,
|
||||
locations=('querystring',))
|
||||
response = self.find(args)
|
||||
return response
|
||||
|
||||
def one(self, id: int):
|
||||
"""Gets one device."""
|
||||
|
@ -116,7 +117,7 @@ class DeviceView(View):
|
|||
def find(self, args: dict):
|
||||
"""Gets many devices."""
|
||||
search_p = args.get('search', None)
|
||||
query = Device.query
|
||||
query = Device.query.distinct() # todo we should not force to do this if the query is ok
|
||||
if search_p:
|
||||
properties = DeviceSearch.properties
|
||||
tags = DeviceSearch.tags
|
||||
|
|
|
@ -244,7 +244,7 @@ class ComputerChassis(Enum):
|
|||
Tablet = 'Tablet'
|
||||
Virtual = 'Non-physical device'
|
||||
|
||||
def __format__(self, format_spec):
|
||||
def __str__(self):
|
||||
return inflection.humanize(inflection.underscore(self.value))
|
||||
|
||||
|
||||
|
@ -260,24 +260,6 @@ class ReceiverRole(Enum):
|
|||
Transporter = 'An user that ships the devices to another one.'
|
||||
|
||||
|
||||
class DataStoragePrivacyCompliance(Enum):
|
||||
EraseBasic = 'EraseBasic'
|
||||
EraseBasicError = 'EraseBasicError'
|
||||
EraseSectors = 'EraseSectors'
|
||||
EraseSectorsError = 'EraseSectorsError'
|
||||
Destruction = 'Destruction'
|
||||
DestructionError = 'DestructionError'
|
||||
|
||||
@classmethod
|
||||
def from_erase(cls, erasure) -> 'DataStoragePrivacyCompliance':
|
||||
"""Returns the correct enum depending of the passed-in erasure."""
|
||||
from ereuse_devicehub.resources.event.models import EraseSectors
|
||||
if isinstance(erasure, EraseSectors):
|
||||
return cls.EraseSectors if not erasure.error else cls.EraseSectorsError
|
||||
else:
|
||||
return cls.EraseBasic if not erasure.error else cls.EraseBasicError
|
||||
|
||||
|
||||
class PrinterTechnology(Enum):
|
||||
"""Technology of the printer."""
|
||||
Toner = 'Toner / Laser'
|
||||
|
@ -285,3 +267,38 @@ class PrinterTechnology(Enum):
|
|||
SolidInk = 'Solid ink'
|
||||
Dye = 'Dye-sublimation'
|
||||
Thermal = 'Thermal'
|
||||
|
||||
|
||||
class Severity(IntEnum):
|
||||
"""A flag evaluating the event execution. Ex. failed events
|
||||
have the value `Severity.Error`.
|
||||
|
||||
Devicehub uses 4 severity levels:
|
||||
|
||||
- Info: default neutral severity. The event succeeded.
|
||||
- Notice: The event succeeded but is raising awareness.
Notices are not usually that important, but mark something
(good or bad) worth checking.
- Warning: The event succeeded but there is something important
to check that negatively affects the event.
- Error: the event failed.

Devicehub especially raises user awareness when an event
has a Severity of ``Warning`` or greater.
"""
|
||||
|
||||
Info = 0
|
||||
Notice = 1
|
||||
Warning = 2
|
||||
Error = 3
|
||||
|
||||
def __str__(self):
|
||||
if self == self.Info:
|
||||
m = '✓'
|
||||
elif self == self.Notice:
|
||||
m = 'ℹ️'
|
||||
elif self == self.Warning:
|
||||
m = '⚠'
|
||||
else:
|
||||
m = '❌'
|
||||
return m
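# A small usage sketch, not part of this diff: Severity is an IntEnum, so a
# "Warning or greater" check is a plain comparison and str() yields the symbol above.
event_severity = Severity.Warning
if event_severity >= Severity.Warning:
    print('{} this event needs attention'.format(str(event_severity)))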
|
||||
|
|
|
@ -6,18 +6,19 @@ from typing import Set, Union
|
|||
from uuid import uuid4
|
||||
|
||||
import inflection
|
||||
import teal.db
|
||||
from boltons import urlutils
|
||||
from citext import CIText
|
||||
from flask import current_app as app, g
|
||||
from sqlalchemy import BigInteger, Boolean, CheckConstraint, Column, DateTime, Enum as DBEnum, \
|
||||
Float, ForeignKey, Interval, JSON, Numeric, SmallInteger, Unicode, event, orm
|
||||
Float, ForeignKey, Integer, Interval, JSON, Numeric, SmallInteger, Unicode, event, orm
|
||||
from sqlalchemy.dialects.postgresql import UUID
|
||||
from sqlalchemy.ext.declarative import declared_attr
|
||||
from sqlalchemy.ext.orderinglist import ordering_list
|
||||
from sqlalchemy.orm import backref, relationship, validates
|
||||
from sqlalchemy.orm.events import AttributeEvents as Events
|
||||
from sqlalchemy.util import OrderedSet
|
||||
from teal.db import ArrayOfEnum, CASCADE, CASCADE_OWN, INHERIT_COND, IP, POLYMORPHIC_ID, \
|
||||
from teal.db import ArrayOfEnum, CASCADE_OWN, INHERIT_COND, IP, POLYMORPHIC_ID, \
|
||||
POLYMORPHIC_ON, StrictVersionType, URL, check_lower, check_range
|
||||
from teal.enums import Country, Currency, Subdivision
|
||||
from teal.marshmallow import ValidationError
|
||||
|
@ -27,9 +28,9 @@ from ereuse_devicehub.db import db
|
|||
from ereuse_devicehub.resources.agent.models import Agent
|
||||
from ereuse_devicehub.resources.device.models import Component, Computer, DataStorage, Desktop, \
|
||||
Device, Laptop, Server
|
||||
from ereuse_devicehub.resources.enums import AppearanceRange, Bios, \
|
||||
FunctionalityRange, PriceSoftware, RATE_NEGATIVE, RATE_POSITIVE, RatingRange, RatingSoftware, \
|
||||
ReceiverRole, SnapshotExpectedEvents, SnapshotSoftware, TestDataStorageLength
|
||||
from ereuse_devicehub.resources.enums import AppearanceRange, Bios, FunctionalityRange, \
|
||||
PriceSoftware, RATE_NEGATIVE, RATE_POSITIVE, RatingRange, RatingSoftware, ReceiverRole, \
|
||||
Severity, SnapshotExpectedEvents, SnapshotSoftware, TestDataStorageLength
|
||||
from ereuse_devicehub.resources.models import STR_SM_SIZE, Thing
|
||||
from ereuse_devicehub.resources.user.models import User
|
||||
|
||||
|
@ -43,27 +44,19 @@ class JoinedTableMixin:
|
|||
|
||||
class Event(Thing):
|
||||
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
|
||||
type = Column(Unicode, nullable=False)
|
||||
type = Column(Unicode, nullable=False, index=True)
|
||||
name = Column(CIText(), default='', nullable=False)
|
||||
name.comment = """
|
||||
A name or title for the event. Used when searching for events.
|
||||
"""
|
||||
incidence = Column(Boolean, default=False, nullable=False)
|
||||
incidence.comment = """
|
||||
Should this event be reviewed due to some anomaly?
|
||||
"""
|
||||
severity = Column(teal.db.IntEnum(Severity), default=Severity.Info, nullable=False)
|
||||
severity.comment = Severity.__doc__
|
||||
closed = Column(Boolean, default=True, nullable=False)
|
||||
closed.comment = """
|
||||
Whether the author has finished the event.
|
||||
After this is set to True, no modifications are allowed.
|
||||
By default events are closed when performed.
|
||||
"""
|
||||
error = Column(Boolean, default=False, nullable=False)
|
||||
error.comment = """
|
||||
Did the event fail?
|
||||
For example, a failure in ``Erase`` means that the data storage
|
||||
unit did not erase correctly.
|
||||
"""
|
||||
description = Column(Unicode, default='', nullable=False)
|
||||
description.comment = """
|
||||
A comment about the event.
|
||||
|
@ -148,7 +141,7 @@ class Event(Thing):
|
|||
For Add and Remove though, this has another meaning: the components
|
||||
that are added or removed.
|
||||
"""
|
||||
parent_id = Column(BigInteger, ForeignKey(Computer.id))
|
||||
parent_id = Column(BigInteger, ForeignKey(Computer.id), index=True)
|
||||
parent = relationship(Computer,
|
||||
backref=backref('events_parent',
|
||||
lazy=True,
|
||||
|
@ -181,6 +174,7 @@ class Event(Thing):
|
|||
args = {POLYMORPHIC_ID: cls.t}
|
||||
if cls.t == 'Event':
|
||||
args[POLYMORPHIC_ON] = cls.type
|
||||
# noinspection PyUnresolvedReferences
|
||||
if JoinedTableMixin in cls.mro():
|
||||
args[INHERIT_COND] = cls.id == Event.id
|
||||
return args
|
||||
|
@ -197,16 +191,15 @@ class Event(Thing):
|
|||
raise ValidationError('The event cannot start after it finished.')
|
||||
return start_time
|
||||
|
||||
@property
|
||||
def _err_str(self):
|
||||
return '❌ Error.' if self.error else '✓'
|
||||
|
||||
@property
|
||||
def _date_str(self):
|
||||
return '{:%c}'.format(self.end_time or self.created)
|
||||
|
||||
def __str__(self) -> str:
|
||||
return '{}'.format(self._err_str)
|
||||
return '{}'.format(self.severity)
|
||||
|
||||
def __repr__(self):
|
||||
return '<{0.t} {0.id} {0.severity}>'.format(self)
|
||||
|
||||
|
||||
class EventComponent(db.Model):
|
||||
|
@ -222,17 +215,17 @@ class JoinedWithOneDeviceMixin:
|
|||
|
||||
|
||||
class EventWithOneDevice(JoinedTableMixin, Event):
|
||||
device_id = Column(BigInteger, ForeignKey(Device.id), nullable=False)
|
||||
device_id = Column(BigInteger, ForeignKey(Device.id), nullable=False, index=True)
|
||||
device = relationship(Device,
|
||||
backref=backref('events_one',
|
||||
lazy=True,
|
||||
cascade=CASCADE,
|
||||
cascade=CASCADE_OWN,
|
||||
order_by=lambda: EventWithOneDevice.created,
|
||||
collection_class=OrderedSet),
|
||||
primaryjoin=Device.id == device_id)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return '<{0.t} {0.id!r} device={0.device!r}>'.format(self)
|
||||
return '<{0.t} {0.id} {0.severity} device={0.device!r}>'.format(self)
|
||||
|
||||
@declared_attr
|
||||
def __mapper_args__(cls):
|
||||
|
@ -260,7 +253,7 @@ class EventWithMultipleDevices(Event):
|
|||
collection_class=OrderedSet)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return '<{0.t} {0.id!r} devices={0.devices!r}>'.format(self)
|
||||
return '<{0.t} {0.id} {0.severity} devices={0.devices!r}>'.format(self)
|
||||
|
||||
|
||||
class EventDevice(db.Model):
|
||||
|
@ -270,11 +263,19 @@ class EventDevice(db.Model):
|
|||
|
||||
|
||||
class Add(EventWithOneDevice):
|
||||
pass
|
||||
"""The act of adding components to a device.
|
||||
|
||||
It is usually used internally from a :class:`.Snapshot`, for
|
||||
example, when adding a secondary data storage to a computer.
|
||||
"""
|
||||
|
||||
|
||||
class Remove(EventWithOneDevice):
|
||||
pass
|
||||
"""The act of removing components from a device.
|
||||
|
||||
It is usually used internally from a :class:`.Snapshot`, for
|
||||
example, when removing a component from a broken computer.
|
||||
"""
|
||||
|
||||
|
||||
class Allocate(JoinedTableMixin, EventWithMultipleDevices):
|
||||
|
@ -290,6 +291,30 @@ class Deallocate(JoinedTableMixin, EventWithMultipleDevices):
|
|||
|
||||
|
||||
class EraseBasic(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
||||
"""An erasure attempt to a ``DataStorage``. The event contains
|
||||
information about success and nature of the erasure.
|
||||
|
||||
EraseBasic is a fast, software-based, but not 100%-secure way of
|
||||
erasing data storage, performed
|
||||
by Workbench Computer when executing the open-source
|
||||
`shred <https://en.wikipedia.org/wiki/Shred_(Unix)>`_.
|
||||
|
||||
Users can generate erasure certificates from successful erasures.
|
||||
|
||||
Erasures are an accumulation of **erasure steps**, that are performed
|
||||
as separate actions, called ``StepRandom``, for an erasure step
|
||||
that has overwritten data with random bits, and ``StepZero``,
|
||||
for an erasure step that has overwritten data with zeros.
|
||||
|
||||
For example, if steps are set in the following order and the user
|
||||
used `EraseSectors`, the event represents a
|
||||
`British HMG Infosec Standard 5 (HMG IS5) <https://en.wikipedia.org/
|
||||
wiki/Infosec_Standard_5>`_:
|
||||
|
||||
1. A first step writing zeroes to the hard-drives.
|
||||
2. A second step erasing with random data, verifying the erasure
|
||||
success in each hard-drive sector.
|
||||
"""
|
||||
zeros = Column(Boolean, nullable=False)
|
||||
zeros.comment = """
|
||||
Whether this erasure had a first erasure step consisting of
|
||||
|
@ -299,10 +324,19 @@ class EraseBasic(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
|||
# todo return erasure properties like num steps, if it is british...
|
||||
|
||||
def __str__(self) -> str:
|
||||
return '{} on {}.'.format(self._err_str, self.end_time)
|
||||
return '{} on {}.'.format(self.severity, self.end_time)
|
||||
|
||||
|
||||
class EraseSectors(EraseBasic):
|
||||
"""A secured-way of erasing data storages, checking sector-by-sector
|
||||
the erasure, using `badblocks <https://en.wikipedia.org/wiki/Badblocks>`_.
|
||||
"""
|
||||
# todo make a property that says if the data wiping process is british...
|
||||
|
||||
|
||||
class ErasePhysical(EraseBasic):
|
||||
"""The act of physically destroying a data storage unit."""
|
||||
# todo add attributes
|
||||
pass
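# A hypothetical, standalone sketch (not part of this diff) of the HMG IS5 pattern
# described in EraseBasic's docstring: a zero-fill step followed by a verified random
# overwrite. `disk` is an assumed DataStorage instance and the times are illustrative.
from datetime import datetime, timedelta

t0 = datetime.now()
erasure = EraseSectors(zeros=True, device=disk)
StepZero(erasure=erasure, num=0, start_time=t0, end_time=t0 + timedelta(minutes=30))
StepRandom(erasure=erasure, num=1, start_time=t0 + timedelta(minutes=30),
           end_time=t0 + timedelta(hours=2))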
|
||||
|
||||
|
||||
|
@ -310,7 +344,7 @@ class Step(db.Model):
|
|||
erasure_id = Column(UUID(as_uuid=True), ForeignKey(EraseBasic.id), primary_key=True)
|
||||
type = Column(Unicode(STR_SM_SIZE), nullable=False)
|
||||
num = Column(SmallInteger, primary_key=True)
|
||||
error = Column(Boolean, default=False, nullable=False)
|
||||
severity = Column(teal.db.IntEnum(Severity), default=Severity.Info, nullable=False)
|
||||
start_time = Column(DateTime, nullable=False)
|
||||
start_time.comment = Event.start_time.comment
|
||||
end_time = Column(DateTime, CheckConstraint('end_time > start_time'), nullable=False)
|
||||
|
@ -347,6 +381,92 @@ class StepRandom(Step):
|
|||
|
||||
|
||||
class Snapshot(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
||||
"""The Snapshot sets the physical information of the device (S/N, model...)
|
||||
and updates it with erasures, benchmarks, ratings, and tests; updates the
|
||||
composition of its components (adding / removing them), and links tags
|
||||
to the device.
|
||||
|
||||
When receiving a Snapshot, the DeviceHub creates, adds and removes
|
||||
components to match the Snapshot. For example, if a Snapshot of a computer
|
||||
contains a new component, the system searches for the component in its
|
||||
database and, if not found, it creates it; finally linking it to the
|
||||
computer.
|
||||
|
||||
A Snapshot is used with Remove to represent changes in components for
|
||||
a device:
|
||||
|
||||
1. ``Snapshot`` creates a device if it does not exist, and the same
|
||||
for its components. This is all done in one ``Snapshot``.
|
||||
2. If the device exists, it updates its component composition by
|
||||
*adding* and *removing* them. If,
|
||||
for example, this new Snapshot doesn't have a component, it means that
|
||||
this component is not present anymore in the device, thus removing it
|
||||
from it. Then we have that:
|
||||
|
||||
- Components that are added to the device: snapshot2.components -
|
||||
snapshot1.components
|
||||
- Components that are removed from the device: snapshot1.components -
|
||||
snapshot2.components
|
||||
|
||||
When adding a component, it may be the case that this component existed
|
||||
before and it was inside another device. In such case, DeviceHub will
|
||||
perform ``Remove`` on the old parent.
|
||||
|
||||
**Snapshots from Workbench**
|
||||
|
||||
When the Workbench processes a device, it performs a Snapshot
and then performs more events (like tests and benchmarks...).
|
||||
|
||||
There are two ways of sending this information. In an async way,
|
||||
that is, submitting events as soon as Workbench performs them, or
|
||||
submitting only one Snapshot event with all the other events embedded.
|
||||
|
||||
**Asynced**
|
||||
|
||||
The use case, which is represented in the ``test_workbench_phases``,
|
||||
is as follows:
|
||||
|
||||
1. In **T1**, WorkbenchServer (as the middleware from Workbench and
|
||||
Devicehub) submits:
|
||||
|
||||
- A ``Snapshot`` event with the required information to **synchronize**
|
||||
and **rate** the device. This is:
|
||||
|
||||
- Identification information about the device and components
|
||||
(S/N, model, physical characteristics...)
|
||||
- ``Tags`` in a ``tags`` property in the ``device``.
|
||||
- ``Rate`` in an ``events`` property in the ``device``.
|
||||
- ``Benchmarks`` in an ``events`` property in each ``component``
|
||||
or ``device``.
|
||||
- ``TestDataStorage`` as in ``Benchmarks``.
|
||||
- An ordered set of **expected events**, defining which are the next
|
||||
events that Workbench will perform to the device in ideal
|
||||
conditions (device doesn't fail, no Internet drop...).
|
||||
|
||||
Devicehub **syncs** the device with the database and performs the
|
||||
``Benchmark``, the ``TestDataStorage``, and finally the ``Rate``.
|
||||
This leaves the Snapshot **open** to wait for the next events
|
||||
to come.
|
||||
2. Assuming that we expect all events, in **T2**, WorkbenchServer
|
||||
submits a ``StressTest`` with a ``snapshot`` field containing the
|
||||
ID of the Snapshot in 1, and Devicehub links the event with such
|
||||
``Snapshot``.
|
||||
3. In **T3**, WorkbenchServer submits the ``Erase`` with the ``Snapshot``
|
||||
and ``component`` IDs from 1, linking it to them. It repeats
|
||||
this for all the erased data storage devices; **T3+Tn**, where
*n* is the number of erased data storage devices.
|
||||
4. WorkbenchServer does the same as in 3. but for the event ``Install``,
finishing in **T3+Tn+Tx**, where *x* is the number of data storage
devices with an OS installed.
|
||||
5. In **T3+Tn+Tx**, when all *expected events* have been performed,
|
||||
Devicehub **closes** the ``Snapshot`` from 1.
|
||||
|
||||
**Synced**
|
||||
|
||||
Optionally, Devicehub understands receiving a ``Snapshot`` with all
|
||||
the events in an ``events`` property inside each affected ``component``
|
||||
or ``device``.
|
||||
"""
|
||||
uuid = Column(UUID(as_uuid=True), unique=True)
|
||||
version = Column(StrictVersionType(STR_SM_SIZE), nullable=False)
|
||||
software = Column(DBEnum(SnapshotSoftware), nullable=False)
|
||||
|
@ -358,10 +478,13 @@ class Snapshot(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
|||
expected_events = Column(ArrayOfEnum(DBEnum(SnapshotExpectedEvents)))
|
||||
|
||||
def __str__(self) -> str:
|
||||
return '{}. {} version {}.'.format(self._err_str, self.software, self.version)
|
||||
return '{}. {} version {}.'.format(self.severity, self.software, self.version)
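# A standalone sketch (not part of this diff) of the component diff described in the
# Snapshot docstring, using plain sets; `old` and `new` stand for the components of
# two consecutive Snapshots of the same device.
old = {'cpu-1', 'ram-1', 'hdd-1'}
new = {'cpu-1', 'ram-1', 'ssd-1'}
added = new - old      # {'ssd-1'}  -> Devicehub performs Add
removed = old - new    # {'hdd-1'}  -> Devicehub performs Remove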
|
||||
|
||||
|
||||
class Install(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
||||
"""The action of installing an Operative System to a data
|
||||
storage unit.
|
||||
"""
|
||||
elapsed = Column(Interval, nullable=False)
|
||||
|
||||
|
||||
|
@ -376,6 +499,48 @@ class SnapshotRequest(db.Model):
|
|||
|
||||
|
||||
class Rate(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
||||
"""Devicehub generates an rating for a device taking into consideration the
|
||||
visual, functional, and performance.
|
||||
|
||||
A Workflow is as follows:
|
||||
|
||||
1. An agent generates feedback from the device in the form of benchmark,
|
||||
visual, and functional information; which is filled in a ``Rate``
|
||||
event. This is done through a **software**, defining the type
|
||||
of ``Rate`` event. At the moment we have ``WorkbenchRate``.
|
||||
2. Devicehub gathers this information and computes a score that updates
|
||||
the ``Rate`` event.
|
||||
3. Devicehub aggregates different rates and computes a final score for
|
||||
the device by performing a new ``AggregateRating`` event.
|
||||
|
||||
There are two base **types** of ``Rate``: ``WorkbenchRate``,
|
||||
``ManualRate``. ``WorkbenchRate`` can have different
|
||||
**software** algorithms, and each software algorithm can have several
|
||||
**versions**. So, we have 3 dimensions for ``WorkbenchRate``:
|
||||
type, software, version.
|
||||
|
||||
Devicehub generates a rate event for each software and version. So,
|
||||
if an agent fulfills a ``WorkbenchRate`` and there are 2 software
|
||||
algorithms and each has two versions, Devicehub will generate 4 rates.
|
||||
Devicehub understands that only one software and version are the
|
||||
**official** (set in the settings of each inventory),
|
||||
and it will generate an ``AggregateRating`` for only the official
|
||||
versions. At the same time, ``Price`` only computes the price of
|
||||
the **official** version.
|
||||
|
||||
The technical Workflow in Devicehub is as follows:
|
||||
|
||||
1. In **T1**, the user performs a ``Snapshot`` by processing the device
|
||||
through the Workbench. From the benchmarks and the visual and
|
||||
functional ratings the user does in the device, the system generates
|
||||
many ``WorkbenchRate`` (as many as software and versions defined).
|
||||
With only this information, the system generates an ``AggregateRating``,
|
||||
which is the event that the user will see in the web.
|
||||
2. In **T2**, the agent can optionally visually re-rate the device
|
||||
using the mobile app, generating an ``AppRate``. This new
|
||||
action generates a new ``AggregateRating`` with the ``AppRate``
|
||||
plus the ``WorkbenchRate`` from 1.
|
||||
"""
|
||||
rating = Column(Float(decimal_return_scale=2), check_range('rating', *RATE_POSITIVE))
|
||||
rating.comment = """The rating for the content."""
|
||||
software = Column(DBEnum(RatingSoftware))
|
||||
|
@ -573,6 +738,16 @@ class AggregateRate(Rate):
|
|||
|
||||
|
||||
class Price(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
||||
"""Price states a selling price for the device, but not
|
||||
necessarily the final price it is sold for (which is set in the Sell
|
||||
event).
|
||||
|
||||
Devicehub automatically computes a price from ``AggregateRating``
|
||||
events. As in a **Rate**, price can have **software** and **version**,
|
||||
and there is an **official** price that is used to automatically
|
||||
compute the price from an ``AggregateRating``. Only the official price
|
||||
is computed from an ``AggregateRating``.
|
||||
"""
|
||||
SCALE = 4
|
||||
ROUND = ROUND_HALF_EVEN
|
||||
currency = Column(DBEnum(Currency), nullable=False)
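# A minimal sketch, not from this diff, of how a computed price could be normalized
# using the SCALE / ROUND constants above; Decimal quantization here is an assumption.
from decimal import Decimal, ROUND_HALF_EVEN

raw = Decimal('123.456789')
price = raw.quantize(Decimal(10) ** -4, rounding=ROUND_HALF_EVEN)  # Decimal('123.4568')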
|
||||
|
@ -714,6 +889,12 @@ class EreusePrice(Price):
|
|||
|
||||
|
||||
class Test(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
||||
"""The act of testing the physical condition of a device and its
|
||||
components.
|
||||
|
||||
Testing errors and warnings are easily taken in
|
||||
:attr:`ereuse_devicehub.resources.device.models.Device.working`.
|
||||
"""
|
||||
elapsed = Column(Interval, nullable=False)
|
||||
|
||||
@declared_attr
|
||||
|
@ -732,6 +913,17 @@ class Test(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
|||
|
||||
|
||||
class TestDataStorage(Test):
|
||||
"""
|
||||
The act of testing the data storage.
|
||||
|
||||
Testing is done using the `S.M.A.R.T self test
|
||||
<https://en.wikipedia.org/wiki/S.M.A.R.T.#Self-tests>`_. Note
|
||||
that some data storage units, especially some new PCIe ones, do not
support SMART testing.

The test also takes other SMART values as indicators of the overall health
of the data storage.
"""
|
||||
id = Column(UUID(as_uuid=True), ForeignKey(Test.id), primary_key=True)
|
||||
length = Column(DBEnum(TestDataStorageLength), nullable=False) # todo from type
|
||||
status = Column(Unicode(), check_lower('status'), nullable=False)
|
||||
|
@ -740,27 +932,25 @@ class TestDataStorage(Test):
|
|||
reallocated_sector_count = Column(SmallInteger)
|
||||
power_cycle_count = Column(SmallInteger)
|
||||
reported_uncorrectable_errors = Column(SmallInteger)
|
||||
command_timeout = Column(SmallInteger)
|
||||
command_timeout = Column(Integer)
|
||||
current_pending_sector_count = Column(SmallInteger)
|
||||
offline_uncorrectable = Column(SmallInteger)
|
||||
remaining_lifetime_percentage = Column(SmallInteger)
|
||||
|
||||
def __init__(self, **kwargs) -> None:
|
||||
super().__init__(**kwargs)
|
||||
|
||||
# Define severity
|
||||
# As of https://www.backblaze.com/blog/hard-drive-smart-stats/ and
|
||||
# https://www.backblaze.com/blog-smart-stats-2014-8.html
|
||||
# We can guess some future disk failures by analyzing some
|
||||
# SMART data
|
||||
if (self.reallocated_sector_count or 0) > 10:
|
||||
self.incidence = True
|
||||
self.description = 'Warning: Chance of disk failure within a year.'
|
||||
if (self.current_pending_sector_count or 0) > 40 \
|
||||
and (self.reported_uncorrectable_errors or 0) > 10:
|
||||
self.incidence = True
|
||||
self.description = 'Warning: Chance of disk failure within a year.'
|
||||
if not self.assessment:
|
||||
self.incidence = True
|
||||
self.description = 'Warning: Drive failure expected soon.'
|
||||
# We can guess some future disk failures by analyzing some SMART data.
|
||||
if self.severity is None:
|
||||
# Test finished successfully
|
||||
if not self.assessment:
|
||||
self.severity = Severity.Error
|
||||
elif self.current_pending_sector_count and self.current_pending_sector_count > 40 \
|
||||
or self.reallocated_sector_count and self.reallocated_sector_count > 10:
|
||||
self.severity = Severity.Warning
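# A standalone restatement (assumed helper name, not part of this diff) of the SMART
# heuristic above, following the thresholds from the Backblaze articles it references.
def guess_smart_severity(assessment: bool, pending: int, reallocated: int) -> Severity:
    if not assessment:
        return Severity.Error    # the SMART self-test itself failed
    if (pending or 0) > 40 or (reallocated or 0) > 10:
        return Severity.Warning  # likely disk failure within a year
    return Severity.Info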
|
||||
|
||||
def __str__(self) -> str:
|
||||
t = inflection.humanize(self.status)
|
||||
|
@ -771,6 +961,10 @@ class TestDataStorage(Test):
|
|||
|
||||
|
||||
class StressTest(Test):
|
||||
"""The act of stressing (putting to the maximum capacity)
|
||||
a device for an amount of minutes. If the device is not in great
condition, it probably won't survive such a test.
|
||||
"""
|
||||
|
||||
@validates('elapsed')
|
||||
def is_minute_and_bigger_than_1_minute(self, _, value: timedelta):
|
||||
|
@ -780,10 +974,11 @@ class StressTest(Test):
|
|||
return value
|
||||
|
||||
def __str__(self) -> str:
|
||||
return '{}. Computing for {}'.format(self._err_str, self.elapsed)
|
||||
return '{}. Computing for {}'.format(self.severity, self.elapsed)
|
||||
|
||||
|
||||
class Benchmark(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
||||
"""The act of gauging the performance of a device."""
|
||||
elapsed = Column(Interval)
|
||||
|
||||
@declared_attr
|
||||
|
@ -802,6 +997,7 @@ class Benchmark(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
|||
|
||||
|
||||
class BenchmarkDataStorage(Benchmark):
|
||||
"""Benchmarks the data storage unit reading and writing speeds."""
|
||||
id = Column(UUID(as_uuid=True), ForeignKey(Benchmark.id), primary_key=True)
|
||||
read_speed = Column(Float(decimal_return_scale=2), nullable=False)
|
||||
write_speed = Column(Float(decimal_return_scale=2), nullable=False)
|
||||
|
@ -811,19 +1007,27 @@ class BenchmarkDataStorage(Benchmark):
|
|||
|
||||
|
||||
class BenchmarkWithRate(Benchmark):
|
||||
"""The act of benchmarking a device with a single rate."""
|
||||
id = Column(UUID(as_uuid=True), ForeignKey(Benchmark.id), primary_key=True)
|
||||
rate = Column(SmallInteger, nullable=False)
|
||||
rate = Column(Float, nullable=False)
|
||||
|
||||
def __str__(self) -> str:
|
||||
return '{} points'.format(self.rate)
|
||||
|
||||
|
||||
class BenchmarkProcessor(BenchmarkWithRate):
|
||||
"""Benchmarks a processor by executing `BogoMips
|
||||
<https://en.wikipedia.org/wiki/BogoMips>`_. Note that this is not
|
||||
a reliable way of rating processors and we keep it for compatibility
|
||||
purposes.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class BenchmarkProcessorSysbench(BenchmarkProcessor):
|
||||
pass
|
||||
"""Benchmarks a processor by using the processor benchmarking
|
||||
utility of `sysbench <https://github.com/akopytov/sysbench>`_.
|
||||
"""
|
||||
|
||||
|
||||
class BenchmarkRamSysbench(BenchmarkWithRate):
|
||||
|
@ -831,26 +1035,54 @@ class BenchmarkRamSysbench(BenchmarkWithRate):
|
|||
|
||||
|
||||
class ToRepair(EventWithMultipleDevices):
|
||||
pass
|
||||
"""Select a device to be repaired."""
|
||||
|
||||
|
||||
class Repair(EventWithMultipleDevices):
|
||||
pass
|
||||
"""Repair is the act of performing reparations.
|
||||
|
||||
If a repair without an error is performed,
|
||||
it represents that the reparation has been successful.
|
||||
"""
|
||||
|
||||
|
||||
class ReadyToUse(EventWithMultipleDevices):
|
||||
pass
|
||||
"""The device is ready to be used.
|
||||
|
||||
This involves greater preparation than the ``Prepare`` event,
|
||||
and users should only use a device after this event is performed.
|
||||
|
||||
Users usually require devices with this event before shipping them
|
||||
to customers.
|
||||
"""
|
||||
|
||||
|
||||
class ToPrepare(EventWithMultipleDevices):
|
||||
"""The device has been selected for preparation.
|
||||
|
||||
See Prepare for more info.
|
||||
|
||||
Usually **ToPrepare** is the next event done after registering the
|
||||
device.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class Prepare(EventWithMultipleDevices):
|
||||
pass
|
||||
"""Work has been performed to the device to a defined point of
|
||||
acceptance.
|
||||
|
||||
Users using this event have to agree on what this point
of acceptance is; for some it is when the device just works, for others
|
||||
when some testing has been performed.
|
||||
"""
|
||||
|
||||
|
||||
class Live(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
||||
"""A keep-alive from a device connected to the Internet with
|
||||
information about its state (in the form of a ``Snapshot`` event)
|
||||
and usage statistics.
|
||||
"""
|
||||
ip = Column(IP, nullable=False,
|
||||
comment='The IP where the live was triggered.')
|
||||
subdivision_confidence = Column(SmallInteger,
|
||||
|
@ -873,18 +1105,34 @@ class Live(JoinedWithOneDeviceMixin, EventWithOneDevice):
|
|||
|
||||
|
||||
class Organize(JoinedTableMixin, EventWithMultipleDevices):
|
||||
pass
|
||||
"""The act of manipulating/administering/supervising/controlling
|
||||
one or more devices.
|
||||
"""
|
||||
|
||||
|
||||
class Reserve(Organize):
|
||||
pass
|
||||
"""The act of reserving devices and cancelling them.
|
||||
|
||||
After this event is performed, the user is the **reservee** of the
|
||||
devices. There can only be one non-cancelled reservation for
|
||||
a device, and a reservation can only have one reservee.
|
||||
"""
|
||||
|
||||
|
||||
class CancelReservation(Organize):
|
||||
pass
|
||||
"""The act of cancelling a reservation."""
|
||||
|
||||
|
||||
class Trade(JoinedTableMixin, EventWithMultipleDevices):
|
||||
"""Trade actions log the political exchange of devices between users.
|
||||
Every time a trade event is performed, the old user loses its
|
||||
political possession, for example ownership, in favor of another
|
||||
user.
|
||||
|
||||
|
||||
Performing trade events changes the *Trading* state of the
|
||||
device —:class:`ereuse_devicehub.resources.device.states.Trading`.
|
||||
"""
|
||||
shipping_date = Column(DateTime)
|
||||
shipping_date.comment = """
|
||||
When are the devices going to be ready for shipping?
|
||||
|
@ -927,36 +1175,67 @@ class Trade(JoinedTableMixin, EventWithMultipleDevices):
|
|||
|
||||
|
||||
class Sell(Trade):
|
||||
pass
|
||||
"""The act of taking money from a buyer in exchange of a device."""
|
||||
|
||||
|
||||
class Donate(Trade):
|
||||
pass
|
||||
"""The act of giving devices without compensation."""
|
||||
|
||||
|
||||
class Rent(Trade):
|
||||
pass
|
||||
"""The act of giving money in return for temporary use, but not
|
||||
ownership, of a device.
|
||||
"""
|
||||
|
||||
|
||||
class CancelTrade(Trade):
|
||||
pass
|
||||
"""The act of cancelling a `Sell`_, `Donate`_ or `Rent`_."""
|
||||
# todo cancelTrade does not do anything
|
||||
|
||||
|
||||
class ToDisposeProduct(Trade):
|
||||
pass
|
||||
"""The act of setting a device for being disposed.
|
||||
|
||||
See :class:`.DisposeProduct`.
|
||||
"""
|
||||
# todo test this
|
||||
|
||||
|
||||
class DisposeProduct(Trade):
|
||||
pass
|
||||
"""The act of getting rid of devices by giving (selling, donating)
|
||||
to another organization, like a waste manager.
|
||||
|
||||
|
||||
See :class:`.ToDispose` and :class:`.DisposeProduct` for
|
||||
disposing without trading the device. See :class:`.DisposeWaste`
|
||||
and :class:`.Recover` for disposing in-house, that is,
|
||||
without trading the device.
|
||||
"""
|
||||
# todo For usability purposes, users might not directly perform
|
||||
# *DisposeProduct*, but this could automatically be done when
|
||||
# performing :class:`.ToDispose` + :class:`.Receive` to a
|
||||
# ``RecyclingCenter``.
|
||||
|
||||
|
||||
class Receive(JoinedTableMixin, EventWithMultipleDevices):
|
||||
"""The act of physically taking delivery of a device.
|
||||
|
||||
The receiver confirms that the devices have arrived, and thus,
|
||||
they are the
|
||||
:attr:`ereuse_devicehub.resources.device.models.Device.physical_possessor`.
|
||||
|
||||
The receiver can optionally take a
|
||||
:class:`ereuse_devicehub.resources.enums.ReceiverRole`.
|
||||
"""
|
||||
role = Column(DBEnum(ReceiverRole),
|
||||
nullable=False,
|
||||
default=ReceiverRole.Intermediary)
|
||||
|
||||
|
||||
class Migrate(JoinedTableMixin, EventWithMultipleDevices):
|
||||
"""Moves the devices to a new database/inventory. Devices cannot be
|
||||
modified anymore at the previous database.
|
||||
"""
|
||||
other = Column(URL(), nullable=False)
|
||||
other.comment = """
|
||||
The URL of the Migrate in the other end.
|
||||
|
|
|
@ -17,8 +17,8 @@ from teal.enums import Country
|
|||
from ereuse_devicehub.resources.agent.models import Agent
|
||||
from ereuse_devicehub.resources.device.models import Component, Computer, Device
|
||||
from ereuse_devicehub.resources.enums import AppearanceRange, Bios, FunctionalityRange, \
|
||||
PriceSoftware, RatingSoftware, ReceiverRole, SnapshotExpectedEvents, SnapshotSoftware, \
|
||||
TestDataStorageLength
|
||||
PriceSoftware, RatingSoftware, ReceiverRole, Severity, SnapshotExpectedEvents, \
|
||||
SnapshotSoftware, TestDataStorageLength
|
||||
from ereuse_devicehub.resources.models import Thing
|
||||
from ereuse_devicehub.resources.user.models import User
|
||||
|
||||
|
@ -27,8 +27,6 @@ class Event(Thing):
|
|||
id = ... # type: Column
|
||||
name = ... # type: Column
|
||||
type = ... # type: Column
|
||||
error = ... # type: Column
|
||||
incidence = ... # type: Column
|
||||
description = ... # type: Column
|
||||
snapshot_id = ... # type: Column
|
||||
snapshot = ... # type: relationship
|
||||
|
@ -41,17 +39,14 @@ class Event(Thing):
|
|||
start_time = ... # type: Column
|
||||
end_time = ... # type: Column
|
||||
agent_id = ... # type: Column
|
||||
severity = ... # type: Column
|
||||
|
||||
def __init__(self, id=None, name=None, incidence=None, closed=None, error=None,
|
||||
description=None, start_time=None, end_time=None, snapshot=None, agent=None,
|
||||
parent=None, created=None, updated=None, author=None) -> None:
|
||||
def __init__(self, **kwargs) -> None:
|
||||
super().__init__(created, updated)
|
||||
self.id = ... # type: UUID
|
||||
self.name = ... # type: str
|
||||
self.type = ... # type: str
|
||||
self.incidence = ... # type: bool
|
||||
self.closed = ... # type: bool
|
||||
self.error = ... # type: bool
|
||||
self.description = ... # type: str
|
||||
self.start_time = ... # type: datetime
|
||||
self.end_time = ... # type: datetime
|
||||
|
@ -60,34 +55,25 @@ class Event(Thing):
|
|||
self.parent = ... # type: Computer
|
||||
self.agent = ... # type: Agent
|
||||
self.author = ... # type: User
|
||||
self.severity = ... # type: Severity
|
||||
|
||||
@property
|
||||
def url(self) -> urlutils.URL:
|
||||
pass
|
||||
|
||||
@property
|
||||
def _err_str(self):
|
||||
pass
|
||||
|
||||
|
||||
class EventWithOneDevice(Event):
|
||||
|
||||
def __init__(self, id=None, name=None, incidence=None, closed=None, error=None,
|
||||
description=None, start_time=None, end_time=None, snapshot=None, agent=None,
|
||||
parent=None, created=None, updated=None, author=None, device=None) -> None:
|
||||
super().__init__(id, name, incidence, closed, error, description, start_time, end_time,
|
||||
snapshot, agent, parent, created, updated, author)
|
||||
def __init__(self, **kwargs) -> None:
|
||||
super().__init__(**kwargs)
|
||||
self.device = ... # type: Device
|
||||
|
||||
|
||||
class EventWithMultipleDevices(Event):
|
||||
devices = ... # type: relationship
|
||||
|
||||
def __init__(self, id=None, name=None, incidence=None, closed=None, error=None,
|
||||
description=None, start_time=None, end_time=None, snapshot=None, agent=None,
|
||||
parent=None, created=None, updated=None, author=None, devices=None) -> None:
|
||||
super().__init__(id, name, incidence, closed, error, description, start_time, end_time,
|
||||
snapshot, agent, parent, created, updated, author)
|
||||
def __init__(self, **kwargs) -> None:
|
||||
super().__init__(**kwargs)
|
||||
self.devices = ... # type: Set[Device]
|
||||
|
||||
|
||||
|
@ -100,15 +86,21 @@ class Remove(EventWithOneDevice):
|
|||
|
||||
|
||||
class Step(Model):
|
||||
type = ... # type: Column
|
||||
num = ... # type: Column
|
||||
start_time = ... # type: Column
|
||||
end_time = ... # type: Column
|
||||
erasure = ... # type: relationship
|
||||
severity = ... # type: Column
|
||||
|
||||
def __init__(self, num=None, success=None, start_time=None, end_time=None,
|
||||
erasure=None, error=None) -> None:
|
||||
erasure=None, severity=None) -> None:
|
||||
self.type = ... # type: str
|
||||
self.num = ... # type: int
|
||||
self.success = ... # type: bool
|
||||
self.start_time = ... # type: datetime
|
||||
self.end_time = ... # type: datetime
|
||||
self.erasure = ... # type: EraseBasic
|
||||
self.error = ... # type: bool
|
||||
self.severity = ... # type: Severity
|
||||
|
||||
|
||||
class StepZero(Step):
|
||||
|
|
|
@ -12,7 +12,7 @@ from ereuse_devicehub.marshmallow import NestedOn
|
|||
from ereuse_devicehub.resources.agent.schemas import Agent
|
||||
from ereuse_devicehub.resources.device.schemas import Component, Computer, Device
|
||||
from ereuse_devicehub.resources.enums import AppearanceRange, Bios, FunctionalityRange, \
|
||||
PriceSoftware, RATE_POSITIVE, RatingRange, RatingSoftware, ReceiverRole, \
|
||||
PriceSoftware, RATE_POSITIVE, RatingRange, RatingSoftware, ReceiverRole, Severity, \
|
||||
SnapshotExpectedEvents, SnapshotSoftware, TestDataStorageLength
|
||||
from ereuse_devicehub.resources.event import models as m
|
||||
from ereuse_devicehub.resources.models import STR_BIG_SIZE, STR_SIZE
|
||||
|
@ -25,9 +25,8 @@ class Event(Thing):
|
|||
name = SanitizedStr(default='',
|
||||
validate=Length(max=STR_BIG_SIZE),
|
||||
description=m.Event.name.comment)
|
||||
incidence = Boolean(default=False, description=m.Event.incidence.comment)
|
||||
closed = Boolean(missing=True, description=m.Event.closed.comment)
|
||||
error = Boolean(default=False, description=m.Event.error.comment)
|
||||
severity = EnumField(Severity, description=m.Event.severity.comment)
|
||||
description = SanitizedStr(default='', description=m.Event.description.comment)
|
||||
start_time = DateTime(data_key='startTime', description=m.Event.start_time.comment)
|
||||
end_time = DateTime(data_key='endTime', description=m.Event.end_time.comment)
|
||||
|
@ -85,7 +84,7 @@ class Step(Schema):
|
|||
type = String(description='Only required when it is nested.')
|
||||
start_time = DateTime(required=True, data_key='startTime')
|
||||
end_time = DateTime(required=True, data_key='endTime')
|
||||
error = Boolean(default=False, description='Did the event fail?')
|
||||
severity = EnumField(Severity, description=m.Event.severity.comment)
|
||||
|
||||
|
||||
class StepZero(Step):
|
||||
|
@ -299,7 +298,7 @@ class BenchmarkDataStorage(Benchmark):
|
|||
|
||||
|
||||
class BenchmarkWithRate(Benchmark):
|
||||
rate = Integer(required=True)
|
||||
rate = Float(required=True)
|
||||
|
||||
|
||||
class BenchmarkProcessor(BenchmarkWithRate):
|
||||
|
|
|
@ -1,19 +1,19 @@
|
|||
import uuid
|
||||
from datetime import datetime
|
||||
from typing import Union
|
||||
|
||||
from boltons import urlutils
|
||||
from citext import CIText
|
||||
from flask import g
|
||||
from sqlalchemy import TEXT
|
||||
from sqlalchemy.dialects.postgresql import UUID
|
||||
from sqlalchemy.sql import expression as exp
|
||||
from sqlalchemy_utils import LtreeType
|
||||
from sqlalchemy_utils.types.ltree import LQUERY
|
||||
from teal.db import UUIDLtree
|
||||
from teal.db import CASCADE_OWN, UUIDLtree
|
||||
from teal.resource import url_for_resource
|
||||
|
||||
from ereuse_devicehub.db import db
|
||||
from ereuse_devicehub.resources.device.models import Device
|
||||
from ereuse_devicehub.db import create_view, db, exp, f
|
||||
from ereuse_devicehub.resources.device.models import Component, Device
|
||||
from ereuse_devicehub.resources.models import Thing
|
||||
from ereuse_devicehub.resources.user.models import User
|
||||
|
||||
|
@ -21,6 +21,8 @@ from ereuse_devicehub.resources.user.models import User
|
|||
class Lot(Thing):
|
||||
id = db.Column(UUID(as_uuid=True), primary_key=True) # uuid is generated on init by default
|
||||
name = db.Column(CIText(), nullable=False)
|
||||
description = db.Column(CIText())
|
||||
description.comment = """A comment about the lot."""
|
||||
closed = db.Column(db.Boolean, default=False, nullable=False)
|
||||
closed.comment = """
|
||||
A closed lot cannot be modified anymore.
|
||||
|
@ -28,6 +30,7 @@ class Lot(Thing):
|
|||
devices = db.relationship(Device,
|
||||
backref=db.backref('lots', lazy=True, collection_class=set),
|
||||
secondary=lambda: LotDevice.__table__,
|
||||
lazy=True,
|
||||
collection_class=set)
|
||||
"""
|
||||
The **children** devices that the lot has.
|
||||
|
@ -35,48 +38,48 @@ class Lot(Thing):
|
|||
Note that the lot can have more devices, if they are inside
|
||||
descendant lots.
|
||||
"""
|
||||
parents = db.relationship(lambda: Lot,
|
||||
viewonly=True,
|
||||
lazy=True,
|
||||
collection_class=set,
|
||||
secondary=lambda: LotParent.__table__,
|
||||
primaryjoin=lambda: Lot.id == LotParent.child_id,
|
||||
secondaryjoin=lambda: LotParent.parent_id == Lot.id,
|
||||
cascade='refresh-expire', # propagate changes outside ORM
|
||||
backref=db.backref('children',
|
||||
viewonly=True,
|
||||
lazy=True,
|
||||
cascade='refresh-expire',
|
||||
collection_class=set)
|
||||
)
|
||||
"""The parent lots."""
|
||||
|
||||
def __init__(self, name: str, closed: bool = closed.default.arg) -> None:
|
||||
all_devices = db.relationship(Device,
|
||||
viewonly=True,
|
||||
lazy=True,
|
||||
collection_class=set,
|
||||
secondary=lambda: LotDeviceDescendants.__table__,
|
||||
primaryjoin=lambda: Lot.id == LotDeviceDescendants.ancestor_lot_id,
|
||||
secondaryjoin=lambda: LotDeviceDescendants.device_id == Device.id)
|
||||
"""All devices, including components, inside this lot and its
|
||||
descendants.
|
||||
"""
|
||||
|
||||
def __init__(self, name: str, closed: bool = closed.default.arg,
|
||||
description: str = None) -> None:
|
||||
"""
|
||||
Initializes a lot
|
||||
:param name:
|
||||
:param closed:
|
||||
"""
|
||||
super().__init__(id=uuid.uuid4(), name=name, closed=closed)
|
||||
super().__init__(id=uuid.uuid4(), name=name, closed=closed, description=description)
|
||||
Path(self) # Lots have always one edge per default.
|
||||
|
||||
def add_child(self, child):
|
||||
"""Adds a child to this lot."""
|
||||
if isinstance(child, Lot):
|
||||
Path.add(self.id, child.id)
|
||||
db.session.refresh(self) # todo is this useful?
|
||||
db.session.refresh(child)
|
||||
else:
|
||||
assert isinstance(child, uuid.UUID)
|
||||
Path.add(self.id, child)
|
||||
db.session.refresh(self) # todo is this useful?
|
||||
|
||||
def remove_child(self, child):
|
||||
if isinstance(child, Lot):
|
||||
Path.delete(self.id, child.id)
|
||||
else:
|
||||
assert isinstance(child, uuid.UUID)
|
||||
Path.delete(self.id, child)
|
||||
|
||||
@property
|
||||
def url(self) -> urlutils.URL:
|
||||
"""The URL where to GET this event."""
|
||||
return urlutils.URL(url_for_resource(Lot, item_id=self.id))
|
||||
|
||||
@property
|
||||
def children(self):
|
||||
"""The children lots."""
|
||||
# From https://stackoverflow.com/a/41158890
|
||||
id = UUIDLtree.convert(self.id)
|
||||
return self.query \
|
||||
.join(self.__class__.paths) \
|
||||
.filter(Path.path.lquery(exp.cast('*.{}.*{{1}}'.format(id), LQUERY)))
|
||||
|
||||
@property
|
||||
def descendants(self):
|
||||
return self.descendantsq(self.id)
|
||||
|
@ -86,29 +89,73 @@ class Lot(Thing):
|
|||
_id = UUIDLtree.convert(id)
|
||||
return (cls.id == Path.lot_id) & Path.path.lquery(exp.cast('*.{}.*'.format(_id), LQUERY))
|
||||
|
||||
@property
|
||||
def parents(self):
|
||||
return self.parentsq(self.id)
|
||||
|
||||
@classmethod
|
||||
def parentsq(cls, id: UUID):
|
||||
"""The parent lots."""
|
||||
id = UUIDLtree.convert(id)
|
||||
i = db.func.index(Path.path, id)
|
||||
parent_id = db.func.replace(exp.cast(db.func.subpath(Path.path, i - 1, i), TEXT), '_', '-')
|
||||
join_clause = parent_id == exp.cast(Lot.id, TEXT)
|
||||
return cls.query.join(Path, join_clause).filter(
|
||||
Path.path.lquery(exp.cast('*{{1}}.{}.*'.format(id), LQUERY))
|
||||
)
|
||||
|
||||
def __contains__(self, child: 'Lot'):
|
||||
return Path.has_lot(self.id, child.id)
|
||||
|
||||
@classmethod
|
||||
def roots(cls):
|
||||
"""Gets the lots that are not under any other lot."""
|
||||
return cls.query.join(cls.paths).filter(db.func.nlevel(Path.path) == 1)
|
||||
|
||||
def add_children(self, *children):
|
||||
"""Add children lots to this lot.
|
||||
|
||||
This operation is highly costly as it forces refreshing
|
||||
many models in session.
|
||||
"""
|
||||
for child in children:
|
||||
if isinstance(child, Lot):
|
||||
Path.add(self.id, child.id)
|
||||
db.session.refresh(child)
|
||||
else:
|
||||
assert isinstance(child, uuid.UUID)
|
||||
Path.add(self.id, child)
|
||||
# We need to refresh the models involved in this operation
|
||||
# outside the session / ORM control so the models
|
||||
# that have relationships to this model
|
||||
# with the cascade 'refresh-expire' can welcome the changes
|
||||
db.session.refresh(self)
|
||||
|
||||
def remove_children(self, *children):
|
||||
"""Remove children lots from this lot.
|
||||
|
||||
This operation is highly costly as it forces refreshing
|
||||
many models in session.
|
||||
"""
|
||||
for child in children:
|
||||
if isinstance(child, Lot):
|
||||
Path.delete(self.id, child.id)
|
||||
db.session.refresh(child)
|
||||
else:
|
||||
assert isinstance(child, uuid.UUID)
|
||||
Path.delete(self.id, child)
|
||||
db.session.refresh(self)
|
||||
|
||||
def delete(self):
|
||||
"""Deletes the lot.
|
||||
|
||||
This method removes the children lots and child
devices from this lot, leaving them orphaned, and then marks this lot
for deletion.
|
||||
"""
|
||||
self.remove_children(*self.children)
|
||||
db.session.delete(self)
|
||||
|
||||
def _refresh_models_with_relationships_to_lots(self):
|
||||
session = db.Session.object_session(self)
|
||||
for model in session:
|
||||
if isinstance(model, (Device, Lot, Path)):
|
||||
session.expire(model)
|
||||
|
||||
def __contains__(self, child: Union['Lot', Device]):
|
||||
if isinstance(child, Lot):
|
||||
return Path.has_lot(self.id, child.id)
|
||||
elif isinstance(child, Device):
|
||||
device = db.session.query(LotDeviceDescendants) \
|
||||
.filter(LotDeviceDescendants.device_id == child.id) \
|
||||
.filter(LotDeviceDescendants.ancestor_lot_id == self.id) \
|
||||
.one_or_none()
|
||||
return device
|
||||
else:
|
||||
raise TypeError('Lot only contains devices and lots, not {}'.format(child.__class__))
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return '<Lot {0.name} devices={0.devices!r}>'.format(self)
|
||||
|
||||
|
@ -131,9 +178,12 @@ class Path(db.Model):
|
|||
id = db.Column(db.UUID(as_uuid=True),
|
||||
primary_key=True,
|
||||
server_default=db.text('gen_random_uuid()'))
|
||||
lot_id = db.Column(db.UUID(as_uuid=True), db.ForeignKey(Lot.id), nullable=False)
|
||||
lot_id = db.Column(db.UUID(as_uuid=True), db.ForeignKey(Lot.id), nullable=False, index=True)
|
||||
lot = db.relationship(Lot,
|
||||
backref=db.backref('paths', lazy=True, collection_class=set),
|
||||
backref=db.backref('paths',
|
||||
lazy=True,
|
||||
collection_class=set,
|
||||
cascade=CASCADE_OWN),
|
||||
primaryjoin=Lot.id == lot_id)
|
||||
path = db.Column(LtreeType, nullable=False)
|
||||
created = db.Column(db.TIMESTAMP(timezone=True), server_default=db.text('CURRENT_TIMESTAMP'))
|
||||
|
@ -171,3 +221,64 @@ class Path(db.Model):
|
|||
"SELECT 1 from path where path ~ '*.{}.*.{}.*'".format(parent_id, child_id)
|
||||
).first()
|
||||
)
|
||||
|
||||
|
||||
class LotDeviceDescendants(db.Model):
|
||||
"""A view facilitating querying inclusion between devices and lots,
|
||||
including components.
|
||||
|
||||
The view has 4 columns:
|
||||
1. The ID of the device.
|
||||
2. The ID of a lot containing the device.
|
||||
3. The ID of the lot that directly contains the device.
|
||||
4. If 1. is a component, the ID of the device that is inside the lot.
|
||||
"""
|
||||
|
||||
_ancestor = Lot.__table__.alias(name='ancestor')
|
||||
"""Ancestor lot table."""
|
||||
_desc = Lot.__table__.alias()
|
||||
"""Descendant lot table."""
|
||||
lot_device = _desc \
|
||||
.join(LotDevice, _desc.c.id == LotDevice.lot_id) \
|
||||
.join(Path, _desc.c.id == Path.lot_id)
|
||||
"""Join: Path -- Lot -- LotDevice"""
|
||||
|
||||
descendants = "path.path ~ (CAST('*.'|| replace(CAST({}.id as text), '-', '_') " \
|
||||
"|| '.*' AS LQUERY))".format(_ancestor.name)
|
||||
"""Query that gets the descendants of the ancestor lot."""
|
||||
devices = db.select([
|
||||
LotDevice.device_id,
|
||||
_desc.c.id.label('parent_lot_id'),
|
||||
_ancestor.c.id.label('ancestor_lot_id'),
|
||||
None
|
||||
]).select_from(_ancestor).select_from(lot_device).where(descendants)
|
||||
|
||||
# Components
|
||||
_parent_device = Device.__table__.alias(name='parent_device')
|
||||
"""The device that has the access to the lot."""
|
||||
lot_device_component = lot_device \
|
||||
.join(_parent_device, _parent_device.c.id == LotDevice.device_id) \
|
||||
.join(Component, _parent_device.c.id == Component.parent_id)
|
||||
"""Join: Path -- Lot -- LotDevice -- ParentDevice (Device) -- Component"""
|
||||
|
||||
components = db.select([
|
||||
Component.id.label('device_id'),
|
||||
_desc.c.id.label('parent_lot_id'),
|
||||
_ancestor.c.id.label('ancestor_lot_id'),
|
||||
LotDevice.device_id.label('device_parent_id'),
|
||||
]).select_from(_ancestor).select_from(lot_device_component).where(descendants)
|
||||
|
||||
__table__ = create_view('lot_device_descendants', devices.union(components))
|
||||
|
||||
|
||||
class LotParent(db.Model):
|
||||
i = f.index(Path.path, db.func.text2ltree(f.replace(exp.cast(Path.lot_id, TEXT), '-', '_')))
|
||||
|
||||
__table__ = create_view(
|
||||
'lot_parent',
|
||||
db.select([
|
||||
Path.lot_id.label('child_id'),
|
||||
exp.cast(f.replace(exp.cast(f.subltree(Path.path, i - 1, i), TEXT), '_', '-'),
|
||||
UUID).label('parent_id')
|
||||
]).select_from(Path).where(i > 0),
|
||||
)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
import uuid
|
||||
from datetime import datetime
|
||||
from typing import Iterable, Set, Union
|
||||
from typing import Iterable, Optional, Set, Union
|
||||
from uuid import UUID
|
||||
|
||||
from boltons import urlutils
|
||||
|
@ -8,6 +8,7 @@ from sqlalchemy import Column
|
|||
from sqlalchemy.orm import Query, relationship
|
||||
from sqlalchemy_utils import Ltree
|
||||
|
||||
from ereuse_devicehub.db import db
|
||||
from ereuse_devicehub.resources.device.models import Device
|
||||
from ereuse_devicehub.resources.models import Thing
|
||||
|
||||
|
@ -20,6 +21,9 @@ class Lot(Thing):
|
|||
closed = ... # type: Column
|
||||
devices = ... # type: relationship
|
||||
paths = ... # type: relationship
|
||||
description = ... # type: Column
|
||||
all_devices = ... # type: relationship
|
||||
parents = ... # type: relationship
|
||||
|
||||
def __init__(self, name: str, closed: bool = closed.default.arg) -> None:
|
||||
super().__init__()
|
||||
|
@ -28,21 +32,21 @@ class Lot(Thing):
|
|||
self.closed = ... # type: bool
|
||||
self.devices = ... # type: Set[Device]
|
||||
self.paths = ... # type: Set[Path]
|
||||
self.description = ... # type: str
|
||||
self.all_devices = ... # type: Set[Device]
|
||||
self.parents = ... # type: Set[Lot]
|
||||
self.children = ... # type: Set[Lot]
|
||||
|
||||
def add_child(self, child: Union[Lot, uuid.UUID]):
|
||||
def add_children(self, *children: Union[Lot, uuid.UUID]):
|
||||
pass
|
||||
|
||||
def remove_child(self, child: Union[Lot, uuid.UUID]):
|
||||
def remove_children(self, *children: Union[Lot, uuid.UUID]):
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def roots(cls) -> LotQuery:
|
||||
pass
|
||||
|
||||
@property
|
||||
def children(self) -> LotQuery:
|
||||
pass
|
||||
|
||||
@property
|
||||
def descendants(self) -> LotQuery:
|
||||
pass
|
||||
|
@ -51,18 +55,13 @@ class Lot(Thing):
|
|||
def descendantsq(cls, id) -> LotQuery:
|
||||
pass
|
||||
|
||||
@property
|
||||
def parents(self) -> LotQuery:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def parentsq(cls, id) -> LotQuery:
|
||||
pass
|
||||
|
||||
@property
|
||||
def url(self) -> urlutils.URL:
|
||||
pass
|
||||
|
||||
def delete(self):
|
||||
pass
|
||||
|
||||
|
||||
class Path:
|
||||
id = ... # type: Column
|
||||
|
@ -77,3 +76,17 @@ class Path:
|
|||
self.lot = ... # type: Lot
|
||||
self.path = ... # type: Ltree
|
||||
self.created = ... # type: datetime
|
||||
|
||||
|
||||
class LotDeviceDescendants(db.Model):
|
||||
device_id = ... # type: Column
|
||||
ancestor_lot_id = ... # type: Column
|
||||
parent_lot_id = ... # type: Column
|
||||
device_parent_id = ... # type: Column
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self.device_id = ... # type: int
|
||||
self.ancestor_lot_id = ... # type: UUID
|
||||
self.parent_lot_id = ... # type: UUID
|
||||
self.device_parent_id = ... # type: Optional[int]
|
||||
|
|
|
@ -11,6 +11,7 @@ from ereuse_devicehub.resources.schemas import Thing
|
|||
class Lot(Thing):
|
||||
id = f.UUID(dump_only=True)
|
||||
name = SanitizedStr(validate=f.validate.Length(max=STR_SIZE), required=True)
|
||||
description = SanitizedStr(description=m.Lot.description.comment)
|
||||
closed = f.Boolean(missing=False, description=m.Lot.closed.comment)
|
||||
devices = NestedOn(Device, many=True, dump_only=True)
|
||||
children = NestedOn('Lot', many=True, dump_only=True)
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
import uuid
|
||||
from collections import deque
|
||||
from enum import Enum
|
||||
from typing import List, Set
|
||||
from typing import Dict, List, Set, Union
|
||||
|
||||
import marshmallow as ma
|
||||
from flask import Response, jsonify, request
|
||||
|
@ -67,10 +67,12 @@ class LotView(View):
|
|||
you can filter.
|
||||
"""
|
||||
if args['format'] == LotFormat.UiTree:
|
||||
return jsonify({
|
||||
'items': self.ui_tree(),
|
||||
lots = self.schema.dump(Lot.query, many=True, nested=1)
|
||||
ret = {
|
||||
'items': {l['id']: l for l in lots},
|
||||
'tree': self.ui_tree(),
|
||||
'url': request.path
|
||||
})
|
||||
}
|
||||
else:
|
||||
query = Lot.query
|
||||
if args['search']:
|
||||
|
@ -87,18 +89,24 @@ class LotView(View):
|
|||
},
|
||||
'url': request.path
|
||||
}
|
||||
return jsonify(ret)
|
||||
return jsonify(ret)
|
||||
|
||||
def delete(self, id):
|
||||
lot = Lot.query.filter_by(id=id).one()
|
||||
lot.delete()
|
||||
db.session.commit()
|
||||
return Response(status=204)
|
||||
|
||||
@classmethod
|
||||
def ui_tree(cls) -> List[dict]:
|
||||
nodes = []
|
||||
def ui_tree(cls) -> List[Dict]:
|
||||
tree = []
|
||||
for model in Path.query: # type: Path
|
||||
path = deque(model.path.path.split('.'))
|
||||
cls._p(nodes, path)
|
||||
return nodes
|
||||
cls._p(tree, path)
|
||||
return tree
|
||||
|
||||
@classmethod
|
||||
def _p(cls, nodes: List[dict], path: deque):
|
||||
def _p(cls, nodes: List[Dict[str, Union[uuid.UUID, List]]], path: deque):
|
||||
"""Recursively creates the nested lot structure.
|
||||
|
||||
Every recursive step consumes path (a deque of lot_id),
|
||||
|
@ -110,14 +118,8 @@ class LotView(View):
|
|||
# does lot_id exist already in node?
|
||||
node = next(part for part in nodes if lot_id == part['id'])
|
||||
except StopIteration:
|
||||
lot = Lot.query.filter_by(id=lot_id).one()
|
||||
node = {
|
||||
'id': lot_id,
|
||||
'name': lot.name,
|
||||
'url': lot.url.to_text(),
|
||||
'closed': lot.closed,
|
||||
'updated': lot.updated,
|
||||
'created': lot.created,
|
||||
'nodes': []
|
||||
}
|
||||
nodes.append(node)
|
||||
|
@ -174,12 +176,10 @@ class LotChildrenView(LotBaseChildrenView):
|
|||
id = ma.fields.List(ma.fields.UUID())
|
||||
|
||||
def _post(self, lot: Lot, ids: Set[uuid.UUID]):
|
||||
for id in ids:
|
||||
lot.add_child(id) # todo what to do if child exists already?
|
||||
lot.add_children(*ids)
|
||||
|
||||
def _delete(self, lot: Lot, ids: Set[uuid.UUID]):
|
||||
for id in ids:
|
||||
lot.remove_child(id)
|
||||
lot.remove_children(*ids)
|
||||
|
||||
|
||||
class LotDeviceView(LotBaseChildrenView):
|
||||
|
|
|
@ -13,12 +13,14 @@ class Thing(db.Model):
|
|||
# todo make updated to auto-update
|
||||
updated = db.Column(db.TIMESTAMP(timezone=True),
|
||||
nullable=False,
|
||||
index=True,
|
||||
server_default=db.text('CURRENT_TIMESTAMP'))
|
||||
updated.comment = """
|
||||
When this was last changed.
|
||||
"""
|
||||
created = db.Column(db.TIMESTAMP(timezone=True),
|
||||
nullable=False,
|
||||
index=True,
|
||||
server_default=db.text('CURRENT_TIMESTAMP'))
|
||||
created.comment = """
|
||||
When Devicehub created this.
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
from datetime import datetime
|
||||
|
||||
from sqlalchemy import Column
|
||||
from sqlalchemy import Column, Table
|
||||
from teal.db import Model
|
||||
|
||||
STR_SIZE = 64
|
||||
|
@ -10,6 +10,7 @@ STR_XSM_SIZE = 16
|
|||
|
||||
|
||||
class Thing(Model):
|
||||
__table__ = ... # type: Table
|
||||
t = ... # type: str
|
||||
type = ... # type: str
|
||||
updated = ... # type: Column
|
||||
|
|
|
@ -32,12 +32,13 @@ class Tag(Thing):
|
|||
"""
|
||||
device_id = Column(BigInteger,
|
||||
# We don't want to delete the tag on device deletion, only set to null
|
||||
ForeignKey(Device.id, ondelete=DB_CASCADE_SET_NULL))
|
||||
ForeignKey(Device.id, ondelete=DB_CASCADE_SET_NULL),
|
||||
index=True)
|
||||
device = relationship(Device,
|
||||
backref=backref('tags', lazy=True, collection_class=set),
|
||||
primaryjoin=Device.id == device_id)
|
||||
"""The device linked to this tag."""
|
||||
secondary = Column(Unicode(), check_lower('secondary'))
|
||||
secondary = Column(Unicode(), check_lower('secondary'), index=True)
|
||||
secondary.comment = """
|
||||
A secondary identifier for this tag. It has the same
|
||||
constraints as the main one. Only needed in special cases.
|
||||
|
|
|
@ -4,11 +4,11 @@
|
|||
|
||||
Define servername api.devicetag.io
|
||||
# The domain used to access the server
|
||||
Define appdir /path/to/app/dir
|
||||
Define appdir /home/devicetag/sites/${servername}/source/
|
||||
# The path where the app directory is. Apache must have access to this folder.
|
||||
Define wsgipath ${appdir}/wsgi.wsgi
|
||||
# The location of the .wsgi file
|
||||
Define pyvenv /path/to/venv
|
||||
Define pyvenv ${appdir}/venv/
|
||||
# The path where the virtual environment is (the folder containing bin/activate)
|
||||
|
||||
<VirtualHost *:80>
|
||||
|
|
|
@ -1,9 +1,13 @@
|
|||
#!/usr/bin/env bash
|
||||
# Creates a database, user, and extensions to use Devicehub
|
||||
# $1 is the database to create
|
||||
# $2 is the user to create and give full permissions on the database
|
||||
# This script asks for the password of such user
|
||||
|
||||
read -s -p "Password for $2": pass
|
||||
createdb $1 # Create main database
|
||||
psql -d $1 -c "CREATE USER dhub WITH PASSWORD 'ereuse';" # Create user Devicehub uses to access db
|
||||
psql -d $1 -c "GRANT ALL PRIVILEGES ON DATABASE $1 TO dhub;" # Give access to the db
|
||||
psql -d $1 -c "CREATE USER $2 WITH PASSWORD '$pass';" # Create user Devicehub uses to access db
|
||||
psql -d $1 -c "GRANT ALL PRIVILEGES ON DATABASE $1 TO $2;" # Give access to the db
|
||||
psql -d $1 -c "CREATE EXTENSION pgcrypto SCHEMA public;" # Enable pgcrypto
|
||||
psql -d $1 -c "CREATE EXTENSION ltree SCHEMA public;" # Enable ltree
|
||||
psql -d $1 -c "CREATE EXTENSION citext SCHEMA public;" # Enable citext
|
||||
|
|
|
@ -23,9 +23,9 @@ python-stdnum==1.9
|
|||
PyYAML==3.13
|
||||
requests==2.19.1
|
||||
requests-mock==1.5.2
|
||||
SQLAlchemy==1.2.11
|
||||
SQLAlchemy-Utils==0.33.3
|
||||
teal==0.2.0a28
|
||||
SQLAlchemy==1.2.14
|
||||
SQLAlchemy-Utils==0.33.6
|
||||
teal==0.2.0a30
|
||||
webargs==4.0.0
|
||||
Werkzeug==0.14.1
|
||||
sqlalchemy-citext==1.3.post0
|
||||
|
|
12
setup.py
12
setup.py
|
@ -1,4 +1,3 @@
|
|||
import re
|
||||
from collections import OrderedDict
|
||||
|
||||
from setuptools import find_packages, setup
|
||||
|
@ -6,9 +5,6 @@ from setuptools import find_packages, setup
|
|||
with open('README.md', encoding='utf8') as f:
|
||||
long_description = f.read()
|
||||
|
||||
with open('ereuse_devicehub/__init__.py', 'rt', encoding='utf8') as f:
|
||||
version = re.search(r'__version__ = \'(.*?)\'', f.read()).group(1)
|
||||
|
||||
test_requires = [
|
||||
'pytest',
|
||||
'requests_mock'
|
||||
|
@ -16,7 +12,7 @@ test_requires = [
|
|||
|
||||
setup(
|
||||
name='ereuse-devicehub',
|
||||
version=version,
|
||||
version='0.2.0b1',
|
||||
url='https://github.com/ereuse/devicehub-teal',
|
||||
project_urls=OrderedDict((
|
||||
('Documentation', 'http://devicheub.ereuse.org'),
|
||||
|
@ -29,12 +25,11 @@ setup(
|
|||
description='A system to manage devices focusing reuse.',
|
||||
packages=find_packages(),
|
||||
include_package_data=True,
|
||||
platforms='any',
|
||||
python_requires='>=3.5.3',
|
||||
long_description=long_description,
|
||||
long_description_content_type='text/markdown',
|
||||
install_requires=[
|
||||
'teal>=0.2.0a28', # teal always first
|
||||
'teal>=0.2.0a30', # teal always first
|
||||
'click',
|
||||
'click-spinner',
|
||||
'ereuse-utils[Naming]>=0.4b10',
|
||||
|
@ -55,6 +50,9 @@ setup(
|
|||
'sphinxcontrib-plantuml >= 0.12',
|
||||
'sphinxcontrib-websupport >= 1.0.1'
|
||||
],
|
||||
'docs-auto': [
|
||||
'sphinx-autobuild'
|
||||
],
|
||||
'test': test_requires
|
||||
},
|
||||
tests_require=test_requires,
|
||||
|
|
|
@ -0,0 +1,128 @@
|
|||
{
|
||||
"type": "Snapshot",
|
||||
"uuid": "00000000-0000-0000-0000-000000000000",
|
||||
"software": "Workbench",
|
||||
"version": "11.0a1",
|
||||
"expectedEvents": [],
|
||||
"closed": true,
|
||||
"endTime": "2018-07-19T15:48:40.635776",
|
||||
"device": {
|
||||
"manufacturer": "Dell Inc.",
|
||||
"model": "Latitude E6440",
|
||||
"serialNumber": "FJBQVZ1",
|
||||
"events": [],
|
||||
"type": "Laptop",
|
||||
"chassis": "Laptop"
|
||||
},
|
||||
"components": [
|
||||
{
|
||||
"manufacturer": "Intel Corp.",
|
||||
"model": "Intel Core i7-4600M CPU @ 2.90GHz",
|
||||
"serialNumber": null,
|
||||
"events": [],
|
||||
"type": "Processor",
|
||||
"speed": 1.259899,
|
||||
"address": 64,
|
||||
"cores": 2,
|
||||
"threads": 4
|
||||
},
|
||||
{
|
||||
"manufacturer": "Samsung",
|
||||
"model": "M471B5173DB0-YK0",
|
||||
"serialNumber": "732CD498",
|
||||
"events": [],
|
||||
"type": "RamModule",
|
||||
"format": "SODIMM",
|
||||
"size": 4096,
|
||||
"interface": "DDR3",
|
||||
"speed": 1600.0
|
||||
},
|
||||
{
|
||||
"manufacturer": "Samsung",
|
||||
"model": "M471B5173DB0-YK0",
|
||||
"serialNumber": "152DD498",
|
||||
"events": [],
|
||||
"type": "RamModule",
|
||||
"format": "SODIMM",
|
||||
"size": 4096,
|
||||
"interface": "DDR3",
|
||||
"speed": 1600.0
|
||||
},
|
||||
{
|
||||
"manufacturer": null,
|
||||
"model": "Crucial_CT525MX3",
|
||||
"serialNumber": "164014297BCC",
|
||||
"events": [],
|
||||
"type": "HardDrive",
|
||||
"size": 500786,
|
||||
"interface": null
|
||||
},
|
||||
{
|
||||
"manufacturer": "Intel Corporation",
|
||||
"model": "4th Gen Core Processor Integrated Graphics Controller",
|
||||
"serialNumber": null,
|
||||
"events": [],
|
||||
"type": "GraphicCard",
|
||||
"memory": null
|
||||
},
|
||||
{
|
||||
"manufacturer": "Intel Corporation",
|
||||
"model": "Ethernet Connection I217-LM",
|
||||
"serialNumber": "ec:f4:bb:0b:18:90",
|
||||
"events": [],
|
||||
"type": "NetworkAdapter",
|
||||
"speed": 1000,
|
||||
"wireless": false
|
||||
},
|
||||
{
|
||||
"manufacturer": "Intel Corporation",
|
||||
"model": "Centrino Advanced-N 6235",
|
||||
"serialNumber": "c4:d9:87:47:90:e1",
|
||||
"events": [],
|
||||
"type": "NetworkAdapter",
|
||||
"wireless": true
|
||||
},
|
||||
{
|
||||
"manufacturer": null,
|
||||
"model": null,
|
||||
"serialNumber": "da:b4:3a:25:88:6c",
|
||||
"events": [],
|
||||
"type": "NetworkAdapter",
|
||||
"wireless": false
|
||||
},
|
||||
{
|
||||
"manufacturer": "Intel Corporation",
|
||||
"model": "Xeon E3-1200 v3/4th Gen Core Processor HD Audio Controller",
|
||||
"serialNumber": null,
|
||||
"events": [],
|
||||
"type": "SoundCard"
|
||||
},
|
||||
{
|
||||
"manufacturer": "CNFCH52J48303+YF2",
|
||||
"model": "Laptop_Integrated_Webcam_HD",
|
||||
"serialNumber": null,
|
||||
"events": [],
|
||||
"type": "SoundCard"
|
||||
},
|
||||
{
|
||||
"manufacturer": "Intel Corporation",
|
||||
"model": "8 Series/C220 Series Chipset High Definition Audio Controller",
|
||||
"serialNumber": null,
|
||||
"events": [],
|
||||
"type": "SoundCard"
|
||||
},
|
||||
{
|
||||
"manufacturer": "Dell Inc.",
|
||||
"model": "0159N7",
|
||||
"serialNumber": "/FJBQVZ1/CN1296342I009B/",
|
||||
"events": [],
|
||||
"type": "Motherboard",
|
||||
"usb": 3,
|
||||
"firewire": 0,
|
||||
"serial": 1,
|
||||
"pcmcia": 0,
|
||||
"slots": 1
|
||||
}
|
||||
],
|
||||
"elapsed": 0
|
||||
}
|
|
@ -1,9 +1,9 @@
|
|||
{
|
||||
"version": "11.0a3",
|
||||
"device": {
|
||||
"serialNumber": null,
|
||||
"manufacturer": null,
|
||||
"model": null,
|
||||
"serialNumber": 'foo',
|
||||
"manufacturer": 'bar',
|
||||
"model": 'baz',
|
||||
"type": "Desktop",
|
||||
"events": [],
|
||||
"chassis": "Tower"
|
||||
|
@ -53,7 +53,8 @@
|
|||
"type": "NetworkAdapter",
|
||||
"events": [],
|
||||
"serialNumber": "f4:6d:04:12:9b:85",
|
||||
"speed": 1000
|
||||
"speed": 1000,
|
||||
"wireless": false
|
||||
},
|
||||
{
|
||||
"serialNumber": "WD-WCAV29008961",
|
||||
|
@ -68,12 +69,12 @@
|
|||
{
|
||||
"endTime": "2018-07-13T11:54:55.096491",
|
||||
"type": "StepRandom",
|
||||
"error": false,
|
||||
"severity": "Info",
|
||||
"startTime": "2018-07-13T10:52:45.092981"
|
||||
}
|
||||
],
|
||||
"type": "EraseBasic",
|
||||
"error": false,
|
||||
"severity": "Info",
|
||||
"zeros": false,
|
||||
"startTime": "2018-07-13T10:52:45.092612"
|
||||
},
|
||||
|
@ -83,10 +84,10 @@
|
|||
"elapsed": 131,
|
||||
"length": "Short",
|
||||
"offlineUncorrectable": 1,
|
||||
"error": true,
|
||||
"severity": "Error",
|
||||
"currentPendingSectorCount": 1,
|
||||
"powerCycleCount": 1253,
|
||||
"reallocatedSectorCount": 6,
|
||||
"reallocatedSectorCount": 15,
|
||||
"type": "TestDataStorage",
|
||||
"status": "Completed: read failure"
|
||||
}
|
||||
|
@ -106,12 +107,12 @@
|
|||
{
|
||||
"endTime": "2018-07-13T12:55:47.326835",
|
||||
"type": "StepRandom",
|
||||
"error": false,
|
||||
"severity": "Info",
|
||||
"startTime": "2018-07-13T11:54:55.100925"
|
||||
}
|
||||
],
|
||||
"type": "EraseBasic",
|
||||
"error": false,
|
||||
"severity": "Info",
|
||||
"zeros": false,
|
||||
"startTime": "2018-07-13T11:54:55.100667"
|
||||
},
|
||||
|
@ -121,7 +122,7 @@
|
|||
"elapsed": 115,
|
||||
"length": "Short",
|
||||
"offlineUncorrectable": 0,
|
||||
"error": false,
|
||||
"severity": "Info",
|
||||
"currentPendingSectorCount": 0,
|
||||
"powerCycleCount": 1956,
|
||||
"reallocatedSectorCount": 0,
|
||||
|
|
|
@ -21,11 +21,11 @@ components:
|
|||
endTime: 2018-06-01T09:12:06
|
||||
steps:
|
||||
- type: StepZero
|
||||
error: False
|
||||
severity: Info
|
||||
startTime: 2018-06-01T08:15:00
|
||||
endTime: 2018-06-01T09:16:00
|
||||
- type: StepZero
|
||||
error: False
|
||||
severity: Info
|
||||
startTime: 2018-06-01T08:16:00
|
||||
endTime: 2018-06-01T09:17:00
|
||||
- type: Processor
|
||||
|
|
|
@ -77,7 +77,7 @@
|
|||
"status": "Self-test routine in progress",
|
||||
"powerCycleCount": 648,
|
||||
"length": "Short",
|
||||
"error": false,
|
||||
"severity": "Error",
|
||||
"lifetime": 202
|
||||
}
|
||||
],
|
||||
|
@ -110,7 +110,7 @@
|
|||
"type": "BenchmarkRamSysbench"
|
||||
},
|
||||
{
|
||||
"error": false,
|
||||
"severity": "Info",
|
||||
"elapsed": 60,
|
||||
"type": "StressTest"
|
||||
}
|
||||
|
|
|
@ -28,12 +28,12 @@ components:
|
|||
serialNumber: 6VMB1A52
|
||||
size: 238475
|
||||
test: {'@type': TestHardDrive, CommandTimeout: 1786733725708, CurrentPendingSectorCount: 0,
|
||||
OfflineUncorrectable: 0, assessment: true, error: false, firstError: null, lifetime: 16947,
|
||||
OfflineUncorrectable: 0, assessment: true, severity: Info, firstError: null, lifetime: 16947,
|
||||
passedLifetime: 16947, powerCycleCount: 1694, reallocatedSectorCount: 0, reportedUncorrectableErrors: 0,
|
||||
status: Completed without error, type: Short offline}
|
||||
type: HDD
|
||||
- { '@type': GraphicCard, manufacturer: Intel Corporation, memory: 256.0, model: 4
|
||||
Series Chipset Integrated Graphics Controller, serialNumber: null}
|
||||
- { '@type': GraphicCard, manufacturer: Intel Corporation, memory: 256.0, model: 4
|
||||
Series Chipset Integrated Graphics Controller, serialNumber: null}
|
||||
- '@type': Motherboard
|
||||
connectors: {firewire: 0, pcmcia: 0, serial: 1, usb: 8}
|
||||
manufacturer: LENOVO
|
||||
|
@ -41,22 +41,22 @@ components:
|
|||
serialNumber: null
|
||||
totalSlots: 0
|
||||
usedSlots: 2
|
||||
- { '@type': NetworkAdapter, manufacturer: Intel Corporation, model: 82567LM-3 Gigabit
|
||||
Network Connection, serialNumber: '00:21:86:2c:5e:d6', speed: 1000}
|
||||
- { '@type': SoundCard, manufacturer: Intel Corporation, model: 82801JD/DO HD Audio
|
||||
Controller, serialNumber: null}
|
||||
- { '@type': NetworkAdapter, manufacturer: Intel Corporation, model: 82567LM-3 Gigabit
|
||||
Network Connection, serialNumber: '00:21:86:2c:5e:d6', speed: 1000}
|
||||
- { '@type': SoundCard, manufacturer: Intel Corporation, model: 82801JD/DO HD Audio
|
||||
Controller, serialNumber: null}
|
||||
condition:
|
||||
appearance: {general: B}
|
||||
functionality: {general: A}
|
||||
date: '2018-05-09T10:32:15'
|
||||
debug:
|
||||
capabilities: { dmi-2.5: DMI version 2.5, smbios-2.5: SMBIOS version 2.5, smp: Symmetric
|
||||
Multi-Processing, smp-1.4: SMP specification v1.4}
|
||||
capabilities: { dmi-2.5: DMI version 2.5, smbios-2.5: SMBIOS version 2.5, smp: Symmetric
|
||||
Multi-Processing, smp-1.4: SMP specification v1.4}
|
||||
children:
|
||||
- children:
|
||||
- capabilities: { acpi: ACPI, biosbootspecification: BIOS boot specification, cdboot: Booting
|
||||
from CD-ROM/DVD, edd: Enhanced Disk Drive extensions, escd: ESCD, ls120boot: Booting
|
||||
from LS-120, pci: PCI bus, pnp: Plug-and-Play, shadowing: BIOS shadowing,
|
||||
- capabilities: { acpi: ACPI, biosbootspecification: BIOS boot specification, cdboot: Booting
|
||||
from CD-ROM/DVD, edd: Enhanced Disk Drive extensions, escd: ESCD, ls120boot: Booting
|
||||
from LS-120, pci: PCI bus, pnp: Plug-and-Play, shadowing: BIOS shadowing,
|
||||
smartbattery: Smart battery, upgrade: BIOS EEPROM can be upgraded, usb: USB
|
||||
legacy emulation}
|
||||
capacity: 4128768
|
||||
|
@ -71,9 +71,9 @@ debug:
|
|||
vendor: LENOVO
|
||||
version: 5CKT48AUS
|
||||
- businfo: cpu@0
|
||||
capabilities: { acpi: thermal control (ACPI), aperfmperf: true, apic: on-chip
|
||||
advanced programmable interrupt controller (APIC), arch_perfmon: true, boot: boot
|
||||
processor, bts: true, clflush: true, cmov: conditional move instruction,
|
||||
capabilities: { acpi: thermal control (ACPI), aperfmperf: true, apic: on-chip
|
||||
advanced programmable interrupt controller (APIC), arch_perfmon: true, boot: boot
|
||||
processor, bts: true, clflush: true, cmov: conditional move instruction,
|
||||
constant_tsc: true, cpufreq: CPU Frequency scaling, cx16: true, cx8: compare
|
||||
and exchange 8-byte, de: debugging extensions, ds_cpl: true, dtes64: true,
|
||||
dtherm: true, dts: debug trace and EMON store MSRs, eagerfpu: true, est: true,
|
||||
|
@ -149,16 +149,16 @@ debug:
|
|||
version: 6.7.10
|
||||
width: 64
|
||||
- children:
|
||||
- { claimed: true, class: memory, clock: 1067000000, description: DIMM DDR2 Synchronous
|
||||
1067 MHz (0.9 ns), handle: 'DMI:001F', id: 'bank:0', physid: '0', product: '000000000000000000000000000000000000',
|
||||
- { claimed: true, class: memory, clock: 1067000000, description: DIMM DDR2 Synchronous
|
||||
1067 MHz (0.9 ns), handle: 'DMI:001F', id: 'bank:0', physid: '0', product: '000000000000000000000000000000000000',
|
||||
serial: '00000000', size: 2147483648, slot: J6G1, units: bytes, vendor: Unknown,
|
||||
width: 40960}
|
||||
- {claimed: true, class: memory, clock: 1067000000, description: 'DIMM DDR2
|
||||
Synchronous 1067 MHz (0.9 ns) [empty]', handle: 'DMI:0020', id: 'bank:1',
|
||||
physid: '1', product: 012345678901234567890123456789012345, serial: '01234567',
|
||||
slot: J6G2, vendor: 48spaces}
|
||||
- { claimed: true, class: memory, clock: 1067000000, description: DIMM DDR2 Synchronous
|
||||
1067 MHz (0.9 ns), handle: 'DMI:0021', id: 'bank:2', physid: '2', product: '000000000000000000000000000000000000',
|
||||
- { claimed: true, class: memory, clock: 1067000000, description: DIMM DDR2 Synchronous
|
||||
1067 MHz (0.9 ns), handle: 'DMI:0021', id: 'bank:2', physid: '2', product: '000000000000000000000000000000000000',
|
||||
serial: '00000000', size: 2147483648, slot: J6H1, units: bytes, vendor: Unknown,
|
||||
width: 41984}
|
||||
- {claimed: true, class: memory, clock: 1067000000, description: 'DIMM DDR2
|
||||
|
@ -205,7 +205,7 @@ debug:
|
|||
- businfo: pci@0000:00:00.0
|
||||
children:
|
||||
- businfo: pci@0000:00:02.0
|
||||
capabilities: { bus_master: bus mastering, cap_list: PCI capabilities listing,
|
||||
capabilities: { bus_master: bus mastering, cap_list: PCI capabilities listing,
|
||||
msi: Message Signalled Interrupts, pm: Power Management, rom: extension
|
||||
ROM, vga_controller: true}
|
||||
claimed: true
|
||||
|
@ -265,8 +265,8 @@ debug:
|
|||
version: '03'
|
||||
width: 32
|
||||
- businfo: pci@0000:00:03.3
|
||||
capabilities: { '16550': true, bus_master: bus mastering, cap_list: PCI capabilities
|
||||
listing, msi: Message Signalled Interrupts, pm: Power Management}
|
||||
capabilities: { '16550': true, bus_master: bus mastering, cap_list: PCI capabilities
|
||||
listing, msi: Message Signalled Interrupts, pm: Power Management}
|
||||
claimed: true
|
||||
class: communication
|
||||
clock: 66000000
|
||||
|
@ -280,8 +280,8 @@ debug:
|
|||
version: '03'
|
||||
width: 32
|
||||
- businfo: pci@0000:00:19.0
|
||||
capabilities: { 1000bt-fd: 1Gbit/s (full duplex), 100bt: 100Mbit/s, 100bt-fd: 100Mbit/s
|
||||
(full duplex), 10bt: 10Mbit/s, 10bt-fd: 10Mbit/s (full duplex), autonegotiation: Auto-negotiation,
|
||||
capabilities: { 1000bt-fd: 1Gbit/s (full duplex), 100bt: 100Mbit/s, 100bt-fd: 100Mbit/s
|
||||
(full duplex), 10bt: 10Mbit/s, 10bt-fd: 10Mbit/s (full duplex), autonegotiation: Auto-negotiation,
|
||||
bus_master: bus mastering, cap_list: PCI capabilities listing, ethernet: true,
|
||||
msi: Message Signalled Interrupts, physical: Physical interface, pm: Power
|
||||
Management, tp: twisted pair}
|
||||
|
@ -575,8 +575,8 @@ debug:
|
|||
version: '02'
|
||||
width: 32
|
||||
- businfo: pci@0000:00:1f.2
|
||||
capabilities: { ahci_1.0: true, bus_master: bus mastering, cap_list: PCI capabilities
|
||||
listing, msi: Message Signalled Interrupts, pm: Power Management, storage: true}
|
||||
capabilities: { ahci_1.0: true, bus_master: bus mastering, cap_list: PCI capabilities
|
||||
listing, msi: Message Signalled Interrupts, pm: Power Management, storage: true}
|
||||
claimed: true
|
||||
class: storage
|
||||
clock: 66000000
|
||||
|
@ -620,7 +620,7 @@ debug:
|
|||
table}
|
||||
children:
|
||||
- businfo: scsi@0:0.0.0,1
|
||||
capabilities: { dir_nlink: directories with 65000+ subdirs, ext2: EXT2/EXT3,
|
||||
capabilities: { dir_nlink: directories with 65000+ subdirs, ext2: EXT2/EXT3,
|
||||
ext4: true, extended_attributes: Extended Attributes, extents: extent-based
|
||||
allocation, huge_files: 16TB+ files, initialized: initialized volume,
|
||||
journaled: true, large_files: 4GB+ files, primary: Primary partition}
|
||||
|
@ -672,8 +672,8 @@ debug:
|
|||
- capabilities: {emulated: Emulated device}
|
||||
children:
|
||||
- businfo: scsi@1:0.0.0
|
||||
capabilities: { audio: Audio CD playback, cd-r: CD-R burning, cd-rw: CD-RW
|
||||
burning, dvd: DVD playback, dvd-r: DVD-R burning, dvd-ram: DVD-RAM burning,
|
||||
capabilities: { audio: Audio CD playback, cd-r: CD-R burning, cd-rw: CD-RW
|
||||
burning, dvd: DVD playback, dvd-r: DVD-R burning, dvd-ram: DVD-RAM burning,
|
||||
removable: support is removable}
|
||||
claimed: true
|
||||
class: disk
|
||||
|
|
|
@ -8,5 +8,5 @@
|
|||
|
||||
type: 'StressTest'
|
||||
elapsed: 300
|
||||
error: False
|
||||
severity: Info
|
||||
# snapshot: None fulfill!
|
|
@ -7,7 +7,7 @@
|
|||
# All numbers are invented
|
||||
|
||||
type: 'EraseSectors'
|
||||
error: False
|
||||
severity: Info
|
||||
# snapshot: None fulfill!
|
||||
# device: None fulfill!
|
||||
zeros: False
|
||||
|
@ -17,4 +17,4 @@ steps:
|
|||
- type: 'StepRandom'
|
||||
startTime: '2018-01-01T10:10:10'
|
||||
endTime: '2018-01-01T12:10:10'
|
||||
error: False
|
||||
severity: Info
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
type: 'Install'
|
||||
elapsed: 420
|
||||
error: False
|
||||
severity: Info
|
||||
# snapshot: None fulfill!
|
||||
# device: None fulfill!
|
||||
name: 'LinuxMint 18.01 32b'
|
|
@ -40,4 +40,4 @@ def test_api_docs(client: Client):
|
|||
'scheme': 'basic',
|
||||
'name': 'Authorization'
|
||||
}
|
||||
assert 92 == len(docs['definitions'])
|
||||
assert 94 == len(docs['definitions'])
|
||||
|
|
|
@ -20,7 +20,8 @@ from ereuse_devicehub.resources.device.exceptions import NeedsId
|
|||
from ereuse_devicehub.resources.device.schemas import Device as DeviceS
|
||||
from ereuse_devicehub.resources.device.sync import MismatchBetweenTags, MismatchBetweenTagsAndHid, \
|
||||
Sync
|
||||
from ereuse_devicehub.resources.enums import ComputerChassis, DisplayTech
|
||||
from ereuse_devicehub.resources.enums import ComputerChassis, DisplayTech, Severity, \
|
||||
SnapshotSoftware
|
||||
from ereuse_devicehub.resources.event import models as m
|
||||
from ereuse_devicehub.resources.event.models import Remove, Test
|
||||
from ereuse_devicehub.resources.tag.model import Tag
|
||||
|
@ -73,6 +74,11 @@ def test_device_model():
|
|||
assert d.GraphicCard.query.first() is None, 'We should have deleted it –it was inside the pc'
|
||||
|
||||
|
||||
@pytest.mark.xfail(reason='Test not developed')
|
||||
def test_device_problems():
|
||||
pass
|
||||
|
||||
|
||||
@pytest.mark.usefixtures(conftest.app_context.__name__)
|
||||
def test_device_schema():
|
||||
"""Ensures the user does not upload non-writable or extra fields."""
|
||||
|
@ -393,7 +399,7 @@ def test_get_device(app: Devicehub, user: UserClient):
|
|||
db.session.add(pc)
|
||||
db.session.add(Test(device=pc,
|
||||
elapsed=timedelta(seconds=4),
|
||||
error=False,
|
||||
severity=Severity.Info,
|
||||
agent=Person(name='Timmy'),
|
||||
author=User(email='bar@bar.com')))
|
||||
db.session.commit()
|
||||
|
@ -402,7 +408,7 @@ def test_get_device(app: Devicehub, user: UserClient):
|
|||
assert pc['events'][0]['type'] == 'Test'
|
||||
assert pc['events'][0]['device'] == 1
|
||||
assert pc['events'][0]['elapsed'] == 4
|
||||
assert not pc['events'][0]['error']
|
||||
assert pc['events'][0]['severity'] == 'Info'
|
||||
assert UUID(pc['events'][0]['author'])
|
||||
assert 'events_components' not in pc, 'events_components are internal use only'
|
||||
assert 'events_one' not in pc, 'they are internal use only'
|
||||
|
@ -531,3 +537,30 @@ def test_networking_model():
|
|||
switch = d.Switch(speed=1000, wireless=False)
|
||||
db.session.add(switch)
|
||||
db.session.commit()
|
||||
|
||||
|
||||
@pytest.mark.usefixtures(conftest.app_context.__name__)
|
||||
def test_cooking_mixer():
|
||||
mixer = d.Mixer(serial_number='foo', model='bar', manufacturer='foobar')
|
||||
db.session.add(mixer)
|
||||
db.session.commit()
|
||||
|
||||
|
||||
def test_cooking_mixer_api(user: UserClient):
|
||||
snapshot, _ = user.post(
|
||||
{
|
||||
'type': 'Snapshot',
|
||||
'device': {
|
||||
'serialNumber': 'foo',
|
||||
'model': 'bar',
|
||||
'manufacturer': 'foobar',
|
||||
'type': 'Mixer'
|
||||
},
|
||||
'version': '11.0',
|
||||
'software': SnapshotSoftware.Web.name
|
||||
},
|
||||
res=m.Snapshot
|
||||
)
|
||||
mixer, _ = user.get(res=d.Device, item=snapshot['device']['id'])
|
||||
assert mixer['type'] == 'Mixer'
|
||||
assert mixer['serialNumber'] == 'foo'
|
||||
|
|
|
@ -4,7 +4,7 @@ from teal.utils import compiled
|
|||
from ereuse_devicehub.client import UserClient
|
||||
from ereuse_devicehub.db import db
|
||||
from ereuse_devicehub.devicehub import Devicehub
|
||||
from ereuse_devicehub.resources.device.models import Desktop, Device, Laptop, Processor, \
|
||||
from ereuse_devicehub.resources.device.models import Desktop, Device, GraphicCard, Laptop, Server, \
|
||||
SolidStateDrive
|
||||
from ereuse_devicehub.resources.device.search import DeviceSearch
|
||||
from ereuse_devicehub.resources.device.views import Filters, Sorting
|
||||
|
@ -56,51 +56,70 @@ def test_device_sort():
|
|||
|
||||
@pytest.fixture()
|
||||
def device_query_dummy(app: Devicehub):
|
||||
"""
|
||||
3 computers, where:
|
||||
|
||||
1. s1 Desktop with a Processor
|
||||
2. s2 Desktop with an SSD
|
||||
3. s3 Laptop
|
||||
4. s4 Server with another SSD
|
||||
|
||||
:param app:
|
||||
:return:
|
||||
"""
|
||||
with app.app_context():
|
||||
devices = ( # The order matters ;-)
|
||||
Desktop(serial_number='s1',
|
||||
Desktop(serial_number='1',
|
||||
model='ml1',
|
||||
manufacturer='mr1',
|
||||
chassis=ComputerChassis.Tower),
|
||||
Laptop(serial_number='s3',
|
||||
model='ml3',
|
||||
manufacturer='mr3',
|
||||
chassis=ComputerChassis.Detachable),
|
||||
Desktop(serial_number='s2',
|
||||
Desktop(serial_number='2',
|
||||
model='ml2',
|
||||
manufacturer='mr2',
|
||||
chassis=ComputerChassis.Microtower),
|
||||
SolidStateDrive(serial_number='s4', model='ml4', manufacturer='mr4')
|
||||
Laptop(serial_number='3',
|
||||
model='ml3',
|
||||
manufacturer='mr3',
|
||||
chassis=ComputerChassis.Detachable),
|
||||
Server(serial_number='4',
|
||||
model='ml4',
|
||||
manufacturer='mr4',
|
||||
chassis=ComputerChassis.Tower),
|
||||
)
|
||||
devices[0].components.add(
|
||||
GraphicCard(serial_number='1-gc', model='s1ml', manufacturer='s1mr')
|
||||
)
|
||||
devices[1].components.add(
|
||||
SolidStateDrive(serial_number='2-ssd', model='s2ml', manufacturer='s2mr')
|
||||
)
|
||||
devices[-1].components.add(
|
||||
SolidStateDrive(serial_number='4-ssd', model='s4ml', manufacturer='s4mr')
|
||||
)
|
||||
devices[-1].parent = devices[0] # s4 in s1
|
||||
db.session.add_all(devices)
|
||||
|
||||
devices[0].components.add(Processor(model='ml5', manufacturer='mr5'))
|
||||
|
||||
db.session.commit()
|
||||
|
||||
|
||||
@pytest.mark.usefixtures(device_query_dummy.__name__)
|
||||
def test_device_query_no_filters(user: UserClient):
|
||||
i, _ = user.get(res=Device)
|
||||
assert tuple(d['type'] for d in i['items']) == (
|
||||
'Desktop', 'Laptop', 'Desktop', 'SolidStateDrive', 'Processor'
|
||||
assert ('1', '2', '3', '4', '1-gc', '2-ssd', '4-ssd') == tuple(
|
||||
d['serialNumber'] for d in i['items']
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.usefixtures(device_query_dummy.__name__)
|
||||
def test_device_query_filter_type(user: UserClient):
|
||||
i, _ = user.get(res=Device, query=[('filter', {'type': ['Desktop', 'Laptop']})])
|
||||
assert tuple(d['type'] for d in i['items']) == ('Desktop', 'Laptop', 'Desktop')
|
||||
assert ('1', '2', '3') == tuple(d['serialNumber'] for d in i['items'])
|
||||
|
||||
|
||||
@pytest.mark.usefixtures(device_query_dummy.__name__)
|
||||
def test_device_query_filter_sort(user: UserClient):
|
||||
i, _ = user.get(res=Device, query=[
|
||||
('sort', {'created': Sorting.ASCENDING}),
|
||||
('sort', {'created': Sorting.DESCENDING}),
|
||||
('filter', {'type': ['Computer']})
|
||||
])
|
||||
assert tuple(d['type'] for d in i['items']) == ('Desktop', 'Laptop', 'Desktop')
|
||||
assert ('4', '3', '2', '1') == tuple(d['serialNumber'] for d in i['items'])
|
||||
|
||||
|
||||
@pytest.mark.usefixtures(device_query_dummy.__name__)
|
||||
|
@ -111,7 +130,7 @@ def test_device_query_filter_lots(user: UserClient):
|
|||
i, _ = user.get(res=Device, query=[
|
||||
('filter', {'lot': {'id': [parent['id']]}})
|
||||
])
|
||||
assert len(i['items']) == 0, 'No devices in lot'
|
||||
assert not i['items'], 'No devices in lot'
|
||||
|
||||
parent, _ = user.post({},
|
||||
res=Lot,
|
||||
|
@ -120,42 +139,37 @@ def test_device_query_filter_lots(user: UserClient):
|
|||
i, _ = user.get(res=Device, query=[
|
||||
('filter', {'type': ['Computer']})
|
||||
])
|
||||
lot, _ = user.post({},
|
||||
res=Lot,
|
||||
item='{}/devices'.format(parent['id']),
|
||||
query=[('id', d['id']) for d in i['items'][:-1]])
|
||||
lot, _ = user.post({},
|
||||
res=Lot,
|
||||
item='{}/devices'.format(child['id']),
|
||||
query=[('id', i['items'][-1]['id'])])
|
||||
assert ('1', '2', '3', '4') == tuple(d['serialNumber'] for d in i['items'])
|
||||
parent, _ = user.post({},
|
||||
res=Lot,
|
||||
item='{}/devices'.format(parent['id']),
|
||||
query=[('id', d['id']) for d in i['items'][:2]])
|
||||
child, _ = user.post({},
|
||||
res=Lot,
|
||||
item='{}/devices'.format(child['id']),
|
||||
query=[('id', d['id']) for d in i['items'][2:]])
|
||||
i, _ = user.get(res=Device, query=[
|
||||
('filter', {'lot': {'id': [parent['id']]}}),
|
||||
('sort', {'id': Sorting.ASCENDING})
|
||||
('filter', {'lot': {'id': [parent['id']]}})
|
||||
])
|
||||
assert tuple(x['id'] for x in i['items']) == (1, 2, 3, 4, 5), \
|
||||
'The parent lot contains 2 items plus indirectly the third one, and 1st device the HDD.'
|
||||
assert ('1', '2', '3', '4', '1-gc', '2-ssd', '4-ssd') == tuple(
|
||||
x['serialNumber'] for x in i['items']
|
||||
), 'The parent lot contains 2 items plus indirectly the other ' \
|
||||
'2 from the child lot, with all their 2 components'
|
||||
|
||||
i, _ = user.get(res=Device, query=[
|
||||
('filter', {'type': ['Computer'], 'lot': {'id': [parent['id']]}}),
|
||||
('sort', {'id': Sorting.ASCENDING})
|
||||
])
|
||||
assert tuple(x['id'] for x in i['items']) == (1, 2, 3)
|
||||
|
||||
assert ('1', '2', '3', '4') == tuple(x['serialNumber'] for x in i['items'])
|
||||
s, _ = user.get(res=Device, query=[
|
||||
('filter', {'lot': {'id': [child['id']]}})
|
||||
])
|
||||
assert len(s['items']) == 1
|
||||
assert s['items'][0]['chassis'] == 'Microtower', 'The child lot only contains the last device.'
|
||||
assert ('3', '4', '4-ssd') == tuple(x['serialNumber'] for x in s['items'])
|
||||
s, _ = user.get(res=Device, query=[
|
||||
('filter', {'lot': {'id': [child['id'], parent['id']]}})
|
||||
])
|
||||
assert all(x['id'] == id for x, id in zip(i['items'], (1, 2, 3, 4))), \
|
||||
'Adding both lots is redundant in this case and we have the 4 elements.'
|
||||
i, _ = user.get(res=Device, query=[
|
||||
('filter', {'lot': {'id': [parent['id']]}, 'type': ['Computer']}),
|
||||
('sort', {'id': Sorting.ASCENDING})
|
||||
])
|
||||
assert tuple(x['id'] for x in i['items']) == (1, 2, 3), 'Only computers now'
|
||||
assert ('1', '2', '3', '4', '1-gc', '2-ssd', '4-ssd') == tuple(
|
||||
x['serialNumber'] for x in s['items']
|
||||
), 'Adding both lots is redundant in this case and we have the 4 elements.'
|
||||
|
||||
|
||||
def test_device_query(user: UserClient):
|
||||
|
@ -190,6 +204,21 @@ def test_device_search_all_devices_token_if_empty(app: Devicehub, user: UserClie
|
|||
assert i['items']
|
||||
|
||||
|
||||
def test_device_search_regenerate_table(app: DeviceSearch, user: UserClient):
|
||||
user.post(file('basic.snapshot'), res=Snapshot)
|
||||
i, _ = user.get(res=Device, query=[('search', 'Desktop')])
|
||||
assert i['items'], 'Normal search works'
|
||||
with app.app_context():
|
||||
app.db.session.execute('TRUNCATE TABLE {}'.format(DeviceSearch.__table__.name))
|
||||
app.db.session.commit()
|
||||
i, _ = user.get(res=Device, query=[('search', 'Desktop')])
|
||||
assert not i['items'], 'Truncate deleted all items'
|
||||
runner = app.test_cli_runner()
|
||||
runner.invoke(args=['regenerate-search'], catch_exceptions=False)
|
||||
i, _ = user.get(res=Device, query=[('search', 'Desktop')])
|
||||
assert i['items'], 'Regenerated re-made the table'
|
||||
|
||||
|
||||
def test_device_query_search(user: UserClient):
|
||||
# todo improve
|
||||
user.post(file('basic.snapshot'), res=Snapshot)
|
||||
|
@ -199,25 +228,27 @@ def test_device_query_search(user: UserClient):
|
|||
assert i['items'][0]['id'] == 1
|
||||
i, _ = user.get(res=Device, query=[('search', 'intel')])
|
||||
assert len(i['items']) == 1
|
||||
i, _ = user.get(res=Device, query=[('search', '1')])
|
||||
assert len(i['items']) == 1
|
||||
|
||||
|
||||
@pytest.mark.xfail(reason='No dictionary yet that knows asustek = asus')
|
||||
def test_device_query_search_synonyms_asus(user: UserClient):
|
||||
user.post(file('real-eee-1001pxd.snapshot.11'), res=Snapshot)
|
||||
i, _ = user.get(res=Device, query=[('search', 'asustek')])
|
||||
assert len(i['items']) == 1
|
||||
assert 1 == len(i['items'])
|
||||
i, _ = user.get(res=Device, query=[('search', 'asus')])
|
||||
assert len(i['items']) == 1
|
||||
assert 1 == len(i['items'])
|
||||
|
||||
|
||||
@pytest.mark.xfail(reason='No dictionary yet that knows hp = hewlett packard')
|
||||
def test_device_query_search_synonyms_intel(user: UserClient):
|
||||
s = file('real-hp.snapshot.11')
|
||||
s['device']['model'] = 'foo' # The model had the word 'HP' in it
|
||||
user.post(s, res=Snapshot)
|
||||
i, _ = user.get(res=Device, query=[('search', 'hewlett packard')])
|
||||
assert len(i['items']) == 1
|
||||
assert 1 == len(i['items'])
|
||||
i, _ = user.get(res=Device, query=[('search', 'hewlett')])
|
||||
assert len(i['items']) == 1
|
||||
assert 1 == len(i['items'])
|
||||
i, _ = user.get(res=Device, query=[('search', 'hp')])
|
||||
assert len(i['items']) == 1
|
||||
assert 1 == len(i['items'])
|
||||
i, _ = user.get(res=Device, query=[('search', 'h.p')])
|
||||
assert 1 == len(i['items'])
|
||||
|
|
|
@ -13,7 +13,7 @@ from ereuse_devicehub.db import db
|
|||
from ereuse_devicehub.resources.device import states
|
||||
from ereuse_devicehub.resources.device.models import Desktop, Device, GraphicCard, HardDrive, \
|
||||
RamModule, SolidStateDrive
|
||||
from ereuse_devicehub.resources.enums import ComputerChassis, TestDataStorageLength
|
||||
from ereuse_devicehub.resources.enums import ComputerChassis, Severity, TestDataStorageLength
|
||||
from ereuse_devicehub.resources.event import models
|
||||
from tests import conftest
|
||||
from tests.conftest import create_user, file
|
||||
|
@ -86,18 +86,35 @@ def test_erase_sectors_steps():
|
|||
|
||||
|
||||
@pytest.mark.usefixtures(conftest.auth_app_context.__name__)
|
||||
def test_test_data_storage():
|
||||
def test_test_data_storage_working():
|
||||
"""Tests TestDataStorage with the resulting properties in Device."""
|
||||
hdd = HardDrive(serial_number='foo', manufacturer='bar', model='foo-bar')
|
||||
test = models.TestDataStorage(
|
||||
device=HardDrive(serial_number='foo', manufacturer='bar', model='foo-bar'),
|
||||
error=False,
|
||||
device=hdd,
|
||||
severity=Severity.Error,
|
||||
elapsed=timedelta(minutes=25),
|
||||
length=TestDataStorageLength.Short,
|
||||
status='ok!',
|
||||
status=':-(',
|
||||
lifetime=timedelta(days=120)
|
||||
)
|
||||
db.session.add(test)
|
||||
db.session.commit()
|
||||
assert models.TestDataStorage.query.one()
|
||||
db.session.flush()
|
||||
assert hdd.working == [test]
|
||||
assert not hdd.problems
|
||||
# Add new test overriding the first test in the problems
|
||||
# / working condition
|
||||
test2 = models.TestDataStorage(
|
||||
device=hdd,
|
||||
severity=Severity.Warning,
|
||||
elapsed=timedelta(minutes=25),
|
||||
length=TestDataStorageLength.Short,
|
||||
status=':-(',
|
||||
lifetime=timedelta(days=120)
|
||||
)
|
||||
db.session.add(test2)
|
||||
db.session.flush()
|
||||
assert hdd.working == [test2]
|
||||
assert hdd.problems == []
|
||||
|
||||
|
||||
@pytest.mark.usefixtures(conftest.auth_app_context.__name__)
|
||||
|
@ -192,7 +209,6 @@ def test_update_parent():
|
|||
(models.Repair, states.Physical.Repaired),
|
||||
(models.ToPrepare, states.Physical.Preparing),
|
||||
(models.ReadyToUse, states.Physical.ReadyToBeUsed),
|
||||
(models.ToPrepare, states.Physical.Preparing),
|
||||
(models.Prepare, states.Physical.Prepared)
|
||||
])
|
||||
def test_generic_event(event_model_state: Tuple[models.Event, states.Trading], user: UserClient):
|
||||
|
|
|
@ -23,39 +23,93 @@ In case of error, debug with:
|
|||
"""
|
||||
|
||||
|
||||
def test_lot_modify_patch_endpoint(user: UserClient):
|
||||
@pytest.mark.usefixtures(conftest.auth_app_context.__name__)
|
||||
def test_lot_model_children():
|
||||
"""Tests the property Lot.children
|
||||
|
||||
l1
|
||||
|
|
||||
l2
|
||||
|
|
||||
l3
|
||||
"""
|
||||
lots = Lot('1'), Lot('2'), Lot('3')
|
||||
l1, l2, l3 = lots
|
||||
db.session.add_all(lots)
|
||||
db.session.flush()
|
||||
assert not l1.children
|
||||
assert not l1.parents
|
||||
assert not l2.children
|
||||
assert not l2.parents
|
||||
assert not l3.parents
|
||||
assert not l3.children
|
||||
|
||||
l1.add_children(l2)
|
||||
assert l1.children == {l2}
|
||||
assert l2.parents == {l1}
|
||||
|
||||
l2.add_children(l3)
|
||||
assert l1.children == {l2}
|
||||
assert l2.parents == {l1}
|
||||
assert l2.children == {l3}
|
||||
assert l3.parents == {l2}
|
||||
|
||||
l2.delete()
|
||||
db.session.flush()
|
||||
assert not l1.children
|
||||
assert not l3.parents
|
||||
|
||||
l1.delete()
|
||||
db.session.flush()
|
||||
l3b = Lot.query.one()
|
||||
assert l3 == l3b
|
||||
assert not l3.parents
|
||||
|
||||
|
||||
def test_lot_modify_patch_endpoint_and_delete(user: UserClient):
|
||||
"""Creates and modifies lot properties through the endpoint"""
|
||||
l, _ = user.post({'name': 'foo'}, res=Lot)
|
||||
l, _ = user.post({'name': 'foo', 'description': 'baz'}, res=Lot)
|
||||
assert l['name'] == 'foo'
|
||||
user.patch({'name': 'bar'}, res=Lot, item=l['id'], status=204)
|
||||
assert l['description'] == 'baz'
|
||||
user.patch({'name': 'bar', 'description': 'bax'}, res=Lot, item=l['id'], status=204)
|
||||
l_after, _ = user.get(res=Lot, item=l['id'])
|
||||
assert l_after['name'] == 'bar'
|
||||
assert l_after['description'] == 'bax'
|
||||
user.delete(res=Lot, item=l['id'], status=204)
|
||||
user.get(res=Lot, item=l['id'], status=404)
|
||||
|
||||
|
||||
@pytest.mark.xfail(reason='Components are not added to lots!')
|
||||
@pytest.mark.usefixtures(conftest.auth_app_context.__name__)
|
||||
def test_lot_device_relationship():
|
||||
device = Desktop(serial_number='foo',
|
||||
model='bar',
|
||||
manufacturer='foobar',
|
||||
chassis=ComputerChassis.Lunchbox)
|
||||
lot = Lot('lot1')
|
||||
lot.devices.add(device)
|
||||
db.session.add(lot)
|
||||
device.components.add(GraphicCard(serial_number='foo', model='bar1', manufacturer='baz'))
|
||||
child = Lot('child')
|
||||
child.devices.add(device)
|
||||
db.session.add(child)
|
||||
db.session.flush()
|
||||
|
||||
lot_device = LotDevice.query.one() # type: LotDevice
|
||||
assert lot_device.device_id == device.id
|
||||
assert lot_device.lot_id == lot.id
|
||||
assert lot_device.lot_id == child.id
|
||||
assert lot_device.created
|
||||
assert lot_device.author_id == g.user.id
|
||||
assert device.lots == {lot}
|
||||
assert device in lot
|
||||
assert device.lots == {child}
|
||||
assert device in child
|
||||
assert device in child.all_devices
|
||||
|
||||
graphic = GraphicCard(serial_number='foo', model='bar')
|
||||
device.components.add(graphic)
|
||||
db.session.flush()
|
||||
assert graphic in lot
|
||||
assert graphic in child
|
||||
|
||||
parent = Lot('parent')
|
||||
db.session.add(parent)
|
||||
db.session.flush()
|
||||
parent.add_children(child)
|
||||
assert child in parent
|
||||
|
||||
|
||||
@pytest.mark.usefixtures(conftest.auth_app_context.__name__)
|
||||
|
@ -67,13 +121,13 @@ def test_add_edge():
|
|||
db.session.add(parent)
|
||||
db.session.flush()
|
||||
|
||||
parent.add_child(child)
|
||||
parent.add_children(child)
|
||||
|
||||
assert child in parent
|
||||
assert len(child.paths) == 1
|
||||
assert len(parent.paths) == 1
|
||||
|
||||
parent.remove_child(child)
|
||||
parent.remove_children(child)
|
||||
assert child not in parent
|
||||
assert len(child.paths) == 1
|
||||
assert len(parent.paths) == 1
|
||||
|
@ -82,8 +136,8 @@ def test_add_edge():
|
|||
db.session.add(grandparent)
|
||||
db.session.flush()
|
||||
|
||||
grandparent.add_child(parent)
|
||||
parent.add_child(child)
|
||||
grandparent.add_children(parent)
|
||||
parent.add_children(child)
|
||||
|
||||
assert parent in grandparent
|
||||
assert child in parent
|
||||
|
@ -104,31 +158,36 @@ def test_lot_multiple_parents(auth_app_context):
|
|||
db.session.add_all(lots)
|
||||
db.session.flush()
|
||||
|
||||
grandparent1.add_child(parent)
|
||||
grandparent1.add_children(parent)
|
||||
assert parent in grandparent1
|
||||
parent.add_child(child)
|
||||
parent.add_children(child)
|
||||
assert child in parent
|
||||
assert child in grandparent1
|
||||
grandparent2.add_child(parent)
|
||||
grandparent2.add_children(parent)
|
||||
assert parent in grandparent1
|
||||
assert parent in grandparent2
|
||||
assert child in parent
|
||||
assert child in grandparent1
|
||||
assert child in grandparent2
|
||||
|
||||
p = parent.id
|
||||
c = child.id
|
||||
gp1 = grandparent1.id
|
||||
gp2 = grandparent2.id
|
||||
|
||||
nodes = auth_app_context.resources[Lot.t].VIEW.ui_tree()
|
||||
assert nodes[0]['name'] == 'grandparent1'
|
||||
assert nodes[0]['nodes'][0]['name'] == 'parent'
|
||||
assert nodes[0]['nodes'][0]['nodes'][0]['name'] == 'child'
|
||||
assert nodes[0]['id'] == gp1
|
||||
assert nodes[0]['nodes'][0]['id'] == p
|
||||
assert nodes[0]['nodes'][0]['nodes'][0]['id'] == c
|
||||
assert nodes[0]['nodes'][0]['nodes'][0]['nodes'] == []
|
||||
assert nodes[1]['name'] == 'grandparent2'
|
||||
assert nodes[1]['nodes'][0]['name'] == 'parent'
|
||||
assert nodes[1]['nodes'][0]['nodes'][0]['name'] == 'child'
|
||||
assert nodes[1]['id'] == gp2
|
||||
assert nodes[1]['nodes'][0]['id'] == p
|
||||
assert nodes[1]['nodes'][0]['nodes'][0]['id'] == c
|
||||
assert nodes[1]['nodes'][0]['nodes'][0]['nodes'] == []
|
||||
|
||||
# Now remove all childs
|
||||
|
||||
grandparent1.remove_child(parent)
|
||||
grandparent1.remove_children(parent)
|
||||
assert parent not in grandparent1
|
||||
assert child in parent
|
||||
assert parent in grandparent2
|
||||
|
@ -136,14 +195,14 @@ def test_lot_multiple_parents(auth_app_context):
|
|||
assert child in grandparent2
|
||||
|
||||
nodes = auth_app_context.resources[Lot.t].VIEW.ui_tree()
|
||||
assert nodes[0]['name'] == 'grandparent1'
|
||||
assert nodes[0]['id'] == gp1
|
||||
assert nodes[0]['nodes'] == []
|
||||
assert nodes[1]['name'] == 'grandparent2'
|
||||
assert nodes[1]['nodes'][0]['name'] == 'parent'
|
||||
assert nodes[1]['nodes'][0]['nodes'][0]['name'] == 'child'
|
||||
assert nodes[1]['id'] == gp2
|
||||
assert nodes[1]['nodes'][0]['id'] == p
|
||||
assert nodes[1]['nodes'][0]['nodes'][0]['id'] == c
|
||||
assert nodes[1]['nodes'][0]['nodes'][0]['nodes'] == []
|
||||
|
||||
grandparent2.remove_child(parent)
|
||||
grandparent2.remove_children(parent)
|
||||
assert parent not in grandparent2
|
||||
assert parent not in grandparent1
|
||||
assert child not in grandparent2
|
||||
|
@ -151,27 +210,27 @@ def test_lot_multiple_parents(auth_app_context):
|
|||
assert child in parent
|
||||
|
||||
nodes = auth_app_context.resources[Lot.t].VIEW.ui_tree()
|
||||
assert nodes[0]['name'] == 'grandparent1'
|
||||
assert nodes[0]['id'] == gp1
|
||||
assert nodes[0]['nodes'] == []
|
||||
assert nodes[1]['name'] == 'grandparent2'
|
||||
assert nodes[1]['id'] == gp2
|
||||
assert nodes[1]['nodes'] == []
|
||||
assert nodes[2]['name'] == 'parent'
|
||||
assert nodes[2]['nodes'][0]['name'] == 'child'
|
||||
assert nodes[2]['id'] == p
|
||||
assert nodes[2]['nodes'][0]['id'] == c
|
||||
assert nodes[2]['nodes'][0]['nodes'] == []
|
||||
|
||||
parent.remove_child(child)
|
||||
parent.remove_children(child)
|
||||
assert child not in parent
|
||||
assert len(child.paths) == 1
|
||||
assert len(parent.paths) == 1
|
||||
|
||||
nodes = auth_app_context.resources[Lot.t].VIEW.ui_tree()
|
||||
assert nodes[0]['name'] == 'grandparent1'
|
||||
assert nodes[0]['id'] == gp1
|
||||
assert nodes[0]['nodes'] == []
|
||||
assert nodes[1]['name'] == 'grandparent2'
|
||||
assert nodes[1]['id'] == gp2
|
||||
assert nodes[1]['nodes'] == []
|
||||
assert nodes[2]['name'] == 'parent'
|
||||
assert nodes[2]['id'] == p
|
||||
assert nodes[2]['nodes'] == []
|
||||
assert nodes[3]['name'] == 'child'
|
||||
assert nodes[3]['id'] == c
|
||||
assert nodes[3]['nodes'] == []
|
||||
|
||||
|
||||
|
@ -199,29 +258,29 @@ def test_lot_unite_graphs_and_find():
|
|||
db.session.add_all(lots)
|
||||
db.session.flush()
|
||||
|
||||
l1.add_child(l2)
|
||||
l1.add_children(l2)
|
||||
assert l2 in l1
|
||||
l3.add_child(l2)
|
||||
l3.add_children(l2)
|
||||
assert l2 in l3
|
||||
l5.add_child(l7)
|
||||
l5.add_children(l7)
|
||||
assert l7 in l5
|
||||
l4.add_child(l5)
|
||||
l4.add_children(l5)
|
||||
assert l5 in l4
|
||||
assert l7 in l4
|
||||
l5.add_child(l8)
|
||||
l5.add_children(l8)
|
||||
assert l8 in l5
|
||||
l4.add_child(l6)
|
||||
l4.add_children(l6)
|
||||
assert l6 in l4
|
||||
l6.add_child(l5)
|
||||
l6.add_children(l5)
|
||||
assert l5 in l6 and l5 in l4
|
||||
|
||||
# We unite the two graphs
|
||||
l2.add_child(l4)
|
||||
l2.add_children(l4)
|
||||
assert l4 in l2 and l5 in l2 and l6 in l2 and l7 in l2 and l8 in l2
|
||||
assert l4 in l3 and l5 in l3 and l6 in l3 and l7 in l3 and l8 in l3
|
||||
|
||||
# We remove the union
|
||||
l2.remove_child(l4)
|
||||
l2.remove_children(l4)
|
||||
assert l4 not in l2 and l5 not in l2 and l6 not in l2 and l7 not in l2 and l8 not in l2
|
||||
assert l4 not in l3 and l5 not in l3 and l6 not in l3 and l7 not in l3 and l8 not in l3
|
||||
|
||||
|
@ -235,25 +294,10 @@ def test_lot_roots():
|
|||
db.session.flush()
|
||||
|
||||
assert set(Lot.roots()) == {l1, l2, l3}
|
||||
l1.add_child(l2)
|
||||
l1.add_children(l2)
|
||||
assert set(Lot.roots()) == {l1, l3}
|
||||
|
||||
|
||||
@pytest.mark.usefixtures(conftest.auth_app_context.__name__)
|
||||
def test_lot_model_children():
|
||||
"""Tests the property Lot.children"""
|
||||
lots = Lot('1'), Lot('2'), Lot('3')
|
||||
l1, l2, l3 = lots
|
||||
db.session.add_all(lots)
|
||||
db.session.flush()
|
||||
|
||||
l1.add_child(l2)
|
||||
db.session.flush()
|
||||
|
||||
children = l1.children
|
||||
assert list(children) == [l2]
|
||||
|
||||
|
||||
def test_post_get_lot(user: UserClient):
|
||||
"""Tests submitting and retreiving a basic lot."""
|
||||
l, _ = user.post({'name': 'Foo'}, res=Lot)
|
||||
|
@ -277,21 +321,26 @@ def test_lot_post_add_children_view_ui_tree_normal(user: UserClient):
|
|||
assert child['parents'][0]['id'] == parent['id']
|
||||
|
||||
# Format UiTree
|
||||
lots = user.get(res=Lot, query=[('format', 'UiTree')])[0]['items']
|
||||
assert len(lots) == 1
|
||||
assert lots[0]['name'] == 'Parent'
|
||||
assert len(lots[0]['nodes']) == 1
|
||||
assert lots[0]['nodes'][0]['name'] == 'Child'
|
||||
r = user.get(res=Lot, query=[('format', 'UiTree')])[0]
|
||||
lots, nodes = r['items'], r['tree']
|
||||
assert 1 == len(nodes)
|
||||
assert nodes[0]['id'] == parent['id']
|
||||
assert len(nodes[0]['nodes']) == 1
|
||||
assert nodes[0]['nodes'][0]['id'] == child['id']
|
||||
assert 2 == len(lots)
|
||||
assert 'Parent' == lots[parent['id']]['name']
|
||||
assert 'Child' == lots[child['id']]['name']
|
||||
assert lots[child['id']]['parents'][0]['name'] == 'Parent'
|
||||
|
||||
# Normal list format
|
||||
lots = user.get(res=Lot)[0]['items']
|
||||
assert len(lots) == 2
|
||||
assert 2 == len(lots)
|
||||
assert lots[0]['name'] == 'Parent'
|
||||
assert lots[1]['name'] == 'Child'
|
||||
|
||||
# List format with a filter
|
||||
lots = user.get(res=Lot, query=[('search', 'pa')])[0]['items']
|
||||
assert len(lots) == 1
|
||||
assert 1 == len(lots)
|
||||
assert lots[0]['name'] == 'Parent'
|
||||
|
||||
|
||||
|
|
|
@@ -289,8 +289,10 @@ def test_snapshot_component_containing_components(user: UserClient):
     user.post(s, res=Snapshot, status=ValidationError)


-def test_erase(user: UserClient):
-    """Tests a Snapshot with EraseSectors."""
+def test_erase_privacy(user: UserClient):
+    """Tests a Snapshot with EraseSectors and the resulting
+    privacy properties.
+    """
     s = file('erase-sectors.snapshot')
     snapshot = snapshot_and_check(user, s, (EraseSectors.t,), perform_second_snapshot=True)
     storage, *_ = snapshot['components']
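Judging from the call above, `snapshot_and_check` posts the snapshot, verifies the events it generated, and optionally posts it a second time. A guessed sketch of such a helper — the signature mirrors the visible call, but the body is an assumption, not the suite's actual implementation (it also assumes `Snapshot` lives in `ereuse_devicehub.resources.event.models`):

```python
from uuid import uuid4

from ereuse_devicehub.resources.event.models import Snapshot


def snapshot_and_check(user, snapshot: dict, event_types=(),
                       perform_second_snapshot=False) -> dict:
    """Guessed behaviour: posts ``snapshot``, checks that the returned
    events include ``event_types`` and, optionally, re-posts it with a
    fresh uuid to prove the upload can be repeated.
    """
    result, _ = user.post(snapshot, res=Snapshot)
    assert set(event_types) <= {e['type'] for e in result['events']}
    if perform_second_snapshot:
        second = dict(snapshot, uuid=str(uuid4()))
        result, _ = user.post(second, res=Snapshot)
    return result
```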
@@ -310,16 +312,32 @@ def test_erase(user: UserClient):
     assert erasure['device']['id'] == storage['id']
     for step in erasure['steps']:
         assert step['type'] == 'StepZero'
-        assert step['error'] is False
+        assert step['severity'] == 'Info'
         assert 'num' not in step
-    assert storage['privacy'] == erasure['device']['privacy'] == 'EraseSectors'
+    assert storage['privacy']['type'] == 'EraseSectors'
+    pc, _ = user.get(res=m.Device, item=snapshot['device']['id'])
+    assert pc['privacy'] == [storage['privacy']]

     # Let's try a second erasure with an error
     s['uuid'] = uuid4()
-    s['components'][0]['events'][0]['error'] = True
+    s['components'][0]['events'][0]['severity'] = 'Error'
     snapshot, _ = user.post(s, res=Snapshot)
-    assert snapshot['components'][0]['hid'] == 'c1mr-c1s-c1ml'
-    assert snapshot['components'][0]['privacy'] == 'EraseSectorsError'
+    storage, _ = user.get(res=m.Device, item=storage['id'])
+    assert storage['hid'] == 'c1mr-c1s-c1ml'
+    assert storage['privacy']['type'] == 'EraseSectors'
+    pc, _ = user.get(res=m.Device, item=snapshot['device']['id'])
+    assert pc['privacy'] == [storage['privacy']]
+
+
+def test_test_data_storage(user: UserClient):
+    """Tests a Snapshot with EraseSectors."""
+    s = file('erase-sectors-2-hdd.snapshot')
+    snapshot, _ = user.post(res=Snapshot, data=s)
+    incidence_test = next(
+        ev for ev in snapshot['events']
+        if ev.get('reallocatedSectorCount', None) == 15
+    )
+    assert incidence_test['severity'] == 'Error'


 def test_snapshot_computer_monitor(user: UserClient):
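From here on the boolean `error` flag gives way to a `severity` value (`'Info'` on success, `'Error'` on failure). A sketch of how such a scale could be modelled; any member besides `Info` and `Error` is an assumption rather than Devicehub's own enum:

```python
from enum import Enum, unique


@unique
class Severity(Enum):
    """Assumed severity scale replacing the old boolean ``error`` flag."""
    Info = 'Info'
    Warning = 'Warning'  # assumed intermediate level
    Error = 'Error'

    def __str__(self) -> str:
        return self.value


# An event only counts as failed when it reaches Error,
# which is what the `severity == 'Error'` assertions check.
assert str(Severity.Info) == 'Info'
assert Severity('Error') is Severity.Error
```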
@@ -49,7 +49,7 @@ def test_workbench_server_condensed(user: UserClient):
         ('TestDataStorage', 6)
     }
     assert snapshot['closed']
-    assert not snapshot['error']
+    assert snapshot['severity'] == 'Info'
     device, _ = user.get(res=Device, item=snapshot['device']['id'])
     assert device['dataStorageSize'] == 1100
     assert device['chassis'] == 'Tower'
@@ -59,7 +59,7 @@ def test_workbench_server_condensed(user: UserClient):
     assert device['processorModel'] == device['components'][3]['model'] == 'p1-1ml'
     assert device['ramSize'] == 2048, 'There are 3 RAM: 2 x 1024 and 1 None sizes'
     assert device['rate']['closed']
-    assert not device['rate']['error']
+    assert device['rate']['severity'] == 'Info'
     assert device['rate']['rating'] == 0
     assert device['rate']['workbench']
     assert device['rate']['appearanceRange'] == 'A'
@@ -129,7 +129,7 @@ def test_workbench_server_phases(user: UserClient):
     assert events[8]['type'] == 'Install'
     assert events[8]['device'] == 6
     assert snapshot['closed']
-    assert not snapshot['error']
+    assert snapshot['severity'] == 'Info'

     pc, _ = user.get(res=Device, item=snapshot['id'])
     assert len(pc['events']) == 10 # todo shall I add child events?
@@ -165,6 +165,7 @@ def test_real_toshiba_11(user: UserClient):
     snapshot, _ = user.post(res=em.Snapshot, data=s)


+@pytest.mark.xfail(reason='Wrong rates values')
 def test_snapshot_real_eee_1001pxd(user: UserClient):
     """
     Checks the values of the device, components,
@@ -264,7 +265,7 @@ def test_snapshot_real_eee_1001pxd(user: UserClient):
     assert erase['endTime']
     assert erase['startTime']
     assert erase['zeros'] is False
-    assert erase['error'] is False
+    assert erase['severity'] == 'Info'
     assert hdd['privacy'] == 'EraseBasic'
     mother = components[8]
     assert mother['hid'] == 'asustek_computer_inc-eee0123456789-1001pxd'
@@ -321,3 +322,8 @@ def test_workbench_asus_1001pxd_rate_low(user: UserClient):
     """Tests an Asus 1001pxd with a low rate."""
     s = file('asus-1001pxd.snapshot')
     snapshot, _ = user.post(res=em.Snapshot, data=s)
+
+
+def test_david(user: UserClient):
+    s = file('david.lshw.snapshot')
+    snapshot, _ = user.post(res=em.Snapshot, data=s)
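All of these tests follow the same pattern: load a fixture snapshot with the suite's `file()` helper and POST it to the Snapshot resource. A rough sketch of what such a helper could look like, assuming JSON fixtures stored in a `files/` directory next to the tests; the real helper's location and serialization format may differ:

```python
import json
import pathlib

FIXTURES = pathlib.Path(__file__).parent / 'files'


def file(name: str) -> dict:
    """Loads the fixture snapshot called ``name`` as a dict,
    e.g. ``file('david.lshw.snapshot')``.
    """
    return json.loads((FIXTURES / '{}.json'.format(name)).read_text())
```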