This commit includes the first WDIO test for the ApplicationWizard. It doesn't do much right now, but it does log in and navigate to the wizard successfully.
This commit is contained in:
Ken Sternberg 2023-08-25 11:12:13 -07:00
parent 58bc1c3656
commit 9ecf5cb219
10 changed files with 20566 additions and 1 deletions

1
web/authentik-live-tests/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
reports/

View File

@ -0,0 +1,95 @@
# '.PHONY' entries inform Make that there is no accompanying file in the
# repository that this command actually makes, and so Make should always run the
# command, rather than look for dependencies to see if it should be run.

# Shared invocation for every test target below. Each target passes one or more
# portfolio JSON files to the application-runner spec.
RUNNER=npx wdio wdio.conf.js --spec ./tests/application_runner.js

.PHONY: help
help: ## Print out this help message.
	@M=$$(perl -ne 'm/((\w|-)*):.*##/ && print length($$1)."\n"' Makefile | \
		sort -nr | head -1) && \
	perl -ne "m/^((\w|-)*):.*##\s*(.*)/ && print(sprintf(\"%s: %s\t%s\n\", \$$1, \" \"x($$M-length(\$$1)), \$$3))" Makefile
	@echo ""
	@echo "Set env NODE_ENV to 'development' to run this against the dev server"
	@echo ""

.PHONY: update-local-chromedriver
update-local-chromedriver: ## Update the chrome driver to match the local chrome version, restoring package.json
	@ scripts/update_local_chromedriver

# No '##' help text: this is an internal prerequisite, not a user-facing target.
.PHONY: check-chromedriver
check-chromedriver:
	@ scripts/check_local_chromedriver

.PHONY: test-home-complex
test-home-complex: check-chromedriver ## Run the "Complex Home Application" test
	${RUNNER} --application=./portfolios/home_full_home_application.json

.PHONY: test-home-carriage-hill
test-home-carriage-hill: check-chromedriver ## Run the "Carriage Hill" test
	${RUNNER} --application=./portfolios/home_carriage-hill.json

.PHONY: test-home-mobile-home
test-home-mobile-home: check-chromedriver ## Run the "Mobile Home" test
	${RUNNER} --application=./portfolios/home_mobile_home_application.json

.PHONY: test-home-florida-roofs
test-home-florida-roofs: check-chromedriver ## Run the "Florida Roofs" test
	${RUNNER} --application=./portfolios/home_florida_roofs_gates_and_farms.json

.PHONY: test-home-townhome
test-home-townhome: check-chromedriver ## Run the "Townhome / Policy Lapsed" test
	${RUNNER} --application=./portfolios/home_townhome_application.json

.PHONY: test-home-future-application
test-home-future-application: check-chromedriver ## Run the "Future Purchase" test
	${RUNNER} --application=./portfolios/home_future_purchase_application.json

.PHONY: test-home-basic
test-home-basic: check-chromedriver ## Run the "Basic Home Application" test
	${RUNNER} --application=./portfolios/home_application_with_error.json

.PHONY: test-home-and-auto
test-home-and-auto: check-chromedriver ## Run the "Home & Auto" test
	${RUNNER} --application=./portfolios/ha_application.json

.PHONY: test-auto-full
test-auto-full: check-chromedriver ## Run the "Auto, Two Drivers" test
	${RUNNER} --application=./portfolios/auto_two_driver_auto_application.json

.PHONY: test-auto-basic
test-auto-basic: check-chromedriver ## Run the "Basic Auto" test
	${RUNNER} --application=./portfolios/auto_application.json

.PHONY: test-auto-two-cars
test-auto-two-cars: check-chromedriver ## Run the "Two-Cars Auto" test
	${RUNNER} --application=./portfolios/auto_application_two_cars.json

.PHONY: test-auto-michigan
test-auto-michigan: check-chromedriver ## Run the "Michigan Auto" test
	${RUNNER} --application=./portfolios/auto_michigan_auto_application.json

.PHONY: test-smoke
test-smoke: check-chromedriver ## Run the "Complex Home, Home+Auto and Two-Cars Auto" tests, including unanswered flows
	${RUNNER} --application=./portfolios/ha_application.json \
		--application=./portfolios/home_full_home_application.json \
		--application=./portfolios/unanswered_ha_application.json \
		--application=./portfolios/auto_application_two_cars.json \
		--application=./portfolios/windmit/home_florida_windmit_empty.json \
		--application=./portfolios/windmit/home_florida_windmit_full.json

.PHONY: test-embedded
test-embedded: check-chromedriver ## Run the "Complex Home, Home+Auto and Two-Cars Auto" tests, including unanswered flows in embedded mode
	BOWTIE_EMBEDDED=true ${RUNNER} --application=./portfolios/ha_application.json \
		--application=./portfolios/home_future_purchase_application.json \
		--application=./portfolios/home_full_home_application.json \
		--application=./portfolios/unanswered_ha_application.json \
		--application=./portfolios/auto_application_two_cars.json

.PHONY: test-all
test-all: check-chromedriver ## Run all the tests! Warning: this currently takes about 20 minutes!
	${RUNNER}

.PHONY: scan-for-duplicate-ids
scan-for-duplicate-ids: check-chromedriver ## Run 'Home Basic', cataloging duplicates. Warning: SLOW
	npx wdio wdio.conf.js --spec ./tests/application_runner_with_dupe_scanner.js --application=./portfolios/home_application.json

19652
web/authentik-live-tests/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,22 @@
{
"name": "authentik-live-tests",
"version": "1.0.0",
"description": "",
"main": "index.js",
"author": "",
"license": "ISC",
"devDependencies": {
"@wdio/junit-reporter": "^8.15.6",
"@wdio/local-runner": "^8.15.6",
"@wdio/mocha-framework": "^8.15.6",
"@wdio/spec-reporter": "^8.15.6",
"@wdio/sync": "^7.27.0",
"chromedriver": "^116.0.0",
"wdio-chromedriver-service": "^8.1.1",
"wdio-safaridriver-service": "^2.1.1"
},
"dependencies": {
"@wdio/cli": "^8.15.6",
"prettier": "^3.0.2"
}
}

View File

@ -0,0 +1,63 @@
#!/usr/bin/env bash

# Check that the chromedriver version installed in this test project matches the
# major version of the Chrome browser installed locally. Exits non-zero on a
# mismatch so Make targets depending on this check refuse to run a stale driver.

# -u: Treat an unset variable as a fatal syntax error.
# -eo pipefail: Fail on first error, even if it happens inside a pipeline.
set -ueo pipefail

VERBOSE=""
if [ "$#" -gt 0 ] && [ "$1" = "-v" ]; then
    VERBOSE="verbose"
fi

if [ "$#" -gt 0 ] && [ "$1" = "-h" ]; then
    echo "Usage: "
    echo " -v: On success, show a message. (Default behavior only shows a message on failure)"
    echo " -h: This help message"
    echo ""
    exit 0
fi

# The path to the working folder for the test project, as a subfolder of the
# monorepo. This will help us find where the driver is kept for comparison.
SUBFOLDER="authentik-live-tests"

# The variant of Chrome we expect under Linux. There are a lot of variants, like
# Midori, chromium, chromium-browser, etc. If you're not running the version
# supplied by Google, you'll have to change this variable.
LINUX_VARIANT="google-chrome"

CURRENT_OS=$(uname -s)
if [ "$CURRENT_OS" == "Linux" ]; then
    # `command -v` exits non-zero when the program is absent; without `|| true`,
    # `set -e` would abort here before our friendly error message could print.
    CHROME_LOCATION=$(command -v "$LINUX_VARIANT" || true)
    if [ "$CHROME_LOCATION" == "" ]; then
        echo "Could not find google-chrome installed on this Linux system."
        exit 1
    fi
    CHROME_VERSION=$("$LINUX_VARIANT" --version)
else
    CHROME_LOCATION="/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
    if [ ! -f "$CHROME_LOCATION" ]; then
        echo "Could not find Google Chrome app installed on this MacOS system."
        exit 1
    fi
    CHROME_VERSION=$("$CHROME_LOCATION" --version)
fi

# Reduce e.g. "Google Chrome 116.0.5845.96" to its major version, "116".
CHROME_MAJOR_VER=$(echo "$CHROME_VERSION" | sed 's/^Google Chrome //' | cut -d'.' -f1)

PROJECT_TOPLEVEL=$(git rev-parse --show-toplevel)
# Quoted so a repository path containing spaces does not break `find`.
TEST_HOME=$(find "$PROJECT_TOPLEVEL" -not \( -path "*/node_modules" -prune \) -type d -name "$SUBFOLDER" | head -1)
DRIVER_VER=$(grep '^ "version":' "$TEST_HOME/node_modules/chromedriver/package.json")
# Extract the major version from the `"version": "116.0.0"` line found above.
DRIVER_MAJOR_VER=$(echo "$DRIVER_VER" | cut -d':' -f2 | sed 's/"//g' | cut -d'.' -f1 | sed 's/ *//')

if [ "$CHROME_MAJOR_VER" -ne "$DRIVER_MAJOR_VER" ]; then
    echo "Driver: $DRIVER_MAJOR_VER, Chrome: $CHROME_MAJOR_VER, update required."
    exit 1
fi

if [ "$VERBOSE" ]; then
    echo "Driver: $DRIVER_MAJOR_VER, Chrome: $CHROME_MAJOR_VER. No update required."
fi

# SUCCESS!
exit 0

View File

@ -0,0 +1,77 @@
#!/usr/bin/env bash

# The below is helpful because Chrome is an evergreen browser, meaning that it
# will auto-update on most desktop computers without even informing the user.
# So we need a way, if you hit the mismatch, to update the local chromedriver
# easily.
#
# Testing with WDIO requires a specific version of the chromedriver, specified
# in the current package.json. Updating the chromedriver will change that
# version number in package.json.
#
# The environment block derives the major version for the local (MacOS) version
# of chrome and the current driver version for the project and, if they're
# mismatched, updates the local driver then resets the content of package.json.

# -u: Treat an unset variable as a fatal syntax error.
# -eo pipefail: Fail on first error, even if it happens inside a pipeline.
set -ueo pipefail

VERBOSE=""
if [ "$#" -gt 0 ] && [ "$1" = "-v" ]; then
    VERBOSE="verbose"
fi

if [ "$#" -gt 0 ] && [ "$1" = "-h" ]; then
    echo "Usage: "
    echo " -v: On success, show a message. (Default behavior only shows a message on failure)"
    echo " -h: This help message"
    echo ""
    exit 0
fi

# The path to the working folder for the test project, as a subfolder of the
# monorepo. This will help us find where the driver is kept for comparison.
SUBFOLDER="authentik-live-tests"

# The variant of Chrome we expect under Linux. There are a lot of variants, like
# Midori, chromium, chromium-browser, etc. If you're not running the version
# supplied by Google, you'll have to change this variable.
LINUX_VARIANT="google-chrome"

CURRENT_OS=$(uname -s)
if [ "$CURRENT_OS" == "Linux" ]; then
    # `command -v` exits non-zero when the program is absent; without `|| true`,
    # `set -e` would abort here before our friendly error message could print.
    CHROME_LOCATION=$(command -v "$LINUX_VARIANT" || true)
    if [ "$CHROME_LOCATION" == "" ]; then
        echo "Could not find google-chrome installed on this Linux system."
        exit 1
    fi
    CHROME_VERSION=$("$LINUX_VARIANT" --version)
else
    CHROME_LOCATION="/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
    if [ ! -f "$CHROME_LOCATION" ]; then
        echo "Could not find Google Chrome app installed on this MacOS system."
        exit 1
    fi
    CHROME_VERSION=$("$CHROME_LOCATION" --version)
fi

# Reduce e.g. "Google Chrome 116.0.5845.96" to its major version, "116".
CHROME_MAJOR_VER=$(echo "$CHROME_VERSION" | sed 's/^Google Chrome //' | cut -d'.' -f1)

PROJECT_TOPLEVEL=$(git rev-parse --show-toplevel)
# Quoted so a repository path containing spaces does not break `find`.
TEST_HOME=$(find "$PROJECT_TOPLEVEL" -not \( -path "*/node_modules" -prune \) -type d -name "$SUBFOLDER" | head -1)
DRIVER_VER=$(grep '^ "version":' "$TEST_HOME/node_modules/chromedriver/package.json")
# Extract the major version from the `"version": "116.0.0"` line found above.
DRIVER_MAJOR_VER=$(echo "$DRIVER_VER" | cut -d':' -f2 | sed 's/"//g' | cut -d'.' -f1 | sed 's/ *//')

if [ "$CHROME_MAJOR_VER" -ne "$DRIVER_MAJOR_VER" ]; then
    echo "Driver: $DRIVER_MAJOR_VER, Chrome: $CHROME_MAJOR_VER, updating..."
    # Install the matching driver, then restore the manifest files so the
    # version bump stays local and is never accidentally committed.
    npm install "chromedriver@$CHROME_MAJOR_VER"
    git checkout package.json package-lock.json
    exit 0
fi

if [ "$VERBOSE" ]; then
    echo "Driver: $DRIVER_MAJOR_VER, Chrome: $CHROME_MAJOR_VER. No update required."
fi

# SUCCESS!
exit 0

View File

@ -0,0 +1,67 @@
const { execSync } = require('child_process')
const { readdirSync } = require('fs')
const path = require('path')

// Delay (ms) after each click, giving authentik's flow executor time to
// transition between stages before we query for the next field.
const CLICK_TIME_DELAY = 250;

describe('Login', () => {
    // Note: every click() and the toHaveText() matcher return promises in
    // WDIO v8; each must be awaited or failures are silently swallowed.
    it('Should correctly log in to Authentik', async () => {
        await browser.reloadSession()
        await browser.url("http://localhost:9000")

        // Identification stage: username first, password on the next stage.
        const uidField = await $('>>>input[name="uidField"]');
        await uidField.setValue('ken@goauthentik.io');
        const next1 = await $('>>>button[type="submit"]');
        await next1.click();
        await browser.pause(CLICK_TIME_DELAY);

        const pwdField = await $('>>>input[name="password"]');
        await pwdField.setValue('eat10bugs');
        const next2 = await $('>>>button[type="submit"]');
        await next2.click();
        await browser.pause(CLICK_TIME_DELAY);

        // Landing on the "My applications" library page confirms the login.
        const home = await $('>>>div.header h1');
        await expect(home).toHaveText('My applications');

        // Navigate to the admin interface, then to the Applications list with
        // the create-form flag set, and launch the wizard.
        const goToAdmin = await $('>>>a[href="/if/admin"]');
        await goToAdmin.click();
        await $('>>>ak-admin-overview').waitForDisplayed();
        const applicationLink = await $('>>>a[href="#/core/applications;%7B%22createForm%22%3Atrue%7D"]');
        await applicationLink.click();
        await $('>>>ak-application-list').waitForDisplayed();
        const startWizard = await $('>>>ak-wizard-frame button[slot="trigger"]')
        await startWizard.click();

        // Wizard step 1: application name and slug.
        {
            const nameInput = await $('>>>ak-form-element-horizontal input[name="name"]');
            await nameInput.setValue('This Is My Application');
            const slugInput = await $('>>>ak-form-element-horizontal input[name="slug"]');
            await slugInput.setValue('this-is-my-application');
            const nextButton = await $('>>>ak-wizard-frame footer button.pf-m-primary');
            await nextButton.click();
        }
        // Wizard step 2: pick the provider type.
        {
            const input = await $('>>>input[value="proxyprovider-proxy"]');
            await input.click();
            const nextButton = await $('>>>ak-wizard-frame footer button.pf-m-primary');
            await nextButton.click();
        }
        // Wizard step 3: name the provider. The wizard is not submitted yet;
        // the trailing pause keeps the session open so the state is observable.
        {
            const input = await $('>>>ak-form-element-horizontal input[name="name"]');
            await input.setValue('This Is My Provider');
        }
        await browser.pause(2000);
    })
})

View File

@ -0,0 +1,294 @@
const fs = require('fs')
const path = require('path')
// When DEBUG is set, the Mocha timeout below is stretched to 24h so a human
// can drive/inspect the browser without the runner killing the test.
const debug = process.env.DEBUG
const defaultTimeoutInterval = 60000
// CI build number; folded into JUnit report filenames to keep them unique per build.
const buildNumber = process.env.BUILD_NUMBER ? process.env.BUILD_NUMBER : '0'
// Destination for JUnit XML reports and the failure marker written in onComplete.
const reportsOutputDir = './reports'
exports.config = {
//
// ====================
// Runner Configuration
// ====================
//
// WebdriverIO allows it to run your tests in arbitrary locations (e.g. locally or
// on a remote machine).
runner: 'local',
//
// ==================
// Specify Test Files
// ==================
// Define which test specs should run. The pattern is relative to the directory
// from which `wdio` was called. Notice that, if you are calling `wdio` from an
// NPM script (see https://docs.npmjs.com/cli/run-script) then the current working
// directory is where your package.json resides, so `wdio` will be called from there.
//
specs: ['./tests/*.js'],
// Patterns to exclude.
exclude: [
// 'path/to/excluded/files'
],
//
// ============
// Capabilities
// ============
// Define your capabilities here. WebdriverIO can run multiple capabilities at the same
// time. Depending on the number of capabilities, WebdriverIO launches several test
// sessions. Within your capabilities you can overwrite the spec and exclude options in
// order to group specific specs to a specific capability.
//
// First, you can define how many instances should be started at the same time. Let's
// say you have 3 different capabilities (Chrome, Firefox, and Safari) and you have
// set maxInstances to 1; wdio will spawn 3 processes. Therefore, if you have 10 spec
// files and you set maxInstances to 10, all spec files will get tested at the same time
// and 30 processes will get spawned. The property handles how many capabilities
// from the same test should run tests.
//
maxInstances: 10,
//
// If you have trouble getting all important capabilities together, check out the
// Sauce Labs platform configurator - a great tool to configure your capabilities:
// https://docs.saucelabs.com/reference/platforms-configurator
//
capabilities: [
{
// maxInstances can get overwritten per capability. So if you have an in-house Selenium
// grid with only 5 firefox instances available you can make sure that not more than
// 5 instances get started at a time.
maxInstances: 1,
//
browserName: 'Safari',
// acceptInsecureCerts: true,
// If outputDir is provided WebdriverIO can capture driver session logs
// it is possible to configure which logTypes to include/exclude.
// excludeDriverLogs: ['*'], // pass '*' to exclude all driver session logs
// excludeDriverLogs: ['bugreport', 'server'],
},
],
//
// ===================
// Test Configurations
// ===================
// Define all options that are relevant for the WebdriverIO instance here
//
// Level of logging verbosity: trace | debug | info | warn | error | silent
logLevel: 'info',
//
// Set specific log levels per logger
// loggers:
// - webdriver, webdriverio
// - @wdio/applitools-service, @wdio/browserstack-service, @wdio/devtools-service, @wdio/sauce-service
// - @wdio/mocha-framework, @wdio/jasmine-framework
// - @wdio/local-runner
// - @wdio/sumologic-reporter
// - @wdio/cli, @wdio/config, @wdio/sync, @wdio/utils
// Level of logging verbosity: trace | debug | info | warn | error | silent
// logLevels: {
// webdriver: 'info',
// '@wdio/applitools-service': 'info'
// },
//
// If you only want to run your tests until a specific amount of tests have failed use
// bail (default is 0 - don't bail, run all tests).
bail: 0,
//
// Set a base URL in order to shorten url command calls. If your `url` parameter starts
// with `/`, the base url gets prepended, not including the path portion of your baseUrl.
// If your `url` parameter starts without a scheme or `/` (like `some/path`), the base url
// gets prepended directly.
baseUrl: 'http://localhost',
//
// Default timeout for all waitFor* commands.
waitforTimeout: 10000,
//
// Default timeout in milliseconds for request
// if browser driver or grid doesn't send response
connectionRetryTimeout: 120000,
//
// Default request retries count
connectionRetryCount: 3,
//
// Test runner services
// Services take over a specific job you don't want to take care of. They enhance
// your test setup with almost no effort. Unlike plugins, they don't add new
// commands. Instead, they hook themselves up into the test process.
services: ['safaridriver'],
// Framework you want to run your specs with.
// The following are supported: Mocha, Jasmine, and Cucumber
// see also: https://webdriver.io/docs/frameworks.html
//
// Make sure you have the wdio adapter package for the specific framework installed
// before running any tests.
framework: 'mocha',
//
// The number of times to retry the entire specfile when it fails as a whole
// specFileRetries: 1,
//
// Delay in seconds between the spec file retry attempts
// specFileRetriesDelay: 0,
//
// Whether or not retried specfiles should be retried immediately or deferred to the end of the queue
// specFileRetriesDeferred: false,
//
// Test reporter for stdout.
// The only one supported by default is 'dot'
// see also: https://webdriver.io/docs/dot-reporter.html
reporters: [
'spec',
[
'junit',
{
outputDir: reportsOutputDir,
outputFileFormat(options) {
return `authentik-${buildNumber}-${options.cid}.xml`
},
errorOptions: {
failure: 'message',
stacktrace: 'stack',
},
},
],
],
//
// Options to be passed to Mocha.
// See the full list at http://mochajs.org/
mochaOpts: {
ui: 'bdd',
timeout: debug ? 24 * 60 * 60 * 1000 : defaultTimeoutInterval,
},
//
// =====
// Hooks
// =====
// WebdriverIO provides several hooks you can use to interfere with the test process in order to enhance
// it and to build services around it. You can either apply a single function or an array of
// methods to it. If one of them returns with a promise, WebdriverIO will wait until that promise got
// resolved to continue.
/**
* Gets executed once before all workers get launched.
* @param {Object} config wdio configuration object
* @param {Array.<Object>} capabilities list of capabilities details
*/
// onPrepare: function (config, capabilities) {
// },
/**
* Gets executed before a worker process is spawned and can be used to initialise specific service
* for that worker as well as modify runtime environments in an async fashion.
* @param {String} cid capability id (e.g 0-0)
* @param {[type]} caps object containing capabilities for session that will be spawn in the worker
* @param {[type]} specs specs to be run in the worker process
* @param {[type]} args object that will be merged with the main configuration once worker is initialised
* @param {[type]} execArgv list of string arguments passed to the worker process
*/
// onWorkerStart: function (cid, caps, specs, args, execArgv) {
// },
/**
* Gets executed just before initialising the webdriver session and test framework. It allows you
* to manipulate configurations depending on the capability or spec.
* @param {Object} config wdio configuration object
* @param {Array.<Object>} capabilities list of capabilities details
* @param {Array.<String>} specs List of spec file paths that are to be run
*/
// beforeSession: function (config, capabilities, specs) {
// },
/**
* Gets executed before test execution begins. At this point you can access to all global
* variables like `browser`. It is the perfect place to define custom commands.
* @param {Array.<Object>} capabilities list of capabilities details
* @param {Array.<String>} specs List of spec file paths that are to be run
* @param {Object} browser instance of created browser/device session
*/
// before: function (capabilities, specs) {
// },
/**
* Runs before a WebdriverIO command gets executed.
* @param {String} commandName hook command name
* @param {Array} args arguments that command would receive
*/
// beforeCommand: function (commandName, args) {
// },
/**
* Hook that gets executed before the suite starts
* @param {Object} suite suite details
*/
// beforeSuite: function (suite) {
// },
/**
* Function to be executed before a test (in Mocha/Jasmine) starts.
*/
// beforeTest: function (test, context) {
// },
/**
* Hook that gets executed _before_ a hook within the suite starts (e.g. runs before calling
* beforeEach in Mocha)
*/
// beforeHook: function (test, context) {
// },
/**
* Hook that gets executed _after_ a hook within the suite starts (e.g. runs after calling
* afterEach in Mocha)
*/
// afterHook: function (test, context, { error, result, duration, passed, retries }) {
// },
/**
* Function to be executed after a test (in Mocha/Jasmine).
*/
// afterTest: function(test, context, { error, result, duration, passed, retries }) {
// },
/**
* Hook that gets executed after the suite has ended
* @param {Object} suite suite details
*/
// afterSuite: function (suite) {
// },
/**
* Runs after a WebdriverIO command gets executed
* @param {String} commandName hook command name
* @param {Array} args arguments that command would receive
* @param {Number} result 0 - command success, 1 - command error
* @param {Object} error error object if any
*/
// afterCommand: function (commandName, args, result, error) {
// },
/**
* Gets executed after all tests are done. You still have access to all global variables from
* the test.
* @param {Number} result 0 - test pass, 1 - test fail
* @param {Array.<Object>} capabilities list of capabilities details
* @param {Array.<String>} specs List of spec file paths that ran
*/
// after: function (result, capabilities, specs) {
// },
/**
* Gets executed right after terminating the webdriver session.
* @param {Object} config wdio configuration object
* @param {Array.<Object>} capabilities list of capabilities details
* @param {Array.<String>} specs List of spec file paths that ran
*/
// afterSession: function (config, capabilities, specs) {
// },
/**
* Gets executed after all workers got shut down and the process is about to exit. An error
* thrown in the onComplete hook will result in the test run failing.
* @param {Object} exitCode 0 - success, 1 - fail
* @param {Object} config wdio configuration object
* @param {Array.<Object>} capabilities list of capabilities details
* @param {<Object>} results object containing test results
*/
onComplete(exitCode, config, capabilities, results) {
if (exitCode !== 0) {
fs.writeFileSync(path.join(reportsOutputDir, './failure.txt'), 'Tests failed')
}
},
/**
* Gets executed when a refresh happens.
* @param {String} oldSessionId session ID of the old session
* @param {String} newSessionId session ID of the new session
*/
// onReload: function(oldSessionId, newSessionId) {
// }
}

View File

@ -0,0 +1,294 @@
const fs = require('fs')
const path = require('path')
// When DEBUG is set, the Mocha timeout below is stretched to 24h so a human
// can drive/inspect the browser without the runner killing the test.
const debug = process.env.DEBUG
const defaultTimeoutInterval = 200000
// CI build number; folded into JUnit report filenames to keep them unique per build.
const buildNumber = process.env.BUILD_NUMBER ? process.env.BUILD_NUMBER : '0'
// Destination for JUnit XML reports and the failure marker written in onComplete.
const reportsOutputDir = './reports'
exports.config = {
//
// ====================
// Runner Configuration
// ====================
//
// WebdriverIO allows it to run your tests in arbitrary locations (e.g. locally or
// on a remote machine).
runner: 'local',
//
// ==================
// Specify Test Files
// ==================
// Define which test specs should run. The pattern is relative to the directory
// from which `wdio` was called. Notice that, if you are calling `wdio` from an
// NPM script (see https://docs.npmjs.com/cli/run-script) then the current working
// directory is where your package.json resides, so `wdio` will be called from there.
//
specs: ['./tests/*.js'],
// Patterns to exclude.
exclude: [
// 'path/to/excluded/files'
],
//
// ============
// Capabilities
// ============
// Define your capabilities here. WebdriverIO can run multiple capabilities at the same
// time. Depending on the number of capabilities, WebdriverIO launches several test
// sessions. Within your capabilities you can overwrite the spec and exclude options in
// order to group specific specs to a specific capability.
//
// First, you can define how many instances should be started at the same time. Let's
// say you have 3 different capabilities (Chrome, Firefox, and Safari) and you have
// set maxInstances to 1; wdio will spawn 3 processes. Therefore, if you have 10 spec
// files and you set maxInstances to 10, all spec files will get tested at the same time
// and 30 processes will get spawned. The property handles how many capabilities
// from the same test should run tests.
//
maxInstances: 10,
//
// If you have trouble getting all important capabilities together, check out the
// Sauce Labs platform configurator - a great tool to configure your capabilities:
// https://docs.saucelabs.com/reference/platforms-configurator
//
capabilities: [
{
// maxInstances can get overwritten per capability. So if you have an in-house Selenium
// grid with only 5 firefox instances available you can make sure that not more than
// 5 instances get started at a time.
maxInstances: 1,
//
browserName: 'chrome',
acceptInsecureCerts: true,
// If outputDir is provided WebdriverIO can capture driver session logs
// it is possible to configure which logTypes to include/exclude.
// excludeDriverLogs: ['*'], // pass '*' to exclude all driver session logs
// excludeDriverLogs: ['bugreport', 'server'],
},
],
//
// ===================
// Test Configurations
// ===================
// Define all options that are relevant for the WebdriverIO instance here
//
// Level of logging verbosity: trace | debug | info | warn | error | silent
logLevel: 'warn',
//
// Set specific log levels per logger
// loggers:
// - webdriver, webdriverio
// - @wdio/applitools-service, @wdio/browserstack-service, @wdio/devtools-service, @wdio/sauce-service
// - @wdio/mocha-framework, @wdio/jasmine-framework
// - @wdio/local-runner
// - @wdio/sumologic-reporter
// - @wdio/cli, @wdio/config, @wdio/sync, @wdio/utils
// Level of logging verbosity: trace | debug | info | warn | error | silent
// logLevels: {
// webdriver: 'info',
// '@wdio/applitools-service': 'info'
// },
//
// If you only want to run your tests until a specific amount of tests have failed use
// bail (default is 0 - don't bail, run all tests).
bail: 0,
//
// Set a base URL in order to shorten url command calls. If your `url` parameter starts
// with `/`, the base url gets prepended, not including the path portion of your baseUrl.
// If your `url` parameter starts without a scheme or `/` (like `some/path`), the base url
// gets prepended directly.
baseUrl: 'http://localhost',
//
// Default timeout for all waitFor* commands.
waitforTimeout: 10000,
//
// Default timeout in milliseconds for request
// if browser driver or grid doesn't send response
connectionRetryTimeout: 120000,
//
// Default request retries count
connectionRetryCount: 3,
//
// Test runner services
// Services take over a specific job you don't want to take care of. They enhance
// your test setup with almost no effort. Unlike plugins, they don't add new
// commands. Instead, they hook themselves up into the test process.
services: ['chromedriver'],
// Framework you want to run your specs with.
// The following are supported: Mocha, Jasmine, and Cucumber
// see also: https://webdriver.io/docs/frameworks.html
//
// Make sure you have the wdio adapter package for the specific framework installed
// before running any tests.
framework: 'mocha',
//
// The number of times to retry the entire specfile when it fails as a whole
// specFileRetries: 1,
//
// Delay in seconds between the spec file retry attempts
// specFileRetriesDelay: 0,
//
// Whether or not retried specfiles should be retried immediately or deferred to the end of the queue
// specFileRetriesDeferred: false,
//
// Test reporter for stdout.
// The only one supported by default is 'dot'
// see also: https://webdriver.io/docs/dot-reporter.html
reporters: [
'spec',
[
'junit',
{
outputDir: reportsOutputDir,
outputFileFormat(options) {
return `authentik-${buildNumber}-${options.cid}.xml`
},
errorOptions: {
failure: 'message',
stacktrace: 'stack',
},
},
],
],
//
// Options to be passed to Mocha.
// See the full list at http://mochajs.org/
mochaOpts: {
ui: 'bdd',
timeout: debug ? 24 * 60 * 60 * 1000 : defaultTimeoutInterval,
},
//
// =====
// Hooks
// =====
// WebdriverIO provides several hooks you can use to interfere with the test process in order to enhance
// it and to build services around it. You can either apply a single function or an array of
// methods to it. If one of them returns with a promise, WebdriverIO will wait until that promise got
// resolved to continue.
/**
* Gets executed once before all workers get launched.
* @param {Object} config wdio configuration object
* @param {Array.<Object>} capabilities list of capabilities details
*/
// onPrepare: function (config, capabilities) {
// },
/**
* Gets executed before a worker process is spawned and can be used to initialise specific service
* for that worker as well as modify runtime environments in an async fashion.
* @param {String} cid capability id (e.g 0-0)
* @param {[type]} caps object containing capabilities for session that will be spawn in the worker
* @param {[type]} specs specs to be run in the worker process
* @param {[type]} args object that will be merged with the main configuration once worker is initialised
* @param {[type]} execArgv list of string arguments passed to the worker process
*/
// onWorkerStart: function (cid, caps, specs, args, execArgv) {
// },
/**
* Gets executed just before initialising the webdriver session and test framework. It allows you
* to manipulate configurations depending on the capability or spec.
* @param {Object} config wdio configuration object
* @param {Array.<Object>} capabilities list of capabilities details
* @param {Array.<String>} specs List of spec file paths that are to be run
*/
// beforeSession: function (config, capabilities, specs) {
// },
/**
* Gets executed before test execution begins. At this point you can access to all global
* variables like `browser`. It is the perfect place to define custom commands.
* @param {Array.<Object>} capabilities list of capabilities details
* @param {Array.<String>} specs List of spec file paths that are to be run
* @param {Object} browser instance of created browser/device session
*/
// before: function (capabilities, specs) {
// },
/**
* Runs before a WebdriverIO command gets executed.
* @param {String} commandName hook command name
* @param {Array} args arguments that command would receive
*/
// beforeCommand: function (commandName, args) {
// },
/**
* Hook that gets executed before the suite starts
* @param {Object} suite suite details
*/
// beforeSuite: function (suite) {
// },
/**
* Function to be executed before a test (in Mocha/Jasmine) starts.
*/
// beforeTest: function (test, context) {
// },
/**
* Hook that gets executed _before_ a hook within the suite starts (e.g. runs before calling
* beforeEach in Mocha)
*/
// beforeHook: function (test, context) {
// },
/**
* Hook that gets executed _after_ a hook within the suite starts (e.g. runs after calling
* afterEach in Mocha)
*/
// afterHook: function (test, context, { error, result, duration, passed, retries }) {
// },
/**
* Function to be executed after a test (in Mocha/Jasmine).
*/
// afterTest: function(test, context, { error, result, duration, passed, retries }) {
// },
/**
* Hook that gets executed after the suite has ended
* @param {Object} suite suite details
*/
// afterSuite: function (suite) {
// },
/**
* Runs after a WebdriverIO command gets executed
* @param {String} commandName hook command name
* @param {Array} args arguments that command would receive
* @param {Number} result 0 - command success, 1 - command error
* @param {Object} error error object if any
*/
// afterCommand: function (commandName, args, result, error) {
// },
/**
* Gets executed after all tests are done. You still have access to all global variables from
* the test.
* @param {Number} result 0 - test pass, 1 - test fail
* @param {Array.<Object>} capabilities list of capabilities details
* @param {Array.<String>} specs List of spec file paths that ran
*/
// after: function (result, capabilities, specs) {
// },
/**
* Gets executed right after terminating the webdriver session.
* @param {Object} config wdio configuration object
* @param {Array.<Object>} capabilities list of capabilities details
* @param {Array.<String>} specs List of spec file paths that ran
*/
// afterSession: function (config, capabilities, specs) {
// },
/**
* Gets executed after all workers got shut down and the process is about to exit. An error
* thrown in the onComplete hook will result in the test run failing.
* @param {Object} exitCode 0 - success, 1 - fail
* @param {Object} config wdio configuration object
* @param {Array.<Object>} capabilities list of capabilities details
* @param {<Object>} results object containing test results
*/
onComplete(exitCode, config, capabilities, results) {
if (exitCode !== 0) {
fs.writeFileSync(path.join(reportsOutputDir, './failure.txt'), 'Tests failed')
}
},
/**
* Gets executed when a refresh happens.
* @param {String} oldSessionId session ID of the old session
* @param {String} newSessionId session ID of the new session
*/
// onReload: function(oldSessionId, newSessionId) {
// }
}

View File

@ -136,7 +136,7 @@ export class LibraryPage extends AKElement {
render() { render() {
return html`<main role="main" class="pf-c-page__main" tabindex="-1" id="main-content"> return html`<main role="main" class="pf-c-page__main" tabindex="-1" id="main-content">
<div class="pf-c-content header"> <div class="pf-c-content header">
<h1>${msg("My applications")}</h1> <h1 id="library-page-title">${msg("My applications")}</h1>
${this.uiConfig.searchEnabled ? this.renderSearch() : html``} ${this.uiConfig.searchEnabled ? this.renderSearch() : html``}
</div> </div>
<section class="pf-c-page__main-section"> <section class="pf-c-page__main-section">