|
3 | 3 | const { state } = require('./state') |
4 | 4 | const { assert } = require('./assertions') |
5 | 5 | const { handleError } = require('./errors') |
6 | | -const { failing } = require('./tests') |
7 | 6 | const { reporter, info } = require('./reporters') |
8 | 7 | const { char, color, fmtMs } = require('./utility') |
9 | 8 |
|
10 | | -const runner = async (t, noExit) => { |
11 | | - const { msg, fn, failing, benchOpts, fileName: currFile } = t |
| 9 | +const getRunner = (tests) => { |
| 10 | + const runner = async (t, noExit) => { |
| 11 | + const { msg, fn, failing, benchOpts, fileName: currFile } = t |
12 | 12 |
|
13 | | - if (currFile) { |
14 | | - if (currFile !== state.lastFileName) { |
15 | | - state.fileTestCount = 0 |
16 | | - info(`\nRunning tests for ${currFile}\n\n`) |
17 | | - } |
| 13 | + if (currFile) { |
| 14 | + if (currFile !== state.lastFileName) { |
| 15 | + state.fileTestCount = 0 |
| 16 | + info(`\nRunning tests for ${currFile}\n\n`) |
| 17 | + } |
18 | 18 |
|
19 | | - state.lastFileName = currFile |
20 | | - } |
| 19 | + state.lastFileName = currFile |
| 20 | + } |
21 | 21 |
|
22 | | - if (benchOpts) return benchRunner(t) |
| 22 | + if (benchOpts) return benchRunner(t) |
23 | 23 |
|
24 | | - const start = Date.now() |
| 24 | + const start = Date.now() |
25 | 25 |
|
26 | | - try { |
27 | | - await fn(assert(msg, failing)) |
28 | | - } catch (error) { |
29 | | - if (!failing) return handleError(msg, error, noExit) |
| 26 | + try { |
| 27 | + await fn(assert(msg, failing)) |
| 28 | + } catch (error) { |
| 29 | + if (!failing) return handleError(msg, error, noExit) |
30 | 30 |
|
31 | | - const ms = ` (${fmtMs(Date.now() - start)})` |
32 | | - const out = `${char('okFail')} ${color.red}${msg}${ms}` |
33 | | - return reporter({ msg, out, error, pass: false, mod: 'failing' }) |
34 | | - } |
| 31 | + const ms = ` (${fmtMs(Date.now() - start)})` |
| 32 | + const out = `${char('okFail')} ${color.red}${msg}${ms}` |
| 33 | + return reporter({ msg, out, error, pass: false, mod: 'failing' }) |
| 34 | + } |
35 | 35 |
|
36 | | - const ms = Date.now() - start |
| 36 | + const ms = Date.now() - start |
37 | 37 |
|
38 | | - if (failing) { |
39 | | - return handleError(msg, new Error('Passed test called with test.failing')) |
40 | | - } |
| 38 | + if (failing) { |
| 39 | + return handleError(msg, new Error('Passed test called with test.failing')) |
| 40 | + } |
41 | 41 |
|
42 | | - if (!msg) return |
| 42 | + if (!msg) return |
43 | 43 |
|
44 | | - const out = `${char('good')} ${msg} (${fmtMs(ms)})` |
45 | | - return reporter({ msg, out, pass: true }) |
46 | | -} |
| 44 | + const out = `${char('good')} ${msg} (${fmtMs(ms)})` |
| 45 | + return reporter({ msg, out, pass: true }) |
| 46 | + } |
47 | 47 |
|
48 | | -const benchRunner = async ({ msg, fn, benchOpts }) => { |
49 | | - const { samples, max } = benchOpts |
50 | | - const start = process.hrtime() |
51 | | - let msAvg |
52 | | - |
53 | | - try { |
54 | | - if (benchOpts.parallel) { |
55 | | - const wrapped = async () => { |
56 | | - const iStart = process.hrtime() |
57 | | - await fn(assert(msg, failing)) |
58 | | - return process.hrtime(iStart) |
| 48 | + const benchRunner = async ({ msg, fn, benchOpts }) => { |
| 49 | + const { samples, max } = benchOpts |
| 50 | + const start = process.hrtime() |
| 51 | + let msAvg |
| 52 | + |
| 53 | + try { |
| 54 | + if (benchOpts.parallel) { |
| 55 | + const wrapped = async () => { |
| 56 | + const iStart = process.hrtime() |
| 57 | + await fn(assert(msg, tests.failing)) |
| 58 | + return process.hrtime(iStart) |
| 59 | + } |
| 60 | + const times = await Promise.all([...Array(samples)].map(wrapped)) |
| 61 | + msAvg = parseInt(times.reduce((accum, curval) => { |
| 62 | + return accum + (curval[0] * 1e3 + curval[1] / 1e6) |
| 63 | + }, 0) / samples, 10) |
| 64 | + } else { |
| 65 | + for (let i = 0; i < samples; i++) await fn(assert(msg, tests.failing)) |
59 | 66 | } |
60 | | - const times = await Promise.all([...Array(samples)].map(wrapped)) |
61 | | - msAvg = parseInt(times.reduce((accum, curval) => { |
62 | | - return accum + (curval[0] * 1e3 + curval[1] / 1e6) |
63 | | - }, 0) / samples, 10) |
64 | | - } else { |
65 | | - for (let i = 0; i < samples; i++) await fn(assert(msg, failing)) |
| 67 | + } catch (ex) { |
| 68 | + return handleError(msg, ex) |
66 | 69 | } |
67 | | - } catch (ex) { |
68 | | - return handleError(msg, ex) |
69 | | - } |
70 | 70 |
|
71 | | - const ranFor = process.hrtime(start) |
72 | | - const msTotal = ranFor[0] * 1e3 + ranFor[1] / 1e6 |
73 | | - msAvg = msAvg || parseInt(msTotal / samples, 10) |
| 71 | + const ranFor = process.hrtime(start) |
| 72 | + const msTotal = ranFor[0] * 1e3 + ranFor[1] / 1e6 |
| 73 | + msAvg = msAvg || parseInt(msTotal / samples, 10) |
74 | 74 |
|
75 | | - if (typeof benchOpts.cb === 'function') { |
76 | | - benchOpts.cb({ msTotal, msAvg }) |
77 | | - } |
| 75 | + if (typeof benchOpts.cb === 'function') { |
| 76 | + benchOpts.cb({ msTotal, msAvg }) |
| 77 | + } |
| 78 | + |
| 79 | + if (msAvg > max) { |
| 80 | + const maxErr = new Error(`Bench failed: (${fmtMs(msAvg)} > ${fmtMs(max)})`) |
| 81 | + return handleError(msg, maxErr) |
| 82 | + } |
78 | 83 |
|
79 | | - if (msAvg > max) { |
80 | | - const maxErr = new Error(`Bench failed: (${fmtMs(msAvg)} > ${fmtMs(max)})`) |
81 | | - return handleError(msg, maxErr) |
| 84 | + const out = `${char('good')} ${msg} (${fmtMs(msAvg)} avg)` |
| 85 | + return reporter({ msg, out, pass: true }) |
82 | 86 | } |
83 | 87 |
|
84 | | - const out = `${char('good')} ${msg} (${fmtMs(msAvg)} avg)` |
85 | | - return reporter({ msg, out, pass: true }) |
| 88 | + return { runner, benchRunner } |
86 | 89 | } |
87 | 90 |
|
// Export only the factory; callers obtain { runner, benchRunner } by
// invoking getRunner(tests) with the tests module instance.
module.exports = { getRunner }
0 commit comments