path: root/test/py/test_repo.py
# Copyright (c) 2023 Egor Tensin <Egor.Tensin@gmail.com>
# This file is part of the "cimple" project.
# For details, see https://github.com/egor-tensin/cimple.
# Distributed under the MIT License.

import logging
import multiprocessing as mp
import re

import pytest

from lib.logging import child_logging_thread, configure_logging_in_child
from lib.process import LoggingEvent


class LoggingEventRunComplete(LoggingEvent):
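    """A logging event that fires once the expected number of runs has been
    reported as finished in the server log.

    Every matching log line bumps a counter; the underlying event is only
    set once the counter reaches the target value.
    """
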
    def __init__(self, target):
        self.counter = 0
        self.target = target
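        # Matches a server log line reporting that a run has finished.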
        self.re = re.compile(r'run \d+ as finished')
        super().__init__(timeout=150)

    def log_line_matches(self, line):
        return bool(self.re.search(line))

    def set(self):
        self.counter += 1
        if self.counter == self.target:
            super().set()


def client_runner(log_queue, client, runs_per_process, repo):
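    """Entry point of a single worker process: trigger runs_per_process runs
    for the repository, with logging routed to the parent via log_queue.
    """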
    with configure_logging_in_child(log_queue):
        logging.info('Executing %s client runs', runs_per_process)
        for _ in range(runs_per_process):
            client.run('run', repo.path, 'HEAD')


def _test_repo_internal(env, repo, numof_processes, runs_per_process):
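    """Spawn numof_processes client processes, each triggering
    runs_per_process runs, and verify that every run is recorded as finished
    with the expected output.
    """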
    numof_runs = numof_processes * runs_per_process

    event = LoggingEventRunComplete(numof_runs)
    # Count the number of times the server receives the "run complete" message.
    env.server.logger.add_event(event)

    with child_logging_thread() as log_queue:
        ctx = mp.get_context('spawn')
        args = (log_queue, env.client, runs_per_process, repo)
        processes = [ctx.Process(target=client_runner, args=args) for _ in range(numof_processes)]
        for proc in processes:
            proc.start()

        event.wait()

        for proc in processes:
            proc.join()

    assert numof_runs == repo.count_run_files()

    runs = env.db.get_all_runs()
    assert numof_runs == len(runs)

    for run_id, status, ec, output, url, rev in runs:
        assert status == 'finished', f'Invalid status for run {run_id}: {status}'
        assert repo.run_output_matches(output), f"Output doesn't match: {output}"


# Reference: https://github.com/pytest-dev/pytest/issues/3628
# Automatic generation of readable test IDs.
def my_parametrize(names, values, ids=None, **kwargs):
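    """Wrap pytest.mark.parametrize to generate readable test IDs of the form
    'name=value' (joined with '-' for multiple parameters), e.g.
    'numof_clients=10-runs_per_client=50' instead of the default '10-50'.
    """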
    _names = names.split(',') if isinstance(names, str) else names
    if not ids:
        if len(_names) == 1:
            ids = [f'{names}={v}' for v in values]
        else:
            ids = [
                '-'.join(f'{k}={v}' for k, v in zip(_names, combination))
                for combination in values
            ]
    return pytest.mark.parametrize(names, values, ids=ids, **kwargs)


@my_parametrize('runs_per_client', [1, 5])
@my_parametrize('numof_clients', [1, 5])
def test_repo(env, test_repo, numof_clients, runs_per_client):
    _test_repo_internal(env, test_repo, numof_clients, runs_per_client)


@pytest.mark.stress
@my_parametrize(('numof_clients', 'runs_per_client'),
                [(10, 50), (1, 2000), (4, 500)])
def test_repo_stress(env, test_repo, numof_clients, runs_per_client):
    _test_repo_internal(env, test_repo, numof_clients, runs_per_client)