diff options
author | Egor Tensin <Egor.Tensin@gmail.com> | 2023-07-10 13:00:06 +0200 |
---|---|---|
committer | Egor Tensin <Egor.Tensin@gmail.com> | 2023-07-10 13:05:10 +0200 |
commit | 6926dfdd8d9ed7c8fe3e82f2c59cf13c81396462 (patch) | |
tree | 0ac69600009d1c36282527c4e421e37269de7af1 /test/py/test_repo.py | |
parent | test: code style (diff) | |
download | cimple-6926dfdd8d9ed7c8fe3e82f2c59cf13c81396462.tar.gz cimple-6926dfdd8d9ed7c8fe3e82f2c59cf13c81396462.zip |
test: fix Python freezes
I would get random freezes when running tests; I completely forgot that
logging & multiprocessing don't play well together.
Diffstat (limited to '')
-rw-r--r-- | test/py/test_repo.py | 30 |
1 file changed, 19 insertions, 11 deletions
diff --git a/test/py/test_repo.py b/test/py/test_repo.py index f451e8f..e3a498d 100644 --- a/test/py/test_repo.py +++ b/test/py/test_repo.py @@ -3,11 +3,13 @@ # For details, see https://github.com/egor-tensin/cimple. # Distributed under the MIT License. -from multiprocessing import Process +import logging +import multiprocessing as mp import re import pytest +from lib.logging import child_logging_thread, configure_logging_in_child from lib.process import LoggingEvent @@ -27,6 +29,13 @@ class LoggingEventRunComplete(LoggingEvent): super().set() +def client_runner(log_queue, client, runs_per_process, repo): + with configure_logging_in_child(log_queue): + logging.info('Executing %s clients', runs_per_process) + for i in range(runs_per_process): + client.run('run', repo.path, 'HEAD') + + def _test_repo_internal(env, repo, numof_processes, runs_per_process): numof_runs = numof_processes * runs_per_process @@ -34,18 +43,17 @@ def _test_repo_internal(env, repo, numof_processes, runs_per_process): # Count the number of times the server receives the "run complete" message. env.server.logger.add_event(event) - def client_runner(): - for i in range(runs_per_process): - env.client.run('run', repo.path, 'HEAD') - - processes = [Process(target=client_runner) for i in range(numof_processes)] - for proc in processes: - proc.start() + with child_logging_thread() as log_queue: + ctx = mp.get_context('spawn') + args = (log_queue, env.client, runs_per_process, repo) + processes = [ctx.Process(target=client_runner, args=args) for i in range(numof_processes)] + for proc in processes: + proc.start() - event.wait() + event.wait() - for proc in processes: - proc.join() + for proc in processes: + proc.join() assert numof_runs == repo.count_run_files() |