15 | 15 | import numpy as np |
16 | 16 | import pandas as pd |
17 | 17 | import pytest |
18 | | -from simulation.model import Defaults, Exponential, Model, Runner |
| 18 | +import simpy |
| 19 | +from simulation.model import ( |
| 20 | + Defaults, Exponential, Model, Runner, MonitoredResource) |
19 | 21 | |
20 | 22 | |
21 | 23 | def test_new_attribute(): |
@@ -201,18 +203,14 @@ def helper_warmup(warm_up_period): |
201 | 203 | # and queue time greater than 0 |
202 | 204 | assert first_warmup['arrival_time'] > 500, ( |
203 | 205 | 'Expect first patient to arrive after time 500 when model is run ' + |
204 | | - f'with warm-up, but got {first_warmup["arrival_time"]}.' |
| 206 | + f'with warm-up (length 500), but got {first_warmup["arrival_time"]}.' |
205 | 207 | ) |
206 | 208 | assert first_warmup['q_time_nurse'] > 0, ( |
207 | 209 | 'Expect first patient to need to queue in model with warm-up and ' + |
208 | 210 | f'high arrival rate, but got {first_warmup["q_time_nurse"]}.' |
209 | 211 | ) |
210 | 212 | |
211 | | - # Check that model without warm-up has arrival and queue time of 0 |
212 | | - assert first_none['arrival_time'] == 0, ( |
213 | | - 'Expect first patient to arrive at time 0 when model is run ' + |
214 | | - f'without warm-up, but got {first_none["arrival_time"]}.' |
215 | | - ) |
| 213 | + # Check that, in model without warm-up, first patient has 0 queue time |
216 | 214 | assert first_none['q_time_nurse'] == 0, ( |
217 | 215 | 'Expect first patient to have no wait time in model without warm-up ' + |
218 | 216 | f'but got {first_none["q_time_nurse"]}.' |
@@ -429,3 +427,81 @@ def test_parallel(): |
429 | 427 | pd.testing.assert_frame_equal( |
430 | 428 | results['seq']['interval_audit'], results['par']['interval_audit']) |
431 | 429 | assert results['seq']['run'] == results['par']['run'] |
| 430 | + |
| 431 | + |
| 432 | +def test_consistent_metrics(): |
| 433 | + """ |
| 434 | + Presently, the simulation code includes a few different methods to |
| 435 | + calculate the same metrics. We would expect each to return similar results |
| 436 | + (with a little tolerance, as we expect some deviation due to methodological |
| 437 | + differences). |
| 438 | + """ |
| 439 | + # Run default model |
| 440 | + experiment = Runner(Defaults()) |
| 441 | + experiment.run_reps() |
| 442 | + |
| 443 | + # Absolute tolerance (atol) = +- 0.001 |
| 444 | + |
| 445 | + # Check nurse utilisation |
| 446 | + pd.testing.assert_series_equal( |
| 447 | + experiment.run_results_df['mean_nurse_utilisation'], |
| 448 | + experiment.run_results_df['mean_nurse_utilisation_tw'], |
| 449 | + atol=0.001, |
| 450 | + check_names=False) |
| 451 | + |
| 452 | + |
| 453 | +def test_monitoredresource_cleanup(): |
| 454 | + """ |
| 455 | + Run simple example and check that the monitored resource calculations |
| 456 | + are as expected (e.g. clean-up was performed appropriately at end of |
| 457 | + simulation). |
| 458 | + """ |
| 459 | + # Simulation setup |
| 460 | + env = simpy.Environment() |
| 461 | + resource = MonitoredResource(env, capacity=1) |
| 462 | + |
| 463 | + def process_task(env, resource, duration): |
| 464 | + """Simulate a task that requests the resource.""" |
| 465 | + with resource.request() as req: |
| 466 | + yield req |
| 467 | + yield env.timeout(duration) |
| 468 | + |
| 469 | + # Set run length |
| 470 | + run_length = 12 |
| 471 | + |
| 472 | + # Schedule tasks to occur during the simulation |
| 473 | + env.process(process_task(env, resource, duration=5)) # Task A |
| 474 | + env.process(process_task(env, resource, duration=10)) # Task B |
| 475 | + env.process(process_task(env, resource, duration=15)) # Task C |
| 476 | + |
| 477 | + # Run the simulation |
| 478 | + env.run(until=run_length) |
| 479 | + |
| 480 | + # If the simulation ends while resources are still in use or requests are |
| 481 | + # still in the queue, the time between the last recorded event and the |
| 482 | + # simulation end will not have been accounted for. Hence, we call |
| 483 | + # update_time_weighted_stats() to cover the time from the last event to the end. |
| 484 | + resource.update_time_weighted_stats() |
| 485 | + |
| 486 | + # Assertions |
| 487 | + # At time=12: |
| 488 | + # - Task A is done (0-5) |
| 489 | + # - Task B is still using the resource (5-15) |
| 490 | + # - Task C is still waiting in the queue (would hold the resource 15-30) |
| 491 | + # Hence... |
| 492 | + |
| 493 | + # Expected queue time: 17, as task B queues for 5 and task C for 12 |
| 494 | + expected_queue_time = 17.0 |
| 495 | + |
| 496 | + # Expected busy time: 12, as one resource busy for the whole simulation |
| 497 | + expected_busy_time = 12.0 |
| 498 | + |
| 499 | + # Run assertions |
| 500 | + assert resource.area_n_in_queue == expected_queue_time, ( |
| 501 | + f'Expected queue time {expected_queue_time} but ' + |
| 502 | + f'observed {resource.area_n_in_queue}.' |
| 503 | + ) |
| 504 | + assert resource.area_resource_busy == expected_busy_time, ( |
| 505 | + f'Expected busy time {expected_busy_time} but ' + |
| 506 | + f'observed {resource.area_resource_busy}.' |
| 507 | + ) |
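
Note on the `MonitoredResource` assumptions in `test_monitoredresource_cleanup`: the test relies on the resource accumulating time-weighted areas for queue length (`area_n_in_queue`) and busy servers (`area_resource_busy`), plus a manual `update_time_weighted_stats()` call to close out the final interval. A minimal sketch of such a class is shown below; it assumes a `simpy.Resource` subclass and is an illustration of the technique only, not the actual implementation in `simulation.model`. With the three tasks above (durations 5, 10 and 15, capacity 1, run length 12) it yields the expected areas: queue = 2*5 + 1*7 = 17 and busy = 1*5 + 1*7 = 12.

```python
import simpy


class MonitoredResource(simpy.Resource):
    """Sketch of a resource that tracks time-weighted queue and busy areas.

    Assumed design for illustration only; the real MonitoredResource in
    simulation.model may differ.
    """

    def __init__(self, env, capacity=1):
        super().__init__(env, capacity)
        self.env = env
        self.time_last_event = env.now
        self.area_n_in_queue = 0.0
        self.area_resource_busy = 0.0

    def update_time_weighted_stats(self):
        """Accumulate queue-length x elapsed and busy-servers x elapsed for
        the interval since the last recorded event. Called on every state
        change, and manually at the end of a run to capture the final
        interval (as in the test above)."""
        elapsed = self.env.now - self.time_last_event
        self.area_n_in_queue += len(self.queue) * elapsed
        self.area_resource_busy += self.count * elapsed
        self.time_last_event = self.env.now

    def request(self, *args, **kwargs):
        # Record the state before the new request joins the queue.
        self.update_time_weighted_stats()
        return super().request(*args, **kwargs)

    def release(self, *args, **kwargs):
        # Record the state before the resource is handed to the next user.
        self.update_time_weighted_stats()
        return super().release(*args, **kwargs)
```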