@@ -135,7 +135,7 @@ def dummy_test(*, commits):
     )
     # Do not check the reference commits, it's a bit underspecified quite what they
     # mean, other than that the dummy_test assertions below should pass
-    ref_commits = {
+    commits = {
         package: algorithm_result.pop(f"{package}_ref")
         for package in package_names
         if package != culprit
@@ -144,12 +144,10 @@ def dummy_test(*, commits):
         f"{culprit}_bad": bad_commit,
         f"{culprit}_good": good_commit,
     } == algorithm_result
-    assert dummy_test(
-        commits={culprit: algorithm_result[f"{culprit}_good"]} | ref_commits
-    ).result
-    assert not dummy_test(
-        commits={culprit: algorithm_result[f"{culprit}_bad"]} | ref_commits
-    ).result
+    commits[culprit] = algorithm_result[f"{culprit}_good"]
+    assert dummy_test(commits=commits).result
+    commits[culprit] = algorithm_result[f"{culprit}_bad"]
+    assert not dummy_test(commits=commits).result


 def other(project):
@@ -267,13 +265,15 @@ def dummy_test(*, commits):
     assert algorithm_result[f"{bad_project}_bad"] == bad_commit
     assert algorithm_result[f"{bad_project}_good"] == good_commit
     # Do check that the reference commit gives the expected results
-    ref_commits = {
+    commits = {
         proj: algorithm_result[f"{proj}_ref"]
         for proj in all_projects
         if proj != bad_project
     }
-    assert not dummy_test(commits={bad_project: bad_commit} | ref_commits).result
-    assert dummy_test(commits={bad_project: good_commit} | ref_commits).result
+    commits[bad_project] = bad_commit
+    assert not dummy_test(commits=commits).result
+    commits[bad_project] = good_commit
+    assert dummy_test(commits=commits).result


 @pytest.mark.parametrize(
0 commit comments