Commit 531c9167 authored by Deathcrow

minor fixes

+ start_time was incorrect
+ Popen.wait() buggy with multiprocessing
parent 0215cf7b
@@ -148,7 +148,8 @@ def clean_up():
     tcpprobe.terminate()
     if args.tcpdump == "1":
         tcpdump.terminate()
-#    cpunetlog.terminate()
+    if args.cpunetlog != "":
+        cpunetlog.terminate()
     for i in sender:
         if i['utility'].returncode == None:
             i['utility'].kill()
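
The new guard terminates the cpunetlog logger only when one was actually started. Below is a minimal sketch of the same guard-then-terminate clean-up pattern; clean_up_processes, optional_procs and utilities are hypothetical names standing in for the script's globals, and Popen.poll() is used because returncode is only refreshed by poll() or wait():

def clean_up_processes(optional_procs, utilities):
    # Hypothetical helper mirroring clean_up(): terminate optional helpers
    # (tcpprobe, tcpdump, cpunetlog) only if they were actually started.
    for proc in optional_procs:
        if proc is not None:
            proc.terminate()
    # Kill any measurement utility that is still running; poll() refreshes
    # Popen.returncode, which otherwise only updates after wait().
    for util in utilities:
        if util.poll() is None:
            util.kill()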
@@ -265,11 +266,10 @@ for key, i in enumerate(sender):
     i['utility_file'] = open(args.tmp_folder+"/"+args.utility+"_"+i['congestion']+"_"+str(key), 'w+')
     # print(i['src']+":"+i['src_port']+" "+i['dst']+":"+i['port'])
-#setup done, run tests
-start_time = time.perf_counter()
+#setup done, run tests in 5 seconds
+start_time = time.perf_counter() + 5
+with multiprocessing.Pool(min(multiprocessing.cpu_count(), len(sender))) as pool:
-scheduled_time = time.perf_counter() + 5
-with multiprocessing.Pool(len(sender)) as pool:
     # for key, i in enumerate(sender):
     # i['utility'] = subprocess.Popen(i['utility_command'], stdout=i['utility_file'])
@@ -277,7 +277,7 @@ with multiprocessing.Pool(min(multiprocessing.cpu_count(), len(sender))) as pool
     # StartUtilityThread(i, scheduled_time).start()
     # i['utility'] = pool.apply(start_utility, [key, scheduled_time])
    # i['utility'] = pool.apply(testy, [start_time, 0])
-    pool_results = [pool.apply_async(start_utility, [key, scheduled_time]) for key, i in enumerate(sender)]
+    pool_results = [pool.apply_async(start_utility, [key, start_time]) for key, i in enumerate(sender)]
     # print(pool_results[0].get())
     # i['thread'] = multiprocessing.Process(target=start_utility, args=(key, scheduled_time))
     # i['thread'].start()
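
For context on the start_time change: each pool worker now receives the same precomputed start time, sleeps until that moment before launching its command, and the same value serves as the measurement's zero point. A minimal, self-contained sketch of that pattern under this reading; run_at and the sleep commands are hypothetical stand-ins for start_utility and the netperf invocations:

import multiprocessing
import subprocess
import time

def run_at(cmd, scheduled):
    # Hypothetical stand-in for start_utility: sleep until the shared start
    # time, launch the command, and wait on it in the process that owns it.
    # time.perf_counter() is system-wide, so a value computed in the parent
    # is meaningful inside the workers on the same machine.
    time.sleep(max(0.0, scheduled - time.perf_counter()))
    return subprocess.Popen(cmd).wait()

if __name__ == "__main__":
    cmds = [["sleep", "1"], ["sleep", "2"]]       # stand-ins for the netperf commands
    start_time = time.perf_counter() + 5          # one shared start time, 5 s from now
    with multiprocessing.Pool(min(multiprocessing.cpu_count(), len(cmds))) as pool:
        results = [pool.apply_async(run_at, [cmd, start_time]) for cmd in cmds]
        exit_codes = [r.get() for r in results]   # blocks until every command has finished
    print(exit_codes)

Because get() returns the worker's return value, it also reports the real exit status, which sidesteps the wait() problem addressed in the next hunk.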
@@ -346,8 +346,12 @@ if not args.legacy:
 else:
     time.sleep(float(args.time))
-for i in sender:
-    i['utility'].wait()
+#for i in sender:
+#    i['utility'].wait()
+# wait doesn't work with multiprocessing (?! returncode always 0, even when netperf still running)
+time.sleep(5) # sleep 5 more seconds to allow netperf processes to finish
 clean_up()
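
The commented-out wait loop points at a real limitation: start_utility runs in the pool's worker processes, so the netperf Popen objects are presumably created there, and wait() or returncode on the parent's side no longer track the processes actually launched, which matches the "returncode always 0" observation in the comment; hence the fixed five-second sleep. A hedged alternative, assuming start_utility itself wait()s on the process it launches and that this code still runs inside the "with ... as pool:" block, is to block on the AsyncResult objects instead of sleeping:

# Hypothetical replacement for the fixed time.sleep(5): each get() returns
# only after its worker, and therefore the netperf process that worker
# waited on, has exited.  Only valid while the pool is still open.
for res in pool_results:
    res.get()
clean_up()

The committed workaround is simpler; its cost is that clean_up() will kill any utility that needs more than the extra five seconds to finish.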