Question
I am designing Python multiprocessing code that works on a queue which may be updated while it is being processed. The following code sometimes works, sometimes gets stuck, and sometimes raises an Empty error.
import multiprocessing as mp

def worker(working_queue, output_queue):
    while True:
        if working_queue.empty() is True:
            break
        else:
            picked = working_queue.get_nowait()
            if picked % 2 == 0:
                output_queue.put(picked)
            else:
                working_queue.put(picked+1)
    return
if __name__ == '__main__':
    manager = mp.Manager()
    static_input = xrange(100)
    working_q = manager.Queue()
    output_q = mp.Queue()
    for i in static_input:
        working_q.put(i)
    processes = [mp.Process(target=worker, args=(working_q, output_q)) for i in range(mp.cpu_count())]
    for proc in processes:
        proc.start()
    for proc in processes:
        proc.join()
    results_bank = []
    while True:
        if output_q.empty() is True:
            break
        results_bank.append(output_q.get_nowait())
    print len(results_bank)  # should equal the length of static_input, the range used to populate the input queue; in other words, this tells whether all the items placed for processing were actually processed
    results_bank.sort()
    print results_bank
Should I use a list as a global variable and lock it, instead of a manager.Queue()?
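For comparison, the list-based alternative I have in mind would look roughly like this: a manager.list() guarded by a lock, so that the check-then-pop sequence is atomic across processes. Just an untested sketch; the names are illustrative:

import multiprocessing as mp

def worker(working_list, output_queue, lock):
    while True:
        with lock:
            if len(working_list) == 0:
                break  # the lock is released when the with-block exits
            picked = working_list.pop()
            if picked % 2 == 0:
                output_queue.put(picked)
            else:
                working_list.append(picked + 1)

if __name__ == '__main__':
    manager = mp.Manager()
    working_l = manager.list(range(100))  # shared list instead of a queue
    output_q = mp.Queue()
    lock = mp.Lock()
    processes = [mp.Process(target=worker, args=(working_l, output_q, lock)) for i in range(mp.cpu_count())]
    for proc in processes:
        proc.start()
    for proc in processes:
        proc.join()
    results = []
    while not output_q.empty():
        results.append(output_q.get_nowait())
    print len(results)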
Answer 1:
I just added a try/except block to handle the Empty error. The results now seem to be consistent. Please let me know if you find any problems I overlooked in this solution.
import multiprocessing as mp

def worker(working_queue, output_queue):
    while True:
        try:
            if working_queue.empty() is True:
                break
            else:
                picked = working_queue.get_nowait()
                if picked % 2 == 0:
                    output_queue.put(picked)
                else:
                    working_queue.put(picked+1)
        except Exception:
            continue
    return

if __name__ == '__main__':
    # A Manager seems to be unnecessary.
    #manager = mp.Manager()
    #working_q = manager.Queue()
    working_q = mp.Queue()
    output_q = mp.Queue()
    static_input = xrange(100)
    for i in static_input:
        working_q.put(i)
    processes = [mp.Process(target=worker, args=(working_q, output_q)) for i in range(mp.cpu_count())]
    for proc in processes:
        proc.start()
    for proc in processes:
        proc.join()
    results_bank = []
    while True:
        if output_q.empty() is True:
            break
        results_bank.append(output_q.get_nowait())
    print len(results_bank)  # should equal the length of static_input, the range used to populate the input queue; in other words, this tells whether all the items placed for processing were actually processed
    results_bank.sort()
    print results_bank
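A stricter variant would catch only the Empty exception that get_nowait() raises (Queue.Empty in Python 2), so that genuine errors in the worker still surface instead of being swallowed by the bare except. A minimal, untested sketch:

import multiprocessing as mp
from Queue import Empty  # the exception get_nowait() raises

def worker(working_queue, output_queue):
    while True:
        if working_queue.empty():
            break
        try:
            picked = working_queue.get_nowait()
        except Empty:
            continue  # lost the race to another worker; re-check the queue
        if picked % 2 == 0:
            output_queue.put(picked)
        else:
            working_queue.put(picked + 1)
    return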
Answer 2:
Just use a lock to protect access to the shared data; it's safer (and will protect you from weird behavior across processes):
import multiprocessing as mp

def worker(working_queue, output_queue, lock):
    while True:
        shouldBreak = False
        lock.acquire()
        if working_queue.empty() is True:
            shouldBreak = True
        else:
            picked = working_queue.get_nowait()
            if picked % 2 == 0:
                output_queue.put(picked)
            else:
                working_queue.put(picked+1)
        lock.release()
        if shouldBreak:
            break
    return

if __name__ == '__main__':
    manager = mp.Manager()
    static_input = xrange(1000)
    working_q = manager.Queue()
    output_q = mp.Queue()
    lock = mp.Lock()
    for i in static_input:
        working_q.put(i)
    processes = [mp.Process(target=worker, args=(working_q, output_q, lock)) for i in range(mp.cpu_count())]
    for proc in processes:
        proc.start()
    for proc in processes:
        proc.join()
    results_bank = []
    while True:
        if output_q.empty() is True:
            break
        results_bank.append(output_q.get_nowait())
    print len(results_bank)  # should equal the length of static_input, the range used to populate the input queue; in other words, this tells whether all the items placed for processing were actually processed
    results_bank.sort()
    print results_bank
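One caveat: with explicit acquire()/release(), any exception raised inside the critical section would leave the lock held forever and deadlock the remaining workers. Wrapping the body in try/finally guarantees the release; an untested sketch of the same worker with that guard:

import multiprocessing as mp

def worker(working_queue, output_queue, lock):
    while True:
        shouldBreak = False
        lock.acquire()
        try:
            if working_queue.empty():
                shouldBreak = True
            else:
                picked = working_queue.get_nowait()
                if picked % 2 == 0:
                    output_queue.put(picked)
                else:
                    working_queue.put(picked + 1)
        finally:
            lock.release()  # runs even if the critical section raises
        if shouldBreak:
            break
    return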
Source: https://stackoverflow.com/questions/21680903/using-a-a-manager-for-updating-a-queue-in-a-python-multiprocess