Python multiprocessing queue error: AttributeError: Can't pickle local object

Today, when test-191204-Single camera calls multiprocessing thread queue recognition.py spawned a multiprocessing worker for recognition, the following error was reported.

D:\20191031_tensorflow_yolov3\python\python.exe D:/20191031_tensorflow_yolov3/tensorflow-yolov3/test-191204-Single camera calls multiprocessing thread queue recognition.py
2019-12-05 14:40:38.472262: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
2019-12-05 14:40:39.549686: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1411] Found device 0 with properties:
name: GeForce GTX 1080 Ti major: 6 minor: 1 memoryClockRate(GHz): 1.6575
pciBusID: 0000:0e:00.0
totalMemory: 11.00GiB freeMemory: 9.10GiB
2019-12-05 14:40:39.731129: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1411] Found device 1 with properties:
name: GeForce GT 710 major: 3 minor: 5 memoryClockRate(GHz): 0.954
pciBusID: 0000:05:00.0
totalMemory: 2.00GiB freeMemory: 1.67GiB
2019-12-05 14:40:39.731780: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1461] Ignoring visible gpu device (device: 1, name: GeForce GT 710, pci bus id: 0000:05:00.0, compute capability: 3.5) with Cuda compute capability 3.5. The minimum required Cuda capability is 3.7.
2019-12-05 14:40:39.732402: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1490] Adding visible gpu devices: 0
2019-12-05 14:40:41.527990: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-12-05 14:40:41.528315: I tensorflow/core/common_runtime/gpu/gpu_device.cc:977]      0 1
2019-12-05 14:40:41.528511: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0:   N N
2019-12-05 14:40:41.528706: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 1:   N N
2019-12-05 14:40:41.529236: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1103] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 8789 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1080 Ti, pci bus id: 0000:0e:00.0, compute capability: 6.1)
Traceback (most recent call last):
  File "D:/20191031_tensorflow_yolov3/tensorflow-yolov3/test-191204-Single camera calls multiprocessing thread queue recognition.py", line 223, in <module>
    YoloTest().dontla_evaluate_detect()
  File "D:/20191031_tensorflow_yolov3/tensorflow-yolov3/test-191204-Single camera calls multiprocessing thread queue recognition.py", line 201, in dontla_evaluate_detect
    process.start()
  File "D:\20191031_tensorflow_yolov3\python\lib\multiprocessing\process.py", line 105, in start
    self._popen = self._Popen(self)
  File "D:\20191031_tensorflow_yolov3\python\lib\multiprocessing\context.py", line 223, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "D:\20191031_tensorflow_yolov3\python\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "D:\20191031_tensorflow_yolov3\python\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
    reduction.dump(process_obj, to_child)
  File "D:\20191031_tensorflow_yolov3\python\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
AttributeError: Can't pickle local object 'YoloTest.dontla_evaluate_detect.<locals>.predict_result'

Process finished with exit code 1
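The last two traceback frames show what happened: on Windows, multiprocessing starts children with the spawn method (popen_spawn_win32.py), so the Process target must be pickled and sent to the new interpreter (reduction.dump). Pickle serializes a function as a reference to its qualified import name, and a function defined inside another function, such as YoloTest.dontla_evaluate_detect.<locals>.predict_result here, has no importable name, so the dump fails. A minimal sketch that reproduces the same error on Windows (all names illustrative):

import multiprocessing


def main():
    # A nested (local) function: pickle cannot serialize it because its only
    # qualified name is 'main.<locals>.worker', which a child cannot import.
    def worker(q):
        q.put('hello')

    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(q,))
    # On Windows (spawn start method) this raises:
    # AttributeError: Can't pickle local object 'main.<locals>.worker'
    p.start()
    p.join()


if __name__ == '__main__':
    main()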

The code that produced the error:



# -*- coding: utf-8 -*-
'''
@File    : test-191204-Single camera calls multiprocessing thread queue recognition.py
@Time    : 2019/12/5 13:50
@Author  : Dontla
@Email   : root@xxxxx
@Software: PyCharm
'''
import multiprocessing
import functools

import cv2
import numpy as np
import tensorflow as tf
import pyrealsense2 as rs

import core.utils as utils
from core.config import cfg
from core.yolov3 import YOLOV3


class YoloTest(object):
    def __init__(self):
        # D·C 191111: __C.TEST.INPUT_SIZE = 544
        self.input_size = cfg.TEST.INPUT_SIZE
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        # Dontla 191106: class-name dictionary built from the class.names file
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        # D·C 191115: number of classes
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        # D·C 191111: __C.TEST.SCORE_THRESHOLD = 0.3
        self.score_threshold = cfg.TEST.SCORE_THRESHOLD
        # D·C 191120: __C.TEST.IOU_THRESHOLD = 0.45
        self.iou_threshold = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        # D·C 191120: __C.TEST.ANNOT_PATH = './data/dataset/Dontla/20191023_Artificial_Flower/test.txt'
        self.annotation_path = cfg.TEST.ANNOT_PATH
        # D·C 191120: __C.TEST.WEIGHT_FILE = './checkpoint/f_g_c_weights_files/yolov3_test_loss=15.8845.ckpt-47'
        self.weight_file = cfg.TEST.WEIGHT_FILE
        # D·C 191115: writable flag (bool)
        self.write_image = cfg.TEST.WRITE_IMAGE
        # D·C 191115: __C.TEST.WRITE_IMAGE_PATH = './data/detection/' (where annotated frames are written)
        self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
        # D·C 191116: TEST.SHOW_LABEL is set to True
        self.show_label = cfg.TEST.SHOW_LABEL

        # D·C 191120: create the 'input' namespace (placeholders reserve space in the graph)
        with tf.name_scope('input'):
            self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
            self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')

        model = YOLOV3(self.input_data, self.trainable)
        self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox

        # D·C 191120: namespace for the exponential moving average
        with tf.name_scope('ema'):
            ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)

        # D·C 191120: allow_soft_placement=True lets TensorFlow fall back to an available device automatically
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        # D·C 191120: variables_to_restore() maps the shadow (moving-average) variables
        # back onto the variables themselves when loading the checkpoint
        self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        self.saver.restore(self.sess, self.weight_file)

    def predict(self, image):
        # D·C 191107: work on a copy so the original frame is not modified
        org_image = np.copy(image)
        # D·C 191107: source image size
        org_h, org_w, _ = org_image.shape
        # D·C 191108: letterbox the frame into an input_size x input_size square
        # (default 544x544: the resized source centered, gray bars above and below)
        image_data = utils.image_preprocess(image, [self.input_size, self.input_size])
        # print(image_data.shape)  # (544, 544, 3)
        # D·C 191108: add a batch axis
        image_data = image_data[np.newaxis, ...]
        # print(image_data.shape)  # (1, 544, 544, 3)
        # D·C 191110: raw prediction boxes at the three scales (many overlapping candidates)
        pred_sbbox, pred_mbbox, pred_lbbox = self.sess.run(
            [self.pred_sbbox, self.pred_mbbox, self.pred_lbbox],
            feed_dict={
                self.input_data: image_data,
                self.trainable: False
            }
        )
        # All three are numpy arrays with shapes:
        # (1, 68, 68, 3, 6), (1, 34, 34, 3, 6), (1, 17, 17, 3, 6)
        # D·C 191110: reshape each scale to (-1, 5 + num_classes) and stack them;
        # the trailing num_classes columns hold the per-class probabilities
        pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + self.num_classes)),
                                    np.reshape(pred_mbbox, (-1, 5 + self.num_classes)),
                                    np.reshape(pred_lbbox, (-1, 5 + self.num_classes))], axis=0)
        # print(pred_bbox.shape)  # (18207, 6)
        # D·C 191111: first filter, drops boxes scoring below score_threshold
        # D·C 191115: bboxes has shape [n, 6]: four coordinate columns, then score, then class index
        bboxes = utils.postprocess_boxes(pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
        # D·C 191111: second filter, non-maximum suppression with iou_threshold
        bboxes = utils.nms(bboxes, self.iou_threshold)
        return bboxes

    def dontla_evaluate_detect(self):
        pipeline1 = rs.pipeline()
        config1 = rs.config()
        ctx = rs.context()
        # Get the serial number of the connected camera
        serial1 = ctx.devices[0].get_info(rs.camera_info.serial_number)
        config1.enable_device(serial1)
        config1.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config1.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        pipeline1.start(config1)
        # Create the alignment object (align depth to color)
        align1 = rs.align(rs.stream.color)
        try:
            while True:
                frames1 = pipeline1.wait_for_frames()
                # Get the aligned frame set
                aligned_frames1 = align1.process(frames1)
                # Get the aligned depth frame and color frame
                aligned_depth_frame1 = aligned_frames1.get_depth_frame()
                color_frame1 = aligned_frames1.get_color_frame()
                # Get the color frame intrinsics
                color_profile1 = color_frame1.get_profile()
                cvsprofile1 = rs.video_stream_profile(color_profile1)
                color_intrin1 = cvsprofile1.get_intrinsics()
                color_intrin_part1 = [color_intrin1.ppx, color_intrin1.ppy, color_intrin1.fx, color_intrin1.fy]
                # if not aligned_depth_frame1 or not color_frame1:
                #     continue
                # if not aligned_depth_frame2 or not color_frame2:
                #     continue
                color_image1 = np.asanyarray(color_frame1.get_data())

                # D·C 191121: show the raw frame
                # cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
                # cv2.imshow('RealSense', color_frame)
                # cv2.waitKey(1)

                # A local function as the Process target: this is what triggers the error
                def predict_result(color_image, queue):
                    queue.put(self.predict(color_image))

                queue = multiprocessing.Queue()
                jobs = []
                for i in range(1):
                    process = multiprocessing.Process(target=predict_result, args=(color_image1, queue))
                    jobs.append(process)
                    process.start()
                # Wait for the processes to finish
                for process in jobs:
                    process.join()
                results = [queue.get() for j in jobs]
                bboxes_pr1 = results[0]
                # bboxes_pr1 = self.predict(color_image1)
                # bboxes_pr2 = self.predict(color_image2)

                image1 = utils.draw_bbox(color_image1, bboxes_pr1, aligned_depth_frame1, color_intrin_part1,
                                         show_label=self.show_label)
                # cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
                cv2.imshow('window1', image1)
                cv2.waitKey(1)
        finally:
            pipeline1.stop()


if __name__ == '__main__':
    YoloTest().dontla_evaluate_detect()
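The failing piece is the nested predict_result function passed as the Process target. Moving it to module level would make the function itself picklable, but the arguments get pickled too, and this worker closes over self, whose tf.Session cannot be pickled either. One possible workaround, a sketch only and not the author's verified fix, is to run the prediction in a thread instead of a process: threads share the parent's memory, so nothing is pickled, and sess.run() releases the GIL while TensorFlow computes. Inside dontla_evaluate_detect() the multiprocessing block could be replaced with something like:

import threading
import queue

# Threads need no pickling, so both the nested function and the
# un-picklable tf.Session inside self are fine here.
result_queue = queue.Queue()

def predict_result(color_image, q):
    q.put(self.predict(color_image))

t = threading.Thread(target=predict_result, args=(color_image1, result_queue))
t.start()
t.join()
bboxes_pr1 = result_queue.get()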

Reference solution: "AttributeError: Can't pickle local object" solutions
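The usual fix described in such articles, as a minimal sketch: define the worker at module level so the spawn pickler can reference it by its qualified name (note that everything passed through args must be picklable as well):

import multiprocessing


# Module-level function: pickle stores it as a reference to its qualified
# name, which the spawned child process can import, so start() succeeds.
def worker(q):
    q.put('hello')


def main():
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
    print(q.get())  # drain the queue before join to avoid blocking on large items
    p.join()


if __name__ == '__main__':
    main()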

In any case, I haven't solved it yet...