LingxinMeng 7 miesięcy temu
rodzic
commit
058533a008

+ 9 - 9
src/python2/pjibot_delivery/2csv-pjibot_delivery.py

@@ -103,28 +103,28 @@ if __name__ == '__main__':
     endpoint = 'oss-cn-beijing-gqzl-d01-a.ops.gqzl-cloud.com'
     bucket = oss2.Bucket(auth, endpoint, 'open-bucket')
     while True:
-        logging.info("开始新一轮扫描")
+        logging.info("开始新一轮扫描: %s", key1)
         try:
             local_delete_list = []
             oss_delete_list = []
             prefix_list = []
             # 2 获取已经上传完成的所有目录并分组
             for obj1 in oss2.ObjectIterator(bucket, prefix=key1):
-                # 获取合并后的包
-                merged_bag_object_key = str(obj1.key)
-                # print(f'判断1{merged_bag_object_key}')
-                if 'data_merge' in str(obj1.key) and str(obj1.key).endswith('.bag'):
+                if 'data_merge' in str(obj1.key) and str(obj1.key).endswith('.bag'):  # data_merge下的bag是等待解析的
+                    # 获取合并后的包
+                    merged_bag_object_key = str(obj1.key)
                     merged_bag_object_key_split = merged_bag_object_key.split('/')
-                    merged_prefix = '/'.join(merged_bag_object_key_split[:-1])
+                    merged_prefix = '/'.join(merged_bag_object_key_split[:-1])  # data_merge 目录
                     parse_prefix = merged_prefix.replace('data_merge', 'data_parse')
-                    parse_prefix_full = merged_bag_object_key.replace('data_merge', 'data_parse')[:-4] + '/'
+                    parse_prefix_full = merged_bag_object_key.replace(
+                        'data_merge', 'data_parse')[:-4] + '/'  # data_parse 目录; strip only the trailing '.bag'
                     callback_undone = False
                     csv1_done = False
                     csv2_done = False
                     csv3_done = False
                     csv4_done = False
                     pdf_done = False
-                    for obj2 in oss2.ObjectIterator(bucket, prefix=parse_prefix_full):
+                    for obj2 in oss2.ObjectIterator(bucket, prefix=parse_prefix_full):  # 判断 data_parse 目录下是否有解析后的文件
                         if '/callback.json' in str(obj2.key):
                             callback_undone = True
                         if '/trajectory_pji.csv' in str(obj2.key):
@@ -142,7 +142,7 @@ if __name__ == '__main__':
                     if csv1_done and csv2_done and csv3_done and csv4_done and pdf_done:
                         continue
 
-                    logging.info("开始生成场景还原csv: %s" % str(obj1.key))
+                    logging.info("开始生成场景还原csv: %s", str(obj1.key))
                     local_merged_bag_path = path3 + merged_bag_object_key
                     local_merged_dir = '/'.join(local_merged_bag_path.split('/')[:-1])
                     local_parse_dir = local_merged_dir.replace('data_merge', 'data_parse')

+ 8 - 8
src/python2/pjibot_delivery/csv-pjibot_delivery.py

@@ -103,28 +103,28 @@ if __name__ == '__main__':
     endpoint = 'oss-cn-beijing-gqzl-d01-a.ops.gqzl-cloud.com'
     bucket = oss2.Bucket(auth, endpoint, 'pji-bucket1')
     while True:
-        logging.info("开始新一轮扫描")
+        logging.info("开始新一轮扫描: %s", key1)
         try:
             local_delete_list = []
             oss_delete_list = []
             prefix_list = []
             # 2 获取已经上传完成的所有目录并分组
             for obj1 in oss2.ObjectIterator(bucket, prefix=key1):
-                # 获取合并后的包
-                merged_bag_object_key = str(obj1.key)
-                # print(f'判断1{merged_bag_object_key}')
-                if 'data_merge' in str(obj1.key) and str(obj1.key).endswith('.bag'):
+                if 'data_merge' in str(obj1.key) and str(obj1.key).endswith('.bag'):  # data_merge下的bag是等待解析的
+                    # 获取合并后的包
+                    merged_bag_object_key = str(obj1.key)
                     merged_bag_object_key_split = merged_bag_object_key.split('/')
-                    merged_prefix = '/'.join(merged_bag_object_key_split[:-1])
+                    merged_prefix = '/'.join(merged_bag_object_key_split[:-1])  # data_merge 目录
                     parse_prefix = merged_prefix.replace('data_merge', 'data_parse')
-                    parse_prefix_full = merged_bag_object_key.replace('data_merge', 'data_parse')[:-4] + '/'
+                    parse_prefix_full = merged_bag_object_key.replace(
+                        'data_merge', 'data_parse')[:-4] + '/'  # data_parse 目录; strip only the trailing '.bag'
                     callback_undone = False
                     csv1_done = False
                     csv2_done = False
                     csv3_done = False
                     csv4_done = False
                     pdf_done = False
-                    for obj2 in oss2.ObjectIterator(bucket, prefix=parse_prefix_full):
+                    for obj2 in oss2.ObjectIterator(bucket, prefix=parse_prefix_full): # 判断 data_parse 目录下是否有解析后的文件
                         if '/callback.json' in str(obj2.key):
                             callback_undone = True
                         if '/trajectory_pji.csv' in str(obj2.key):

+ 8 - 8
src/python2/pjibot_patrol/csv-pjibot_patrol.py

@@ -103,28 +103,28 @@ if __name__ == '__main__':
     endpoint = 'oss-cn-beijing-gqzl-d01-a.ops.gqzl-cloud.com'
     bucket = oss2.Bucket(auth, endpoint, 'pji-bucket1')
     while True:
-        logging.info("开始新一轮扫描")
+        logging.info("开始新一轮扫描: %s", key1)
         try:
             local_delete_list = []
             oss_delete_list = []
             prefix_list = []
             # 2 获取已经上传完成的所有目录并分组
             for obj1 in oss2.ObjectIterator(bucket, prefix=key1):
-                # 获取合并后的包
-                merged_bag_object_key = str(obj1.key)
-                # print(f'判断1{merged_bag_object_key}')
-                if 'data_merge' in str(obj1.key) and str(obj1.key).endswith('.bag'):
+                if 'data_merge' in str(obj1.key) and str(obj1.key).endswith('.bag'):  # data_merge下的bag是等待解析的
+                    # 获取合并后的包
+                    merged_bag_object_key = str(obj1.key)
                     merged_bag_object_key_split = merged_bag_object_key.split('/')
-                    merged_prefix = '/'.join(merged_bag_object_key_split[:-1])
+                    merged_prefix = '/'.join(merged_bag_object_key_split[:-1])  # data_merge 目录
                     parse_prefix = merged_prefix.replace('data_merge', 'data_parse')
-                    parse_prefix_full = merged_bag_object_key.replace('data_merge', 'data_parse')[:-4] + '/'
+                    parse_prefix_full = merged_bag_object_key.replace(
+                        'data_merge', 'data_parse')[:-4] + '/'  # data_parse 目录; strip only the trailing '.bag'
                     callback_undone = False
                     csv1_done = False
                     csv2_done = False
                     csv3_done = False
                     csv4_done = False
                     pdf_done = False
-                    for obj2 in oss2.ObjectIterator(bucket, prefix=parse_prefix_full):
+                    for obj2 in oss2.ObjectIterator(bucket, prefix=parse_prefix_full): # 判断 data_parse 目录下是否有解析后的文件
                         if '/callback.json' in str(obj2.key):
                             callback_undone = True
                         if '/trajectory_pji.csv' in str(obj2.key):