#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 16 May 2014

author: Thibault Francois <[email protected]>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''

from lib.internal.csv_reader import UnicodeWriter
from xmlrpclib import Fault
from lib import conf_lib
from lib.conf_lib import log_error, log_info
import argparse
import ast
import sys
import csv
from time import time
from itertools import islice, chain
from lib.internal.rpc_thread import RpcThread

# Allow arbitrarily large CSV fields (the default limit is 128 kB)
csv.field_size_limit(sys.maxsize)


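# Lazily split an iterable into chunks of at most `size` elements.
# All chunks share the same underlying iterator, so each yielded chunk
# must be fully consumed before the next one is requested.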
def batch(iterable, size):
    sourceiter = iter(iterable)
    while True:
        batchiter = islice(sourceiter, size)
        try:
            # Pull the first element eagerly so an exhausted source
            # ends the generator instead of yielding empty chunks
            yield chain([batchiter.next()], batchiter)
        except StopIteration:
            return

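# Thread pool that exports batches of records over RPC and keeps the
# results in memory until write_file() is called.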
class RPCThreadExport(RpcThread):

    def __init__(self, max_connection, model, header, writer, batch_size=20, context=None):
        super(RPCThreadExport, self).__init__(max_connection)
        self.model = model
        self.header = header
        self.batch_size = batch_size
        self.writer = writer
        self.context = context
        # Per-batch results, keyed by batch number
        self.result = {}

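    # Run export_data for one batch of ids in a worker thread; the result
    # is stored under its batch number instead of being written immediately.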
    def launch_batch(self, data_ids, batch_number):
        def launch_batch_fun(data_ids, batch_number):
            st = time()
            try:
                self.result[batch_number] = self.model.export_data(data_ids, self.header, context=self.context)['datas']
            except Fault as e:
                log_error("export %s failed" % batch_number)
                log_error(e.faultString)
            except Exception:
                log_info("Unknown problem")
                exc_type, exc_value, exc_traceback = sys.exc_info()
                # traceback.print_tb(exc_traceback, file=sys.stdout)
                log_error(exc_type)
                log_error(exc_value)
            log_info("time for batch %s: %s" % (batch_number, time() - st))

        self.spawn_thread(launch_batch_fun, [data_ids, batch_number], {})

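    # Called once all workers have finished: re-assembles the per-batch
    # results into a single CSV, in batch-number order.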
    def write_file(self, file_writer):
        file_writer.writerow(self.header)
        # sorted() keeps batches in submission order; plain dict iteration
        # would interleave them arbitrarily
        for key in sorted(self.result):
            file_writer.writerows(self.result[key])


parser = argparse.ArgumentParser(description='Export data in batch and in parallel')
parser.add_argument('-c', '--config', dest='config', default="conf/connection.conf", help='Configuration file that contains the connection parameters')
parser.add_argument('--file', dest='filename', help='Output file', required=True)
parser.add_argument('--model', dest='model', help='Model to export', required=True)
parser.add_argument('--field', dest='fields', help='Comma-separated list of fields to export', required=True)
parser.add_argument('--domain', dest='domain', help='Search domain used as filter, a valid Python list', default="[]")
parser.add_argument('--worker', dest='worker', default=1, help='Number of simultaneous connections')
parser.add_argument('--size', dest='batch_size', default=10, help='Number of lines to export per connection')
parser.add_argument('-s', '--sep', dest="separator", default=";", help='CSV separator')
parser.add_argument('--context', dest='context', help='Context passed to the export call; must be a valid Python dict', default="{'tracking_disable' : True}")
# TODO args: encoding
# {'update_many2many': True, 'tracking_disable': True, 'create_product_variant': True, 'check_move_validity': False}
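# Example invocation (script and file names are illustrative):
#   python export_thread.py -c conf/connection.conf --file res_partner.csv \
#       --model res.partner --field id,name,email --worker 2 --size 100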
args = parser.parse_args()

config_file = args.config
file_csv = args.filename
batch_size = int(args.batch_size)
model = args.model
max_connection = int(args.worker)
separator = args.separator
# utf-8-sig prepends a BOM so spreadsheet tools detect the encoding
encoding = 'utf-8-sig'
# literal_eval accepts only Python literals, unlike the unsafe eval()
context = ast.literal_eval(args.context)
domain = ast.literal_eval(args.domain)

header = args.fields.split(',')

object_registry = conf_lib.get_server_connection(config_file).get_model(model)

file_result = open(file_csv, "wb")
writer = UnicodeWriter(file_result, delimiter=separator, encoding=encoding, quoting=csv.QUOTE_ALL)

rpc_thread = RPCThreadExport(max_connection, object_registry, header, writer, batch_size, context)
st = time()

ids = object_registry.search(domain, context=context)

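# Dispatch the ids in batches; each batch is exported by a worker thread
# and indexed by its position so the output preserves the search order.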
i = 0
for b in batch(ids, batch_size):
    batch_ids = list(b)
    rpc_thread.launch_batch(batch_ids, i)
    i += 1

rpc_thread.wait()
log_info("%s %s exported, total time %s second(s)" % (len(ids), model, (time() - st)))
log_info("Writing file")
rpc_thread.write_file(writer)
file_result.close()