# HG changeset patch
# User Vincent Hatakeyama <vincent.hatakeyama@xcg-consulting.fr>
# Date 1567167782 -7200
#      Fri Aug 30 14:23:02 2019 +0200
# Node ID a1ea1b12d41b2b7d63661b9db7046acf6d5766e4
# Parent  3fbab90723e3415e559fc3604a54ccb8becfdb80
✨ add option to specify language, default to no language

diff --git a/import_csv.py b/import_csv.py
--- a/import_csv.py
+++ b/import_csv.py
@@ -13,6 +13,7 @@
 import hmac
 import logging
 import os
+import re
 import sys
 from datetime import datetime
 from typing import List, Tuple
@@ -22,9 +23,9 @@
 
 _logger = logging.getLogger(__name__)
 
-__version__ = '1.0.1'
+__version__ = '1.0.2'
 __date__ = '2019-07-29'
-__updated__ = '2019-08-01'
+__updated__ = '2019-08-30'
 
 
 def import_csv(
@@ -32,7 +33,7 @@
         database: str,
         protocol: str, host: str, port: int,
         timeout: int,
-        model_filenames: List[Tuple[str, str]],
+        model_filenames: List[Tuple[str, str, str]],
         update_parameter: bool = True,
 ) -> int:
     _logger.info(
@@ -59,7 +60,6 @@
 
     import_obj = o.env["base_import.import"]
    param_obj = o.env["ir.config_parameter"]
-    # user_obj = o.env["res.users"]
 
     _logger.info("Getting a CSRF token")
     # Reproduce what odoo.http::csrf_token does to build a CSRF token.
@@ -86,9 +86,17 @@
         "headers": True,
     }
     _logger.info(f"{len(model_filenames)} files to import")
-    for model, csv_file in model_filenames:
-        _logger.info('Importing - %s in model %s', csv_file, model)
-        import_dlg_id = import_obj.create({'res_model': model})
+    for model, csv_file, lang in model_filenames:
+        _logger.info(
+            'Importing - %s in model %s (lang %s)', csv_file, model, lang)
+        context = {
+            'lang': lang,
+            # TODO add tz maybe
+        }
+        import_dlg_id = import_obj.create(
+            {'res_model': model},
+            context=context,
+        )
         with open(csv_file, 'rb') as f:
             m = MultipartEncoder(
                 fields={
@@ -103,13 +111,18 @@
                 data=data,
                 headers={'Content-Type': m.content_type}
             )
+        # in Odoo, context contains user lang, but that is ok so far
         _logger.debug('response: %s', response)
         # This returns a lot of stuff we don't need, except for the field list.
-        fields = import_obj.parse_preview(import_dlg_id, import_options)[
-            "headers"]
+        fields = import_obj.parse_preview(
+            import_dlg_id, import_options,
+        )["headers"]
         # Got fields; now run the import.
         errors = list()
-        results = import_obj.do(import_dlg_id, fields, import_options)
+        # this is where setting the context is necessary
+        results = import_obj.do(
+            import_dlg_id, fields, import_options, context=context
+        )
         for result in results:
             if result['type'] == 'error':
                 errors.append(result['message'])
@@ -226,7 +239,14 @@
     )
 
     model_filenames = list()
-    # ⚠assume files are named ".* model_name.csv" or ".* model_name"
+    # files need to be named according to the following regexp
+    pattern = re.compile(
+        r'(?P<path>.*/)'
+        r'(?:\d+\s)?'
+        r'(?P<model>[\w.]+?)'
+        r'(?:@(?P<lang>\w{2,3}(?:@latin|_\w{2})))?'
+        r'(?:.csv)?'
+    )
     file_list = list()
     if nmspc.file:
         file_list.extend(nmspc.file)
@@ -236,12 +256,13 @@
                 file_list.extend(os.path.join(root, f) for f in sorted(files))
     for filename in file_list:
         _logger.debug(f"{filename} to import")
-        model_csv = os.path.basename(filename).split(' ')[-1]
-        if model_csv[-4:] == '.csv':
-            model = model_csv[:-4]
+        result = pattern.fullmatch(filename)
+        if result:
+            model = result.group('model')
+            lang = result.group('lang')
+            model_filenames.append((model, filename, lang))
         else:
-            model = model_csv
-        model_filenames.append((model, filename))
+            _logger.warning('Unrecognized file name "%s", ignored', filename)
 
     return import_csv(
         login=nmspc.login,
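
For reference (not part of the changeset): a quick sketch of how the new file-naming convention behaves, reusing the exact regular expression the patch adds. The sample paths below are made up for illustration only; non-matching names are skipped with a warning, as in the patched code.

import re

# Identical to the pattern added to import_csv.py by this changeset.
pattern = re.compile(
    r'(?P<path>.*/)'
    r'(?:\d+\s)?'
    r'(?P<model>[\w.]+?)'
    r'(?:@(?P<lang>\w{2,3}(?:@latin|_\w{2})))?'
    r'(?:.csv)?'  # note: the dot is unescaped, so it matches any character
)

# Hypothetical file names, shown only to illustrate the convention.
samples = [
    'data/01 res.partner.csv',        # model res.partner, no language
    'data/02 res.partner@fr_FR.csv',  # model res.partner, language fr_FR
    'data/product.template@sr@latin.csv',  # codes like sr@latin also match
    'res.partner.csv',  # no directory part, so no match: ignored with a warning
]
for name in samples:
    match = pattern.fullmatch(name)
    if match:
        print(name, '->', match.group('model'), match.group('lang'))
    else:
        print(name, '-> ignored')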