[FIX] PEP8 compliance and review comments

Branch: pull/4/head
Author: Maxime Chambreuil, 11 years ago
Commit: 0042904f45
Changed files:

  1. base_external_dbsource/__init__.py (2 lines changed)
  2. base_external_dbsource/__openerp__.py (9 lines changed)
  3. base_external_dbsource/base_external_dbsource.py (339 lines changed)
  4. import_odbc/__init__.py (2 lines changed)
  5. import_odbc/__openerp__.py (13 lines changed)
  6. import_odbc/import_odbc.py (102 lines changed)

base_external_dbsource/__init__.py (2 lines changed)

@@ -19,6 +19,6 @@
 #
 ##############################################################################
-import base_external_dbsource
+from . import base_external_dbsource
 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

base_external_dbsource/__openerp__.py (9 lines changed)

@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 ##############################################################################
 #
-# Daniel Reis, 2011
+# Daniel Reis, 2011
 # Additional contributions by Maxime Chambreuil, Savoir-faire Linux
 #
 # This program is free software: you can redistribute it and/or modify

@@ -27,11 +27,11 @@
 This module allows you to define connections to foreign databases using ODBC,
 Oracle Client or SQLAlchemy.

-Databases sources can be configured in Settings > Configuration -> Data sources.
+Database sources can be configured in Settings > Configuration -> Data sources.

 Depending on the database, you need:
 * to install unixodbc and python-pyodbc packages to use ODBC connections.
-* to install FreeTDS driver (tdsodbc package) and configure it through ODBC to
+* to install FreeTDS driver (tdsodbc package) and configure it through ODBC to
 connect to Microsoft SQL Server.
 * to install and configure Oracle Instant Client and cx_Oracle python library
 to connect to Oracle.

@@ -44,7 +44,6 @@ Depending on the database, you need:
     'depends': [
         'base',
     ],
-    'init': [],
     'data': [
         'base_external_dbsource_view.xml',
         'security/ir.model.access.csv',

@@ -54,7 +53,7 @@ Depending on the database, you need:
     ],
     'test': [
         'dbsource_connect.yml',
-    ],
+    ],
     'installable': True,
     'active': False,
 }

base_external_dbsource/base_external_dbsource.py (339 lines changed)

@@ -1,159 +1,180 @@
-# -*- coding: utf-8 -*-
-##############################################################################
-#
-# Daniel Reis
-# 2011
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-##############################################################################
-
-import os
-from osv import fields, osv
-from openerp.tools.translate import _
-import openerp.tools as tools
-import logging
-_logger = logging.getLogger(__name__)
-
-CONNECTORS = []
-
-try:
-    import sqlalchemy
-    import pymssql
-    CONNECTORS.append( ('mssql', 'Microsoft SQL Server') )
-except:
-    _logger.info('MS SQL Server not available. Please install "slqalchemy" and "pymssql" python package.')
-
-try:
-    import sqlalchemy
-    import MySQLdb
-    CONNECTORS.append( ('mysql', 'MySQL') )
-except:
-    _logger.info('MySQL not available. Please install "slqalchemy" and "mysqldb" python package.')
-
-try:
-    import pyodbc
-    CONNECTORS.append( ('pyodbc', 'ODBC') )
-except:
-    _logger.info('ODBC libraries not available. Please install "unixodbc" and "python-pyodbc" packages.')
-
-try:
-    import cx_Oracle
-    CONNECTORS.append( ('cx_Oracle', 'Oracle') )
-except:
-    _logger.info('Oracle libraries not available. Please install "cx_Oracle" python package.')
-
-import psycopg2
-CONNECTORS.append( ('postgresql', 'PostgreSQL') )
-
-try:
-    import sqlalchemy
-    CONNECTORS.append( ('sqlite', 'SQLite') )
-except:
-    _logger.info('SQLAlchemy not available. Please install "slqalchemy" python package.')
-
-
-class base_external_dbsource(osv.osv):
-    _name = "base.external.dbsource"
-    _description = 'External Database Sources'
-    _columns = {
-        'name': fields.char('Datasource name', required=True, size=64),
-        'conn_string': fields.text('Connection string', help="""\
-Sample connection strings:
-  - Microsoft SQL Server: mssql+pymssql://username:%s@server:port/dbname?charset=utf8
-  - MySQL: mysql://user:%s@server:port/dbname
-  - ODBC: DRIVER={FreeTDS};SERVER=server.address;Database=mydb;UID=sa
-  - ORACLE: username/%s@//server.address:port/instance
-  - PostgreSQL: dbname='template1' user='dbuser' host='localhost' port='5432' password=%s
-  - SQLite: sqlite:///test.db
-"""),
-        'password': fields.char('Password' , size=40),
-        'connector': fields.selection(CONNECTORS, 'Connector', required=True,
-            help = "If a connector is missing from the list, check the " \
-                 + "server log to confirm that the required componentes were detected."),
-    }
-
-    def conn_open(self, cr, uid, id1):
-        #Get dbsource record
-        data = self.browse(cr, uid, id1)
-        #Build the full connection string
-        connStr = data.conn_string
-        if data.password:
-            if '%s' not in data.conn_string:
-                connStr += ';PWD=%s'
-            connStr = connStr % data.password
-        #Try to connect
-        if data.connector == 'cx_Oracle':
-            os.environ['NLS_LANG'] = 'AMERICAN_AMERICA.UTF8'
-            conn = cx_Oracle.connect(connStr)
-        elif data.connector == 'pyodbc':
-            conn = pyodbc.connect(connStr)
-        elif data.connector in ('sqlite','mysql','mssql'):
-            conn = sqlalchemy.create_engine(connStr).connect()
-        elif data.connector == 'postgresql':
-            conn = psycopg2.connect(connStr)
-        return conn
-
-    def execute(self, cr, uid, ids, sqlquery, sqlparams=None, metadata=False, context=None):
-        """Executes SQL and returns a list of rows.
-
-            "sqlparams" can be a dict of values, that can be referenced in the SQL statement
-            using "%(key)s" or, in the case of Oracle, ":key".
-            Example:
-                sqlquery = "select * from mytable where city = %(city)s and date > %(dt)s"
-                params   = {'city': 'Lisbon', 'dt': datetime.datetime(2000, 12, 31)}
-
-            If metadata=True, it will instead return a dict containing the rows list and the columns list,
-            in the format:
-                { 'cols': [ 'col_a', 'col_b', ...]
-                , 'rows': [ (a0, b0, ...), (a1, b1, ...), ...] }
-        """
-        data = self.browse(cr, uid, ids)
-        rows, cols = list(), list()
-        for obj in data:
-            conn = self.conn_open(cr, uid, obj.id)
-            if obj.connector in ["sqlite","mysql","mssql"]:
-                #using sqlalchemy
-                cur = conn.execute(sqlquery, sqlparams)
-                if metadata: cols = cur.keys()
-                rows = [r for r in cur]
-            else:
-                #using other db connectors
-                cur = conn.cursor()
-                cur.execute(sqlquery, sqlparams)
-                if metadata: cols = [d[0] for d in cur.description]
-                rows = cur.fetchall()
-            conn.close()
-        if metadata:
-            return{'cols': cols, 'rows': rows}
-        else:
-            return rows
-
-    def connection_test(self, cr, uid, ids, context=None):
-        for obj in self.browse(cr, uid, ids, context):
-            conn = False
-            try:
-                conn = self.conn_open(cr, uid, obj.id)
-            except Exception, e:
-                raise osv.except_osv(_("Connection test failed!"), _("Here is what we got instead:\n %s") % tools.ustr(e))
-            finally:
-                try:
-                    if conn: conn.close()
-                except Exception:
-                    # ignored, just a consequence of the previous exception
-                    pass
-
-        #TODO: if OK a (wizard) message box should be displayed
-        raise osv.except_osv(_("Connection test succeeded!"), _("Everything seems properly set up!"))
-
-base_external_dbsource()
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+# Daniel Reis
+# 2011
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+##############################################################################
+
+import os
+import logging
+from openerp.osv import orm, fields, osv
+from openerp.tools.translate import _
+import openerp.tools as tools
+
+_logger = logging.getLogger(__name__)
+
+CONNECTORS = []
+
+try:
+    import sqlalchemy
+    import pymssql
+    CONNECTORS.append(('mssql', 'Microsoft SQL Server'))
+except:
+    _logger.info('MS SQL Server not available. Please install "sqlalchemy"\
+ and "pymssql" python package.')
+
+try:
+    import sqlalchemy
+    import MySQLdb
+    CONNECTORS.append(('mysql', 'MySQL'))
+except:
+    _logger.info('MySQL not available. Please install "sqlalchemy" and\
+ "mysqldb" python package.')
+
+try:
+    import pyodbc
+    CONNECTORS.append(('pyodbc', 'ODBC'))
+except:
+    _logger.info('ODBC libraries not available. Please install "unixodbc"\
+ and "python-pyodbc" packages.')
+
+try:
+    import cx_Oracle
+    CONNECTORS.append(('cx_Oracle', 'Oracle'))
+except:
+    _logger.info('Oracle libraries not available. Please install "cx_Oracle"\
+ python package.')
+
+import psycopg2
+CONNECTORS.append(('postgresql', 'PostgreSQL'))
+
+try:
+    import sqlalchemy
+    CONNECTORS.append(('sqlite', 'SQLite'))
+except:
+    _logger.info('SQLAlchemy not available. Please install "sqlalchemy"\
+ python package.')
+
+
+class base_external_dbsource(orm.Model):
+    _name = "base.external.dbsource"
+    _description = 'External Database Sources'
+    _columns = {
+        'name': fields.char('Datasource name', required=True, size=64),
+        'conn_string': fields.text('Connection string', help="""
+Sample connection strings:
+  - Microsoft SQL Server:
+    mssql+pymssql://username:%s@server:port/dbname?charset=utf8
+  - MySQL: mysql://user:%s@server:port/dbname
+  - ODBC: DRIVER={FreeTDS};SERVER=server.address;Database=mydb;UID=sa
+  - ORACLE: username/%s@//server.address:port/instance
+  - PostgreSQL:
+    dbname='template1' user='dbuser' host='localhost' port='5432' password=%s
+  - SQLite: sqlite:///test.db
+"""),
+        'password': fields.char('Password', size=40),
+        'connector': fields.selection(CONNECTORS, 'Connector',
+                                      required=True,
+                                      help="If a connector is missing from the\
+ list, check the server log to confirm\
+ that the required components were\
+ detected."),
+    }
+
+    def conn_open(self, cr, uid, id1):
+        #Get dbsource record
+        data = self.browse(cr, uid, id1)
+        #Build the full connection string
+        connStr = data.conn_string
+        if data.password:
+            if '%s' not in data.conn_string:
+                connStr += ';PWD=%s'
+            connStr = connStr % data.password
+        #Try to connect
+        if data.connector == 'cx_Oracle':
+            os.environ['NLS_LANG'] = 'AMERICAN_AMERICA.UTF8'
+            conn = cx_Oracle.connect(connStr)
+        elif data.connector == 'pyodbc':
+            conn = pyodbc.connect(connStr)
+        elif data.connector in ('sqlite', 'mysql', 'mssql'):
+            conn = sqlalchemy.create_engine(connStr).connect()
+        elif data.connector == 'postgresql':
+            conn = psycopg2.connect(connStr)
+        return conn
+
+    def execute(self, cr, uid, ids, sqlquery, sqlparams=None, metadata=False,
+                context=None):
+        """Executes SQL and returns a list of rows.
+
+            "sqlparams" can be a dict of values, that can be referenced in
+            the SQL statement using "%(key)s" or, in the case of Oracle,
+            ":key".
+            Example:
+                sqlquery = "select * from mytable where city = %(city)s and
+                            date > %(dt)s"
+                params   = {'city': 'Lisbon',
+                            'dt': datetime.datetime(2000, 12, 31)}
+
+            If metadata=True, it will instead return a dict containing the
+            rows list and the columns list, in the format:
+                { 'cols': [ 'col_a', 'col_b', ...]
+                , 'rows': [ (a0, b0, ...), (a1, b1, ...), ...] }
+        """
+        data = self.browse(cr, uid, ids)
+        rows, cols = list(), list()
+        for obj in data:
+            conn = self.conn_open(cr, uid, obj.id)
+            if obj.connector in ["sqlite", "mysql", "mssql"]:
+                #using sqlalchemy
+                cur = conn.execute(sqlquery, sqlparams)
+                if metadata:
+                    cols = cur.keys()
+                rows = [r for r in cur]
+            else:
+                #using other db connectors
+                cur = conn.cursor()
+                cur.execute(sqlquery, sqlparams)
+                if metadata:
+                    cols = [d[0] for d in cur.description]
+                rows = cur.fetchall()
+            conn.close()
+        if metadata:
+            return {'cols': cols, 'rows': rows}
+        else:
+            return rows
+
+    def connection_test(self, cr, uid, ids, context=None):
+        for obj in self.browse(cr, uid, ids, context):
+            conn = False
+            try:
+                conn = self.conn_open(cr, uid, obj.id)
+            except Exception, e:
+                raise osv.except_osv(_("Connection test failed!"),
+                                     _("Here is what we got instead:\n %s")
+                                     % tools.ustr(e))
+            finally:
+                try:
+                    if conn:
+                        conn.close()
+                except Exception:
+                    # ignored, just a consequence of the previous exception
+                    pass
+
+        #TODO: if OK a (wizard) message box should be displayed
+        raise osv.except_osv(_("Connection test succeeded!"),
+                             _("Everything seems properly set up!"))
+#EOF
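
Note on the diff above: conn_open()'s password handling is unchanged by this commit. When the stored connection string has no "%s" placeholder, a ";PWD=%s" suffix is appended before the password is interpolated. A minimal standalone sketch of that logic, runnable outside OpenERP (the function name build_conn_string is ours, not the module's):

# Illustrative sketch only; mirrors the substitution done in conn_open().
def build_conn_string(conn_string, password=None):
    conn_str = conn_string
    if password:
        if '%s' not in conn_str:
            conn_str += ';PWD=%s'
        conn_str = conn_str % password
    return conn_str

# An ODBC-style string without a placeholder gets ';PWD=...' appended:
print(build_conn_string('DRIVER={FreeTDS};SERVER=srv;Database=mydb;UID=sa',
                        'secret'))
# An Oracle-style string carries an explicit %s placeholder:
print(build_conn_string('username/%s@//server.address:1521/instance',
                        'secret'))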

import_odbc/__init__.py (2 lines changed)

@@ -19,6 +19,6 @@
 #
 ##############################################################################
-import import_odbc
+from . import import_odbc
 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

import_odbc/__openerp__.py (13 lines changed)

@@ -36,9 +36,9 @@ Features:
 * When errors are found, only the record with the error fails import. The other correct records are commited. However, the "last sync date" will only be automaticaly updated when no errors are found.
 * The import execution can be scheduled to run automatically.

-Examples:
+Examples:
 * Importing suppliers to res.partner:
-    SELECT distinct
+    SELECT distinct
         [SUPPLIER_CODE] as "ref"
         , [SUPPLIER_NAME] as "name"
         , 1 as "is_supplier"

@@ -50,13 +50,13 @@ Examples:
     SELECT PRODUCT_CODE as "ref"
         , PRODUCT_NAME as "name"
         , 'res_partner_id_'+SUPPLIER_ID as "partner_id/id"
-    FROM T_PRODUCTS
-    WHERE DATE_CHANGED >= %(sync)s
+    FROM T_PRODUCTS
+    WHERE DATE_CHANGED >= %(sync)s

 Improvements ideas waiting for a contributor:
 * Allow to import many2one fields (currently not supported). Done by adding a second SQL sentence to get child record list?
 * Allow "import sets" that can be executed at different time intervals using different scheduler jobs.
-* Allow to inactivate/delete OpenERP records when not present in an SQL result set.
+* Allow to inactivate/delete OpenERP records when not present in an SQL result set.
 """,
     'author': 'Daniel Reis',
     'website': 'http://launchpad.net/addons-tko',

@@ -68,7 +68,6 @@ Improvements ideas waiting for a contributor:
         'base',
         'base_external_dbsource',
     ],
-    'init': [],
     'data': [
         'import_odbc_view.xml',
         'security/ir.model.access.csv',

@@ -76,7 +75,7 @@ Improvements ideas waiting for a contributor:
     'demo': [
         'import_odbc_demo.xml',
     ],
-    'test': [],
+    'test': [],
     'installable': True,
     'active': False,
 }
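
Note on the description above: the %(sync)s placeholder in the SQL examples is not substituted by import_odbc itself; it is passed as a named parameter to execute() on base.external.dbsource, which hands it to the database driver. A minimal standalone sketch of that pyformat-style binding, assuming a psycopg2 connection (connection details and table name are hypothetical):

import psycopg2
from datetime import datetime

conn = psycopg2.connect("dbname='template1' user='dbuser' host='localhost'")
cur = conn.cursor()
# The driver binds %(sync)s from the params dict, just as
# base.external.dbsource's execute() does with its sqlparams argument:
cur.execute('SELECT product_code FROM t_products'
            ' WHERE date_changed >= %(sync)s',
            {'sync': datetime(1900, 1, 1)})
rows = cur.fetchall()
conn.close()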

import_odbc/import_odbc.py (102 lines changed)

@@ -21,14 +21,15 @@
 import sys
 from datetime import datetime
-from osv import fields, osv
+from openerp.osv import orm, fields
 import logging
 _logger = logging.getLogger(__name__)
 _loglvl = _logger.getEffectiveLevel()
-SEP = '|'
-
-class import_odbc_dbtable(osv.osv):
-    _name="import.odbc.dbtable"
+SEP = '|'
+
+
+class import_odbc_dbtable(orm.Model):
+    _name = "import.odbc.dbtable"
     _description = 'Import Table Data'
     _order = 'exec_order'
     _columns = {

@@ -36,7 +37,7 @@ class import_odbc_dbtable(osv.osv):
         'enabled': fields.boolean('Execution enabled'),
         'dbsource_id': fields.many2one('base.external.dbsource', 'Database source', required=True),
         'sql_source': fields.text('SQL', required=True, help='Column names must be valid "import_data" columns.'),
-        'model_target': fields.many2one('ir.model','Target object'),
+        'model_target': fields.many2one('ir.model', 'Target object'),
         'noupdate': fields.boolean('No updates', help="Only create new records; disable updates to existing records."),
         'exec_order': fields.integer('Execution order', help="Defines the order to perform the import"),
         'last_sync': fields.datetime('Last sync date', help="Datetime for the last succesfull sync. Later changes on the source may not be replicated on the destination"),

@@ -46,11 +47,11 @@ class import_odbc_dbtable(osv.osv):
         'last_error_count': fields.integer('Last error count', readonly=True),
         'last_warn_count': fields.integer('Last warning count', readonly=True),
         'last_log': fields.text('Last run log', readonly=True),
-        'ignore_rel_errors': fields.boolean('Ignore relationship errors',
-            help = "On error try to reimport rows ignoring relationships."),
-        'raise_import_errors': fields.boolean('Raise import errors',
-            help = "Import errors not handled, intended for debugging purposes."
-                 + "\nAlso forces debug messages to be written to the server log."),
+        'ignore_rel_errors': fields.boolean('Ignore relationship errors',
+            help="On error try to reimport rows ignoring relationships."),
+        'raise_import_errors': fields.boolean('Raise import errors',
+            help="Import errors not handled, intended for debugging purposes."
+                 "\nAlso forces debug messages to be written to the server log."),
     }
     _defaults = {
         'enabled': True,

@@ -60,14 +61,14 @@ class import_odbc_dbtable(osv.osv):
     def _import_data(self, cr, uid, flds, data, model_obj, table_obj, log):
         """Import data and returns error msg or empty string"""

-        def find_m2o(field_list):
+        def find_m2o(field_list):
             """"Find index of first column with a one2many field"""
             for i, x in enumerate(field_list):
-                if len(x)>3 and x[-3:] == ':id' or x[-3:] == '/id':
+                if len(x) > 3 and x[-3:] == ':id' or x[-3:] == '/id':
                     return i
             return -1

-        def append_to_log(log, level, obj_id = '', msg = '', rel_id = ''):
+        def append_to_log(log, level, obj_id='', msg='', rel_id=''):
             if '_id_' in obj_id:
                 obj_id = '.'.join(obj_id.split('_')[:-2]) + ': ' + obj_id.split('_')[-1]
             if ': .' in msg and not rel_id:

@@ -76,10 +77,8 @@ class import_odbc_dbtable(osv.osv):
                 rel_id = '.'.join(rel_id.split('_')[:-2]) + ': ' + rel_id.split('_')[-1]
                 msg = msg[:msg.find(': .')]
             log['last_log'].append('%s|%s\t|%s\t|%s' % (level.ljust(5), obj_id, rel_id, msg))
-        _logger.debug( data )
-        cols = list(flds) #copy to avoid side effects
+        _logger.debug(data)
+        cols = list(flds)  # copy to avoid side effects
         errmsg = str()
         if table_obj.raise_import_errors:
             model_obj.import_data(cr, uid, cols, [data], noupdate=table_obj.noupdate)

@@ -88,15 +87,14 @@ class import_odbc_dbtable(osv.osv):
                 model_obj.import_data(cr, uid, cols, [data], noupdate=table_obj.noupdate)
             except:
                 errmsg = str(sys.exc_info()[1])
         if errmsg and not table_obj.ignore_rel_errors:
             #Fail
-            append_to_log(log, 'ERROR', data, errmsg )
+            append_to_log(log, 'ERROR', data, errmsg)
             log['last_error_count'] += 1
             return False
         if errmsg and table_obj.ignore_rel_errors:
             #Warn and retry ignoring many2one fields...
-            append_to_log(log, 'WARN', data, errmsg )
+            append_to_log(log, 'WARN', data, errmsg)
             log['last_warn_count'] += 1
             #Try ignoring each many2one (tip: in the SQL sentence select more problematic FKs first)
             i = find_m2o(cols)

@@ -107,32 +105,30 @@ class import_odbc_dbtable(osv.osv):
                 self._import_data(cr, uid, cols, data, model_obj, table_obj, log)
             else:
                 #Fail
-                append_to_log(log, 'ERROR', data, 'Removed all m2o keys and still fails.' )
+                append_to_log(log, 'ERROR', data, 'Removed all m2o keys and still fails.')
                 log['last_error_count'] += 1
                 return False
         return True

     def import_run(self, cr, uid, ids=None, context=None):
         db_model = self.pool.get('base.external.dbsource')
         actions = self.read(cr, uid, ids, ['id', 'exec_order'])
-        actions.sort(key = lambda x:(x['exec_order'], x['id']))
+        actions.sort(key=lambda x: (x['exec_order'], x['id']))
         #Consider each dbtable:
         for action_ref in actions:
             obj = self.browse(cr, uid, action_ref['id'])
-            if not obj.enabled: continue #skip
+            if not obj.enabled:
+                continue  # skip
             _logger.setLevel(obj.raise_import_errors and logging.DEBUG or _loglvl)
             _logger.debug('Importing %s...' % obj.name)
             #now() microseconds are stripped to avoid problem with SQL smalldate
-            #TODO: convert UTC Now to local timezone (http://stackoverflow.com/questions/4770297/python-convert-utc-datetime-string-to-local-datetime)
+            #TODO: convert UTC Now to local timezone
+            #http://stackoverflow.com/questions/4770297/python-convert-utc-datetime-string-to-local-datetime
             model_name = obj.model_target.model
-            model_obj = self.pool.get(model_name)
+            model_obj = self.pool.get(model_name)
             xml_prefix = model_name.replace('.', '_') + "_id_"
             log = {'start_run': datetime.now().replace(microsecond=0),
                    'last_run': None,

@@ -143,11 +139,14 @@ class import_odbc_dbtable(osv.osv):
             self.write(cr, uid, [obj.id], log)

             #Prepare SQL sentence; replace "%s" with the last_sync date
-            if obj.last_sync: sync = datetime.strptime(obj.last_sync, "%Y-%m-%d %H:%M:%S")
-            else: sync = datetime.datetime(1900, 1, 1, 0, 0, 0)
+            if obj.last_sync:
+                sync = datetime.strptime(obj.last_sync, "%Y-%m-%d %H:%M:%S")
+            else:
+                sync = datetime(1900, 1, 1, 0, 0, 0)
             params = {'sync': sync}
-            res = db_model.execute(cr, uid, [obj.dbsource_id.id], obj.sql_source, params, metadata=True)
+            res = db_model.execute(cr, uid, [obj.dbsource_id.id],
+                                   obj.sql_source, params, metadata=True)

             #Exclude columns titled "None"; add (xml_)"id" column
             cidx = [i for i, x in enumerate(res['cols']) if x.upper() != 'NONE']
             cols = [x for i, x in enumerate(res['cols']) if x.upper() != 'NONE'] + ['id']

@@ -159,46 +158,49 @@ class import_odbc_dbtable(osv.osv):
                 for i in cidx:
                     #TODO: Handle imported datetimes properly - convert from localtime to UTC!
                     v = row[i]
-                    if isinstance(v, str): v = v.strip()
+                    if isinstance(v, str):
+                        v = v.strip()
                     data.append(v)
-                data.append( xml_prefix + str(row[0]).strip() )
+                data.append(xml_prefix + str(row[0]).strip())

                 #Import the row; on error, write line to the log
                 log['last_record_count'] += 1
                 self._import_data(cr, uid, cols, data, model_obj, obj, log)
                 if log['last_record_count'] % 500 == 0:
-                    _logger.info('...%s rows processed...' % (log['last_record_count']) )
+                    _logger.info('...%s rows processed...' % (log['last_record_count']))

             #Finished importing all rows
             #If no errors, write new sync date
             if not (log['last_error_count'] or log['last_warn_count']):
                 log['last_sync'] = log['start_run']
             level = logging.DEBUG
-            if log['last_warn_count']: level = logging.WARN
-            if log['last_error_count']: level = logging.ERROR
+            if log['last_warn_count']:
+                level = logging.WARN
+            if log['last_error_count']:
+                level = logging.ERROR
             _logger.log(level, 'Imported %s , %d rows, %d errors, %d warnings.' % (
-                model_name, log['last_record_count'], log['last_error_count'] ,
-                log['last_warn_count'] ) )
+                model_name, log['last_record_count'], log['last_error_count'],
+                log['last_warn_count']))

             #Write run log, either if the table import is active or inactive
-            if log['last_log']:
+            if log['last_log']:
                 log['last_log'].insert(0, 'LEVEL|== Line == |== Relationship ==|== Message ==')
-            log.update( {'last_log': '\n'.join(log['last_log'])} )
-            log.update({ 'last_run': datetime.now().replace(microsecond=0) }) #second=0,
+            log.update({'last_log': '\n'.join(log['last_log'])})
+            log.update({'last_run': datetime.now().replace(microsecond=0)})
             self.write(cr, uid, [obj.id], log)

         #Finished
         _logger.debug('Import job FINISHED.')
         return True

     def import_schedule(self, cr, uid, ids, context=None):
         cron_obj = self.pool.get('ir.cron')
         new_create_id = cron_obj.create(cr, uid, {
             'name': 'Import ODBC tables',
             'interval_type': 'hours',
-            'interval_number': 1,
+            'interval_number': 1,
             'numbercall': -1,
             'model': 'import.odbc.dbtable',
-            'function': 'import_run',
+            'function': 'import_run',
             'doall': False,
             'active': True
             })

@@ -210,5 +212,5 @@ class import_odbc_dbtable(osv.osv):
             'res_id': new_create_id,
             'type': 'ir.actions.act_window',
         }
-import_odbc_dbtable()
+#EOF
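
Note on the diff above: import_run() builds an external ID for every imported row from the target model name and the first column of the result set, which is why the module description selects 'res_partner_id_'+SUPPLIER_ID as "partner_id/id". A standalone sketch of that convention (the helper name xml_id_for_row is ours, not the module's):

def xml_id_for_row(model_name, row):
    # Mirrors import_run(): dots in the model name become underscores,
    # then "_id_" and the first column of the row are appended.
    xml_prefix = model_name.replace('.', '_') + "_id_"
    return xml_prefix + str(row[0]).strip()

print(xml_id_for_row('res.partner', ('S001', 'Acme Supplies')))
# -> res_partner_id_S001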