Samir S
4 years ago
commit
04d3093b76
9 changed files with 1065 additions and 0 deletions
-
2CHANGELOG
-
26CloudronManifest.json
-
9DESCRIPTION.md
-
20Dockerfile
-
7POSTINSTALL.md
-
BINlogo.png
-
272odoo10CE_install.sh
-
693sql_db.py
-
36start.sh
@ -0,0 +1,2 @@ |
|||
[0.1.0] |
|||
* Initial version |
@ -0,0 +1,26 @@ |
|||
{ |
|||
"id": "com.odoo-ce.community.cloudronapp", |
|||
"title": "Odoo Community", |
|||
"author": "Odoo", |
|||
"description": "file://DESCRIPTION.md", |
|||
"changelog": "file://CHANGELOG", |
|||
"postInstallMessage": "file://POSTINSTALL.md", |
|||
"tagline": "One-line description", |
|||
"version": "0.1.0", |
|||
"healthCheckPath": "/", |
|||
"httpPort": 8069, |
|||
"addons": { |
|||
"localstorage": {}, |
|||
"sendmail": {}, |
|||
"ldap" : {}, |
|||
"postgresql": {} |
|||
}, |
|||
"manifestVersion": 2, |
|||
"website": "https://odoo.com", |
|||
"contactEmail": "support@cloudron.io", |
|||
"icon": "file://logo.png", |
|||
"tags": [ |
|||
"crm" |
|||
], |
|||
"mediaLinks": [ ] |
|||
} |
@ -0,0 +1,9 @@ |
|||
Odoo, formerly known as OpenERP, is a suite of open-source business apps |
|||
written in Python and released under the LGPLv3 license. This suite of |
|||
applications covers all business needs, from Website/Ecommerce down to |
|||
manufacturing, inventory and accounting, all seamlessly integrated. |
|||
Odoo's technical features include a distributed server, flexible |
|||
workflows, an object database, a dynamic GUI, customizable reports, and |
|||
an XML-RPC interface. Odoo is the most installed business software in |
|||
the world. It is used by 2,000,000 users worldwide, ranging from very
small companies (1 user) to very large ones (300,000 users).
@ -0,0 +1,20 @@ |
|||
FROM cloudron/base:0.12.0

# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="Samir Saidani <saidani@babel.coop>"

RUN mkdir -p /app/code /app/data
WORKDIR /app/code

COPY ./odoo10CE_install.sh /app/code/

# Runs the full Odoo 10 CE installation (deps, git clone, config, init script).
RUN /app/code/odoo10CE_install.sh

# Install wkhtmltopdf from the Odoo nightly repository.
# Key import, source list, update and install happen in ONE layer so the apt
# metadata never persists in an intermediate layer. Clearing the *contents* of
# /var/lib/apt/lists (instead of `rm -r` on the directory itself, as before)
# keeps apt usable for any derived image.
RUN wget -O - https://nightly.odoo.com/odoo.key | apt-key add - \
    && echo "deb http://nightly.odoo.com/10.0/nightly/deb/ ./" >> /etc/apt/sources.list.d/odoo.list \
    && apt-get update \
    && apt-get -y install wkhtmltopdf \
    && rm -rf /var/cache/apt /var/lib/apt/lists/*

# patch to accept a db name
COPY sql_db.py /app/code/odoo-server/odoo/sql_db.py

COPY start.sh /app/data/

CMD [ "/app/data/start.sh" ]
@ -0,0 +1,7 @@ |
|||
Use the following credentials for the initial setup:
|||
|
|||
`username`: admin |
|||
|
|||
`password`: admin |
|||
|
|||
**Please change the admin password and email on first login** |
After Width: 500 | Height: 500 | Size: 41 KiB |
@ -0,0 +1,272 @@ |
|||
#!/bin/bash
# modified for cloudron - Samir Saidani
################################################################################
# Script for installing Odoo V10 on Ubuntu 16.04, 15.04, 14.04 (could be used for other version too)
# Author: Yenthe Van Ginneken
#-------------------------------------------------------------------------------
# This script will install Odoo on your Ubuntu 14.04 server. It can install multiple Odoo instances
# in one Ubuntu because of the different xmlrpc_ports
#-------------------------------------------------------------------------------
# Make a new file:
# sudo nano odoo-install.sh
# Place this content in it and then make the file executable:
# sudo chmod +x odoo-install.sh
# Execute the script to install Odoo:
# ./odoo-install
################################################################################
# NOTE(review): deliberately NOT using `set -e`: several steps (apt-mark hold
# of packages that may be absent, re-running adduser on an image rebuild) are
# allowed to fail without aborting the Docker build.

## fixed parameters
# odoo
OE_USER="odoo"
OE_HOME="/app/code"
OE_HOME_EXT="${OE_HOME}/${OE_USER}-server"
# Set to "True" to install wkhtmltopdf from the pinned .deb below; the
# Dockerfile installs it from the Odoo nightly repo instead, hence "False".
INSTALL_WKHTMLTOPDF="False"
# The default Odoo port (you still have to use -c /app/data/odoo.conf to use this.)
OE_PORT="8069"
# Odoo version to install. IMPORTANT: this script contains extra libraries
# that are specifically needed for Odoo 10.0.
OE_VERSION="10.0"
# Set this to True if you want to install Odoo 10 Enterprise!
IS_ENTERPRISE="False"
# superadmin (database-management) password
OE_SUPERADMIN="admin"
OE_CONFIG="${OE_USER}"

## WKHTMLTOPDF download links
## === Ubuntu Trusty x64 & x32 === (for other distributions please replace
## these two links in order to have the correct version of wkhtmltox installed;
## for a danger note refer to
## https://www.odoo.com/documentation/8.0/setup/install.html#deb )
WKHTMLTOX_X64="https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.2.1/wkhtmltox-0.12.2.1_linux-jessie-amd64.deb"
WKHTMLTOX_X32="https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.2.1/wkhtmltox-0.12.2.1_linux-jessie-i386.deb"

#--------------------------------------------------
# Update Server
#--------------------------------------------------
echo -e "\n---- Update Server ----"
sudo apt-get update
sudo apt-mark hold postfix phpmyadmin
sudo apt-get upgrade -y

#--------------------------------------------------
# PostgreSQL server itself is provided by the Cloudron postgresql addon;
# only the role is created here (ignored if it already exists).
#--------------------------------------------------
echo -e "\n---- Creating the ODOO PostgreSQL User ----"
sudo su - postgres -c "createuser -s $OE_USER" 2> /dev/null || true

#--------------------------------------------------
# Install Dependencies
#--------------------------------------------------
echo -e "\n---- Install tool packages ----"
sudo apt-get install -y wget git python-pip gdebi-core libjpeg62-turbo

echo -e "\n---- Install python packages ----"
# (the stray mid-list '-y' of the original moved to the front)
sudo apt-get install -y python-dateutil python-feedparser python-ldap python-libxslt1 python-lxml python-mako python-openid python-psycopg2 python-pybabel python-pychart python-pydot python-pyparsing python-reportlab python-simplejson python-tz python-vatnumber python-vobject python-webdav python-werkzeug python-xlwt python-yaml python-zsi python-docutils python-psutil python-mock python-unittest2 python-jinja2 python-pypdf python-decorator python-requests python-passlib python-pil python-suds

echo -e "\n---- Upgrade pip ----"
pip install --upgrade pip

echo -e "\n---- Install python libraries ----"
sudo pip install gdata psycogreen ofxparse XlsxWriter

echo -e "\n--- Install other required packages"
sudo apt-get install -y node-clean-css
sudo apt-get install -y node-less
sudo apt-get install -y python-gevent

#--------------------------------------------------
# Install Wkhtmltopdf if needed
#--------------------------------------------------
if [ "$INSTALL_WKHTMLTOPDF" = "True" ]; then
  echo -e "\n---- Install wkhtml and place shortcuts on correct place for ODOO 10 ----"
  # pick up the correct one from the x64 & x32 versions:
  if [ "$(getconf LONG_BIT)" = "64" ]; then
      _url="$WKHTMLTOX_X64"
  else
      _url="$WKHTMLTOX_X32"
  fi
  sudo wget "$_url"
  sudo apt-get install -y gdebi-core
  # -n = non-interactive (the original used the misspelled '--n')
  sudo gdebi -n "$(basename "$_url")"
  sudo ln -s /usr/local/bin/wkhtmltopdf /usr/bin
  sudo ln -s /usr/local/bin/wkhtmltoimage /usr/bin
else
  echo "Wkhtmltopdf isn't installed due to the choice of the user!"
fi

echo -e "\n---- Create ODOO system user ----"
sudo adduser --system --quiet --shell=/bin/bash --home="$OE_HOME" --gecos 'ODOO' --group "$OE_USER"
# The user should also be added to the sudo'ers group.
sudo adduser "$OE_USER" sudo
sudo chown -R "$OE_USER:$OE_USER" /app/code
sudo chown -R "$OE_USER:$OE_USER" /app/data

echo -e "\n---- Create Log directory ----"
sudo mkdir -p "/var/log/$OE_USER"
sudo chown "$OE_USER:$OE_USER" "/var/log/$OE_USER"

#--------------------------------------------------
# Install ODOO
#--------------------------------------------------
echo -e "\n==== Installing ODOO Server ===="
sudo git clone --depth 1 --branch "$OE_VERSION" https://www.github.com/odoo/odoo "$OE_HOME_EXT/"

if [ "$IS_ENTERPRISE" = "True" ]; then
    # Odoo Enterprise install!
    echo -e "\n--- Create symlink for node"
    sudo ln -s /usr/bin/nodejs /usr/bin/node
    sudo su "$OE_USER" -c "mkdir $OE_HOME/enterprise"
    sudo su "$OE_USER" -c "mkdir $OE_HOME/enterprise/addons"

    echo -e "\n---- Adding Enterprise code under $OE_HOME/enterprise/addons ----"
    sudo git clone --depth 1 --branch 10.0 https://www.github.com/odoo/enterprise "$OE_HOME/enterprise/addons"

    echo -e "\n---- Installing Enterprise specific libraries ----"
    sudo apt-get install -y nodejs npm
    sudo npm install -g less
    sudo npm install -g less-plugin-clean-css
else
    echo -e "\n---- Create custom module directory ----"
    sudo su "$OE_USER" -c "mkdir $OE_HOME/extra-addons"
fi

echo -e "\n---- Setting permissions on home folder ----"
sudo chown -R "$OE_USER:$OE_USER" "$OE_HOME"

echo -e "* Create server config file"
# BUGFIX: the original appended '[options]', ran two sed substitutions on a
# file that only contained '[options]' (so they could not match), and then
# TRUNCATED the file again with '>' -- the db_user and admin_passwd settings
# never survived. Write every option exactly once, in order, instead.
sudo su root -c "echo '[options]' > /app/data/${OE_CONFIG}.conf"
sudo su root -c "echo 'admin_passwd = ${OE_SUPERADMIN}' >> /app/data/${OE_CONFIG}.conf"
sudo su root -c "echo 'db_user = ${OE_USER}' >> /app/data/${OE_CONFIG}.conf"
# NOTE(review): '$1' reproduces the original behaviour of suffixing the log
# file name with the script's first argument (empty when run from the
# Dockerfile) -- confirm this is intentional.
sudo su root -c "echo 'logfile = /var/log/$OE_USER/$OE_CONFIG$1.log' >> /app/data/${OE_CONFIG}.conf"
if [ "$IS_ENTERPRISE" = "True" ]; then
    sudo su root -c "echo 'addons_path=$OE_HOME/enterprise/addons,$OE_HOME_EXT/addons' >> /app/data/${OE_CONFIG}.conf"
else
    sudo su root -c "echo 'addons_path=$OE_HOME_EXT/addons,$OE_HOME/extra-addons' >> /app/data/${OE_CONFIG}.conf"
fi
sudo chown "$OE_USER:$OE_USER" "/app/data/${OE_CONFIG}.conf"
sudo chmod 640 "/app/data/${OE_CONFIG}.conf"

echo -e "* Create startup file"
# '>' instead of the original '>>' so a re-run does not duplicate the file
sudo su root -c "echo '#!/bin/sh' > $OE_HOME_EXT/start-odoo.sh"
sudo su root -c "echo 'sudo -u $OE_USER $OE_HOME_EXT/odoo-bin --config=/app/data/${OE_CONFIG}.conf' >> $OE_HOME_EXT/start-odoo.sh"
sudo chmod 755 "$OE_HOME_EXT/start-odoo.sh"

#--------------------------------------------------
# Adding ODOO as a daemon (initscript)
#--------------------------------------------------

echo -e "* Create init file"
# Unquoted EOF on purpose: $OE_* expand now, while \$-escaped variables are
# left for the generated init script to evaluate at service start/stop time.
cat <<EOF > ~/$OE_CONFIG
#!/bin/sh
### BEGIN INIT INFO
# Provides: $OE_CONFIG
# Required-Start: \$remote_fs \$syslog
# Required-Stop: \$remote_fs \$syslog
# Should-Start: \$network
# Should-Stop: \$network
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Enterprise Business Applications
# Description: ODOO Business Applications
### END INIT INFO
PATH=/bin:/sbin:/usr/bin
DAEMON=$OE_HOME_EXT/odoo-bin
NAME=$OE_CONFIG
DESC=$OE_CONFIG

# Specify the user name (Default: odoo).
USER=$OE_USER

# Specify an alternate config file (Default: /etc/openerp-server.conf).
CONFIGFILE="/app/data/${OE_CONFIG}.conf"

# pidfile
PIDFILE=/var/run/\${NAME}.pid

# Additional options that are passed to the Daemon.
DAEMON_OPTS="-c \$CONFIGFILE"
[ -x \$DAEMON ] || exit 0
[ -f \$CONFIGFILE ] || exit 0
checkpid() {
    [ -f \$PIDFILE ] || return 1
    pid=\`cat \$PIDFILE\`
    [ -d /proc/\$pid ] && return 0
    return 1
}

case "\${1}" in
start)
    echo -n "Starting \${DESC}: "
    start-stop-daemon --start --quiet --pidfile \$PIDFILE \
        --chuid \$USER --background --make-pidfile \
        --exec \$DAEMON -- \$DAEMON_OPTS
    echo "\${NAME}."
    ;;
stop)
    echo -n "Stopping \${DESC}: "
    start-stop-daemon --stop --quiet --pidfile \$PIDFILE \
        --oknodo
    echo "\${NAME}."
    ;;

restart|force-reload)
    echo -n "Restarting \${DESC}: "
    start-stop-daemon --stop --quiet --pidfile \$PIDFILE \
        --oknodo
    sleep 1
    start-stop-daemon --start --quiet --pidfile \$PIDFILE \
        --chuid \$USER --background --make-pidfile \
        --exec \$DAEMON -- \$DAEMON_OPTS
    echo "\${NAME}."
    ;;
*)
    N=/etc/init.d/\$NAME
    echo "Usage: \$NAME {start|stop|restart|force-reload}" >&2
    exit 1
    ;;

esac
exit 0
EOF

echo -e "* Security Init File"
sudo mv ~/"$OE_CONFIG" "/etc/init.d/$OE_CONFIG"
sudo chmod 755 "/etc/init.d/$OE_CONFIG"
sudo chown root: "/etc/init.d/$OE_CONFIG"

echo -e "* Change default xmlrpc port"
sudo su root -c "echo 'xmlrpc_port = $OE_PORT' >> /app/data/${OE_CONFIG}.conf"

echo -e "* Start ODOO on Startup"
sudo update-rc.d "$OE_CONFIG" defaults

echo -e "* Starting Odoo Service"
sudo su root -c "/etc/init.d/$OE_CONFIG start"
echo "-----------------------------------------------------------"
echo "Done! The Odoo server is up and running. Specifications:"
echo "Port: $OE_PORT"
echo "User service: $OE_USER"
echo "User PostgreSQL: $OE_USER"
# BUGFIX: the original printed \$OE_USER ("odoo") as the code location
echo "Code location: $OE_HOME_EXT"
echo "Addons folder: $OE_HOME_EXT/addons/"
echo "Start Odoo service: sudo service $OE_CONFIG start"
echo "Stop Odoo service: sudo service $OE_CONFIG stop"
echo "Restart Odoo service: sudo service $OE_CONFIG restart"
echo "-----------------------------------------------------------"

# extra python libraries for the MAIE project (module sale_order_import_csv)
sudo pip install PyPDF2 unicodecsv
@ -0,0 +1,693 @@ |
|||
# -*- coding: utf-8 -*- |
|||
# Part of Odoo. See LICENSE file for full copyright and licensing details. |
|||
|
|||
|
|||
""" |
|||
The PostgreSQL connector is a connectivity layer between the OpenERP code and |
|||
the database, *not* a database abstraction toolkit. Database abstraction is what |
|||
the ORM does, in fact. |
|||
""" |
|||
|
|||
from contextlib import contextmanager |
|||
from functools import wraps |
|||
import logging |
|||
import time |
|||
import urlparse |
|||
import uuid |
|||
|
|||
import psycopg2 |
|||
import psycopg2.extras |
|||
import psycopg2.extensions |
|||
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ |
|||
from psycopg2.pool import PoolError |
|||
|
|||
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) |
|||
|
|||
_logger = logging.getLogger(__name__) |
|||
|
|||
types_mapping = { |
|||
'date': (1082,), |
|||
'time': (1083,), |
|||
'datetime': (1114,), |
|||
} |
|||
|
|||
def unbuffer(symb, cr):
    """Typecast a raw psycopg2 value to ``str``; ``None`` passes through.

    Registered below as the converter for the date/time/datetime OIDs, so
    temporal values reach the ORM as plain strings.
    """
    return None if symb is None else str(symb)
|||
|
|||
def undecimalize(symb, cr):
    """Typecast a raw psycopg2 numeric value to ``float``; ``None`` passes through.

    Registered below as the converter for the float/numeric OIDs
    (700, 701, 1700).
    """
    return None if symb is None else float(symb)
|||
|
|||
for name, typeoid in types_mapping.items(): |
|||
psycopg2.extensions.register_type(psycopg2.extensions.new_type(typeoid, name, lambda x, cr: x)) |
|||
psycopg2.extensions.register_type(psycopg2.extensions.new_type((700, 701, 1700,), 'float', undecimalize)) |
|||
|
|||
|
|||
import tools |
|||
from tools.func import frame_codeinfo |
|||
from datetime import timedelta |
|||
import threading |
|||
from inspect import currentframe |
|||
|
|||
import re |
|||
re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$') |
|||
re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$') |
|||
|
|||
sql_counter = 0 |
|||
|
|||
class Cursor(object):
    """Represents an open transaction to the PostgreSQL DB backend,
    acting as a lightweight wrapper around psycopg2's
    ``cursor`` objects.

    ``Cursor`` is the object behind the ``cr`` variable used all
    over the OpenERP code.

    .. rubric:: Transaction Isolation

    One very important property of database transactions is the
    level of isolation between concurrent transactions.
    The SQL standard defines four levels of transaction isolation,
    ranging from the most strict *Serializable* level, to the least
    strict *Read Uncommitted* level. These levels are defined in
    terms of the phenomena that must not occur between concurrent
    transactions, such as *dirty read*, etc.
    In the context of a generic business data management software
    such as OpenERP, we need the best guarantees that no data
    corruption can ever be caused by simply running multiple
    transactions in parallel. Therefore, the preferred level would
    be the *serializable* level, which ensures that a set of
    transactions is guaranteed to produce the same effect as
    running them one at a time in some order.

    However, most database management systems implement a limited
    serializable isolation in the form of
    `snapshot isolation <http://en.wikipedia.org/wiki/Snapshot_isolation>`_,
    providing most of the same advantages as True Serializability,
    with a fraction of the performance cost.
    With PostgreSQL up to version 9.0, this snapshot isolation was
    the implementation of both the ``REPEATABLE READ`` and
    ``SERIALIZABLE`` levels of the SQL standard.
    As of PostgreSQL 9.1, the previous snapshot isolation implementation
    was kept for ``REPEATABLE READ``, while a new ``SERIALIZABLE``
    level was introduced, providing some additional heuristics to
    detect a concurrent update by parallel transactions, and forcing
    one of them to rollback.

    OpenERP implements its own level of locking protection
    for transactions that are highly likely to provoke concurrent
    updates, such as stock reservations or document sequences updates.
    Therefore we mostly care about the properties of snapshot isolation,
    but we don't really need additional heuristics to trigger transaction
    rollbacks, as we are taking care of triggering instant rollbacks
    ourselves when it matters (and we can save the additional performance
    hit of these heuristics).

    As a result of the above, we have selected ``REPEATABLE READ`` as
    the default transaction isolation level for OpenERP cursors, as
    it will be mapped to the desired ``snapshot isolation`` level for
    all supported PostgreSQL version (8.3 - 9.x).

    Note: up to psycopg2 v.2.4.2, psycopg2 itself remapped the repeatable
    read level to serializable before sending it to the database, so it would
    actually select the new serializable mode on PostgreSQL 9.1. Make
    sure you use psycopg2 v2.4.2 or newer if you use PostgreSQL 9.1 and
    the performance hit is a concern for you.

    .. attribute:: cache

        Cache dictionary with a "request" (-ish) lifecycle, only lives as
        long as the cursor itself does and proactively cleared when the
        cursor is closed.

        This cache should *only* be used to store repeatable reads as it
        ignores rollbacks and savepoints, it should not be used to store
        *any* data which may be modified during the life of the cursor.

    """
    IN_MAX = 1000 # decent limit on size of IN queries - guideline = Oracle limit

    # Decorator defined in the class body (not a method): rejects any call on
    # a cursor that has already been closed, with the closing location in the
    # message when sql logging captured it.
    def check(f):
        @wraps(f)
        def wrapper(self, *args, **kwargs):
            if self._closed:
                msg = 'Unable to use a closed cursor.'
                if self.__closer:
                    msg += ' It was closed at %s, line %s' % self.__closer
                raise psycopg2.OperationalError(msg)
            return f(self, *args, **kwargs)
        return wrapper

    def __init__(self, pool, dbname, dsn, serialized=True):
        # Per-table timing stats collected by execute() when sql_log is on.
        self.sql_from_log = {}
        self.sql_into_log = {}

        # default log level determined at cursor creation, could be
        # overridden later for debugging purposes
        self.sql_log = _logger.isEnabledFor(logging.DEBUG)

        self.sql_log_count = 0

        # avoid the call of close() (by __del__) if an exception
        # is raised by any of the following initialisations
        self._closed = True

        self.__pool = pool
        self.dbname = dbname
        # Whether to enable snapshot isolation level for this cursor.
        # see also the docstring of Cursor.
        self._serialized = serialized

        # Take a real psycopg2 connection from the pool and open the
        # underlying cursor on it.
        self._cnx = pool.borrow(dsn)
        self._obj = self._cnx.cursor()
        if self.sql_log:
            self.__caller = frame_codeinfo(currentframe(), 2)
        else:
            self.__caller = False
        self._closed = False # real initialisation value
        self.autocommit(False)
        self.__closer = False

        self._default_log_exceptions = True

        self.cache = {}

        # event handlers, see method after() below
        self._event_handlers = {'commit': [], 'rollback': []}

    # Build a {column_name: value} dict for one result row, using the
    # cursor's column description.
    def __build_dict(self, row):
        return {d.name: row[i] for i, d in enumerate(self._obj.description)}
    def dictfetchone(self):
        # Returns a single row as a dict, or a falsy value when exhausted.
        row = self._obj.fetchone()
        return row and self.__build_dict(row)
    def dictfetchmany(self, size):
        # Returns up to `size` rows as dicts (Python 2: map returns a list).
        return map(self.__build_dict, self._obj.fetchmany(size))
    def dictfetchall(self):
        # Returns all remaining rows as dicts.
        return map(self.__build_dict, self._obj.fetchall())

    def __del__(self):
        if not self._closed and not self._cnx.closed:
            # Oops. 'self' has not been closed explicitly.
            # The cursor will be deleted by the garbage collector,
            # but the database connection is not put back into the connection
            # pool, preventing some operation on the database like dropping it.
            # This can also lead to a server overload.
            msg = "Cursor not closed explicitly\n"
            if self.__caller:
                msg += "Cursor was created at %s:%s" % self.__caller
            else:
                msg += "Please enable sql debugging to trace the caller."
            _logger.warning(msg)
            self._close(True)

    @check
    def execute(self, query, params=None, log_exceptions=None):
        """Execute an SQL query, counting it and (when sql_log is enabled)
        recording per-table timing stats for print_log()."""
        if params and not isinstance(params, (tuple, list, dict)):
            # psycopg2's TypeError is not clear if you mess up the params
            raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,))

        if self.sql_log:
            now = time.time()
            _logger.debug("query: %s", query)

        try:
            params = params or None
            res = self._obj.execute(query, params)
        except Exception:
            # log_exceptions=None defers to the cursor-wide default
            if self._default_log_exceptions if log_exceptions is None else log_exceptions:
                _logger.info("bad query: %s", self._obj.query or query)
            raise

        # simple query count is always computed
        self.sql_log_count += 1

        # advanced stats only if sql_log is enabled
        if self.sql_log:
            delay = (time.time() - now) * 1E6

            res_from = re_from.match(query.lower())
            if res_from:
                self.sql_from_log.setdefault(res_from.group(1), [0, 0])
                self.sql_from_log[res_from.group(1)][0] += 1
                self.sql_from_log[res_from.group(1)][1] += delay
            res_into = re_into.match(query.lower())
            if res_into:
                self.sql_into_log.setdefault(res_into.group(1), [0, 0])
                self.sql_into_log[res_into.group(1)][0] += 1
                self.sql_into_log[res_into.group(1)][1] += delay
        return res

    def split_for_in_conditions(self, ids, size=None):
        """Split a list of identifiers into one or more smaller tuples
        safe for IN conditions, after uniquifying them."""
        return tools.misc.split_every(size or self.IN_MAX, ids)

    def print_log(self):
        """Dump the accumulated per-table SQL stats to the debug log, then
        reset the counters and disable further logging on this cursor."""
        global sql_counter

        if not self.sql_log:
            return
        def process(type):
            sqllogs = {'from': self.sql_from_log, 'into': self.sql_into_log}
            sum = 0
            if sqllogs[type]:
                sqllogitems = sqllogs[type].items()
                sqllogitems.sort(key=lambda k: k[1][1])
                _logger.debug("SQL LOG %s:", type)
                sqllogitems.sort(lambda x, y: cmp(x[1][0], y[1][0]))
                for r in sqllogitems:
                    delay = timedelta(microseconds=r[1][1])
                    _logger.debug("table: %s: %s/%s", r[0], delay, r[1][0])
                    sum += r[1][1]
                sqllogs[type].clear()
            sum = timedelta(microseconds=sum)
            _logger.debug("SUM %s:%s/%d [%d]", type, sum, self.sql_log_count, sql_counter)
            sqllogs[type].clear()
        process('from')
        process('into')
        self.sql_log_count = 0
        self.sql_log = False

    @check
    def close(self):
        return self._close(False)

    def _close(self, leak=False):
        # Tear down the cursor and either flag the connection as leaked
        # (leak=True, from __del__) or give it back to the pool.
        global sql_counter

        if not self._obj:
            return

        del self.cache

        if self.sql_log:
            self.__closer = frame_codeinfo(currentframe(), 3)

        # simple query count is always computed
        sql_counter += self.sql_log_count

        # advanced stats only if sql_log is enabled
        self.print_log()

        self._obj.close()

        # This force the cursor to be freed, and thus, available again. It is
        # important because otherwise we can overload the server very easily
        # because of a cursor shortage (because cursors are not garbage
        # collected as fast as they should). The problem is probably due in
        # part because browse records keep a reference to the cursor.
        del self._obj
        self._closed = True

        # Clean the underlying connection.
        self._cnx.rollback()

        if leak:
            self._cnx.leaked = True
        else:
            # Connections to template databases are not kept in the pool.
            chosen_template = tools.config['db_template']
            templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
            keep_in_pool = self.dbname not in templates_list
            self.__pool.give_back(self._cnx, keep_in_pool=keep_in_pool)

    @check
    def autocommit(self, on):
        if on:
            isolation_level = ISOLATION_LEVEL_AUTOCOMMIT
        else:
            # If a serializable cursor was requested, we
            # use the appropriate PostgreSQL isolation level
            # that maps to snapshot isolation.
            # For all supported PostgreSQL versions (8.3-9.x),
            # this is currently the ISOLATION_REPEATABLE_READ.
            # See also the docstring of this class.
            # NOTE: up to psycopg 2.4.2, repeatable read
            # is remapped to serializable before being
            # sent to the database, so it is in fact
            # unavailable for use with pg 9.1.
            isolation_level = \
                ISOLATION_LEVEL_REPEATABLE_READ \
                if self._serialized \
                else ISOLATION_LEVEL_READ_COMMITTED
        self._cnx.set_isolation_level(isolation_level)

    @check
    def after(self, event, func):
        """ Register an event handler.

            :param event: the event, either `'commit'` or `'rollback'`
            :param func: a callable object, called with no argument after the
                event occurs

            Be careful when coding an event handler, since any operation on the
            cursor that was just committed/rolled back will take place in the
            next transaction that has already begun, and may still be rolled
            back or committed independently. You may consider the use of a
            dedicated temporary cursor to do some database operation.
        """
        self._event_handlers[event].append(func)

    def _pop_event_handlers(self):
        # return the current handlers, and reset them on self
        result = self._event_handlers
        self._event_handlers = {'commit': [], 'rollback': []}
        return result

    @check
    def commit(self):
        """ Perform an SQL `COMMIT`, then run the registered 'commit'
        handlers (handlers for both events are reset afterwards).
        """
        result = self._cnx.commit()
        for func in self._pop_event_handlers()['commit']:
            func()
        return result

    @check
    def rollback(self):
        """ Perform an SQL `ROLLBACK`, then run the registered 'rollback'
        handlers (handlers for both events are reset afterwards).
        """
        result = self._cnx.rollback()
        for func in self._pop_event_handlers()['rollback']:
            func()
        return result

    def __enter__(self):
        """ Using the cursor as a contextmanager automatically commits and
            closes it::

                with cr:
                    cr.execute(...)

                # cr is committed if no failure occurred
                # cr is closed in any case
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Commit only on clean exit; close unconditionally.
        if exc_type is None:
            self.commit()
        self.close()

    @contextmanager
    @check
    def savepoint(self):
        """context manager entering in a new savepoint; releases it on clean
        exit, rolls back to it (and re-raises) on exception"""
        name = uuid.uuid1().hex
        self.execute('SAVEPOINT "%s"' % name)
        try:
            yield
        except Exception:
            self.execute('ROLLBACK TO SAVEPOINT "%s"' % name)
            raise
        else:
            self.execute('RELEASE SAVEPOINT "%s"' % name)

    @check
    def __getattr__(self, name):
        # Delegate everything else (fetchone, rowcount, ...) to the
        # underlying psycopg2 cursor.
        return getattr(self._obj, name)

    @property
    def closed(self):
        return self._closed
|||
|
|||
class TestCursor(Cursor):
    """ A cursor to be used for tests. It keeps the transaction open across
        several requests, and simulates committing, rolling back, and closing.
    """
    def __init__(self, *args, **kwargs):
        super(TestCursor, self).__init__(*args, **kwargs)
        # in order to simulate commit and rollback, the cursor maintains a
        # savepoint at its last commit
        self.execute("SAVEPOINT test_cursor")
        # we use a lock to serialize concurrent requests
        self._lock = threading.RLock()

    def acquire(self):
        self._lock.acquire()

    def release(self):
        self._lock.release()

    def force_close(self):
        # Really close the cursor (bypassing the simulated close() below).
        super(TestCursor, self).close()

    def close(self):
        # Simulated close: undo uncommitted work and let the next request in,
        # but keep the transaction (and the connection) open.
        if not self._closed:
            self.rollback() # for stuff that has not been committed
        self.release()

    def autocommit(self, on):
        _logger.debug("TestCursor.autocommit(%r) does nothing", on)

    def commit(self):
        # Simulated commit: move the savepoint forward instead of committing.
        self.execute("RELEASE SAVEPOINT test_cursor")
        self.execute("SAVEPOINT test_cursor")

    def rollback(self):
        # Simulated rollback: return to (and re-establish) the savepoint.
        self.execute("ROLLBACK TO SAVEPOINT test_cursor")
        self.execute("SAVEPOINT test_cursor")
|||
|
|||
class LazyCursor(object):
    """ A proxy object to a cursor. The cursor itself is allocated only if it is
        needed. This class is useful for cached methods, that use the cursor
        only in the case of a cache miss.
    """
    def __init__(self, dbname=None):
        self._dbname = dbname
        self._cursor = None       # allocated lazily by __getattr__
        self._depth = 0           # how many times __enter__ ran before allocation

    @property
    def dbname(self):
        # Fall back to the database name attached to the current thread.
        return self._dbname or threading.currentThread().dbname

    def __getattr__(self, name):
        # First real attribute access allocates the cursor, then replays the
        # pending context-manager entries so its state matches the proxy's.
        cr = self._cursor
        if cr is None:
            from odoo import registry
            cr = self._cursor = registry(self.dbname).cursor()
            for _ in xrange(self._depth):
                cr.__enter__()
        return getattr(cr, name)

    def __enter__(self):
        self._depth += 1
        if self._cursor is not None:
            self._cursor.__enter__()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._depth -= 1
        if self._cursor is not None:
            self._cursor.__exit__(exc_type, exc_value, traceback)
|||
|
|||
class PsycoConnection(psycopg2.extensions.connection):
    # Plain subclass used as psycopg2's connection_factory (see
    # ConnectionPool.borrow), so pooled connections can be told apart
    # from — and extended independently of — stock psycopg2 connections.
    pass
|||
class ConnectionPool(object):
    """ The pool of connections to database(s)

        Keep a set of connections to pg databases open, and reuse them
        to open cursors for all transactions.

        The connections are *not* automatically closed. Only a close_db()
        can trigger that.
    """

    def locked(fun):
        # decorator: serialize every pool operation on self._lock
        @wraps(fun)
        def _locked(self, *args, **kwargs):
            self._lock.acquire()
            try:
                return fun(self, *args, **kwargs)
            finally:
                self._lock.release()
        return _locked

    def __init__(self, maxconn=64):
        """ :param int maxconn: hard cap on open connections (clamped to >= 1) """
        # list of (connection, in_use) pairs
        self._connections = []
        self._maxconn = max(maxconn, 1)
        self._lock = threading.Lock()

    def __repr__(self):
        used = len([1 for c, u in self._connections[:] if u])
        count = len(self._connections)
        return "ConnectionPool(used=%d/count=%d/max=%d)" % (used, count, self._maxconn)

    def _debug(self, msg, *args):
        _logger.debug(('%r ' + msg), self, *args)

    @locked
    def borrow(self, connection_info):
        """ Return a live connection for `connection_info`, reusing an idle
        pooled one when possible, otherwise opening a new one.

        :param dict connection_info: dict of psql connection keywords
        :rtype: PsycoConnection
        :raises PoolError: when the pool is full of in-use connections
        """
        # free dead and leaked connections
        for i, (cnx, _) in tools.reverse_enumerate(self._connections):
            if cnx.closed:
                self._connections.pop(i)
                self._debug('Removing closed connection at index %d: %r', i, cnx.dsn)
                continue
            if getattr(cnx, 'leaked', False):
                delattr(cnx, 'leaked')
                self._connections.pop(i)
                self._connections.append((cnx, False))
                _logger.info('%r: Free leaked connection to %r', self, cnx.dsn)

        # reuse an idle connection opened with the same keywords
        for i, (cnx, used) in enumerate(self._connections):
            if not used and cnx._original_dsn == connection_info:
                try:
                    cnx.reset()
                except psycopg2.OperationalError:
                    self._debug('Cannot reset connection at index %d: %r', i, cnx.dsn)
                    # psycopg2 2.4.4 and earlier do not allow closing a closed connection
                    if not cnx.closed:
                        cnx.close()
                    continue
                self._connections.pop(i)
                self._connections.append((cnx, True))
                self._debug('Borrow existing connection to %r at index %d', cnx.dsn, i)

                return cnx

        if len(self._connections) >= self._maxconn:
            # try to remove the oldest connection not used
            for i, (cnx, used) in enumerate(self._connections):
                if not used:
                    self._connections.pop(i)
                    if not cnx.closed:
                        cnx.close()
                    self._debug('Removing old connection at index %d: %r', i, cnx.dsn)
                    break
            else:
                # note: this code is called only if the for loop has completed (no break)
                raise PoolError('The Connection Pool Is Full')

        try:
            result = psycopg2.connect(
                connection_factory=PsycoConnection,
                **connection_info)
        except psycopg2.Error:
            _logger.info('Connection to the database failed')
            raise
        # remember the keywords so this connection can be matched on reuse
        result._original_dsn = connection_info
        self._connections.append((result, True))
        self._debug('Create new connection')
        return result

    @locked
    def give_back(self, connection, keep_in_pool=True):
        """ Return a borrowed connection; close it when keep_in_pool is False.

        :raises PoolError: if `connection` was not borrowed from this pool
        """
        self._debug('Give back connection to %r', connection.dsn)
        for i, (cnx, used) in enumerate(self._connections):
            if cnx is connection:
                self._connections.pop(i)
                if keep_in_pool:
                    self._connections.append((cnx, False))
                    self._debug('Put connection to %r in pool', cnx.dsn)
                else:
                    self._debug('Forgot connection to %r', cnx.dsn)
                    cnx.close()
                break
        else:
            # fixed typo: message previously read "does not below to the pool"
            raise PoolError('This connection does not belong to the pool')

    @locked
    def close_all(self, dsn=None):
        """ Close (and drop) every pooled connection, or only those matching
        `dsn` when given. """
        count = 0
        last = None
        for i, (cnx, used) in tools.reverse_enumerate(self._connections):
            if dsn is None or cnx._original_dsn == dsn:
                cnx.close()
                last = self._connections.pop(i)[0]
                count += 1
        _logger.info('%r: Closed %d connections %s', self, count,
                     (dsn and last and 'to %r' % last.dsn) or '')
|||
|
|||
class Connection(object):
    """ A lightweight instance of a connection to postgres: it only records
        the pool, database name and connection keywords, and hands out
        cursors on demand.
    """
    def __init__(self, pool, dbname, dsn):
        self.dbname = dbname
        self.dsn = dsn
        self.__pool = pool

    def cursor(self, serialized=True):
        flavour = 'serialized ' if serialized else ''
        _logger.debug('create %scursor to %r', flavour, self.dsn)
        return Cursor(self.__pool, self.dbname, self.dsn, serialized=serialized)

    def test_cursor(self, serialized=True):
        flavour = 'serialized ' if serialized else ''
        _logger.debug('create test %scursor to %r', flavour, self.dsn)
        return TestCursor(self.__pool, self.dbname, self.dsn, serialized=serialized)

    # serialized_cursor is deprecated - cursors are serialized by default
    serialized_cursor = cursor

    def __nonzero__(self):
        """Check if connection is possible"""
        try:
            _logger.info("__nonzero__() is deprecated. (It is too expensive to test a connection.)")
            cr = self.cursor()
            cr.close()
            return True
        except Exception:
            return False
|||
def connection_info_for(db_or_uri):
    """ parse the given `db_or_uri` and return a 2-tuple (dbname, connection_params)

    Connection params are either a dictionary with a single key ``dsn``
    containing a connection URI, or a dictionary containing connection
    parameter keywords which psycopg2 can build a key/value connection string
    (dsn) from

    :param str db_or_uri: database name or postgres dsn
    :rtype: (str, dict)
    """
    if db_or_uri.startswith(('postgresql://', 'postgres://')):
        # extract db from uri: prefer the path component, then the user,
        # then the host
        us = urlparse.urlsplit(db_or_uri)
        if len(us.path) > 1:
            db_name = us.path[1:]
        elif us.username:
            db_name = us.username
        else:
            db_name = us.hostname
        return db_name, {'dsn': db_or_uri}

    connection_info = {'database': db_or_uri}
    # copy the configured db_* options into psycopg2 keywords.
    # NOTE(review): upstream Odoo 10 copies only host/port/user/password;
    # 'dbname' was added here so a configured `db_dbname` (written by the
    # Cloudron start script) overrides the database psycopg2 connects to —
    # confirm this local patch is intentional before "fixing" it
    for p in ('host', 'port', 'user', 'password', 'dbname'):
        cfg = tools.config['db_' + p]
        if cfg:
            connection_info[p] = cfg

    return db_or_uri, connection_info
|||
_Pool = None  # process-wide ConnectionPool singleton, created lazily by db_connect()
|||
def db_connect(to, allow_uri=False):
    """ Return a Connection to the database designated by `to` (a database
        name, or a postgres URI when `allow_uri` is true). Creates the shared
        connection pool on first use, sized from the `db_maxconn` config.
    """
    global _Pool
    if _Pool is None:
        # very first connection: build the process-wide pool
        _Pool = ConnectionPool(int(tools.config['db_maxconn']))

    db, info = connection_info_for(to)
    if db != to and not allow_uri:
        # `to` parsed as a URI but URIs were not allowed
        raise ValueError('URI connections not allowed')
    return Connection(_Pool, db, info)
|||
def close_db(db_name):
    """ You might want to call odoo.modules.registry.Registry.delete(db_name) along this function."""
    global _Pool
    if not _Pool:
        return
    # close every pooled connection whose keywords match this database
    _, info = connection_info_for(db_name)
    _Pool.close_all(info)
|||
def close_all():
    """ Close every pooled connection, regardless of database. """
    global _Pool
    if _Pool is None:
        return
    _Pool.close_all()
@ -0,0 +1,36 @@ |
|||
#!/bin/bash

# Cloudron start script for Odoo 10 CE: patches odoo.conf from the
# Cloudron-provided addon environment, fixes ownership, then execs odoo.

set -eu

echo "=> Ensure directories"
mkdir -p /app/data/addons /app/data/data

#if [[ ! -f "/app/data/odoo.conf" ]]; then
#    echo "=> First run, create config file"
#    cp /etc/odoo-server.conf /app/data/odoo.conf
#fi

echo "=> Patch config file"
# https://github.com/odoo/docker/blob/master/10.0/odoo.conf
# crudini creates the file when it does not exist yet. Every expansion is
# double-quoted so credentials containing spaces or glob characters are
# passed to crudini intact instead of being word-split.
readonly conf=/app/data/odoo.conf
crudini --set "$conf" options addons_path "/app/data/addons,/app/code/odoo-server/addons,/app/code/extra-addons"
crudini --set "$conf" options data_dir "/app/data/data"
crudini --set "$conf" options db_host "${CLOUDRON_POSTGRESQL_HOST}"
crudini --set "$conf" options db_port "${CLOUDRON_POSTGRESQL_PORT}"
crudini --set "$conf" options db_user "${CLOUDRON_POSTGRESQL_USERNAME}"
crudini --set "$conf" options db_password "${CLOUDRON_POSTGRESQL_PASSWORD}"
# db_dbname (not upstream's db_name) is read by the patched sql_db.py shipped
# with this app — keep the key in sync with connection_info_for()
crudini --set "$conf" options db_dbname "${CLOUDRON_POSTGRESQL_DATABASE}"
crudini --set "$conf" options smtp_password "${CLOUDRON_MAIL_SMTP_PASSWORD}"
crudini --set "$conf" options smtp_port "${CLOUDRON_MAIL_SMTP_PORT}"
crudini --set "$conf" options smtp_server "${CLOUDRON_MAIL_SMTP_SERVER}"
crudini --set "$conf" options smtp_user "${CLOUDRON_MAIL_SMTP_USERNAME}"
crudini --set "$conf" options smtp_ssl False
crudini --set "$conf" options email_from "${CLOUDRON_MAIL_FROM}"
crudini --set "$conf" options list_db False
# NOTE(review): this writes the literal string "WITHOUT_DEMO" as the value;
# if the intent was to disable demo data it should likely be "all" (or an
# expanded variable) — confirm before changing
crudini --set "$conf" options without_demo WITHOUT_DEMO

echo "=> Ensure data ownership"
chown -R odoo:odoo /app/data/

echo "=> Starting odoo"
exec /usr/local/bin/gosu odoo:odoo /app/code/odoo-server/odoo-bin --config="$conf"
|||
|
Write
Preview
Loading…
Cancel
Save
Reference in new issue