
publish muk_attachment_lobject - 11.0

pull/25/head
MuK IT GmbH, 6 years ago
commit cfe1b59aa9
1 changed file: muk_attachment_lobject/models/ir_attachment.py (68 changed lines)

@@ -23,6 +23,7 @@ import base64
import logging
import mimetypes
import odoo
from odoo import api, models, _
from odoo.exceptions import AccessError
@@ -37,15 +38,71 @@ class LObjectIrAttachment(models.Model):
    store_lobject = LargeObject(
        string="Data")

    def _force_storage_prepare_chunks(self):
        """ Technical method to select attachments that need to be migrated.

            The attachments are automatically split into chunks to speed up
            the migration.

            :return list: list of chunks, where each chunk is a list of
                attachment ids, e.g. [[1, 2, 3], [40, 42, 12, 33], ...]
        """
        CHUNK_SIZE = 100
        attachments = self.search(
            ['|', ['res_field', '=', False], ['res_field', '!=', False]])
        storage = self._storage()
        chunks = []
        current_chunk = []
        for attach in attachments:
            # Detect the storage type of the attachment
            if attach.db_datas:
                current = 'db'
            elif attach.store_lobject:
                current = 'lobject'
            elif attach.store_fname:
                current = 'file'
            else:
                current = None
            if storage != current:
                # This attachment needs migration, so add it to the result
                current_chunk += [attach.id]
            if len(current_chunk) >= CHUNK_SIZE:
                chunks += [current_chunk]
                current_chunk = []
        if current_chunk:
            chunks += [current_chunk]
        return chunks
    @api.model
    def force_storage(self):
        if not self.env.user._is_admin():
            raise AccessError(_('Only administrators can execute this action.'))
        # Do the migration in chunks to make it faster.
        chunks_to_migrate = self._force_storage_prepare_chunks()
        for chunk_index, chunk in enumerate(chunks_to_migrate):
            # Process each chunk in a new transaction.
            # When all attachments in the chunk have been processed, commit.
            # In case of any error, roll back the whole chunk.
            with api.Environment.manage():
                with odoo.registry(self.env.cr.dbname).cursor() as new_cr:
                    new_env = api.Environment(new_cr, self.env.uid,
                                              self.env.context.copy())
                    attachments = new_env['ir.attachment'].browse(chunk)
                    try:
                        for index, attach in enumerate(attachments):
                            _logger.info(
                                "Migrate Attachment %s of %s [chunk %s of %s]",
                                index, len(attachments),
                                chunk_index, len(chunks_to_migrate))
                            attach.write({'datas': attach.datas})
                    except Exception:
                        _logger.error(
                            "Cannot migrate attachments.", exc_info=True)
                        new_cr.rollback()
                        raise
                    else:
                        new_cr.commit()
        return True
    @api.depends('store_fname', 'db_datas', 'store_lobject')
    def _compute_datas(self):
@@ -79,6 +136,11 @@ class LObjectIrAttachment(models.Model):
                    self._file_delete(fname)
            else:
                super(LObjectIrAttachment, attach)._inverse_datas()
                # It is required to set 'store_lobject' to False, because it
                # is used in muk_dms_attachment to detect the storage type of
                # an attachment; otherwise it would be impossible to detect
                # attachments that need the migration 'LObject -> File' or
                # 'LObject -> something else'.
                attach.write({'store_lobject': False})

    def _compute_mimetype(self, values):
        mimetype = super(LObjectIrAttachment, self)._compute_mimetype(values)
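
A minimal usage sketch of the chunked migration, assuming an Odoo 11.0 shell session (odoo shell -d <database>) on a database with muk_attachment_lobject installed; `env` is the environment provided by the shell, and the local names `Attachment` and `chunks` are illustrative only, not part of the module:

    # Hedged example: run the chunked attachment migration from an Odoo shell.
    Attachment = env['ir.attachment'].sudo()

    # Optional preview: how many chunks (of up to 100 attachments each)
    # would be migrated to the currently configured storage backend.
    chunks = Attachment._force_storage_prepare_chunks()
    print("%d chunks to migrate" % len(chunks))

    # Each chunk is written through its own cursor and committed separately,
    # so an interrupted run keeps the chunks that already succeeded.
    Attachment.force_storage()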
