###################################################################################
#
# MuK Document Management System
#
# Copyright (C) 2018 MuK IT GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
import base64
import logging
import mimetypes

import odoo
from odoo import api, models, _
from odoo.exceptions import AccessError

from odoo.addons.muk_fields_lobject.fields.lobject import LargeObject

_logger = logging.getLogger(__name__)


class LObjectIrAttachment(models.Model):

    _inherit = 'ir.attachment'

    store_lobject = LargeObject(
        string="Data")
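
    # Note (added comment, not part of the original source): the LargeObject
    # field from muk_fields_lobject is assumed to persist the attachment
    # payload as a PostgreSQL large object rather than as an in-table binary
    # column, so 'lobject' acts as a third storage backend next to 'db'
    # (db_datas) and 'file' (store_fname), as detected below.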
    def _force_storage_prepare_chunks(self):
        """ Technical method to select attachments that need to be migrated.

        The attachments are automatically split into chunks to speed up
        the migration.

        :return list: list of chunks, where each chunk is a list of
            attachment ids, e.g. [[1, 2, 3], [40, 42, 12, 33], ...]
        """
        CHUNK_SIZE = 100
        attachments = self.search(['|', ['res_field', '=', False], ['res_field', '!=', False]])
        storage = self._storage()
        chunks = []
        current_chunk = []
        for attach in attachments:
            # Detect the storage type of the attachment
            if attach.db_datas:
                current = 'db'
            elif attach.store_lobject:
                current = 'lobject'
            elif attach.store_fname:
                current = 'file'
            else:
                current = None
            if storage != current:
                # The attachment is not stored in the configured location,
                # so it needs migration; add it to the current chunk.
                current_chunk += [attach.id]
                if len(current_chunk) >= CHUNK_SIZE:
                    chunks += [current_chunk]
                    current_chunk = []
        if current_chunk:
            chunks += [current_chunk]
        return chunks
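
    # Design note (added comment): chunking bounds the work done per
    # transaction in force_storage() below, so a failure late in the run
    # only rolls back the current chunk of at most CHUNK_SIZE attachments
    # instead of the whole migration.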
    @api.model
    def force_storage(self):
        if not self.env.user._is_admin():
            raise AccessError(_('Only administrators can execute this action.'))
        # Do the migration in chunks to make it faster.
        chunks_to_migrate = self._force_storage_prepare_chunks()
        for chunk_index, chunk in enumerate(chunks_to_migrate):
            # Process each chunk in its own transaction: commit once all
            # attachments in the chunk have been processed, roll back on
            # any error.
            with api.Environment.manage():
                with odoo.registry(self.env.cr.dbname).cursor() as new_cr:
                    new_env = api.Environment(new_cr, self.env.uid,
                                              self.env.context.copy())
                    attachments = new_env['ir.attachment'].browse(chunk)
                    try:
                        for index, attach in enumerate(attachments):
                            _logger.info(
                                "Migrate Attachment %s of %s [chunk %s of %s]",
                                index, len(attachments),
                                chunk_index, len(chunks_to_migrate))
                            attach.write({'datas': attach.datas})
                    except Exception:
                        _logger.error(
                            "Cannot migrate attachments.", exc_info=True)
                        new_cr.rollback()
                        raise
                    else:
                        new_cr.commit()
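
    # Usage sketch (added comment, an assumption rather than part of the
    # module): migrating existing attachments into large objects would
    # typically be done by pointing the standard 'ir_attachment.location'
    # parameter at the 'lobject' backend and then calling force_storage()
    # as an administrator, e.g. from an Odoo shell:
    #
    #   env['ir.config_parameter'].sudo().set_param('ir_attachment.location', 'lobject')
    #   env['ir.attachment'].force_storage()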
    @api.depends('store_fname', 'db_datas', 'store_lobject')
    def _compute_datas(self):
        bin_size = self._context.get('bin_size')
        for attach in self:
            if attach.store_lobject:
                if bin_size:
                    attach.datas = attach.store_lobject
                else:
                    attach.datas = attach.with_context({'base64': True}).store_lobject
            else:
                super(LObjectIrAttachment, attach)._compute_datas()

    def _inverse_datas(self):
        location = self._storage()
        for attach in self:
            if location == 'lobject':
                value = attach.datas
                bin_data = base64.b64decode(value) if value else b''
                vals = {
                    'file_size': len(bin_data),
                    'checksum': self._compute_checksum(bin_data),
                    'index_content': self._index(bin_data, attach.datas_fname, attach.mimetype),
                    'store_fname': False,
                    'db_datas': False,
                    'store_lobject': bin_data,
                }
                fname = attach.store_fname
                super(LObjectIrAttachment, attach.sudo()).write(vals)
                if fname:
                    self._file_delete(fname)
            else:
                super(LObjectIrAttachment, attach)._inverse_datas()
                # 'store_lobject' has to be reset explicitly, because it is
                # used in muk_dms_attachment to detect the storage type of an
                # attachment; without this, attachments that need a migration
                # 'LObject -> File' or 'LObject -> something else' could not
                # be detected.
                attach.write({'store_lobject': False})
    def _compute_mimetype(self, values):
        mimetype = super(LObjectIrAttachment, self)._compute_mimetype(values)
        if not mimetype or mimetype == 'application/octet-stream':
            # Fall back to the mimetype already stored on the record, or
            # guess it from the file name if there is none.
            mimetype = None
            for attach in self:
                if attach.mimetype:
                    mimetype = attach.mimetype
                if not mimetype and attach.datas_fname:
                    mimetype = mimetypes.guess_type(attach.datas_fname)[0]
        return mimetype or 'application/octet-stream'
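
# Example (added comment, illustrative): if the inherited computation only
# yields the generic 'application/octet-stream' but the attachment has
# datas_fname = 'report.pdf', mimetypes.guess_type('report.pdf')[0] returns
# 'application/pdf', which is then used instead of the generic fallback.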