You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

244 lines
8.0 KiB

  1. # -*- coding: utf-8 -*-
  2. ##############################################################################
  3. #
  4. # Author: Alexandre Fayolle
  5. # Copyright 2014 Camptocamp SA
  6. #
  7. # This program is free software: you can redistribute it and/or modify
  8. # it under the terms of the GNU Affero General Public License as
  9. # published by the Free Software Foundation, either version 3 of the
  10. # License, or (at your option) any later version.
  11. #
  12. # This program is distributed in the hope that it will be useful,
  13. # but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. # GNU Affero General Public License for more details.
  16. #
  17. # You should have received a copy of the GNU Affero General Public License
  18. # along with this program. If not, see <http://www.gnu.org/licenses/>.
  19. #
  20. ##############################################################################
  21. """
  22. Monitor openerp instance.
  23. The measures are stored in database.
  24. cleanup cron (2 different for db and process monitoring)
  25. * database monitoring:
  26. cron for capturing data
  27. add timestamp
  28. * process monitoring
  29. TODO: log process start / end
  30. cron log
  31. RPC request log
  32. """
  33. import logging
  34. import gc
  35. from operator import itemgetter
  36. import types
  37. import os
  38. import threading
  39. import datetime
  40. # ugly hack to avoid a WARNING message when importing stdlib resource module
  41. _logger = logging.getLogger('openerp.modules.module')
  42. _saved_log_level = _logger.getEffectiveLevel()
  43. _logger.setLevel(logging.ERROR)
  44. import resource
  45. _logger.setLevel(_saved_log_level)
  46. import psutil
  47. from openerp.osv import orm, fields, osv
  48. from openerp import pooler
  49. from openerp import SUPERUSER_ID
  50. from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
  51. _logger = logging.getLogger(__name__)
  52. BLACKLIST = (
  53. type, tuple, dict, list, set, frozenset,
  54. property,
  55. classmethod,
  56. staticmethod,
  57. types.FunctionType,
  58. types.ClassType,
  59. types.ModuleType, types.FunctionType, types.MethodType,
  60. types.MemberDescriptorType, types.GetSetDescriptorType,
  61. )
  62. class ClassInstanceCount(orm.Model):
  63. _name = 'server.monitor.class.instance.count'
  64. _columns = {
  65. 'name': fields.text('Class name', readonly=True),
  66. 'count': fields.bigint('Instance count', readonly=True),
  67. 'measure_id': fields.many2one('server.monitor.process',
  68. 'Measure',
  69. readonly=True,
  70. ondelete='cascade'),
  71. }
  72. def _monkey_patch_object_proxy_execute():
  73. orig_execute_cr = osv.object_proxy.execute_cr
  74. def execute_cr(self, cr, uid, obj, method, *args, **kw):
  75. result = orig_execute_cr(self, cr, uid, obj, method, *args, **kw)
  76. monitor_obj = pooler.get_pool(cr.dbname)['server.monitor.process']
  77. context = {}
  78. monitor_obj.log_measure(cr, uid, obj, method, 'rpc call',
  79. False, False, context)
  80. return result
  81. osv.object_proxy.execute_cr = execute_cr
  82. class ServerMonitorProcess(orm.Model):
  83. def __init__(self, pool, cr):
  84. super(ServerMonitorProcess, self).__init__(pool, cr)
  85. _monkey_patch_object_proxy_execute()
  86. _name = 'server.monitor.process'
  87. _columns = {
  88. 'name': fields.datetime('Timestamp', readonly=True),
  89. 'pid': fields.integer('Process ID', readonly=True,
  90. group_operator='count'),
  91. 'thread': fields.text('Thread ID', readonly=True),
  92. 'cpu_time': fields.float(
  93. 'CPU time', readonly=True,
  94. group_operator='max',
  95. help='CPU time consumed by the current server process'),
  96. 'memory': fields.float(
  97. 'Memory', readonly=True,
  98. group_operator='max',
  99. help='Memory consumed by the current server process'),
  100. 'uid': fields.many2one('res.users', 'User',
  101. readonly=True,
  102. select=True),
  103. 'model': fields.many2one('ir.model', 'Model',
  104. readonly=True,
  105. select=True),
  106. 'method': fields.text('Method', readonly=True),
  107. 'status': fields.text('RPC status', readonly=True),
  108. 'sessionid': fields.text('Session ID', readonly=True),
  109. 'info': fields.text('Information'),
  110. 'class_count_ids': fields.one2many(
  111. 'server.monitor.class.instance.count',
  112. 'measure_id',
  113. 'Class counts',
  114. readonly=True),
  115. }
  116. _order = 'name DESC'
  117. def _default_pid(self, cr, uid, context):
  118. return os.getpid()
  119. def _default_cpu_time(self, cr, uid, context):
  120. r = resource.getrusage(resource.RUSAGE_SELF)
  121. cpu_time = r.ru_utime + r.ru_stime
  122. return cpu_time
  123. def _default_memory(self, cr, uid, context):
  124. try:
  125. rss, vms = psutil.Process(os.getpid()).get_memory_info()
  126. except AttributeError:
  127. # happens on travis
  128. vms = 0
  129. return vms
  130. def _default_uid(self, cr, uid, context):
  131. return uid
  132. def _default_thread(self, cr, uid, context):
  133. return threading.current_thread().name
  134. def _class_count(self, cr, uid, context):
  135. counts = {}
  136. if context.get('_x_no_class_count'):
  137. return []
  138. if context.get('_x_no_gc_collect'):
  139. gc.collect()
  140. gc.collect()
  141. for obj in gc.get_objects():
  142. if isinstance(obj, BLACKLIST):
  143. continue
  144. try:
  145. cls = obj.__class__
  146. except:
  147. if isinstance(obj, types.ClassType):
  148. cls = types.ClassType
  149. else:
  150. _logger.warning('unknown object type for %r (%s)',
  151. obj, type(obj))
  152. continue
  153. name = '%s.%s' % (cls.__module__, cls.__name__)
  154. try:
  155. counts[name] += 1
  156. except KeyError:
  157. counts[name] = 1
  158. info = []
  159. for name, count in sorted(counts.items(),
  160. key=itemgetter(1),
  161. reverse=True):
  162. if count < 2:
  163. break
  164. info.append({'name': name, 'count': count})
  165. return [(0, 0, val) for val in info]
  166. _defaults = {
  167. 'name': fields.datetime.now,
  168. 'class_count_ids': _class_count,
  169. 'pid': _default_pid,
  170. 'cpu_time': _default_cpu_time,
  171. 'memory': _default_memory,
  172. 'uid': _default_uid,
  173. 'thread': _default_thread,
  174. }
  175. def log_measure(self, cr, uid,
  176. model_name, method_name, info,
  177. with_class_count=True,
  178. gc_collect=True,
  179. context=None):
  180. if context is None:
  181. context = {}
  182. ctx = context.copy()
  183. ctx.update({
  184. '_x_no_class_count': not with_class_count,
  185. '_x_no_gc_collect': not gc_collect,
  186. })
  187. fields = self._defaults.keys()
  188. defaults = self.default_get(cr, uid, fields, context=ctx)
  189. model_obj = self.pool['ir.model']
  190. model_id = model_obj.search(cr, uid,
  191. [('name', '=', model_name)],
  192. context=context)
  193. if model_id:
  194. model_id = model_id[0]
  195. else:
  196. model_id = 0
  197. values = {'model': model_id,
  198. 'method': method_name,
  199. 'info': info,
  200. }
  201. defaults.update(values)
  202. id = self.create(cr, SUPERUSER_ID, defaults, context=context)
  203. return id
  204. def cleanup(self, cr, uid, age, context=None):
  205. now = datetime.datetime.now()
  206. delta = datetime.timedelta(days=age)
  207. when = (now - delta).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
  208. ids = self.search(cr, uid,
  209. [('name', '<', when)],
  210. context=context)
  211. _logger.debug('Process monitor cleanup: removing %d records', len(ids))
  212. self.unlink(cr, uid, ids, context=context)