# -*- coding: utf-8 -*-
##############################################################################
#
#    Author: Alexandre Fayolle
#    Copyright 2014 Camptocamp SA
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Monitor an OpenERP instance.

The measures are stored in the database and removed by cleanup crons
(two different crons, one for database and one for process monitoring).

* database monitoring:
  a cron captures the data and timestamps each measure
* process monitoring:
  TODO: log process start / end, cron log, RPC request log
"""
from __future__ import absolute_import

import logging
import gc
from operator import itemgetter
import types
import os
import threading
import datetime
import resource

import psutil

from openerp.osv import orm, fields, osv
from openerp import pooler
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT

_logger = logging.getLogger(__name__)

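# Types skipped by _class_count when walking the objects tracked by the
# garbage collector: built-in containers, descriptors, functions, classes
# and modules.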
BLACKLIST = (
    type, tuple, dict, list, set, frozenset,
    property,
    classmethod,
    staticmethod,
    types.FunctionType,
    types.ClassType,
    types.ModuleType, types.FunctionType, types.MethodType,
    types.MemberDescriptorType, types.GetSetDescriptorType,
)


class ClassInstanceCount(orm.Model):
    _name = 'server.monitor.class.instance.count'
    _columns = {
        'name': fields.text('Class name', readonly=True),
        'count': fields.bigint('Instance count', readonly=True),
        'measure_id': fields.many2one('server.monitor.process',
                                      'Measure',
                                      readonly=True,
                                      ondelete='cascade'),
    }


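# Wrap osv.object_proxy.execute_cr so that every RPC call going through the
# object proxy records an 'rpc call' measure (with class counting and
# gc.collect() disabled, hence the two False arguments below).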
def _monkey_patch_object_proxy_execute():
    orig_execute_cr = osv.object_proxy.execute_cr

    def execute_cr(self, cr, uid, obj, method, *args, **kw):
        result = orig_execute_cr(self, cr, uid, obj, method, *args, **kw)
        monitor_obj = pooler.get_pool(cr.dbname)['server.monitor.process']
        context = {}
        monitor_obj.log_measure(cr, uid, obj, method, 'rpc call',
                                False, False, context)
        return result

    osv.object_proxy.execute_cr = execute_cr


class ServerMonitorProcess(orm.Model):
    def __init__(self, pool, cr):
        super(ServerMonitorProcess, self).__init__(pool, cr)
        _monkey_patch_object_proxy_execute()

    _name = 'server.monitor.process'
    _columns = {
        'name': fields.datetime('Timestamp', readonly=True),
        'pid': fields.integer('Process ID', readonly=True,
                              group_operator='count'),
        'thread': fields.text('Thread ID', readonly=True),
        'cpu_time': fields.float(
            'CPU time', readonly=True,
            group_operator='max',
            help='CPU time consumed by the current server process'),
        'memory': fields.float(
            'Memory', readonly=True,
            group_operator='max',
            help='Memory consumed by the current server process'),
        'uid': fields.many2one('res.users', 'User',
                               readonly=True,
                               select=True),
        'model': fields.many2one('ir.model', 'Model',
                                 readonly=True,
                                 select=True),
        'method': fields.text('Method', readonly=True),
        'status': fields.text('RPC status', readonly=True),
        'sessionid': fields.text('Session ID', readonly=True),
        'info': fields.text('Information'),
        'class_count_ids': fields.one2many(
            'server.monitor.class.instance.count',
            'measure_id',
            'Class counts',
            readonly=True),
    }
    _order = 'name DESC'

    def _default_pid(self, cr, uid, context):
        return os.getpid()

    def _default_cpu_time(self, cr, uid, context):
        r = resource.getrusage(resource.RUSAGE_SELF)
        cpu_time = r.ru_utime + r.ru_stime
        return cpu_time

    def _default_memory(self, cr, uid, context):
        try:
            rss, vms = psutil.Process(os.getpid()).get_memory_info()
        except AttributeError:
            # happens on travis
            vms = 0
        return vms

    def _default_uid(self, cr, uid, context):
        return uid

    def _default_thread(self, cr, uid, context):
        return threading.current_thread().name

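    # Build the value for 'class_count_ids': walk every object tracked by
    # the garbage collector, count live instances per class (skipping the
    # BLACKLIST types and classes with fewer than 2 instances) and return
    # one2many creation commands (0, 0, {'name': ..., 'count': ...}).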
    def _class_count(self, cr, uid, context):
        counts = {}
        if context.get('_x_no_class_count'):
            return []
        if not context.get('_x_no_gc_collect'):
            # the flag is negative: collect garbage first unless the caller
            # asked to skip it, so dead objects do not inflate the counts
            gc.collect()
            gc.collect()
        for obj in gc.get_objects():
            if isinstance(obj, BLACKLIST):
                continue
            try:
                cls = obj.__class__
            except Exception:
                if isinstance(obj, types.ClassType):
                    cls = types.ClassType
                else:
                    _logger.warning('unknown object type for %r (%s)',
                                    obj, type(obj))
                    continue
            name = '%s.%s' % (cls.__module__, cls.__name__)
            try:
                counts[name] += 1
            except KeyError:
                counts[name] = 1
        info = []
        for name, count in sorted(counts.items(),
                                  key=itemgetter(1),
                                  reverse=True):
            if count < 2:
                break
            info.append({'name': name, 'count': count})
        return [(0, 0, val) for val in info]

    _defaults = {
        'name': fields.datetime.now,
        'class_count_ids': _class_count,
        'pid': _default_pid,
        'cpu_time': _default_cpu_time,
        'memory': _default_memory,
        'uid': _default_uid,
        'thread': _default_thread,
    }

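    # Record one measure. `with_class_count` and `gc_collect` are forwarded
    # to _class_count through context keys; the record is created as
    # SUPERUSER_ID, presumably so that measures can be logged whoever
    # triggered the call.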
    def log_measure(self, cr, uid,
                    model_name, method_name, info,
                    with_class_count=True,
                    gc_collect=True,
                    context=None):
        if context is None:
            context = {}
        ctx = context.copy()
        ctx.update({
            '_x_no_class_count': not with_class_count,
            '_x_no_gc_collect': not gc_collect,
        })
        field_names = self._defaults.keys()
        defaults = self.default_get(cr, uid, field_names, context=ctx)
        model_obj = self.pool['ir.model']
        # look up the ir.model record by its technical name (e.g. 'res.partner')
        model_id = model_obj.search(cr, uid,
                                    [('model', '=', model_name)],
                                    context=context)
        if model_id:
            model_id = model_id[0]
        else:
            model_id = 0
        values = {'model': model_id,
                  'method': method_name,
                  'info': info,
                  }
        defaults.update(values)
        return self.create(cr, SUPERUSER_ID, defaults, context=context)

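    # Remove measures older than `age` days; meant to be called from the
    # cleanup cron mentioned in the module docstring.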
    def cleanup(self, cr, uid, age, context=None):
        now = datetime.datetime.now()
        delta = datetime.timedelta(days=age)
        when = (now - delta).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        ids = self.search(cr, uid,
                          [('name', '<', when)],
                          context=context)
        _logger.debug('Process monitor cleanup: removing %d records', len(ids))
        self.unlink(cr, uid, ids, context=context)
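
# A minimal sketch of triggering the cleanup by hand, assuming an OpenERP 7
# environment where a cursor `cr` is available (e.g. in a server-side script);
# in normal use the cleanup crons described in the module docstring drive this:
#
#     pool = pooler.get_pool(cr.dbname)
#     pool['server.monitor.process'].cleanup(cr, SUPERUSER_ID, 7)  # keep 7 days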