# coding=utf-8
#
# Copyright © Cloud Linux GmbH & Cloud Linux Software, Inc 2010-2019 All Rights Reserved
#
# Licensed under CLOUD LINUX LICENSE AGREEMENT
# http://cloudlinux.com/docs/LICENSE.TXT


from __future__ import absolute_import
import argparse
import datetime
import sys

from lvestats.lib import uidconverter
from lvestats.lib.commons.argparse_utils import SmartFormatter, ParseDatetime, format_aliases, BY_USAGE_PERCENT, \
    alias_action, argparse_process_period
from lvestats.lib.commons.argparse_utils import (
    period_type, period_type2, check_percentages, check_non_negative_int, check_positive_int,
    check_from_zero_to_max_int)
from lvestats.lib.jsonhandler import prepare_attention_json
from lvestats.lib.lveinfolib import get_lve_version, get_supported_columns

__all__ = ['lveinfo_parser', 'dbgov_parser', 'time_unit_type']

DEF_BLANK_VALUE = '-'

# aliases descriptions for help
BY_USAGE_DSCR = (
    # alias1              alias2         alias3
    #                                    aggregate     descriptions for command line
    #                                    function
    #                                    key name
    ('ALIAS 1',           'ALIAS 2',     'ALIAS 3',    'DESCRIPTION'),

    ('cpu_avg',           'cpu',         'aCPU',       'average CPU usage'),
    ('cpu_max',           'cpu_max',     'mCPU',       'max CPU usage'),
    ('vmem_avg',          'vmem',        'aVMem',      'average virtual memory usage'),
    ('vmem_max',          'vmem_max',    'mVMem',      'max virtual memory usage'),
    ('mep_avg',           'ep',          'aEP',        'average number of entry processes (concurrent connections)'),
    ('mep_max',           'mep',         'mEP',        'max number of entry processes (concurrent connections)'),

    ('pmem_avg',          'pmem',        'aPMem',      'average physical memory usage'),
    ('pmem_max',          'pmem_max',    'mPMem',      'max physical memory usage'),
    ('nproc_avg',         'nproc',       'aNproc',     'average number of processes'),
    ('nproc_max',         'nproc_max',   'mNproc',     'max number of processes'),
    ('io_avg',            'io',          'aIO',        'average io usage'),
    ('io_max',            'io_max',      'mIO',        'max io usage'),

    ('iops_avg',          'iops',        'aIOPS',      'average io operations; (LVE version >= 8)'),
    ('iops_max',          'iops_max',    'mIOPS',      'max io operations; (LVE version >= 8)'),)

BY_FAULT_DSCR = (
    # alias1              alias2      alias3
    #                                 aggregate  descriptions for command line
    #                                 function
    #                                 key name
    ('ALIAS 1',           'ALIAS 2',  'ALIAS 3',   'DESCRIPTION'),
    ('mcpu',              'cpu',      'CPUf',    'total number of max CPU usage faults'),
    ('mem',               'vmem',     'VMemF',   'total number of out of virtual memory faults'),
    ('mep',               'ep',       'EPf',     'total number of max entry processes faults'),

    ('pmem',              'pmem',     'PMemF',   'total number of out of physical memory faults'),
    ('nproc',             'nproc',    'NprocF',  'total number of max processes faults'),
    ('io',                'io',       'IOf',     'total number of max io faults'),

    ('iops',              'iops',     'IOPSf',   'total number of max io operations faults; (LVE version >= 8)'),

    ('any_faults',        'any',      'anyF',    'total number of faults of all types'),)

ORDER_BY_DSCR = (
    # alias1               alias2      descriptions for command line
    #                      aggregate
    #                      function
    #                      key name
    ('ALIAS 1',            'ALIAS 2',  'DESCRIPTION'),

    ('cpu_avg',            'aCPU',     'average CPU usage'),
    ('cpu_max',            'mCPU',     'max CPU usage'),
    ('total_cpu_faults',   'CPUf',     'total number of max CPU usage faults'),
    ('vmem_avg',           'aVMem',    'average virtual memory usage'),
    ('vmem_max',           'mVMem',    'max virtual memory usage'),
    ('total_vmem_faults',  'VMemF',    'total number of out of virtual memory faults'),
    ('mep_avg',            'aEP',      'average number of entry processes (concurrent connections)'),
    ('mep_max',            'mEP',      'max number of entry processes (concurrent connections)'),
    ('total_ep_faults',    'EPf',      'total number of max entry processes faults'),

    ('pmem_avg',           'aPMem',    'average physical memory usage'),
    ('pmem_max',           'mPMem',    'max physical memory usage'),
    ('nproc_avg',          'aNproc',   'average number of processes'),
    ('nproc_max',          'mNproc',   'max number of processes'),
    ('io_avg',             'aIO',      'average io usage'),
    ('io_max',             'mIO',      'max io usage'),
    ('total_pmem_faults',  'PMemF',    'total number of out of physical memory faults'),
    ('total_nproc_faults', 'NprocF',   'total number of max processes faults'),
    ('total_io_faults',    'IOf',      'total number of max io faults'),

    ('iops_avg',           'aIOPS',    'average io operations; (LVE version >= 8)'),
    ('iops_max',           'mIOPS',    'max io operations; (LVE version >= 8)'),
    ('total_iops_faults',  'IOPSf',    'total number of max io operations faults; (LVE version >= 8)'),

    ('any_faults',         'anyF',     'total number of faults of all types'),)

# output table column description
COLUMNS_DSCR = (
    ('COLUMN_NAME', 'DESCRIPTION'),

    ('From',    'Show start period statistics'),
    ('To',      'Show end period statistics'),
    ('ID',      'LVE Id or username'),
    ('aCPU',    'Average CPU usage'),
    ('uCPU',    'The percentage of user-allocated resource CPU'),  # new in v2
    ('mCPU',    'deprecated'),  # old in v1
    ('lCPU',    'CPU Limit'),
    ('CPUf',    'Out Of CPU usage Faults'),  # new in v2
    ('aEP',     'Average Entry Processes'),
    ('uEP',     'The percentage of user-allocated resource Entry processes'),  # new in v2
    ('mEP',     'deprecated'),  # old in v1
    ('lEP',     'maxEntryProc limit'),
    ('aVMem',   'Average Virtual Memory Usage'),
    ('uVMem',   'The percentage of user-allocated resource Virtual Memory'),  # new in v2
    ('mVMem',   'deprecated'),  # old in v1
    ('lVMem',   'Virtual Memory Limit'),
    ('VMemF',   'Out Of Memory Faults'),
    ('EPf',     'Entry processes faults'),

    ('aPMem',   'Average Physical Memory Usage'),
    ('uPMem',   'The percentage of user-allocated resource Physical Memory'),  # new in v2
    ('mPMem',   'deprecated'),  # old in v1
    ('lPMem',   'Physical Memory Limit'),
    ('aNproc',  'Average Number of processes'),
    ('uNproc',  'The percentage of user-allocated resource Number of processes'),  # new in v2
    ('mNproc',  'deprecated'),  # old in v1
    ('lNproc',  'Limit of Number of processes'),
    ('PMemF',   'Out Of Physical Memory Faults'),
    ('NprocF',  'Number of processes faults'),
    ('aIO',     'Average I/O'),
    ('uIO',     'The percentage of user-allocated resource I/O'),  # new in v2
    ('mIO',     'deprecated'),  # old in v1
    ('lIO',     'I/O Limit'),
    ('IOf',     'Out Of I/O usage Faults'),  # new in v2

    ('aIOPS',   'Average I/O Operations; (LVE version >= 8)'),
    ('mIOPS',   'deprecated; (LVE version >= 8)'),  # old in v1
    ('uIOPS',   'The percentage of user-allocated resource I/O Operations; (LVE version >= 8)'),  # new in v2
    ('lIOPS',   'I/O Operations Limit; (LVE version >= 8)'),
    ('IOPSf',   'Out Of I/O Operations Faults; (LVE version >= 8)'),)  # new in v2
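# Note (descriptive, based on usage below): the first element of each COLUMNS_DSCR row, header
# excluded, is the column name accepted by --show-columns; lveinfo_parser builds its
# all_columns_names list from these via [_[0] for _ in COLUMNS_DSCR[1:]].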

# start configure argparse


class ArgParse(argparse.ArgumentParser):

    # adds extra attributes: dbengine, lve_version, changed_options, output_error_handler
    def __init__(self, *args, **kwargs):
        self.dbengine = kwargs.pop('dbengine') if 'dbengine' in kwargs else None
        self.lve_version = None
        self.changed_options = None  # holds the options changed on the command line
        self.output_error_handler = None
        argparse.ArgumentParser.__init__(self, *args, **kwargs)

    def was_not_changed(self, dest):
        # guard against a mistyped option name; intended as a developer aid
        actions_dest = [action.dest for action in self._actions]
        if dest not in actions_dest:
            raise ValueError("no such item {}; you can use only '{}'".format(dest, "', '".join(actions_dest)))

        if self.changed_options is None:
            raise AttributeError('The "was_not_changed" method can be used only after "parse_args" has been called')
        return dest not in self.changed_options
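    # Illustrative usage (a sketch, mirroring the checks in the parse_args overrides below):
    #   if self.was_not_changed('limit') and namespace.json:
    #       namespace.limit = 0  # relax the row limit when -l/--limit was left at its default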

    def parse_args(self, args=None, namespace=None):
        # set up JSON-formatted error output when -j/--json is requested
        if args is None:
            args = sys.argv[1:]
        if '-j' in args or '--json' in args:
            self.output_error_handler = prepare_attention_json

        namespace = argparse.ArgumentParser.parse_args(self, args=args, namespace=namespace)

        # the 'changed_options' attribute holds the options changed on the command line
        self.changed_options = tuple(vars(self._parse_known_args(args, argparse.Namespace())[0]))

        argparse_process_period(namespace)

        # Convert user to user_id
        if namespace.user:
            local_server_id = self.get_default('server_id')
            namespace.id = uidconverter.username_to_uid(
                    namespace.user, local_server_id, namespace.server_id, self.dbengine) or -1

        # set the output row limit; no limit when output is in JSON/CSV format
        if self.was_not_changed('limit') and (namespace.csv or namespace.json):
            namespace.limit = 0
        return namespace

    def error(self, message):
        if hasattr(self.output_error_handler, '__call__'):
            error_str = self.output_error_handler(message)
            self.exit(status=0, message=error_str)
        else:
            argparse.ArgumentParser.error(self, message)


class ArgParseLVEInfo(ArgParse):
    def parse_args(self, args=None, namespace=None):
        namespace = ArgParse.parse_args(self, args=args, namespace=namespace)

        if namespace.reseller_name:
            namespace.id = uidconverter._convert_name_to_lvp_id(namespace.reseller_name)
        if namespace.reseller_id:
            namespace.id = uidconverter._convert_id_to_lvp_id(namespace.reseller_id)
        if not self.was_not_changed('time_unit') and (not namespace.id and not namespace.user):
            self.error("--id or --user option should be specified with --time_unit")

        # obtain lve_version from the database if the lve_version attribute is not set
        if self.lve_version is None and self.dbengine is not None:
            self.lve_version = get_lve_version(dbengine=self.dbengine, server_id=namespace.server_id)

        # Prepare output columns
        if (('all' in namespace.show_columns) or namespace.show_all) and namespace.compat == 'v2':
            namespace.show_columns = ['From', 'To', 'ID'] + get_supported_columns(lve_version=None)
            namespace.show_all = True
        # fill in the default columns only when show_columns was not changed (or 'all' was requested)
        elif self.was_not_changed('show_columns') or 'all' in namespace.show_columns:
            show_columns_ = get_supported_columns(lve_version=self.lve_version, mode=namespace.compat)
            if namespace.id:
                namespace.show_columns = ['From', 'To'] + show_columns_
            else:
                namespace.show_columns = ['ID'] + show_columns_

        # setup time_unit
        if self.was_not_changed('time_unit') and not namespace.id:
            # TODO: does this need conversion to UTC?
            dt = namespace.to - getattr(namespace, 'from')
            namespace.time_unit = dt.total_seconds()

        # time-unit in dynamic mode to support compat == 'v1' or style == 'user'
        if self.was_not_changed('time_unit') and namespace.id and namespace.csv \
                and (namespace.style == 'user' or namespace.compat == 'v1'):
            namespace.time_unit = -1

        # set the output row limit; no limit when a user/id is specified
        if self.was_not_changed('limit') and namespace.id is not None:
            namespace.limit = 0

        return namespace


class ArgParseDBGOVInfo(ArgParse):
    def parse_args(self, args=None, namespace=None):
        namespace = ArgParse.parse_args(self, args, namespace)
        # hack to parse as list '--arg arg1,arg2,arg3' or '--arg arg1 arg2 arg3'
        if isinstance(namespace.format, list) and isinstance(namespace.format[0], list):
            namespace.format = namespace.format[0]

        if namespace.format and 'all' in namespace.format or namespace.show_all:
            namespace.format = getattr(self, 'all_columns_names')

        if namespace.id and 'TS' in namespace.format:
            ts_index = namespace.format.index('TS')
            namespace.format = namespace.format[:ts_index] + ['FROM', 'TO'] + namespace.format[ts_index + 1:]

        if not self.was_not_changed('time_unit') and (not namespace.id):
            self.error("--id or --user option should be specified with --time_unit")

        return namespace


def dbgov_parser(config, _datetime_now=None, dbengine=None):
    """
    Parser for dbgov statistics (used as a subparser)
    :param config: parsed config
    :param _datetime_now: current time override; defaults to datetime.datetime.now()
    :param dbengine: database engine
    :return: configured ArgParseDBGOVInfo parser
    """
    dbgov_order_by_dscr = (('ALIAS', 'DESCRIPTION'),
                           ('con',   'average connections; deprecated'),
                           ('cpu',   'average CPU usage'),
                           ('read',  'average READ usage'),
                           ('write', 'average WRITE usage'),
                           ('io',    'average READ+WRITE usage'))  # TODO: document extra param
    # output table column description
    dbgov_columns_dscr = (('COLUMN_NAME', 'ALIAS', 'DESCRIPTION'),
                          ('ts',          'TS',       'timestamp records'),
                          ('username',    'USER',     'user name'),
                          ('id',          'ID',       'user id'),
                          ('cpu',         'CPU',      'average CPU usage'),
                          ('read',        'READ',     'average READ usage'),
                          ('write',       'WRITE',    'average WRITE usage'),
                          ('con',         'CON',      'average connections; deprecated'),
                          ('lcpu',        'lCPU',     'CPU limit'),
                          ('lread',       'lREAD',    'READ limit'),
                          ('lwrite',      'lWRITE',   'WRITE limit'),
                          ('',            'RESTRICT', 'C - CPU restrict, R - read restrict, W - write restrict'))
    dbgov_by_usage_dscr = (('COLUMN_NAME', 'ALIAS', 'DESCRIPTION'),
                           ('cpu', 'CPU', 'average CPU usage'),
                           ('io',  'IO',   'average IO usage'))
    datetime_now = _datetime_now or datetime.datetime.now()
    parser = ArgParseDBGOVInfo(formatter_class=SmartFormatter,
                               dbengine=dbengine,
                               description="%(prog)s - Utility to display historical information about MySQL usage")
    setattr(parser, 'all_columns_names',
            ['TS', 'USER', 'CPU', 'READ', 'WRITE', 'CON', 'lCPU', 'lREAD', 'lWRITE', 'RESTRICT'])
    # filtering output data
    group_period = parser.add_argument_group()
    group_period.add_argument(
        '-f', '--from', action=ParseDatetime,
        default=datetime_now - datetime.timedelta(minutes=10),
        nargs='+',
        help='run report from date and time in [YY]YY-MM-DD[ HH:MM] format; if not present, the last 10 minutes are assumed')
    group_period.add_argument(
        '-t', '--to',  action=ParseDatetime,
        default=datetime_now,
        nargs='+',
        help='run report up to date and time in [YY]YY-MM-DD[ HH:MM] format; if not present, reports results up to now')
    group_period.add_argument(
        '--period', type=lambda value: period_type2(value, datetime_now),
        help='time period; specify minutes with m, hours with h, days with d, or the values today and '
        'yesterday; e.g. 5m - last 5 minutes, 4h - last 4 hours, 2d - last 2 days')

    group_user = parser.add_mutually_exclusive_group()
    group_user.add_argument(
        '-u', '--user', type=str,
        help='System user name')
    group_user.add_argument(
        '--id', type=check_non_negative_int,
        help='User id')

    parser.add_argument(
        '-l', '--limit', type=check_from_zero_to_max_int,
        help='max number of results to display, if 0 no limit')
    parser.add_argument(
        '--by-usage', nargs='+', action=alias_action(dbgov_by_usage_dscr),
        help="R|show LVEs with usage (averaged) within %d percent of the limit; available values:\n%s" %
             (BY_USAGE_PERCENT, format_aliases(dbgov_by_usage_dscr)),
        metavar='ALIAS')
    parser.add_argument(
        '-p', '--percentage', type=check_percentages, default=BY_USAGE_PERCENT,
        help='defines percentage for --by-usage option; default %(default)s', metavar='0..100')
    parser.add_argument(
        '-o', '--order-by', action=alias_action(dbgov_order_by_dscr),
        help='R|orders results by one of the following:\n%s' % format_aliases(dbgov_order_by_dscr),
        metavar='ALIAS')
    group_fields = parser.add_mutually_exclusive_group()
    group_fields.add_argument(
        '-b', '--format', nargs='+', action=alias_action(dbgov_columns_dscr),
        default=getattr(parser, 'all_columns_names'),
        help="R|show only specific fields into output:\n%s" % format_aliases(dbgov_columns_dscr),
        metavar='ALIAS')
    group_fields.add_argument(
        '--show-all', action='store_true', default=False,
        help='full output (show all limits); brief output is default')
    parser.add_argument(
        '--server_id', '--server-id', type=str, default=config.get('server_id', 'localhost'),
        help='used with central database for multiple servers, default "%(default)s"')
    parser.add_argument(
        '--time-unit', type=time_unit_type, default=int(config.get('aggregation_period', 60)),
        help='time step for grouping statistics in minutes; 1 min. by default; m|h|d suffixes can be used, '
             'for example: 1h, 1h30m or 1d12h; '
             'dyn[amic] can be used for v1 mode')

    group_output_format = parser.add_mutually_exclusive_group()
    group_output_format.add_argument(
        '-c', '--csv', nargs='?', type=argparse.FileType(mode='w'), const='-',  # default save data to screen (print)
        help='save statistics in CSV format; "%(const)s" by default (output to screen)', metavar='PATH')
    group_output_format.add_argument(
        '-j', '--json', action='store_true', default=False,
        help='display output in JSON format')
    return parser
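# Illustrative use of dbgov_parser (a sketch; a minimal dict stands in for the parsed config):
#   parser = dbgov_parser(config={'server_id': 'localhost', 'aggregation_period': 60})
#   opts = parser.parse_args(['--period', '1h', '--json'])
#   # --json selects JSON output and, since -l/--limit was not given, the row limit is reset to 0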


def time_unit_type(value):
    """
    argparse type for parsing --time-unit values, with additional support for dyn[amic]
    """
    if value in ('dyn', 'dynamic'):
        return -1
    else:
        return period_type(value)
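# Illustrative values (a sketch; exact numeric results come from period_type):
#   time_unit_type('dyn')   -> -1 (dynamic mode)
#   time_unit_type('1h30m') -> the equivalent duration as parsed by period_type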


def lveinfo_parser(config, name='lveinfo', _datetime_now=None, ver=None, dbengine=None):
    """
    Build the command line parser for lveinfo
    :param config: parsed config
    :param name: program name shown in help (default 'lveinfo')
    :param _datetime_now: current time override; defaults to datetime.datetime.now()
    :param ver: version string reported by -v/--version
    :param dbengine: database engine
    :return: configured ArgParseLVEInfo parser
    """
    datetime_now = _datetime_now or datetime.datetime.now()
    default_col = ['ID', 'From', 'To'] + get_supported_columns(mode=config.get('mode', 'v1'))

    cl_parser = ArgParseLVEInfo(
        formatter_class=SmartFormatter,
        description="%(prog)s - Utility to display historical information about LVE usage",
        prog=name,
        dbengine=dbengine,
        epilog="Prefixes Kb, Mb and Gb indicate powers of 1024.")
    # prepare all supported columns for table
    all_columns_names = [_[0] for _ in COLUMNS_DSCR[1:]]
    cl_parser.all_columns_names = all_columns_names  # for using inside parse_args method

    cl_parser.add_argument('-v', '--version', action='version', version=ver)
    cl_parser.add_argument('--dbgov', help='show MySQL governor statistics')
    # filtering output data
    group_period = cl_parser.add_argument_group()
    group_period.add_argument(
        '-f', '--from', action=ParseDatetime,
        default=datetime_now - datetime.timedelta(minutes=10),
        nargs='+',
        help='run report from date and time in [YY]YY-MM-DD[ HH:MM] format; if not present, the last 10 minutes are assumed',
        metavar='YYYY-MM-DD[ HH:MM]')
    group_period.add_argument(
        '-t', '--to', action=ParseDatetime, default=datetime_now,
        nargs='+',
        help='run report up to date and time in [YY]YY-MM-DD[ HH:MM] format; if not present, reports results up to now',
        metavar='YYYY-MM-DD[ HH:MM]')
    group_period.add_argument(
        '--period', type=lambda value: period_type2(value, datetime_now),
        help='time period; specify minutes with m, hours with h, days with d, or the values today and '
        'yesterday; e.g. 5m - last 5 minutes, 4h - last 4 hours, 2d - last 2 days')

    group_user = cl_parser.add_mutually_exclusive_group()
    group_user.add_argument(
        '-u', '--user', type=str,
        help='Use username instead of LVE id, and show only records for that user')
    group_user.add_argument(
        '--id', type=check_non_negative_int,
        help='display records only for that LVE id')
    group_user.add_argument(
        '--reseller-name', type=str,
        help='Use reseller name instead of LVP id, and show only records for that reseller')
    group_user.add_argument(
        '--reseller-id', type=check_non_negative_int,
        help='display records only for that LVP id')

    cl_parser.add_argument(
        '-d', '--display-username', action='store_true', default=False,
        help='try to convert LVE id into username when possible')

    cl_parser.add_argument(
        '-o', '--order-by', action=alias_action(ORDER_BY_DSCR),
        help="R|orders results by one of the following:\n%s" % format_aliases(ORDER_BY_DSCR), metavar='ALIAS')
    cl_parser.add_argument(
        '-b', '--by-usage', nargs='+', action=alias_action(BY_USAGE_DSCR),
        help="R|show LVEs with usage (averaged) within %d percent of the limit; available values:\n%s" %
             (BY_USAGE_PERCENT, format_aliases(BY_USAGE_DSCR)),
        metavar='ALIAS')
    cl_parser.add_argument(
        '-p', '--percentage', type=check_percentages, default=BY_USAGE_PERCENT,
        help='defines percentage for --by-usage option; default %(default)s', metavar='0..100')
    group_by_faul = cl_parser.add_argument_group()
    group_by_faul.add_argument(
        '--by-fault', nargs='+', action=alias_action(BY_FAULT_DSCR),
        help="R|show LVEs which failed on max processes limit or memory limit:\n%s" % format_aliases(BY_FAULT_DSCR),
        metavar='ALIAS')
    group_by_faul.add_argument(
        '-r', '--threshold', type=check_positive_int, default=1,
        help='in combination with --by-fault, shows only LVEs with a number of faults above this threshold; default %(default)s',
        metavar='FAULTS')
    cl_parser.add_argument('--style', choices=['user', 'admin'], default='admin', help='deprecated, not used.')

    cl_parser.add_argument(
        '-l', '--limit', type=check_from_zero_to_max_int, default=10,
        help='max number of results to display, if 0 no limit; default %(default)s')

    group_output_format = cl_parser.add_mutually_exclusive_group()
    group_output_format.add_argument(
        '-c', '--csv', nargs='?', type=argparse.FileType(mode='w'), const='-',  # default save data to screen (print)
        help='save statistics in CSV format; "%(const)s" by default (output to screen)', metavar='PATH')
    group_output_format.add_argument(
        '-j', '--json', action='store_true', default=False,
        help='display output in JSON format')
    cl_parser.add_argument(
        '--server_id', '--server-id', type=str, default=config.get('server_id', 'localhost'),
        help='used with central database for multiple servers, default "%(default)s"')
    cl_parser.add_argument(
        '--servers-info', action='store_true', default=False,
        help="Show servers' LVE versions")
    group_show_columns = cl_parser.add_mutually_exclusive_group()
    group_show_columns.add_argument(
        '--show-all', action='store_true', default=False,
        help='full output (show all limits); brief output is default; equivalent to --show-columns all')
    group_show_columns.add_argument(
        '--show-columns', nargs='+', action=alias_action(COLUMNS_DSCR + (('all', ''),)),
        default=tuple(default_col),  # a tuple marks the unchanged default; show_columns becomes a list when set on the command line
        help='R|show only the listed columns; "all" for all supported columns\n%s' % format_aliases(COLUMNS_DSCR),
        metavar='COLUMN_NAME')
    # FIXME: the default needs to be set up in one place
    cl_parser.add_argument(
        '--time-unit', type=time_unit_type, default=int(config.get('aggregation_period', 60)),
        help='time step for grouping statistics in minutes; 1 min. by default; m|h|d suffixes can be used, '
             'for example: 1h, 1h30m or 1d12h; '
             'dyn[amic] can be used for v1 mode')
    cl_parser.add_argument(
        '-m', '--compat', type=str, choices=('v1', 'v2'), default=config.get('mode', 'v1'),
        help='v1 - old output mode; v2 - new output mode; default %(default)s; the default can be changed in the config')
    cl_parser.add_argument(
        '--blank-value', nargs='?', const=DEF_BLANK_VALUE,
        help='Use to fill unsupported limits; default "%(const)s"')
    return cl_parser
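# Illustrative use of lveinfo_parser (a sketch; a minimal dict stands in for the parsed config):
#   parser = lveinfo_parser(config={'server_id': 'localhost', 'mode': 'v1'}, ver='x.y')
#   opts = parser.parse_args(['--period', '1d', '--user', 'someuser', '--json'])
#   # --user is resolved to an LVE id, and ArgParseLVEInfo.parse_args adjusts the default
#   # column set according to --compat and --show-columns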
