From 532df87e74a38eda07fe5ed5f1fcd58acc34ea0e Mon Sep 17 00:00:00 2001 From: Raz Crimson <52282402+RazCrimson@users.noreply.github.com> Date: Tue, 14 Feb 2023 20:51:59 +0530 Subject: [PATCH 1/7] chg: Plugin name - docker -> containers --- conf/glances.conf | 2 +- glances/outputs/glances_curses.py | 6 +++--- .../plugins/{glances_docker.py => glances_containers.py} | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) rename glances/plugins/{glances_docker.py => glances_containers.py} (99%) diff --git a/conf/glances.conf b/conf/glances.conf index 923a92c9..25a22e10 100644 --- a/conf/glances.conf +++ b/conf/glances.conf @@ -386,7 +386,7 @@ port_default_gateway=True #web_4_url=https://blog.nicolargo.com/nonexist #web_4_description=Intranet -[docker] +[containers] disable=False # Only show specific containers (comma separated list of container name or regular expression) # Comment this line to display all containers (default configuration) diff --git a/glances/outputs/glances_curses.py b/glances/outputs/glances_curses.py index f3be8980..b11c6eb8 100644 --- a/glances/outputs/glances_curses.py +++ b/glances/outputs/glances_curses.py @@ -57,7 +57,7 @@ class _GlancesCurses(object): 'c': {'sort_key': 'cpu_percent'}, 'C': {'switch': 'disable_cloud'}, 'd': {'switch': 'disable_diskio'}, - 'D': {'switch': 'disable_docker'}, + 'D': {'switch': 'disable_containers'}, # 'e' > Enable/Disable process extended # 'E' > Erase the process filter # 'f' > Show/hide fs / folder stats @@ -124,7 +124,7 @@ class _GlancesCurses(object): _left_sidebar_max_width = 34 # Define right sidebar - _right_sidebar = ['docker', 'processcount', 'amps', 'processlist', 'alert'] + _right_sidebar = ['containers', 'processcount', 'amps', 'processlist', 'alert'] def __init__(self, config=None, args=None): # Init @@ -612,7 +612,7 @@ class _GlancesCurses(object): max_processes_displayed = ( self.term_window.getmaxyx()[0] - 11 - - (0 if 'docker' not in __stat_display else 
self.get_stats_display_height(__stat_display["docker"])) + - (0 if 'containers' not in __stat_display else self.get_stats_display_height(__stat_display["containers"])) - ( 0 if 'processcount' not in __stat_display diff --git a/glances/plugins/glances_docker.py b/glances/plugins/glances_containers.py similarity index 99% rename from glances/plugins/glances_docker.py rename to glances/plugins/glances_containers.py index 1eeb04ac..b316c78e 100644 --- a/glances/plugins/glances_docker.py +++ b/glances/plugins/glances_containers.py @@ -716,7 +716,7 @@ class Plugin(GlancesPlugin): # Get the maximum containers name # Max size is configurable. See feature request #1723. name_max_width = min( - self.config.get_int_value('docker', 'max_name_size', default=20) if self.config is not None else 20, + self.config.get_int_value('containers', 'max_name_size', default=20) if self.config is not None else 20, len(max(self.stats['containers'], key=lambda x: len(x['name']))['name']), ) msg = ' {:{width}}'.format('Name', width=name_max_width) From b65f8006318921d641dcee520687cf87061cd3ef Mon Sep 17 00:00:00 2001 From: Raz Crimson <52282402+RazCrimson@users.noreply.github.com> Date: Tue, 14 Feb 2023 20:51:59 +0530 Subject: [PATCH 2/7] add: containers Plugin - StatsFetcher --- glances/plugins/containers/__init__.py | 0 glances/plugins/containers/stats_fetcher.py | 72 +++++++++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 glances/plugins/containers/__init__.py create mode 100644 glances/plugins/containers/stats_fetcher.py diff --git a/glances/plugins/containers/__init__.py b/glances/plugins/containers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/glances/plugins/containers/stats_fetcher.py b/glances/plugins/containers/stats_fetcher.py new file mode 100644 index 00000000..ed08f4ce --- /dev/null +++ b/glances/plugins/containers/stats_fetcher.py @@ -0,0 +1,72 @@ +import threading +import time + +from glances.logger import logger + + +class 
StatsFetcher: + # Should be an Abstract Base Class + # Inherit from abc.ABC by Glancesv4 (not inheriting for compatibility with py2) + """ + Streams the container stats through threading + + Use `StatsFetcher.stats` to access the streamed results + """ + + def __init__(self, container): + """Init the class. + + container: instance of Container returned by Docker or Podman client + """ + # The docker-py return stats as a stream + self._container = container + # Container stats are maintained as dicts + self._raw_stats = {} + # Use a Thread to stream stats + self._thread = threading.Thread(target=self._fetch_stats, daemon=True) + # Event needed to stop properly the thread + self._stopper = threading.Event() + + self._thread.start() + logger.debug("docker plugin - Create thread for container {}".format(self._container.name)) + + def _fetch_stats(self): + """Grab the stats. + + Infinite loop, should be stopped by calling the stop() method + """ + try: + for new_stats in self._container.stats(decode=True): + self._pre_raw_stats_update_hook() + self._raw_stats = new_stats + self._post_raw_stats_update_hook() + + time.sleep(0.1) + if self.stopped(): + break + + except Exception as e: + logger.debug("docker plugin - Exception thrown during run ({})".format(e)) + self.stop() + + def stopped(self): + """Return True is the thread is stopped.""" + return self._stopper.is_set() + + def stop(self, timeout=None): + """Stop the thread.""" + logger.debug("docker plugin - Close thread for container {}".format(self._container.name)) + self._stopper.set() + + @property + def stats(self): + """Raw Stats getter.""" + return self._raw_stats + + def _pre_raw_stats_update_hook(self): + """Hook that runs before worker thread updates the raw_stats""" + pass + + def _post_raw_stats_update_hook(self): + """Hook that runs after worker thread updates the raw_stats""" + pass From 16c3b4311251191d0081c404bb333cb682498a99 Mon Sep 17 00:00:00 2001 From: Raz Crimson 
<52282402+RazCrimson@users.noreply.github.com> Date: Tue, 14 Feb 2023 20:51:59 +0530 Subject: [PATCH 3/7] chg: containers Plugin - switch to docker extension unit --- glances/plugins/containers/glances_docker.py | 353 +++++++++++++++++++ glances/plugins/glances_containers.py | 169 +-------- 2 files changed, 366 insertions(+), 156 deletions(-) create mode 100644 glances/plugins/containers/glances_docker.py diff --git a/glances/plugins/containers/glances_docker.py b/glances/plugins/containers/glances_docker.py new file mode 100644 index 00000000..11ea2ef9 --- /dev/null +++ b/glances/plugins/containers/glances_docker.py @@ -0,0 +1,353 @@ +"""Docker Extension unit for Glances' Containers plugin.""" +import threading +import time + +from glances.compat import iterkeys, itervalues, nativestr, pretty_date +from glances.logger import logger +from glances.plugins.containers.stats_fetcher import StatsFetcher + +# Docker-py library (optional and Linux-only) +# https://github.com/docker/docker-py +try: + import docker + from dateutil import parser, tz +except Exception as e: + import_docker_error_tag = True + # Display debug message if import KeyError + logger.debug("Error loading Docker deps Lib. 
Docker plugin is disabled ({})".format(e)) +else: + import_docker_error_tag = False + + +class DockerStatsFetcher(StatsFetcher): + MANDATORY_MEMORY_FIELDS = ["usage", 'limit'] + + def __init__(self, container): + super().__init__(container) + # Lock to avoid the daemon thread updating stats when main thread reads the stats + self._stats_lock = threading.Lock() + + # Previous computes stats are stored in the self._old_computed_stats variable + # By storing time data we enable IoR/s and IoW/s calculations in the XML/RPC API, which would otherwise + # be overly difficult work for users of the API + self._old_computed_stats = {} + + # Last time when output stats (results) were computed + self._last_stats_output_time = 0 + # Last time when the raw_stats were updated by worker thread + self._last_raws_stats_update_time = 1 + + @property + def activity_stats(self): + """Activity Stats + + Each successive access of activity_stats will cause computation of activity_stats from raw_stats + """ + computed_activity_stats = self._compute_activity_stats() + self._old_computed_stats = computed_activity_stats + self._last_stats_output_time = time.time() + return computed_activity_stats + + def _pre_raw_stats_update_hook(self): + self._stats_lock.acquire() + + def _post_raw_stats_update_hook(self): + self._last_raws_stats_update_time = time.time() + self._stats_lock.release() + + @property + def time_since_update(self): + return self._last_raws_stats_update_time - self._last_stats_output_time + + def _compute_activity_stats(self): + with self._stats_lock: + io_stats = self._get_io_stats() + cpu_stats = self._get_cpu_stats() + memory_stats = self._get_memory_stats() + network_stats = self._get_network_stats() + + computed_stats = { + "io": io_stats or {}, + "memory": memory_stats or {}, + "network": network_stats or {}, + "cpu": cpu_stats or {"total": 0.0}, + } + return computed_stats + + def _get_cpu_stats(self): + """Return the container CPU usage. 
+ + Output: a dict {'total': 1.49} + """ + stats = {'total': 0.0} + + try: + cpu_stats = self.stats['cpu_stats'] + precpu_stats = self.stats['precpu_stats'] + cpu = {'system': cpu_stats['system_cpu_usage'], 'total': cpu_stats['cpu_usage']['total_usage']} + precpu = {'system': precpu_stats['system_cpu_usage'], 'total': precpu_stats['cpu_usage']['total_usage']} + + # Issue #1857 + # If either precpu_stats.online_cpus or cpu_stats.online_cpus is nil + # then for compatibility with older daemons the length of + # the corresponding cpu_usage.percpu_usage array should be used. + cpu['nb_core'] = cpu_stats.get('online_cpus') or len(cpu_stats['cpu_usage']['percpu_usage'] or []) + except KeyError as e: + logger.debug("containers plugin - Can't grab CPU stat for container {} ({})".format(self._container.id, e)) + logger.debug(self.stats) + return None + + try: + cpu_delta = cpu['total'] - precpu['total'] + system_cpu_delta = cpu['system'] - precpu['system'] + # CPU usage % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0 + stats['total'] = (cpu_delta / system_cpu_delta) * cpu['nb_core'] * 100.0 + except TypeError as e: + msg = "containers plugin - Can't compute CPU usage for container {} ({})".format(self._container.id, e) + logger.debug(msg) + logger.debug(self.stats) + return None + + # Return the stats + return stats + + def _get_memory_stats(self): + """Return the container MEMORY. 
+ + Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...} + """ + memory_stats = self.stats.get('memory_stats') + + # Checks for memory_stats & mandatory fields + if not memory_stats or any(field not in memory_stats for field in self.MANDATORY_MEMORY_FIELDS): + logger.debug("containers plugin - Missing MEM usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + stats = {field: memory_stats[field] for field in self.MANDATORY_MEMORY_FIELDS} + try: + # Issue #1857 - Some stats are not always available in ['memory_stats']['stats'] + detailed_stats = memory_stats['stats'] + stats['rss'] = detailed_stats.get('rss') or detailed_stats.get('total_rss') + stats['max_usage'] = detailed_stats.get('max_usage') + stats['cache'] = detailed_stats.get('cache') + except (KeyError, TypeError) as e: + # self.stats do not have MEM information + logger.debug("containers plugin - Can't grab MEM usage for container {} ({})".format(self._container.id, e)) + logger.debug(self.stats) + return None + + # Return the stats + return stats + + def _get_network_stats(self): + """Return the container network usage using the Docker API (v1.0 or higher). + + Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}. 
+ with: + time_since_update: number of seconds elapsed between the latest grab + rx: Number of bytes received + tx: Number of bytes transmitted + """ + eth0_stats = self.stats.get('networks', {}).get('eth0') + + # Checks for net_stats & mandatory fields + if not eth0_stats or any(field not in eth0_stats for field in ['rx_bytes', 'tx_bytes']): + logger.debug("containers plugin - Missing Network usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + # Read the rx/tx stats (in bytes) + stats = {'cumulative_rx': eth0_stats["rx_bytes"], 'cumulative_tx': eth0_stats["tx_bytes"]} + + # Using previous stats to calculate rates + old_network_stats = self._old_computed_stats.get("network") + if old_network_stats: + stats['time_since_update'] = round(self.time_since_update) + stats['rx'] = stats['cumulative_rx'] - old_network_stats["cumulative_rx"] + stats['tx'] = stats['cumulative_tx'] - old_network_stats['cumulative_tx'] + + # Return the stats + return stats + + def _get_io_stats(self): + """Return the container IO usage using the Docker API (v1.0 or higher). + + Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}. 
+ with: + time_since_update: number of seconds elapsed between the latest grab + ior: Number of bytes read + iow: Number of bytes written + """ + io_service_bytes_recursive = self.stats.get('blkio_stats', {}).get('io_service_bytes_recursive') + + # Checks for net_stats + if not io_service_bytes_recursive: + logger.debug("containers plugin - Missing blockIO usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + # Read the ior/iow stats (in bytes) + try: + # Read IOR and IOW value in the structure list of dict + cumulative_ior = [i for i in io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value'] + cumulative_iow = [i for i in io_service_bytes_recursive if i['op'].lower() == 'write'][0]['value'] + except (TypeError, IndexError, KeyError, AttributeError) as e: + # self.stats do not have io information + logger.debug( + "containers plugin - Can't grab blockIO usage for container {} ({})".format(self._container.id, e) + ) + logger.debug(self.stats) + return None + + stats = {'cumulative_ior': cumulative_ior, 'cumulative_iow': cumulative_iow} + + # Using previous stats to calculate difference + old_io_stats = self._old_computed_stats.get("io") + if old_io_stats: + stats['time_since_update'] = round(self.time_since_update) + stats['ior'] = stats['cumulative_ior'] - old_io_stats["cumulative_ior"] + stats['iow'] = stats['cumulative_iow'] - old_io_stats["cumulative_iow"] + + # Return the stats + return stats + + +class DockerContainersExtension: + """Glances' Containers Plugin's Docker Extension unit""" + + CONTAINER_ACTIVE_STATUS = ['running', 'paused'] + + def __init__(self): + if import_docker_error_tag: + raise Exception("Missing libs required to run Docker Extension (Containers) ") + + self.client = None + self.ext_name = "Docker Ext" + self.stats_fetchers = {} + self.connect() + + def connect(self): + """Connect to the Docker server.""" + # Init the Docker API Client + try: + # Do not use the timeout option 
(see issue #1878) + self.client = docker.from_env() + except Exception as e: + logger.error("docker plugin - Can not connect to Docker ({})".format(e)) + self.client = None + + def stop(self): + # Stop all streaming threads + for t in itervalues(self.stats_fetchers): + t.stop() + + def update(self, all_tag): + """Update Docker stats using the input method.""" + # Docker version + # Example: { + # "KernelVersion": "3.16.4-tinycore64", + # "Arch": "amd64", + # "ApiVersion": "1.15", + # "Version": "1.3.0", + # "GitCommit": "c78088f", + # "Os": "linux", + # "GoVersion": "go1.3.3" + # } + try: + version_stats = self.client.version() + except Exception as e: + # Correct issue#649 + logger.error("{} plugin - Cannot get Docker version ({})".format(self.ext_name, e)) + return {}, [] + + # Update current containers list + try: + # Issue #1152: Docker module doesn't export details about stopped containers + # The Containers/all key of the configuration file should be set to True + containers = self.client.containers.list(all=all_tag) + except Exception as e: + logger.error("{} plugin - Cannot get containers list ({})".format(self.ext_name, e)) + return version_stats, [] + + # Start new thread for new container + for container in containers: + if container.id not in self.stats_fetchers: + # StatsFetcher did not exist in the internal dict + # Create it, add it to the internal dict + logger.debug("{} plugin - Create thread for container {}".format(self.ext_name, container.id[:12])) + self.stats_fetchers[container.id] = DockerStatsFetcher(container) + + # Stop threads for non-existing containers + absent_containers = set(iterkeys(self.stats_fetchers)) - set(c.id for c in containers) + for container_id in absent_containers: + # Stop the StatsFetcher + logger.debug("{} plugin - Stop thread for old container {}".format(self.ext_name, container_id[:12])) + self.stats_fetchers[container_id].stop() + # Delete the StatsFetcher from the dict + del self.stats_fetchers[container_id] + + # 
Get stats for all containers + container_stats = [self.generate_stats(container) for container in containers] + return version_stats, container_stats + + @property + def key(self): + """Return the key of the list.""" + return 'name' + + def generate_stats(self, container): + # Init the stats for the current container + stats = { + 'key': self.key, + # Export name + 'name': nativestr(container.name), + # Container Id + 'Id': container.id, + # Container Image + 'Image': str(container.image.tags), + # Container Status (from attrs) + 'Status': container.attrs['State']['Status'], + 'Created': container.attrs['Created'], + 'Command': [], + } + + if container.attrs['Config'].get('Entrypoint', None): + stats['Command'].extend(container.attrs['Config'].get('Entrypoint', [])) + if container.attrs['Config'].get('Cmd', None): + stats['Command'].extend(container.attrs['Config'].get('Cmd', [])) + if not stats['Command']: + stats['Command'] = None + + if stats['Status'] in self.CONTAINER_ACTIVE_STATUS: + stats['StartedAt'] = container.attrs['State']['StartedAt'] + stats_fetcher = self.stats_fetchers[container.id] + activity_stats = stats_fetcher.activity_stats + stats.update(activity_stats) + + # Additional fields + stats['cpu_percent'] = stats["cpu"]['total'] + stats['memory_usage'] = stats["memory"].get('usage') + if stats['memory'].get('cache') is not None: + stats['memory_usage'] -= stats['memory']['cache'] + stats['io_r'] = stats['io'].get('ior') + stats['io_w'] = stats['io'].get('iow') + stats['network_rx'] = stats['network'].get('rx') + stats['network_tx'] = stats['network'].get('tx') + stats['Uptime'] = pretty_date( + parser.parse(stats['StartedAt']).astimezone(tz.tzlocal()).replace(tzinfo=None) + ) + else: + stats['io'] = {} + stats['cpu'] = {} + stats['memory'] = {} + stats['network'] = {} + stats['io_r'] = None + stats['io_w'] = None + stats['cpu_percent'] = None + stats['memory_percent'] = None + stats['network_rx'] = None + stats['network_tx'] = None + 
stats['Uptime'] = None + + return stats diff --git a/glances/plugins/glances_containers.py b/glances/plugins/glances_containers.py index b316c78e..74804c82 100644 --- a/glances/plugins/glances_containers.py +++ b/glances/plugins/glances_containers.py @@ -7,7 +7,7 @@ # SPDX-License-Identifier: LGPL-3.0-only # -"""Docker (and Podman) plugin.""" +"""Containers plugin.""" import os import threading @@ -18,19 +18,7 @@ from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_ from glances.logger import logger from glances.plugins.glances_plugin import GlancesPlugin from glances.processes import sort_stats as sort_stats_processes, glances_processes -from glances.timer import getTimeSinceLastUpdate - -# Docker-py library (optional and Linux-only) -# https://github.com/docker/docker-py -try: - import docker - from dateutil import parser, tz -except Exception as e: - import_docker_error_tag = True - # Display debug message if import KeyError - logger.debug("Error loading Docker deps Lib. 
Docker plugin is disabled ({})".format(e)) -else: - import_docker_error_tag = False +from glances.plugins.containers.glances_docker import import_docker_error_tag, DockerContainersExtension # Podman library (optional and Linux-only) # https://pypi.org/project/podman/ @@ -88,9 +76,7 @@ class Plugin(GlancesPlugin): def __init__(self, args=None, config=None): """Init the plugin.""" - super(Plugin, self).__init__(args=args, - config=config, - items_history_list=items_history_list) + super(Plugin, self).__init__(args=args, config=config, items_history_list=items_history_list) # The plugin can be disabled using: args.disable_docker self.args = args @@ -102,10 +88,8 @@ class Plugin(GlancesPlugin): self.display_curse = True # Init the Docker API - if not import_docker_error_tag: - self.docker_client = self.connect_docker() - else: - self.docker_client = None + self.docker_extension = DockerContainersExtension() if not import_docker_error_tag else None + self.docker_extension: DockerContainersExtension # Init the Podman API self._version_podman = {} @@ -143,10 +127,8 @@ class Plugin(GlancesPlugin): def exit(self): """Overwrite the exit method to close threads.""" - for t in itervalues(self.thread_docker_list): - t.stop() - for t in itervalues(self.thread_podman_list): - t.stop() + if self.docker_extension: + self.docker_extension.stop() # Call the father class super(Plugin, self).exit() @@ -235,17 +217,17 @@ class Plugin(GlancesPlugin): def update(self): """Update Docker and podman stats using the input method.""" # Connection should be ok - if self.docker_client is None and self.podman_client is None: + if self.docker_extension is None and self.podman_client is None: return self.get_init_value() if self.input_method == 'local': # Update stats - stats_docker = self.update_docker() - stats_podman = self.update_podman() + stats_docker = self.update_docker() if self.docker_extension else {} + stats_podman = self.update_podman() if self.podman_client else {} stats = { 
'version': stats_docker.get('version', {}), 'version_podman': stats_podman.get('version', {}), - 'containers': stats_docker.get('containers', []) + stats_podman.get('containers', []) + 'containers': stats_docker.get('containers', []) + stats_podman.get('containers', []), } elif self.input_method == 'snmp': # Update stats using SNMP @@ -260,133 +242,8 @@ class Plugin(GlancesPlugin): def update_docker(self): """Update Docker stats using the input method.""" - # Init new docker stats - stats = self.get_init_value() - - # Docker version - # Example: { - # "KernelVersion": "3.16.4-tinycore64", - # "Arch": "amd64", - # "ApiVersion": "1.15", - # "Version": "1.3.0", - # "GitCommit": "c78088f", - # "Os": "linux", - # "GoVersion": "go1.3.3" - # } - try: - stats['version'] = self.docker_client.version() - except Exception as e: - # Correct issue#649 - logger.error("{} plugin - Cannot get Docker version ({})".format(self.plugin_name, e)) - return stats - - # Update current containers list - try: - # Issue #1152: Docker module doesn't export details about stopped containers - # The Docker/all key of the configuration file should be set to True - containers = self.docker_client.containers.list(all=self._all_tag()) or [] - except Exception as e: - logger.error("{} plugin - Cannot get containers list ({})".format(self.plugin_name, e)) - return stats - - # Start new thread for new container - for container in containers: - if container.id not in self.thread_docker_list: - # Thread did not exist in the internal dict - # Create it, add it to the internal dict and start it - logger.debug( - "{} plugin - Create thread for container {}".format(self.plugin_name, container.id[:12]) - ) - t = ThreadContainerGrabber(container) - self.thread_docker_list[container.id] = t - t.start() - - # Stop threads for non-existing containers - absent_containers = set(iterkeys(self.thread_docker_list)) - set([c.id for c in containers]) - for container_id in absent_containers: - # Stop the thread - 
logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12])) - self.thread_docker_list[container_id].stop() - # Delete the item from the dict - del self.thread_docker_list[container_id] - - # Get stats for all containers - stats['containers'] = [] - for container in containers: - # logger.info(['{}: {}'.format(key, container.attrs[key]) for key in sorted(container.attrs.keys())]) - # logger.info(container.attrs['State']['Status']) - # Shall we display the stats ? - if not self.is_display(nativestr(container.name)): - continue - - # Init the stats for the current container - container_stats = {} - # The key is the container name and not the Id - container_stats['key'] = self.get_key() - # Export name - container_stats['name'] = nativestr(container.name) - # Container Id - container_stats['Id'] = container.id - # Container Image - container_stats['Image'] = container.image.tags - # Global stats (from attrs) - # Container Status - container_stats['Status'] = container.attrs['State']['Status'] - # Container Command (see #1912) - container_stats['Command'] = [] - if container.attrs['Config'].get('Entrypoint', None): - container_stats['Command'].extend(container.attrs['Config'].get('Entrypoint', [])) - if container.attrs['Config'].get('Cmd', None): - container_stats['Command'].extend(container.attrs['Config'].get('Cmd', [])) - if not container_stats['Command']: - container_stats['Command'] = None - # Standards stats - # See https://docs.docker.com/engine/api/v1.41/#operation/ContainerStats - # Be aware that the API can change... 
(example see issue #1857) - if container_stats['Status'] in ('running', 'paused'): - # CPU - container_stats['cpu'] = self.get_docker_cpu(container.id, self.thread_docker_list[container.id].stats) - container_stats['cpu_percent'] = container_stats['cpu'].get('total', None) - # MEM - container_stats['memory'] = self.get_docker_memory( - container.id, self.thread_docker_list[container.id].stats - ) - container_stats['memory_usage'] = container_stats['memory'].get('usage', None) - if container_stats['memory'].get('cache', None) is not None: - container_stats['memory_usage'] -= container_stats['memory']['cache'] - # IO - container_stats['io'] = self.get_docker_io(container.id, self.thread_docker_list[container.id].stats) - container_stats['io_r'] = container_stats['io'].get('ior', None) - container_stats['io_w'] = container_stats['io'].get('iow', None) - # NET - container_stats['network'] = self.get_docker_network( - container.id, self.thread_docker_list[container.id].stats - ) - container_stats['network_rx'] = container_stats['network'].get('rx', None) - container_stats['network_tx'] = container_stats['network'].get('tx', None) - # Uptime - container_stats['Uptime'] = pretty_date( - # parser.parse(container.attrs['State']['StartedAt']).replace(tzinfo=None) - parser.parse(container.attrs['State']['StartedAt']) - .astimezone(tz.tzlocal()) - .replace(tzinfo=None) - ) - else: - container_stats['cpu'] = {} - container_stats['cpu_percent'] = None - container_stats['memory'] = {} - container_stats['memory_percent'] = None - container_stats['io'] = {} - container_stats['io_r'] = None - container_stats['io_w'] = None - container_stats['network'] = {} - container_stats['network_rx'] = None - container_stats['network_tx'] = None - container_stats['Uptime'] = None - # Add current container stats to the stats list - stats['containers'].append(container_stats) - - return stats + version, containers = self.docker_extension.update(all_tag=self._all_tag()) + return {"version": 
version, "containers": containers} def update_podman(self): """Update Podman stats.""" From ebb26e6a708ff44a3809068b2d025a0993427dfa Mon Sep 17 00:00:00 2001 From: Raz Crimson <52282402+RazCrimson@users.noreply.github.com> Date: Tue, 14 Feb 2023 20:51:59 +0530 Subject: [PATCH 4/7] chg: containers Plugin - switch to basic podman extension unit Pod support is still missing --- glances/plugins/containers/glances_podman.py | 258 ++++++++++++ glances/plugins/glances_containers.py | 419 +------------------ 2 files changed, 281 insertions(+), 396 deletions(-) create mode 100644 glances/plugins/containers/glances_podman.py diff --git a/glances/plugins/containers/glances_podman.py b/glances/plugins/containers/glances_podman.py new file mode 100644 index 00000000..992ca2a1 --- /dev/null +++ b/glances/plugins/containers/glances_podman.py @@ -0,0 +1,258 @@ +"""Podman Extension unit for Glances' Containers plugin.""" +from datetime import datetime + +from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_value_to_float +from glances.logger import logger +from glances.plugins.containers.stats_fetcher import StatsFetcher + +# Podman library (optional and Linux-only) +# https://pypi.org/project/podman/ +try: + import podman +except Exception as e: + import_podman_error_tag = True + # Display debug message if import KeyError + logger.debug("Error loading Podman deps Lib. Podman feature in the Containers plugin is disabled ({})".format(e)) +else: + import_podman_error_tag = False + + +class PodmanStatsFetcher(StatsFetcher): + @property + def activity_stats(self): + io_stats = self._get_io_stats() + cpu_stats = self._get_cpu_stats() + memory_stats = self._get_memory_stats() + network_stats = self._get_network_stats() + + computed_stats = { + "io": io_stats or {}, + "memory": memory_stats or {}, + "network": network_stats or {}, + "cpu": cpu_stats or {"total": 0.0}, + } + return computed_stats + + def _get_cpu_stats(self): + """Return the container CPU usage. 
+ + Output: a dict {'total': 1.49} + """ + if "cpu_percent" not in self.stats: + logger.debug("containers plugin - Missing CPU usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + cpu_usage = string_value_to_float(self.stats["cpu_percent"].rstrip("%")) + return {"total": cpu_usage} + + def _get_memory_stats(self): + """Return the container MEMORY. + + Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...} + """ + if "mem_usage" not in self.stats or "/" not in self.stats["mem_usage"]: + logger.debug("containers plugin - Missing MEM usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + memory_usage_str = self.stats["mem_usage"] + usage_str, limit_str = memory_usage_str.split("/") + + try: + usage = string_value_to_float(usage_str) + limit = string_value_to_float(limit_str) + except ValueError as e: + logger.debug("containers plugin - Compute MEM usage failed for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + return {"usage": usage, "limit": limit} + + def _get_network_stats(self): + """Return the container network usage using the Docker API (v1.0 or higher). + + Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}. 
+ with: + time_since_update: number of seconds elapsed between the latest grab + rx: Number of bytes received + tx: Number of bytes transmitted + """ + if "net_io" not in self.stats or "/" not in self.stats["net_io"]: + logger.debug("containers plugin - Missing Network usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + net_io_str = self.stats["net_io"] + rx_str, tx_str = net_io_str.split("/") + + try: + rx = string_value_to_float(rx_str) + tx = string_value_to_float(tx_str) + except ValueError as e: + logger.debug("containers plugin - Compute Network usage failed for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculated procedure + return {"rx": rx, "tx": tx, "time_since_update": 1} + + def _get_io_stats(self): + """Return the container IO usage using the Docker API (v1.0 or higher). + + Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}. 
+ with: + time_since_update: number of seconds elapsed between the latest grab + ior: Number of bytes read + iow: Number of bytes written + """ + if "block_io" not in self.stats or "/" not in self.stats["block_io"]: + logger.debug("containers plugin - Missing BlockIO usage fields for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + block_io_str = self.stats["block_io"] + ior_str, iow_str = block_io_str.split("/") + + try: + ior = string_value_to_float(ior_str) + iow = string_value_to_float(iow_str) + except ValueError as e: + logger.debug("containers plugin - Compute BlockIO usage failed for container {}".format(self._container.id)) + logger.debug(self.stats) + return None + + # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculated procedure + return {"ior": ior, "iow": iow, "time_since_update": 1} + + +class PodmanContainersExtension: + """Glances' Containers Plugin's Docker Extension unit""" + + CONTAINER_ACTIVE_STATUS = ['running', 'paused'] + + def __init__(self, podman_sock): + if import_podman_error_tag: + raise Exception("Missing libs required to run Podman Extension (Containers)") + + self.client = None + self.ext_name = "Podman (Containers)" + self.podman_sock = podman_sock + self.stats_fetchers = {} + self._version = {} + self.connect() + + def connect(self): + """Connect to Podman.""" + try: + self.client = podman.PodmanClient(base_url=self.podman_sock) + except Exception as e: + logger.error("{} plugin - Can not connect to Podman ({})".format(self.ext_name, e)) + + try: + version_podman = self.client.version() + except Exception as e: + logger.error("{} plugin - Cannot get Podman version ({})".format(self.ext_name, e)) + else: + self._version = { + 'Version': version_podman['Version'], + 'ApiVersion': version_podman['ApiVersion'], + 'MinAPIVersion': version_podman['MinAPIVersion'], + } + + def stop(self): + # Stop all streaming threads + for t in itervalues(self.stats_fetchers): + 
t.stop() + + def update(self, all_tag): + """Update Podman stats using the input method.""" + + try: + version_stats = self.client.version() + except Exception as e: + # Correct issue#649 + logger.error("{} plugin - Cannot get Podman version ({})".format(self.ext_name, e)) + return {}, [] + + # Update current containers list + try: + # Issue #1152: Podman module doesn't export details about stopped containers + # The Containers/all key of the configuration file should be set to True + containers = self.client.containers.list(all=all_tag) + except Exception as e: + logger.error("{} plugin - Cannot get containers list ({})".format(self.ext_name, e)) + return version_stats, [] + + # Start new thread for new container + for container in containers: + if container.id not in self.stats_fetchers: + # StatsFetcher did not exist in the internal dict + # Create it, add it to the internal dict + logger.debug("{} plugin - Create thread for container {}".format(self.ext_name, container.id[:12])) + self.stats_fetchers[container.id] = PodmanStatsFetcher(container) + + # Stop threads for non-existing containers + absent_containers = set(iterkeys(self.stats_fetchers)) - set(c.id for c in containers) + for container_id in absent_containers: + # Stop the StatsFetcher + logger.debug("{} plugin - Stop thread for old container {}".format(self.ext_name, container_id[:12])) + self.stats_fetchers[container_id].stop() + # Delete the StatsFetcher from the dict + del self.stats_fetchers[container_id] + + # Get stats for all containers + container_stats = [self.generate_stats(container) for container in containers] + return version_stats, container_stats + + @property + def key(self): + """Return the key of the list.""" + return 'name' + + def generate_stats(self, container): + # Init the stats for the current container + stats = { + 'key': self.key, + # Export name + 'name': nativestr(container.name), + # Container Id + 'Id': container.id, + # Container Image + 'Image': 
str(container.image.tags), + # Container Status (from attrs) + 'Status': container.attrs['State'], + 'Created': container.attrs['Created'], + 'Command': container.attrs.get('Command') or [], + } + + if stats['Status'] in self.CONTAINER_ACTIVE_STATUS: + stats['StartedAt'] = datetime.fromtimestamp(container.attrs['StartedAt']) + stats_fetcher = self.stats_fetchers[container.id] + activity_stats = stats_fetcher.activity_stats + stats.update(activity_stats) + + # Additional fields + stats['cpu_percent'] = stats["cpu"]['total'] + stats['memory_usage'] = stats["memory"].get('usage') + if stats['memory'].get('cache') is not None: + stats['memory_usage'] -= stats['memory']['cache'] + stats['io_r'] = stats['io'].get('ior') + stats['io_w'] = stats['io'].get('iow') + stats['network_rx'] = stats['network'].get('rx') + stats['network_tx'] = stats['network'].get('tx') + stats['Uptime'] = pretty_date(stats['StartedAt']) + else: + stats['io'] = {} + stats['cpu'] = {} + stats['memory'] = {} + stats['network'] = {} + stats['io_r'] = None + stats['io_w'] = None + stats['cpu_percent'] = None + stats['memory_percent'] = None + stats['network_rx'] = None + stats['network_tx'] = None + stats['Uptime'] = None + + return stats diff --git a/glances/plugins/glances_containers.py b/glances/plugins/glances_containers.py index 74804c82..de1a89fc 100644 --- a/glances/plugins/glances_containers.py +++ b/glances/plugins/glances_containers.py @@ -10,26 +10,17 @@ """Containers plugin.""" import os -import threading -import time from copy import deepcopy +from typing import Optional -from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_value_to_float from glances.logger import logger +from glances.plugins.containers.glances_docker import ( + DockerContainersExtension, import_docker_error_tag) +from glances.plugins.containers.glances_podman import ( + PodmanContainersExtension, import_podman_error_tag) from glances.plugins.glances_plugin import GlancesPlugin -from 
glances.processes import sort_stats as sort_stats_processes, glances_processes -from glances.plugins.containers.glances_docker import import_docker_error_tag, DockerContainersExtension - -# Podman library (optional and Linux-only) -# https://pypi.org/project/podman/ -try: - from podman import PodmanClient -except Exception as e: - import_podman_error_tag = True - # Display debug message if import KeyError - logger.debug("Error loading Podman deps Lib. Podman feature in the Docker plugin is disabled ({})".format(e)) -else: - import_podman_error_tag = False +from glances.processes import glances_processes +from glances.processes import sort_stats as sort_stats_processes # Define the items history list (list of items to add to history) # TODO: For the moment limited to the CPU. Had to change the graph exports @@ -89,34 +80,12 @@ class Plugin(GlancesPlugin): # Init the Docker API self.docker_extension = DockerContainersExtension() if not import_docker_error_tag else None - self.docker_extension: DockerContainersExtension # Init the Podman API - self._version_podman = {} - if not import_podman_error_tag: - self.podman_client = self.connect_podman() - else: + if import_podman_error_tag: self.podman_client = None - - # Dict of Docker thread (to grab stats asynchronously, one thread is created per container) - # key: Container Id - # value: instance of ThreadContainerGrabber - self.thread_docker_list = {} - - # Dict of Podman thread (to grab stats asynchronously, one thread is created per container) - # key: Container Id - # value: instance of ThreadContainerGrabber - self.thread_podman_list = {} - - # Dict of Network stats (Storing previous network stats to compute Rx/s and Tx/s) - # key: Container Id - # value: network stats dict - self.network_old = {} - - # Dict of Disk IO stats (Storing previous disk_io stats to compute Rx/s and Tx/s) - # key: Container Id - # value: network stats dict - self.io_old = {} + else: + self.podman_client = 
PodmanContainersExtension(podman_sock=self._podman_sock())
 
 # Sort key
 self.sort_key = None
@@ -125,6 +94,17 @@ class Plugin(GlancesPlugin):
 self.update()
 self.refresh_timer.set(0)
 
+ def _podman_sock(self):
+ """Return the podman sock.
+ Could be defined in the [containers] section thanks to the podman_sock option.
+ Default value: unix:///run/user/1000/podman/podman.sock
+ """
+ conf_podman_sock = self.get_conf_value('podman_sock')
+ if len(conf_podman_sock) == 0:
+ return "unix:///run/user/1000/podman/podman.sock"
+ else:
+ return conf_podman_sock[0]
+
 def exit(self):
 """Overwrite the exit method to close threads."""
 if self.docker_extension:
@@ -155,50 +135,6 @@ class Plugin(GlancesPlugin):
 
 return ret
 
- def connect_docker(self):
- """Connect to the Docker server."""
- try:
- # Do not use the timeout option (see issue #1878)
- ret = docker.from_env()
- except Exception as e:
- logger.error("docker plugin - Can not connect to Docker ({})".format(e))
- ret = None
-
- return ret
-
- def connect_podman(self):
- """Connect to Podman."""
- try:
- ret = PodmanClient(base_url=self._podman_sock())
- except Exception as e:
- logger.error("docker plugin - Can not connect to Podman ({})".format(e))
- ret = None
-
- try:
- version_podman = ret.version()
- except Exception as e:
- logger.error("{} plugin - Cannot get Podman version ({})".format(self.plugin_name, e))
- ret = None
- else:
- self._version_podman = {
- 'Version': version_podman['Version'],
- 'ApiVersion': version_podman['ApiVersion'],
- 'MinAPIVersion': version_podman['MinAPIVersion'],
- }
-
- return ret
-
- def _podman_sock(self):
- """Return the podman sock.
- Could be desfined in the [docker] section thanks to the podman_sock option. 
- Default value: unix:///run/user/1000/podman/podman.sock - """ - conf_podman_sock = self.get_conf_value('podman_sock') - if len(conf_podman_sock) == 0: - return "unix:///run/user/1000/podman/podman.sock" - else: - return conf_podman_sock[0] - def _all_tag(self): """Return the all tag of the Glances/Docker configuration file. @@ -247,261 +183,8 @@ class Plugin(GlancesPlugin): def update_podman(self): """Update Podman stats.""" - # Init new docker stats - stats = self.get_init_value() - - # Podman version - # Request very long so it is only done once in the connect_podman method - stats['version'] = self._version_podman - - # Update current containers list - try: - containers = self.podman_client.containers.list() or [] - except Exception as e: - logger.error("{} plugin - Cannot get Podman containers list ({})".format(self.plugin_name, e)) - return stats - - # And the stats for each container - try: - # Return example: - # [{'CPU': '3.21%', - # 'MemUsage': '352.3kB / 7.836GB', 'MemUsageBytes': '344KiB / 7.298GiB', 'Mem': '0.00%', - # 'NetIO': '-- / --', - # 'BlockIO': '-- / --', - # 'PIDS': '1', 'Pod': '8d0f1c783def', 'CID': '9491515251ed', - # 'Name': '8d0f1c783def-infra'}, ... ] - podman_stats = {s['CID'][:12]: s for s in self.podman_client.pods.stats()} - except Exception as e: - logger.error("{} plugin - Cannot get Podman containers list ({})".format(self.plugin_name, e)) - return stats - - # Get stats for all containers - stats['containers'] = [] - for container in containers: - # Shall we display the stats ? 
- if not self.is_display(nativestr(container.name)): - continue - - # Init the stats for the current container - container_stats = {} - # The key is the container name and not the Id - container_stats['key'] = self.get_key() - # Export name - container_stats['name'] = nativestr(container.name) - # Container Id - container_stats['Id'] = container.id - container_stats['IdShort'] = container.id[:12] - # Container Image - container_stats['Image'] = container.image.tags - # Container Status (from attrs) - container_stats['Status'] = container.attrs['State'] - # Container Command - container_stats['Command'] = container.attrs['Command'] - # Standards stats - if container_stats['Status'] in ('running', 'paused'): - # CPU - # Convert: '3.21%' to 3.21 - container_stats['cpu_percent'] = float(podman_stats[container_stats['IdShort']]['CPU'][:-1]) - container_stats['cpu'] = {'total': container_stats['cpu_percent']} - # MEMORY - # Convert 'MemUsage': '352.3kB / 7.836GB' to bytes - # Yes it is ungly but the API do not expose the memory limit in bytes... 
- container_stats['memory'] = { - 'usage': string_value_to_float(podman_stats[container_stats['IdShort']]['MemUsage'].split(' / ')[0]), - 'limit': string_value_to_float(podman_stats[container_stats['IdShort']]['MemUsage'].split(' / ')[1]), - } - container_stats['memory_percent'] = float(podman_stats[container_stats['IdShort']]['Mem'][:-1]) - # Not available for the moment: https://github.com/containers/podman/issues/11695 - container_stats['io'] = {} - container_stats['io_r'] = string_value_to_float(podman_stats[container_stats['IdShort']]['BlockIO'].split(' / ')[0]) - container_stats['io_w'] = string_value_to_float(podman_stats[container_stats['IdShort']]['BlockIO'].split(' / ')[1]) - container_stats['network'] = {} - container_stats['network_rx'] = string_value_to_float(podman_stats[container_stats['IdShort']]['NetIO'].split(' / ')[0]) - container_stats['network_tx'] = string_value_to_float(podman_stats[container_stats['IdShort']]['NetIO'].split(' / ')[1]) - # - container_stats['Uptime'] = None - else: - container_stats['cpu'] = {} - container_stats['cpu_percent'] = None - container_stats['memory'] = {} - container_stats['memory_percent'] = None - container_stats['io'] = {} - container_stats['io_r'] = None - container_stats['io_w'] = None - container_stats['network'] = {} - container_stats['network_rx'] = None - container_stats['network_tx'] = None - container_stats['Uptime'] = None - # Add current container stats to the stats list - stats['containers'].append(container_stats) - - return stats - - def get_docker_cpu(self, container_id, all_stats): - """Return the container CPU usage. 
- - Input: id is the full container id - all_stats is the output of the stats method of the Docker API - Output: a dict {'total': 1.49} - """ - cpu_stats = {'total': 0.0} - - try: - cpu = { - 'system': all_stats['cpu_stats']['system_cpu_usage'], - 'total': all_stats['cpu_stats']['cpu_usage']['total_usage'], - } - precpu = { - 'system': all_stats['precpu_stats']['system_cpu_usage'], - 'total': all_stats['precpu_stats']['cpu_usage']['total_usage'], - } - # Issue #1857 - # If either precpu_stats.online_cpus or cpu_stats.online_cpus is nil - # then for compatibility with older daemons the length of - # the corresponding cpu_usage.percpu_usage array should be used. - cpu['nb_core'] = all_stats['cpu_stats'].get('online_cpus', None) - if cpu['nb_core'] is None: - cpu['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'] or []) - except KeyError as e: - logger.debug("docker plugin - Cannot grab CPU usage for container {} ({})".format(container_id, e)) - logger.debug(all_stats) - else: - try: - cpu_delta = cpu['total'] - precpu['total'] - system_cpu_delta = cpu['system'] - precpu['system'] - # CPU usage % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0 - cpu_stats['total'] = (cpu_delta / system_cpu_delta) * cpu['nb_core'] * 100.0 - except TypeError as e: - logger.debug("docker plugin - Cannot compute CPU usage for container {} ({})".format(container_id, e)) - logger.debug(all_stats) - - # Return the stats - return cpu_stats - - def get_docker_memory(self, container_id, all_stats): - """Return the container MEMORY. 
- - Input: id is the full container id - all_stats is the output of the stats method of the Docker API - Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...} - """ - memory_stats = {} - # Read the stats - try: - # Mandatory fields - memory_stats['usage'] = all_stats['memory_stats']['usage'] - memory_stats['limit'] = all_stats['memory_stats']['limit'] - # Issue #1857 - # Some stats are not always available in ['memory_stats']['stats'] - if 'rss' in all_stats['memory_stats']['stats']: - memory_stats['rss'] = all_stats['memory_stats']['stats']['rss'] - elif 'total_rss' in all_stats['memory_stats']['stats']: - memory_stats['rss'] = all_stats['memory_stats']['stats']['total_rss'] - else: - memory_stats['rss'] = None - memory_stats['cache'] = all_stats['memory_stats']['stats'].get('cache', None) - memory_stats['max_usage'] = all_stats['memory_stats'].get('max_usage', None) - except (KeyError, TypeError) as e: - # all_stats do not have MEM information - logger.debug("docker plugin - Cannot grab MEM usage for container {} ({})".format(container_id, e)) - logger.debug(all_stats) - # Return the stats - return memory_stats - - def get_docker_network(self, container_id, all_stats): - """Return the container network usage using the Docker API (v1.0 or higher). - - Input: id is the full container id - Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}. - with: - time_since_update: number of seconds elapsed between the latest grab - rx: Number of bytes received - tx: Number of bytes transmitted - """ - # Init the returned dict - network_new = {} - - # Read the rx/tx stats (in bytes) - try: - net_stats = all_stats["networks"] - except KeyError as e: - # all_stats do not have NETWORK information - logger.debug("docker plugin - Cannot grab NET usage for container {} ({})".format(container_id, e)) - logger.debug(all_stats) - # No fallback available... 
- return network_new - - # Previous network interface stats are stored in the self.network_old variable - # By storing time data we enable Rx/s and Tx/s calculations in the XML/RPC API, which would otherwise - # be overly difficult work for users of the API - try: - network_new['cumulative_rx'] = net_stats["eth0"]["rx_bytes"] - network_new['cumulative_tx'] = net_stats["eth0"]["tx_bytes"] - except KeyError as e: - # all_stats do not have INTERFACE information - logger.debug( - "docker plugin - Cannot grab network interface usage for container {} ({})".format(container_id, e) - ) - logger.debug(all_stats) - else: - network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id)) - if container_id in self.network_old: - network_new['rx'] = network_new['cumulative_rx'] - self.network_old[container_id]['cumulative_rx'] - network_new['tx'] = network_new['cumulative_tx'] - self.network_old[container_id]['cumulative_tx'] - - # Save stats to compute next bitrate - self.network_old[container_id] = network_new - - # Return the stats - return network_new - - def get_docker_io(self, container_id, all_stats): - """Return the container IO usage using the Docker API (v1.0 or higher). - - Input: id is the full container id - Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}. - with: - time_since_update: number of seconds elapsed between the latest grab - ior: Number of bytes read - iow: Number of bytes written - """ - # Init the returned dict - io_new = {} - - # Read the ior/iow stats (in bytes) - try: - io_stats = all_stats["blkio_stats"] - except KeyError as e: - # all_stats do not have io information - logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e)) - logger.debug(all_stats) - # No fallback available... 
- return io_new - - # Previous io interface stats are stored in the self.io_old variable - # By storing time data we enable IoR/s and IoW/s calculations in the - # XML/RPC API, which would otherwise be overly difficult work - # for users of the API - try: - io_service_bytes_recursive = io_stats['io_service_bytes_recursive'] - - # Read IOR and IOW value in the structure list of dict - io_new['cumulative_ior'] = [i for i in io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value'] - io_new['cumulative_iow'] = [i for i in io_service_bytes_recursive if i['op'].lower() == 'write'][0]['value'] - except (TypeError, IndexError, KeyError, AttributeError) as e: - # all_stats do not have io information - logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e)) - else: - io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{}'.format(container_id)) - if container_id in self.io_old: - io_new['ior'] = io_new['cumulative_ior'] - self.io_old[container_id]['cumulative_ior'] - io_new['iow'] = io_new['cumulative_iow'] - self.io_old[container_id]['cumulative_iow'] - - # Save stats to compute next bitrate - self.io_old[container_id] = io_new - - # Return the stats - return io_new + version, containers = self.podman_client.update(all_tag=self._all_tag()) + return {"version": version, "containers": containers} def get_user_ticks(self): """Return the user ticks by reading the environment variable.""" @@ -701,62 +384,6 @@ class Plugin(GlancesPlugin): return 'CAREFUL' -class ThreadContainerGrabber(threading.Thread): - """ - Specific thread to grab container stats. - - stats is a dict - """ - - def __init__(self, container): - """Init the class. 
- - container: instance of Container returned by Docker or Podman client - """ - super(ThreadContainerGrabber, self).__init__() - # Event needed to stop properly the thread - self._stopper = threading.Event() - # The docker-py return stats as a stream - self._container = container - # The class return the stats as a dict - self._stats = {} - logger.debug("docker plugin - Create thread for container {}".format(self._container.name)) - - def run(self): - """Grab the stats. - - Infinite loop, should be stopped by calling the stop() method - """ - try: - for i in self._container.stats(decode=True): - self._stats = i - time.sleep(0.1) - if self.stopped(): - break - except Exception as e: - logger.debug("docker plugin - Exception thrown during run ({})".format(e)) - self.stop() - - @property - def stats(self): - """Stats getter.""" - return self._stats - - @stats.setter - def stats(self, value): - """Stats setter.""" - self._stats = value - - def stop(self, timeout=None): - """Stop the thread.""" - logger.debug("docker plugin - Close thread for container {}".format(self._container.name)) - self._stopper.set() - - def stopped(self): - """Return True is the thread is stopped.""" - return self._stopper.is_set() - - def sort_docker_stats(stats): # Sort Docker stats using the same function than processes sort_by = glances_processes.sort_key From 928752a453cc027a9fae1f49f6cf00be66425d64 Mon Sep 17 00:00:00 2001 From: Raz Crimson <52282402+RazCrimson@users.noreply.github.com> Date: Wed, 15 Feb 2023 00:08:02 +0530 Subject: [PATCH 5/7] fix: containers (Podman) - wrong response format Ref: https://docs.podman.io/en/latest/Reference.html --- glances/plugins/containers/glances_podman.py | 136 +++++-------------- 1 file changed, 37 insertions(+), 99 deletions(-) diff --git a/glances/plugins/containers/glances_podman.py b/glances/plugins/containers/glances_podman.py index 992ca2a1..ff2119d4 100644 --- a/glances/plugins/containers/glances_podman.py +++ 
b/glances/plugins/containers/glances_podman.py @@ -1,7 +1,7 @@ """Podman Extension unit for Glances' Containers plugin.""" from datetime import datetime -from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_value_to_float +from glances.compat import iterkeys, itervalues, nativestr, pretty_date from glances.logger import logger from glances.plugins.containers.stats_fetcher import StatsFetcher @@ -18,112 +18,50 @@ else: class PodmanStatsFetcher(StatsFetcher): + MANDATORY_FIELDS = ["CPU", "MemUsage", "MemLimit", "NetInput", "NetOutput", "BlockInput", "BlockOutput"] + + @property + def stats(self): + if self._raw_stats["Error"]: + logger.error("containers plugin - Stats fetching failed: {}".format(self._raw_stats["Error"])) + logger.error(self._raw_stats) + + return self._raw_stats["Stats"][0] + @property def activity_stats(self): - io_stats = self._get_io_stats() - cpu_stats = self._get_cpu_stats() - memory_stats = self._get_memory_stats() - network_stats = self._get_network_stats() + result_stats = {"cpu": {}, "memory": {}, "io": {}, "network": {}} - computed_stats = { - "io": io_stats or {}, - "memory": memory_stats or {}, - "network": network_stats or {}, - "cpu": cpu_stats or {"total": 0.0}, - } - return computed_stats - - def _get_cpu_stats(self): - """Return the container CPU usage. - - Output: a dict {'total': 1.49} - """ - if "cpu_percent" not in self.stats: - logger.debug("containers plugin - Missing CPU usage fields for container {}".format(self._container.id)) + if any(field not in self.stats for field in self.MANDATORY_FIELDS): + logger.debug("containers plugin - Missing mandatory fields for container {}".format(self._container.id)) logger.debug(self.stats) - return None - - cpu_usage = string_value_to_float(self.stats["cpu_percent"].rstrip("%")) - return {"total": cpu_usage} - - def _get_memory_stats(self): - """Return the container MEMORY. 
- - Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...} - """ - if "mem_usage" not in self.stats or "/" not in self.stats["mem_usage"]: - logger.debug("containers plugin - Missing MEM usage fields for container {}".format(self._container.id)) - logger.debug(self.stats) - return None - - memory_usage_str = self.stats["mem_usage"] - usage_str, limit_str = memory_usage_str.split("/") + return result_stats try: - usage = string_value_to_float(usage_str) - limit = string_value_to_float(limit_str) - except ValueError as e: - logger.debug("containers plugin - Compute MEM usage failed for container {}".format(self._container.id)) + cpu_usage = float(self.stats.get("CPU", 0)) + + mem_usage = float(self.stats["MemUsage"]) + mem_limit = float(self.stats["MemLimit"]) + + rx = float(self.stats["NetInput"]) + tx = float(self.stats["NetOutput"]) + + ior = float(self.stats["BlockInput"]) + iow = float(self.stats["BlockOutput"]) + + # Hardcode `time_since_update` to 1 as podman already sends the calculated rate + result_stats = { + "cpu": {"total": cpu_usage}, + "memory": {"usage": mem_usage, "limit": mem_limit}, + "io": {"ior": ior, "iow": iow, "time_since_update": 1}, + "network": {"rx": rx, "tx": tx, "time_since_update": 1}, + } + except ValueError: + logger.debug("containers plugin - Non float stats values found for container {}".format(self._container.id)) logger.debug(self.stats) - return None + return result_stats - return {"usage": usage, "limit": limit} - - def _get_network_stats(self): - """Return the container network usage using the Docker API (v1.0 or higher). - - Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}. 
- with: - time_since_update: number of seconds elapsed between the latest grab - rx: Number of bytes received - tx: Number of bytes transmitted - """ - if "net_io" not in self.stats or "/" not in self.stats["net_io"]: - logger.debug("containers plugin - Missing Network usage fields for container {}".format(self._container.id)) - logger.debug(self.stats) - return None - - net_io_str = self.stats["net_io"] - rx_str, tx_str = net_io_str.split("/") - - try: - rx = string_value_to_float(rx_str) - tx = string_value_to_float(tx_str) - except ValueError as e: - logger.debug("containers plugin - Compute Network usage failed for container {}".format(self._container.id)) - logger.debug(self.stats) - return None - - # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculated procedure - return {"rx": rx, "tx": tx, "time_since_update": 1} - - def _get_io_stats(self): - """Return the container IO usage using the Docker API (v1.0 or higher). - - Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}. 
- with: - time_since_update: number of seconds elapsed between the latest grab - ior: Number of bytes read - iow: Number of bytes written - """ - if "block_io" not in self.stats or "/" not in self.stats["block_io"]: - logger.debug("containers plugin - Missing BlockIO usage fields for container {}".format(self._container.id)) - logger.debug(self.stats) - return None - - block_io_str = self.stats["block_io"] - ior_str, iow_str = block_io_str.split("/") - - try: - ior = string_value_to_float(ior_str) - iow = string_value_to_float(iow_str) - except ValueError as e: - logger.debug("containers plugin - Compute BlockIO usage failed for container {}".format(self._container.id)) - logger.debug(self.stats) - return None - - # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculated procedure - return {"ior": ior, "iow": iow, "time_since_update": 1} + return result_stats class PodmanContainersExtension: From 7c3fe93226484ceb3d4ff4b13978593b67a1cd89 Mon Sep 17 00:00:00 2001 From: Raz Crimson <52282402+RazCrimson@users.noreply.github.com> Date: Sat, 18 Feb 2023 23:37:16 +0530 Subject: [PATCH 6/7] chg: containers Plugin - basic pod support StatsFetcher -> StatsStreamer --- glances/plugins/containers/glances_docker.py | 85 ++++----- glances/plugins/containers/glances_podman.py | 191 ++++++++++++++++--- glances/plugins/containers/stats_fetcher.py | 72 ------- glances/plugins/containers/stats_streamer.py | 76 ++++++++ glances/plugins/glances_containers.py | 26 ++- 5 files changed, 299 insertions(+), 151 deletions(-) delete mode 100644 glances/plugins/containers/stats_fetcher.py create mode 100644 glances/plugins/containers/stats_streamer.py diff --git a/glances/plugins/containers/glances_docker.py b/glances/plugins/containers/glances_docker.py index 11ea2ef9..a642db9f 100644 --- a/glances/plugins/containers/glances_docker.py +++ b/glances/plugins/containers/glances_docker.py @@ -1,10 +1,9 @@ """Docker Extension unit for Glances' Containers plugin.""" 
-import threading import time from glances.compat import iterkeys, itervalues, nativestr, pretty_date from glances.logger import logger -from glances.plugins.containers.stats_fetcher import StatsFetcher +from glances.plugins.containers.stats_streamer import StatsStreamer # Docker-py library (optional and Linux-only) # https://github.com/docker/docker-py @@ -19,48 +18,43 @@ else: import_docker_error_tag = False -class DockerStatsFetcher(StatsFetcher): +class DockerStatsFetcher: MANDATORY_MEMORY_FIELDS = ["usage", 'limit'] def __init__(self, container): - super().__init__(container) - # Lock to avoid the daemon thread updating stats when main thread reads the stats - self._stats_lock = threading.Lock() + self._container = container # Previous computes stats are stored in the self._old_computed_stats variable - # By storing time data we enable IoR/s and IoW/s calculations in the XML/RPC API, which would otherwise - # be overly difficult work for users of the API + # We store time data to enable IoR/s & IoW/s calculations to avoid complexity for consumers of the APIs exposed. 
self._old_computed_stats = {} # Last time when output stats (results) were computed - self._last_stats_output_time = 0 - # Last time when the raw_stats were updated by worker thread - self._last_raws_stats_update_time = 1 + self._last_stats_computed_time = 0 + + # Threaded Streamer + stats_iterable = container.stats(decode=True) + self._streamer = StatsStreamer(stats_iterable, initial_stream_value={}) + + def _log_debug(self, msg, exception=None): + logger.debug("containers (Docker) ID: {} - {} ({}) ".format(self._container.id, msg, exception)) + logger.debug(self._streamer.stats) + + def stop(self): + self._streamer.stop() @property def activity_stats(self): """Activity Stats - Each successive access of activity_stats will cause computation of activity_stats from raw_stats + Each successive access of activity_stats will cause computation of activity_stats """ computed_activity_stats = self._compute_activity_stats() self._old_computed_stats = computed_activity_stats - self._last_stats_output_time = time.time() + self._last_stats_computed_time = time.time() return computed_activity_stats - def _pre_raw_stats_update_hook(self): - self._stats_lock.acquire() - - def _post_raw_stats_update_hook(self): - self._last_raws_stats_update_time = time.time() - self._stats_lock.release() - - @property - def time_since_update(self): - return self._last_raws_stats_update_time - self._last_stats_output_time - def _compute_activity_stats(self): - with self._stats_lock: + with self._streamer.result_lock: io_stats = self._get_io_stats() cpu_stats = self._get_cpu_stats() memory_stats = self._get_memory_stats() @@ -74,6 +68,11 @@ class DockerStatsFetcher(StatsFetcher): } return computed_stats + @property + def time_since_update(self): + # In case no update, default to 1 + return max(1, self._streamer.last_update_time - self._last_stats_computed_time) + def _get_cpu_stats(self): """Return the container CPU usage. 
@@ -82,8 +81,8 @@ class DockerStatsFetcher(StatsFetcher): stats = {'total': 0.0} try: - cpu_stats = self.stats['cpu_stats'] - precpu_stats = self.stats['precpu_stats'] + cpu_stats = self._streamer.stats['cpu_stats'] + precpu_stats = self._streamer.stats['precpu_stats'] cpu = {'system': cpu_stats['system_cpu_usage'], 'total': cpu_stats['cpu_usage']['total_usage']} precpu = {'system': precpu_stats['system_cpu_usage'], 'total': precpu_stats['cpu_usage']['total_usage']} @@ -93,8 +92,7 @@ class DockerStatsFetcher(StatsFetcher): # the corresponding cpu_usage.percpu_usage array should be used. cpu['nb_core'] = cpu_stats.get('online_cpus') or len(cpu_stats['cpu_usage']['percpu_usage'] or []) except KeyError as e: - logger.debug("containers plugin - Can't grab CPU stat for container {} ({})".format(self._container.id, e)) - logger.debug(self.stats) + self._log_debug("Can't grab CPU stats", e) return None try: @@ -103,9 +101,7 @@ class DockerStatsFetcher(StatsFetcher): # CPU usage % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0 stats['total'] = (cpu_delta / system_cpu_delta) * cpu['nb_core'] * 100.0 except TypeError as e: - msg = "containers plugin - Can't compute CPU usage for container {} ({})".format(self._container.id, e) - logger.debug(msg) - logger.debug(self.stats) + self._log_debug("Can't compute CPU usage", e) return None # Return the stats @@ -116,12 +112,11 @@ class DockerStatsFetcher(StatsFetcher): Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...} """ - memory_stats = self.stats.get('memory_stats') + memory_stats = self._streamer.stats.get('memory_stats') # Checks for memory_stats & mandatory fields if not memory_stats or any(field not in memory_stats for field in self.MANDATORY_MEMORY_FIELDS): - logger.debug("containers plugin - Missing MEM usage fields for container {}".format(self._container.id)) - logger.debug(self.stats) + self._log_debug("Missing MEM usage fields") return None stats = {field: memory_stats[field] for 
field in self.MANDATORY_MEMORY_FIELDS} @@ -132,9 +127,7 @@ class DockerStatsFetcher(StatsFetcher): stats['max_usage'] = detailed_stats.get('max_usage') stats['cache'] = detailed_stats.get('cache') except (KeyError, TypeError) as e: - # self.stats do not have MEM information - logger.debug("containers plugin - Can't grab MEM usage for container {} ({})".format(self._container.id, e)) - logger.debug(self.stats) + self._log_debug("Can't grab MEM usage", e) # stats do not have MEM information return None # Return the stats @@ -149,12 +142,11 @@ class DockerStatsFetcher(StatsFetcher): rx: Number of bytes received tx: Number of bytes transmitted """ - eth0_stats = self.stats.get('networks', {}).get('eth0') + eth0_stats = self._streamer.stats.get('networks', {}).get('eth0') # Checks for net_stats & mandatory fields if not eth0_stats or any(field not in eth0_stats for field in ['rx_bytes', 'tx_bytes']): - logger.debug("containers plugin - Missing Network usage fields for container {}".format(self._container.id)) - logger.debug(self.stats) + self._log_debug("Missing Network usage fields") return None # Read the rx/tx stats (in bytes) @@ -179,12 +171,11 @@ class DockerStatsFetcher(StatsFetcher): ior: Number of bytes read iow: Number of bytes written """ - io_service_bytes_recursive = self.stats.get('blkio_stats', {}).get('io_service_bytes_recursive') + io_service_bytes_recursive = self._streamer.stats.get('blkio_stats', {}).get('io_service_bytes_recursive') # Checks for net_stats if not io_service_bytes_recursive: - logger.debug("containers plugin - Missing blockIO usage fields for container {}".format(self._container.id)) - logger.debug(self.stats) + self._log_debug("Missing blockIO usage fields") return None # Read the ior/iow stats (in bytes) @@ -193,11 +184,7 @@ class DockerStatsFetcher(StatsFetcher): cumulative_ior = [i for i in io_service_bytes_recursive if i['op'].lower() == 'read'][0]['value'] cumulative_iow = [i for i in io_service_bytes_recursive if i['op'].lower() 
== 'write'][0]['value'] except (TypeError, IndexError, KeyError, AttributeError) as e: - # self.stats do not have io information - logger.debug( - "containers plugin - Can't grab blockIO usage for container {} ({})".format(self._container.id, e) - ) - logger.debug(self.stats) + self._log_debug("Can't grab blockIO usage", e) # stats do not have io information return None stats = {'cumulative_ior': cumulative_ior, 'cumulative_iow': cumulative_iow} diff --git a/glances/plugins/containers/glances_podman.py b/glances/plugins/containers/glances_podman.py index ff2119d4..602d6d52 100644 --- a/glances/plugins/containers/glances_podman.py +++ b/glances/plugins/containers/glances_podman.py @@ -1,9 +1,10 @@ """Podman Extension unit for Glances' Containers plugin.""" +import json from datetime import datetime -from glances.compat import iterkeys, itervalues, nativestr, pretty_date +from glances.compat import iterkeys, itervalues, nativestr, pretty_date, string_value_to_float from glances.logger import logger -from glances.plugins.containers.stats_fetcher import StatsFetcher +from glances.plugins.containers.stats_streamer import StatsStreamer # Podman library (optional and Linux-only) # https://pypi.org/project/podman/ @@ -17,37 +18,51 @@ else: import_podman_error_tag = False -class PodmanStatsFetcher(StatsFetcher): +class PodmanContainerStatsFetcher: MANDATORY_FIELDS = ["CPU", "MemUsage", "MemLimit", "NetInput", "NetOutput", "BlockInput", "BlockOutput"] + def __init__(self, container): + self._container = container + + # Threaded Streamer + stats_iterable = container.stats(decode=True) + self._streamer = StatsStreamer(stats_iterable, initial_stream_value={}) + + def _log_debug(self, msg, exception=None): + logger.debug("containers (Podman) ID: {} - {} ({})".format(self._container.id, msg, exception)) + logger.debug(self._streamer.stats) + + def stop(self): + self._streamer.stop() + @property def stats(self): - if self._raw_stats["Error"]: - logger.error("containers plugin - 
Stats fetching failed: {}".format(self._raw_stats["Error"])) - logger.error(self._raw_stats) + stats = self._streamer.stats + if stats["Error"]: + self._log_debug("Stats fetching failed", stats["Error"]) - return self._raw_stats["Stats"][0] + return stats["Stats"][0] @property def activity_stats(self): result_stats = {"cpu": {}, "memory": {}, "io": {}, "network": {}} + api_stats = self.stats - if any(field not in self.stats for field in self.MANDATORY_FIELDS): - logger.debug("containers plugin - Missing mandatory fields for container {}".format(self._container.id)) - logger.debug(self.stats) + if any(field not in api_stats for field in self.MANDATORY_FIELDS): + self._log_debug("Missing mandatory fields") return result_stats try: - cpu_usage = float(self.stats.get("CPU", 0)) + cpu_usage = float(api_stats.get("CPU", 0)) - mem_usage = float(self.stats["MemUsage"]) - mem_limit = float(self.stats["MemLimit"]) + mem_usage = float(api_stats["MemUsage"]) + mem_limit = float(api_stats["MemLimit"]) - rx = float(self.stats["NetInput"]) - tx = float(self.stats["NetOutput"]) + rx = float(api_stats["NetInput"]) + tx = float(api_stats["NetOutput"]) - ior = float(self.stats["BlockInput"]) - iow = float(self.stats["BlockOutput"]) + ior = float(api_stats["BlockInput"]) + iow = float(api_stats["BlockOutput"]) # Hardcode `time_since_update` to 1 as podman already sends the calculated rate result_stats = { @@ -56,14 +71,136 @@ class PodmanStatsFetcher(StatsFetcher): "io": {"ior": ior, "iow": iow, "time_since_update": 1}, "network": {"rx": rx, "tx": tx, "time_since_update": 1}, } - except ValueError: - logger.debug("containers plugin - Non float stats values found for container {}".format(self._container.id)) - logger.debug(self.stats) - return result_stats + except ValueError as e: + self._log_debug("Non float stats values found", e) return result_stats +class PodmanPodStatsFetcher: + def __init__(self, pod_manager): + self._pod_manager = pod_manager + + # Threaded Streamer + 
stats_iterable = pod_manager.stats(stream=True, decode=True) + self._streamer = StatsStreamer(stats_iterable, initial_stream_value={}) + + def _log_debug(self, msg, exception=None): + logger.debug("containers (Podman): Pod Manager - {} ({})".format(msg, exception)) + logger.debug(self._streamer.stats) + + def stop(self): + self._streamer.stop() + + @property + def activity_stats(self): + result_stats = {} + container_stats = self._streamer.stats + for stat in container_stats: + io_stats = self._get_io_stats(stat) + cpu_stats = self._get_cpu_stats(stat) + memory_stats = self._get_memory_stats(stat) + network_stats = self._get_network_stats(stat) + + computed_stats = { + "name": stat["Name"], + "cid": stat["CID"], + "pod_id": stat["Pod"], + "io": io_stats or {}, + "memory": memory_stats or {}, + "network": network_stats or {}, + "cpu": cpu_stats or {"total": 0.0}, + } + result_stats[stat["CID"]] = computed_stats + + return result_stats + + def _get_cpu_stats(self, stats): + """Return the container CPU usage. + + Output: a dict {'total': 1.49} + """ + if "CPU" not in stats: + self._log_debug("Missing CPU usage fields") + return None + + cpu_usage = string_value_to_float(stats["CPU"].rstrip("%")) + return {"total": cpu_usage} + + def _get_memory_stats(self, stats): + """Return the container MEMORY. + + Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...} + """ + if "MemUsage" not in stats or "/" not in stats["MemUsage"]: + self._log_debug("Missing MEM usage fields") + return None + + memory_usage_str = stats["MemUsage"] + usage_str, limit_str = memory_usage_str.split("/") + + try: + usage = string_value_to_float(usage_str) + limit = string_value_to_float(limit_str) + except ValueError as e: + self._log_debug("Compute MEM usage failed", e) + return None + + return {"usage": usage, "limit": limit} + + def _get_network_stats(self, stats): + """Return the container network usage using the Docker API (v1.0 or higher). 
+
+        Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
+        with:
+            time_since_update: number of seconds elapsed between the latest grab
+            rx: Number of bytes received
+            tx: Number of bytes transmitted
+        """
+        if "NetIO" not in stats or "/" not in stats["NetIO"]:
+            self._log_debug("Missing NetIO usage fields")
+            return None
+
+        net_io_str = stats["NetIO"]
+        rx_str, tx_str = net_io_str.split("/")
+
+        try:
+            rx = string_value_to_float(rx_str)
+            tx = string_value_to_float(tx_str)
+        except ValueError as e:
+            self._log_debug("Compute Network usage failed", e)
+            return None
+
+        # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculated procedure
+        return {"rx": rx, "tx": tx, "time_since_update": 1}
+
+    def _get_io_stats(self, stats):
+        """Return the container IO usage as reported by Podman.
+
+        Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}.
+        with:
+            time_since_update: number of seconds elapsed between the latest grab
+            ior: Number of bytes read
+            iow: Number of bytes written
+        """
+        if "BlockIO" not in stats or "/" not in stats["BlockIO"]:
+            self._log_debug("Missing BlockIO usage fields")
+            return None
+
+        block_io_str = stats["BlockIO"]
+        ior_str, iow_str = block_io_str.split("/")
+
+        try:
+            ior = string_value_to_float(ior_str)
+            iow = string_value_to_float(iow_str)
+        except ValueError as e:
+            self._log_debug("Compute BlockIO usage failed", e)
+            return None
+
+        # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculated procedure
+        return {"ior": ior, "iow": iow, "time_since_update": 1}
+
+
 class PodmanContainersExtension:
     """Glances' Containers Plugin's Docker Extension unit"""

@@ -77,6 +214,7 @@ class PodmanContainersExtension:
         self.ext_name = "Podman (Containers)"
         self.podman_sock = podman_sock
         self.stats_fetchers = {}
+        self.pod_fetcher = None
         self._version = {}
         self.connect()
@@ -118,6 +256,8 @@ class PodmanContainersExtension:
         # Issue #1152: Podman module doesn't export 
details about stopped containers # The Containers/all key of the configuration file should be set to True containers = self.client.containers.list(all=all_tag) + if not self.pod_fetcher: + self.pod_fetcher = PodmanPodStatsFetcher(self.client.pods) except Exception as e: logger.error("{} plugin - Cannot get containers list ({})".format(self.ext_name, e)) return version_stats, [] @@ -128,7 +268,7 @@ class PodmanContainersExtension: # StatsFetcher did not exist in the internal dict # Create it, add it to the internal dict logger.debug("{} plugin - Create thread for container {}".format(self.ext_name, container.id[:12])) - self.stats_fetchers[container.id] = PodmanStatsFetcher(container) + self.stats_fetchers[container.id] = PodmanContainerStatsFetcher(container) # Stop threads for non-existing containers absent_containers = set(iterkeys(self.stats_fetchers)) - set(c.id for c in containers) @@ -141,6 +281,13 @@ class PodmanContainersExtension: # Get stats for all containers container_stats = [self.generate_stats(container) for container in containers] + + pod_stats = self.pod_fetcher.activity_stats + for stats in container_stats: + if stats["Id"][:12] in pod_stats: + stats["pod_name"] = pod_stats[stats["Id"][:12]]["name"] + stats["pod_id"] = pod_stats[stats["Id"][:12]]["pod_id"] + return version_stats, container_stats @property diff --git a/glances/plugins/containers/stats_fetcher.py b/glances/plugins/containers/stats_fetcher.py deleted file mode 100644 index ed08f4ce..00000000 --- a/glances/plugins/containers/stats_fetcher.py +++ /dev/null @@ -1,72 +0,0 @@ -import threading -import time - -from glances.logger import logger - - -class StatsFetcher: - # Should be an Abstract Base Class - # Inherit from abc.ABC by Glancesv4 (not inheriting for compatibility with py2) - """ - Streams the container stats through threading - - Use `StatsFetcher.stats` to access the streamed results - """ - - def __init__(self, container): - """Init the class. 
- - container: instance of Container returned by Docker or Podman client - """ - # The docker-py return stats as a stream - self._container = container - # Container stats are maintained as dicts - self._raw_stats = {} - # Use a Thread to stream stats - self._thread = threading.Thread(target=self._fetch_stats, daemon=True) - # Event needed to stop properly the thread - self._stopper = threading.Event() - - self._thread.start() - logger.debug("docker plugin - Create thread for container {}".format(self._container.name)) - - def _fetch_stats(self): - """Grab the stats. - - Infinite loop, should be stopped by calling the stop() method - """ - try: - for new_stats in self._container.stats(decode=True): - self._pre_raw_stats_update_hook() - self._raw_stats = new_stats - self._post_raw_stats_update_hook() - - time.sleep(0.1) - if self.stopped(): - break - - except Exception as e: - logger.debug("docker plugin - Exception thrown during run ({})".format(e)) - self.stop() - - def stopped(self): - """Return True is the thread is stopped.""" - return self._stopper.is_set() - - def stop(self, timeout=None): - """Stop the thread.""" - logger.debug("docker plugin - Close thread for container {}".format(self._container.name)) - self._stopper.set() - - @property - def stats(self): - """Raw Stats getter.""" - return self._raw_stats - - def _pre_raw_stats_update_hook(self): - """Hook that runs before worker thread updates the raw_stats""" - pass - - def _post_raw_stats_update_hook(self): - """Hook that runs after worker thread updates the raw_stats""" - pass diff --git a/glances/plugins/containers/stats_streamer.py b/glances/plugins/containers/stats_streamer.py new file mode 100644 index 00000000..0bf7d38e --- /dev/null +++ b/glances/plugins/containers/stats_streamer.py @@ -0,0 +1,76 @@ +import threading +import time + +from glances.logger import logger + + +class StatsStreamer: + """ + Utility class to stream an iterable using a background / daemon Thread + + Use 
`StatsStreamer.stats` to access the latest streamed results
+    """
+
+    def __init__(self, iterable, initial_stream_value=None):
+        """
+        iterable: an Iterable instance that needs to be streamed
+        """
+        self._iterable = iterable
+        # Iterable results are stored here
+        self._raw_result = initial_stream_value
+        # Use a Thread to stream iterable (daemon=True to automatically kill thread when main process dies)
+        self._thread = threading.Thread(target=self._stream_results, daemon=True)
+        # Event needed to stop the thread manually
+        self._stopper = threading.Event()
+        # Lock to avoid the daemon thread updating stats when main thread reads the stats
+        self.result_lock = threading.Lock()
+        # Last result streamed time (initial val 0)
+        self._last_update_time = 0
+
+        self._thread.start()
+
+    def stop(self):
+        """Stop the thread."""
+        self._stopper.set()
+
+    def stopped(self):
+        """Return True if the thread is stopped."""
+        return self._stopper.is_set()
+
+    def _stream_results(self):
+        """Grab the stats. 
+ + Infinite loop, should be stopped by calling the stop() method + """ + try: + for res in self._iterable: + self._pre_update_hook() + self._raw_result = res + self._post_update_hook() + + time.sleep(0.1) + if self.stopped(): + break + + except Exception as e: + logger.debug("docker plugin - Exception thrown during run ({})".format(e)) + self.stop() + + def _pre_update_hook(self): + """Hook that runs before worker thread updates the raw_stats""" + self.result_lock.acquire() + + def _post_update_hook(self): + """Hook that runs after worker thread updates the raw_stats""" + self._last_update_time = time.time() + self.result_lock.release() + + @property + def stats(self): + """Raw Stats getter.""" + return self._raw_result + + @property + def last_update_time(self): + """Raw Stats getter.""" + return self._last_update_time diff --git a/glances/plugins/glances_containers.py b/glances/plugins/glances_containers.py index de1a89fc..ddd6c837 100644 --- a/glances/plugins/glances_containers.py +++ b/glances/plugins/glances_containers.py @@ -240,6 +240,10 @@ class Plugin(GlancesPlugin): if not self.stats or 'containers' not in self.stats or len(self.stats['containers']) == 0 or self.is_disabled(): return ret + show_pod_name = False + if any(ct.get("pod_name") for ct in self.stats["containers"]): + show_pod_name = True + # Build the string message # Title msg = '{}'.format('CONTAINERS') @@ -259,6 +263,10 @@ class Plugin(GlancesPlugin): self.config.get_int_value('containers', 'max_name_size', default=20) if self.config is not None else 20, len(max(self.stats['containers'], key=lambda x: len(x['name']))['name']), ) + + if show_pod_name: + msg = ' {:{width}}'.format('Pod', width=12) + ret.append(self.curse_add_line(msg)) msg = ' {:{width}}'.format('Name', width=name_max_width) ret.append(self.curse_add_line(msg, 'SORT' if self.sort_key == 'name' else 'DEFAULT')) msg = '{:>10}'.format('Status') @@ -284,6 +292,8 @@ class Plugin(GlancesPlugin): # Data for container in 
self.stats['containers']: ret.append(self.curse_new_line()) + if show_pod_name: + ret.append(self.curse_add_line(' {:{width}}'.format(container.get("pod_id", " - "), width=12))) # Name ret.append(self.curse_add_line(self._msg_name(container=container, max_width=name_max_width))) # Status @@ -338,10 +348,10 @@ class Plugin(GlancesPlugin): unit = 'b' try: value = ( - self.auto_unit( - int(container['network']['rx'] // container['network']['time_since_update'] * to_bit) - ) - + unit + self.auto_unit( + int(container['network']['rx'] // container['network']['time_since_update'] * to_bit) + ) + + unit ) msg = '{:>7}'.format(value) except KeyError: @@ -349,10 +359,10 @@ class Plugin(GlancesPlugin): ret.append(self.curse_add_line(msg)) try: value = ( - self.auto_unit( - int(container['network']['tx'] // container['network']['time_since_update'] * to_bit) - ) - + unit + self.auto_unit( + int(container['network']['tx'] // container['network']['time_since_update'] * to_bit) + ) + + unit ) msg = ' {:<7}'.format(value) except KeyError: From 95b7b94b1f9345e0443aa8527f6e4ee052958e3c Mon Sep 17 00:00:00 2001 From: Raz Crimson <52282402+RazCrimson@users.noreply.github.com> Date: Sun, 19 Feb 2023 01:08:16 +0530 Subject: [PATCH 7/7] chg: containers Plugin - include engine name --- glances/plugins/glances_containers.py | 36 ++++++++++++++++++--------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/glances/plugins/glances_containers.py b/glances/plugins/glances_containers.py index ddd6c837..f68e38be 100644 --- a/glances/plugins/glances_containers.py +++ b/glances/plugins/glances_containers.py @@ -14,10 +14,8 @@ from copy import deepcopy from typing import Optional from glances.logger import logger -from glances.plugins.containers.glances_docker import ( - DockerContainersExtension, import_docker_error_tag) -from glances.plugins.containers.glances_podman import ( - PodmanContainersExtension, import_podman_error_tag) +from glances.plugins.containers.glances_docker import 
DockerContainersExtension, import_docker_error_tag +from glances.plugins.containers.glances_podman import PodmanContainersExtension, import_podman_error_tag from glances.plugins.glances_plugin import GlancesPlugin from glances.processes import glances_processes from glances.processes import sort_stats as sort_stats_processes @@ -179,11 +177,15 @@ class Plugin(GlancesPlugin): def update_docker(self): """Update Docker stats using the input method.""" version, containers = self.docker_extension.update(all_tag=self._all_tag()) + for container in containers: + container["engine"] = 'docker' return {"version": version, "containers": containers} def update_podman(self): """Update Podman stats.""" version, containers = self.podman_client.update(all_tag=self._all_tag()) + for container in containers: + container["engine"] = 'podman' return {"version": version, "containers": containers} def get_user_ticks(self): @@ -244,6 +246,10 @@ class Plugin(GlancesPlugin): if any(ct.get("pod_name") for ct in self.stats["containers"]): show_pod_name = True + show_engine_name = False + if len(set(ct["engine"] for ct in self.stats["containers"])) > 1: + show_engine_name = True + # Build the string message # Title msg = '{}'.format('CONTAINERS') @@ -264,6 +270,9 @@ class Plugin(GlancesPlugin): len(max(self.stats['containers'], key=lambda x: len(x['name']))['name']), ) + if show_engine_name: + msg = ' {:{width}}'.format('Engine', width=6) + ret.append(self.curse_add_line(msg)) if show_pod_name: msg = ' {:{width}}'.format('Pod', width=12) ret.append(self.curse_add_line(msg)) @@ -289,9 +298,12 @@ class Plugin(GlancesPlugin): ret.append(self.curse_add_line(msg)) msg = ' {:8}'.format('Command') ret.append(self.curse_add_line(msg)) + # Data for container in self.stats['containers']: ret.append(self.curse_new_line()) + if show_engine_name: + ret.append(self.curse_add_line(' {:{width}}'.format(container["engine"], width=6))) if show_pod_name: ret.append(self.curse_add_line(' 
{:{width}}'.format(container.get("pod_id", " - "), width=12))) # Name @@ -348,10 +360,10 @@ class Plugin(GlancesPlugin): unit = 'b' try: value = ( - self.auto_unit( - int(container['network']['rx'] // container['network']['time_since_update'] * to_bit) - ) - + unit + self.auto_unit( + int(container['network']['rx'] // container['network']['time_since_update'] * to_bit) + ) + + unit ) msg = '{:>7}'.format(value) except KeyError: @@ -359,10 +371,10 @@ class Plugin(GlancesPlugin): ret.append(self.curse_add_line(msg)) try: value = ( - self.auto_unit( - int(container['network']['tx'] // container['network']['time_since_update'] * to_bit) - ) - + unit + self.auto_unit( + int(container['network']['tx'] // container['network']['time_since_update'] * to_bit) + ) + + unit ) msg = ' {:<7}'.format(value) except KeyError: