"""
Goal: Invoke

    /usr/sbin/cagefsctl --update-etc
    /usr/sbin/cagefsctl --force-update-etc

    asynchronously. At production scale `cagefsctl --force-update-etc`
    tends to take too long, e.g.:

    # time cagefsctl --force-update-etc
    Updating users ...
    Updating user user523 ...
    Updating user user804 ...
    ...
    Updating user user269 ...
    Updating user user116 ...
    Updating user user121 ...
    Updating user user117 ...

    real    2m44.454s
    user    0m26.233s
    sys     0m19.972s
"""
import asyncio
import logging
import os
import subprocess
import time
from typing import Optional

from defence360agent.api import inactivity
from defence360agent.contracts.messages import MessageType
from defence360agent.contracts.plugins import MessageSink, expect
from defence360agent.subsys.persistent_state import load_state, save_state
from defence360agent.utils import timefun

_CAGEFSCTL_TOOL = "/usr/sbin/cagefsctl"
_WAIT_LOCK = "--wait-lock"
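# --wait-lock: wait for a concurrent cagefsctl instance to release its
# lock instead of failing right away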

logger = logging.getLogger(__name__)


class CageFS(MessageSink):
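    """Plugin that runs cagefsctl updates asynchronously.

    ConfigUpdate messages are put on a queue; a single consumer task
    de-duplicates pending requests and invokes cagefsctl for each.
    """
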
    async def create_sink(self, loop):
        self._loop = loop
        self._queue = asyncio.Queue()
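        # restore the timestamp of the last successful --force-update-etc,
        # so config changes older than it are not reprocessed after restart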
        self._last_force_update_ts = load_state("CageFS").get(
            "last_force_update_ts", 0
        )
        self._consumer_task = self._loop.create_task(self._consumer())

    async def shutdown(self):
        self._consumer_task.cancel()
        await self._consumer_task

        if self._queue.qsize():
            logger.warning("%d item(s) were not consumed", self._queue.qsize())

        save_state(
            "CageFS", {"last_force_update_ts": self._last_force_update_ts}
        )

    @expect(MessageType.ConfigUpdate)
    async def put_to_queue(self, message):
        config = message["conf"]
        username = getattr(config, "username", None)

        # Not every ConfigUpdate message means the merged config file
        # changed on disk. --force-update-etc is expensive, so make sure
        # the SystemConfig actually changed on disk, OR that it is a
        # UserConfig change, which is always processed.
        if username is not None or config.modified_since(
            self._last_force_update_ts
        ):
            self._queue.put_nowait(username)

    async def _consumer(self):
        """
        :raise never:
        """
        while True:
            try:
                commitconfig_username = await self._queue.get()

                # this check is needed because CageFS may be installed
                # after the Imunify agent has been installed/started
                if not os.path.exists(_CAGEFSCTL_TOOL):
                    continue

                # purge queue and eliminate duplicates
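                # (a None entry requests a full --force-update-etc run)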
                uniq = {commitconfig_username}
                try:
                    while True:
                        uniq.add(self._queue.get_nowait())
                except asyncio.QueueEmpty:
                    pass

                with inactivity.track.task("cagefs"):
                    for username in uniq:
                        await self._commitconfig(username)
            except asyncio.CancelledError:
                # We are done
                return
            except Exception:
                logger.exception("Something went wrong")

                # never stop the consumer loop
                continue

    @timefun(log=logger.info)
    async def _commitconfig(self, username: Optional[str]):
        """
        :raise asyncio.CancelledError:
        :raise Exception:
        """
        if username:
            cmd = [_CAGEFSCTL_TOOL, _WAIT_LOCK, "--update-etc", username]
        else:
            cmd = [_CAGEFSCTL_TOOL, _WAIT_LOCK, "--force-update-etc"]

        try:
            proc = await asyncio.create_subprocess_exec(
                *cmd,
                stdin=subprocess.DEVNULL,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                # the child must not outlive an agent stop/restart,
                # since it holds our stdout/stderr pipes
                start_new_session=False,
            )

            # stdout and stderr are consumed line by line and forwarded
            # to the agent log; communicate() would only see EOF after
            # that, so just wait for the exit code
            await asyncio.gather(
                self._passthru_log(cmd, logging.DEBUG, proc.stdout),
                self._passthru_log(cmd, logging.WARNING, proc.stderr),
            )

            rc = await proc.wait()
        except asyncio.CancelledError:
            logger.warning("%r is terminated by CancelledError", cmd)
            raise
        else:
            if rc is None:
                logger.error("logic error: process has not terminated yet")
            elif rc:
                logger.error(
                    "%r failed with rc [%s]; output was logged above",
                    cmd,
                    rc,
                )
            else:
                logger.info("%r succeeded with rc [%s]", cmd, rc)
                if username is None:
                    self._last_force_update_ts = time.time()

    @staticmethod
    async def _passthru_log(cmd, loglevel, streamreader):
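        """Forward each line from *streamreader* to the agent log until EOF."""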
        while True:
            line = await streamreader.readline()
            if not line:  # EOF
                break
            logger.log(loglevel, "%r: %r", cmd, line)
