From a57863a41d2f14f1e85ee7fd0b4056347d0be9c2 Mon Sep 17 00:00:00 2001 From: Apisit Ritreungroj <38898766+apskhem@users.noreply.github.com> Date: Wed, 24 Jul 2024 19:25:41 +0700 Subject: [PATCH] feat: Earthly cache watcher utility (#269) * feat: initial commit * feat: initial file watcher * refactor: minor * feat: config * feat: interval * feat: handle size exceeding * feat: trigger events * refactor: check sizes * refactor: print * feat: journal * feat: conf * fix: logging * feat: pyproject * docs: add proper readme * fix: growth indexes * fix: logging * feat: default options * chore: format * chore: lintfix * ci: fix earthfile * fix: logging * fix: app.service * feat: config argument * refactor: service file * fix: service script * fix: params * fix: to simple servicei instead of forking * fix: watch dir location * feat: layer watching * chore: type annotation * chore: lint * chore: log layer name * fix: type * fix: first checks * chore: lintfix * fix: is file check * feat: init log * feat: log init * fix: log large layer * fix: layer name * fix: parameters and notes * refactor: minor code * feat: print number formatter * refactor: minor number formatter * feat: safe delete * feat: handle file accessing * feat: overall compacting * fix: default config path in systemd service * fix: default.conf * docs: systemd installation * feat: handle move * fix: growth index iteration * feat: trigger once * chore: warning to error * fix: empty set * fix: has triggered layer * fix: layer discard * feat: loguru * fix: markdownlint * chore: sort import * chore: lintfix * chore: rufflint fix * fix: log level info --------- Co-authored-by: Oleksandr Prokhorenko --- utilities/earthly-cache-watcher/Earthfile | 10 + utilities/earthly-cache-watcher/README.md | 110 +++++ utilities/earthly-cache-watcher/default.conf | 8 + utilities/earthly-cache-watcher/helper.py | 45 ++ utilities/earthly-cache-watcher/main.py | 384 ++++++++++++++++++ utilities/earthly-cache-watcher/poetry.lock | 90 
++++ .../earthly-cache-watcher/pyproject.toml | 36 ++ .../earthly-cache-watcher/watchdog.service | 11 + 8 files changed, 694 insertions(+) create mode 100644 utilities/earthly-cache-watcher/Earthfile create mode 100644 utilities/earthly-cache-watcher/README.md create mode 100644 utilities/earthly-cache-watcher/default.conf create mode 100644 utilities/earthly-cache-watcher/helper.py create mode 100644 utilities/earthly-cache-watcher/main.py create mode 100644 utilities/earthly-cache-watcher/poetry.lock create mode 100644 utilities/earthly-cache-watcher/pyproject.toml create mode 100644 utilities/earthly-cache-watcher/watchdog.service diff --git a/utilities/earthly-cache-watcher/Earthfile b/utilities/earthly-cache-watcher/Earthfile new file mode 100644 index 000000000..55d9ce2ea --- /dev/null +++ b/utilities/earthly-cache-watcher/Earthfile @@ -0,0 +1,10 @@ +VERSION 0.8 + +IMPORT github.com/input-output-hk/catalyst-ci/earthly/python:v3.1.7 AS python-ci + +check: + FROM python-ci+python-base + + COPY . . + + DO python-ci+CHECK \ No newline at end of file diff --git a/utilities/earthly-cache-watcher/README.md b/utilities/earthly-cache-watcher/README.md new file mode 100644 index 000000000..31984dcb1 --- /dev/null +++ b/utilities/earthly-cache-watcher/README.md @@ -0,0 +1,110 @@ + + +# Earthly Cache Watcher + +Logs an error when cache layers reach their maximum size limit. + +## Functionality + +* Watch files changes in a specified directory. +* Trigger events when either an individual file or +a watched directory grows beyond certain criteria. +* Main triggering criteria: single file size exceeds, watched directory size exceeds, +watched directory growth in size within an interval exceeds. + +## Configuration Parameters + +There are several options of configurable parameters: + +* `watch_dir` - A directory to watch recursively. (default: `.`) +* `large_layer_size` - A parameter to determine and detect an individual file +if reaches the criteria of a large-sized file. 
(default: `1073741824` bytes) +* `max_cache_size` - A parameter to determine `watch_dir` +if reaches the criteria. (default: `536870912000` bytes) +* `time_window` - The duration of time interval to detect growth +in size of `watch_dir`. (default: `10` secs) +* `max_time_window_growth_size` - A criterion to determine within an interval to detect +if `watch_dir` exceeds the size criteria. (default: `53687091200`) +* `log_file_accessing_err` - Logs errors occurring during file access. (default: `True`) + +Typically, these configuration values will be read from the specified file. + +## System Setup + +If the system has many files to watch, you should consider configuring this parameter +with `sysctl` to raise the maximum number of files to watch: + +```bash +sudo sysctl fs.inotify.max_user_watches=25000000 +echo 'fs.inotify.max_user_watches=25000000' | sudo tee -a /etc/sysctl.conf +``` + +Feel free to change the number of the parameter to fit your requirement. + +## Systemd Unit Installation + +Run the following commands to install the program as a unit in systemd service: + +```bash +systemctl daemon-reload +systemctl enable /path/to/your/watchdog.service +systemctl start watchdog +``` + +To view the status and logs, use these two commands: + +```bash +systemctl status watchdog +``` + +Or + +```bash +journalctl -xeu watchdog.service +``` + +## Logging Example + +Logging example using `loguru`: + +```json +{ + "text": "read config from '/root/catalyst-ci/utilities/earthly-cache-watcher/default.conf'\n", + "record": { + "elapsed": { + "repr": "0:00:00.007240", + "seconds": 0.00724 + }, + "exception": null, + "extra": {}, + "file": { + "name": "main.py", + "path": "/root/catalyst-ci/utilities/earthly-cache-watcher/main.py" + }, + "function": "main", + "level": { + "icon": "ℹ️", + "name": "INFO", + "no": 20 + }, + "line": 298, + "message": "read config from '/root/catalyst-ci/utilities/earthly-cache-watcher/default.conf'", + "module": "main", + "name": "__main__", + "process": { 
+ "id": 59917, + "name": "MainProcess" + }, + "thread": { + "id": 8615431168, + "name": "MainThread" + }, + "time": { + "repr": "2024-07-04 19:22:31.458044+07:00", + "timestamp": 1720095751.458044 + } + } +} +``` + +Note: The logging result is prettified; the actual result is a single-line message. diff --git a/utilities/earthly-cache-watcher/default.conf b/utilities/earthly-cache-watcher/default.conf new file mode 100644 index 000000000..da61ec76c --- /dev/null +++ b/utilities/earthly-cache-watcher/default.conf @@ -0,0 +1,8 @@ +# cspell: words runc overlayfs + +watch_dir = /var/lib/docker/volumes/earthly-satellite_earthly-tmp/_data/buildkit/runc-overlayfs/snapshots/snapshots +large_layer_size = 1073741824 # 1GB +max_cache_size = 536870912000 # 500GB +time_window = 10 # 10 secs +max_time_window_growth_size = 53687091200 # 50GB +log_file_accessing_err = True \ No newline at end of file diff --git a/utilities/earthly-cache-watcher/helper.py b/utilities/earthly-cache-watcher/helper.py new file mode 100644 index 000000000..c62188a56 --- /dev/null +++ b/utilities/earthly-cache-watcher/helper.py @@ -0,0 +1,45 @@ +import os + + +def get_subdirectory_name(working_dir_path: str, path: str): + """ + Extracts the direct subdirectory name from the given path within + the specified working directory. + + Parameters: + working_dir_path (str): The absolute path of the current working directory. + path (str): The absolute path from which to extract the direct subdir name. + + Returns: + str | None: The name of the direct subdirectory if the given path is within + the working directory; otherwise, None. 
+ + Example: + >>> working_dir = "/home/user/projects" + >>> given_path = "/home/user/projects/subdir1/file.txt" + >>> get_subdirectory_name(working_dir, given_path) + 'subdir1' + + >>> given_path_invalid = "/home/user/projects1/subdir1/file.txt" + >>> get_subdirectory_name(working_dir, given_path_invalid) + None + """ + working_dir_path = os.path.abspath(working_dir_path) + path = os.path.abspath(path) + + if ( + os.path.commonpath([working_dir_path]) + != os.path.commonpath([working_dir_path, path]) + ): + return None + + relative_path = os.path.relpath(path, working_dir_path) + parts = relative_path.split(os.sep) + + if parts: + return parts[0] + return None + +def add_or_init(obj: dict[str, int], key: str, value: int): + obj.setdefault(key, 0) + obj[key] += value \ No newline at end of file diff --git a/utilities/earthly-cache-watcher/main.py b/utilities/earthly-cache-watcher/main.py new file mode 100644 index 000000000..66485d612 --- /dev/null +++ b/utilities/earthly-cache-watcher/main.py @@ -0,0 +1,384 @@ +# cspell: words dotenv levelname loguru + +import os +import sys +import threading +import time +from collections.abc import Callable +from typing import Set + +from dotenv import dotenv_values +from loguru import logger +from watchdog.events import FileSystemEventHandler +from watchdog.observers import Observer + +import helper + +logger.remove() # Remove the default handler +logger.add(sys.stdout, level="INFO", serialize=True, format="{message}") + +class Interval: + """ + A class that repeatedly executes a function + at specified intervals in a separate thread. + """ + + def __init__(self, interval: int, func: Callable[[], None]): + """ + Initializes the Interval instance + with the specified interval and function. 
+ """ + + self.interval = interval + self.func = func + self.stop_event = threading.Event() + + thread = threading.Thread(target=self.set_interval) + thread.start() + + def set_interval(self): + """ + Repeatedly executes the function at + the specified interval until the stop event is set. + """ + + next_time = time.time() + self.interval + while not self.stop_event.wait(next_time - time.time()): + next_time += self.interval + self.func() + + def drop(self): + """ + Signals the thread to stop executing the function. + """ + + self.stop_event.set() + + +class ChangeEventHandler(FileSystemEventHandler): + """ + Handles file system events. + """ + + def __init__(self, interval: int): + self.layer_growth_index: dict[str, int] = {} + self.layer_index: dict[str, int] = {} + self.file_index: dict[str, int] = {} + self.triggered_layers: Set[str] = set() + self.triggered_growth_layers: Set[str] = set() + self.interval = Interval(interval, self.handle_interval_change) + + self.list_initial_sizes() + + def list_initial_sizes(self): + """ + Lists initial file sizes during initialization. 
+ """ + + logger.info("initializing...") + + for root, _directories, files in os.walk(watch_dir): + for filename in files: + dir_abspath = os.path.abspath(root) + file_path = os.path.join(dir_abspath, filename) + layer_name = helper.get_subdirectory_name(watch_dir, file_path) + + if not os.path.isfile(file_path): + continue + + try: + size = os.path.getsize(file_path) + + self.file_index[file_path] = size + helper.add_or_init(self.layer_index, layer_name, size) + + logger.debug( + f"initial file: {file_path} (size: {size:,} bytes)" + ) + except OSError as e: + if log_file_accessing_err: + logger.error(f"{e}: {file_path}") + + # checks total + self.check_sizes(layer_name="") + + # check individual + for layer_name in self.layer_index: + self.check_sizes(layer_name, skip_sum_check=True) + + logger.info("finished initializing") + + def on_any_event(self, event): + """ + Logs any file system event. + """ + + if event.is_directory: + return None + + if event.event_type == "created": + self.handle_created(event.src_path) + elif event.event_type == "modified": + self.handle_modified(event.src_path) + elif event.event_type == "moved": + self.handle_moved(event.src_path, event.dest_path) + elif event.event_type == "deleted": + self.handle_deleted(event.src_path) + + logger.debug(event.event_type) + + def handle_interval_change(self): + logger.debug("interval changed") + + self.layer_growth_index.clear() + self.triggered_growth_layers.clear() + + def handle_created(self, file_path: str): + logger.debug(f"new file created: {file_path}") + + try: + layer_name = helper.get_subdirectory_name(watch_dir, file_path) + size = os.path.getsize(file_path) + + self.file_index[file_path] = size + helper.add_or_init(self.layer_index, layer_name, size) + helper.add_or_init(self.layer_growth_index, layer_name, size) + + # checks + self.check_sizes(layer_name) + except OSError as e: + if log_file_accessing_err: + logger.error(f"{e}: {file_path}") + + def handle_modified(self, file_path: 
str): + logger.debug(f"file modified: {file_path}") + + try: + layer_name = helper.get_subdirectory_name(watch_dir, file_path) + size = os.path.getsize(file_path) + + if file_path not in self.file_index: + self.handle_created(file_path) + elif size != self.file_index[file_path]: + prev_size = self.file_index[file_path] + d_size = size - prev_size + + self.file_index[file_path] = size + helper.add_or_init(self.layer_index, layer_name, d_size) + helper.add_or_init(self.layer_growth_index, layer_name, d_size) + + # checks + self.check_sizes(layer_name) + + logger.debug(" ".join([ + f"file modified: {file_path}", + f"(size changed from {prev_size:,} bytes", + f"to {size:,} bytes)" + ])) + else: + logger.debug(f"file modified: {file_path} (size unchanged)") + except OSError as e: + if log_file_accessing_err: + logger.error(f"{e}: {file_path}") + + def handle_moved(self, src_path: str, dest_path: str): + logger.debug(f"file moved: {src_path}") + + src_layer_name = helper.get_subdirectory_name(watch_dir, src_path) + dest_layer_name = helper.get_subdirectory_name(watch_dir, dest_path) + + if src_path in self.file_index: + size = self.file_index[src_path] + self.file_index[dest_path] = size + del self.file_index[src_path] + + if src_layer_name != dest_layer_name: + helper.add_or_init(self.layer_index, src_layer_name, -size) + helper.add_or_init(self.layer_index, dest_layer_name, size) + + def handle_deleted(self, file_path: str): + logger.debug(f"file deleted: {file_path}") + + if file_path in self.file_index: + layer_name = helper.get_subdirectory_name(watch_dir, file_path) + prev_size = self.file_index[file_path] + + del self.file_index[file_path] + + helper.add_or_init(self.layer_index, layer_name, -prev_size) + helper.add_or_init(self.layer_growth_index, layer_name, -prev_size) + + if self.layer_index[layer_name] < large_layer_size: + self.triggered_layers.discard(layer_name) + if self.layer_index[layer_name] <= 0: + del self.layer_index[layer_name] + + def 
check_sizes(self, layer_name: str, skip_sum_check=False): + if ( + layer_name in self.layer_index + and self.layer_index[layer_name] + >= large_layer_size + ): + self.trigger_layer_size_exceeded(layer_name) + + if ( + not skip_sum_check + and sum(self.layer_growth_index.values()) + >= max_time_window_growth_size + ): + self.trigger_interval_growth_exceeded() + + if ( + not skip_sum_check + and sum(self.layer_index.values()) >= max_cache_size + ): + self.trigger_max_cache_size() + + def trigger_layer_size_exceeded(self, layer_name: str): + if layer_name in self.triggered_layers: + return + + self.triggered_layers.add(layer_name) + + logger.error( + " ".join([ + f"layer '{layer_name}' exceeds large layer size criteria", + f"(size: {self.layer_index[layer_name]:,} bytes", + f"- limit: {large_layer_size:,} bytes)" + ]), + extra={ + "err_type": "layer_size_exceeded", + "layer": layer_name, + "size": self.layer_index[layer_name], + "limit": large_layer_size + } + ) + + def trigger_interval_growth_exceeded(self): + try: + has_triggered_layer = False + for layer_name, size in self.layer_growth_index.items(): + if layer_name in self.triggered_growth_layers: + continue + + has_triggered_layer = True + self.triggered_growth_layers.add(layer_name) + + logger.error( + " ".join([ + f"layer '{layer_name}'", + f"- {size:,} bytes within the interval" + ]), + extra={ + "err_type": "layer_list_growth_exceeded", + "layer": layer_name, + "size": size + } + ) + + if has_triggered_layer: + size = sum(self.layer_growth_index.values()) + + logger.error( + " ".join([ + "the total amount of cache growth", + f"within {time_window:,} secs exceeds the limit", + f"(size: {size:,} bytes", + f"- limit: {max_time_window_growth_size:,} bytes)" + ]), + extra={ + "err_type": "interval_growth_exceeded", + "size": size, + "limit": max_time_window_growth_size, + "within": time_window + } + ) + except RuntimeError as e: + logger.error(f"an error occurred: {e}") + + def trigger_max_cache_size(self): + size 
= sum(self.layer_index.values()) + + logger.error( + " ".join([ + "the total amount of cache exceeds the limit", + f"(size: {size:,} bytes", + f"- limit: {max_cache_size:,} bytes)" + ]), + extra={ + "err_type": "max_cache_size_exceeded", + "size": size, + "limit": max_cache_size + } + ) + + def drop(self): + self.interval.drop() + +def main(): + global \ + watch_dir, \ + large_layer_size, \ + max_cache_size, \ + time_window, \ + max_time_window_growth_size, \ + log_file_accessing_err + + default_config_path = sys.argv[1] if len(sys.argv) > 1 else "default.conf" + + # init configs + watch_dir = "." + large_layer_size = 1073741824 # 1GB + max_cache_size = 536870912000 # 500GB + time_window = 10 # 10 secs + max_time_window_growth_size = 53687091200 # 50GB + log_file_accessing_err = True + + if os.path.isfile(default_config_path): + logger.info( + f"read config from {os.path.abspath(default_config_path)!r}" + ) + + cfg = dotenv_values(default_config_path) + + watch_dir = str(cfg["watch_dir"]) + large_layer_size = int(cfg["large_layer_size"]) + max_cache_size = int(cfg["max_cache_size"]) + time_window = int(cfg["time_window"]) + max_time_window_growth_size = int(cfg["max_time_window_growth_size"]) + log_file_accessing_err = str(cfg["log_file_accessing_err"]) == "True" + else: + logger.info("cannot find the config file, use default config instead") + + logger.info(f"start watching directory {os.path.abspath(watch_dir)!r}") + logger.info(f"with `large_layer_size` set to {large_layer_size:,} bytes") + logger.info(f"with `max_cache_size` set to {max_cache_size:,} bytes") + logger.info(f"with `time_window` set to {time_window:,} secs") + logger.info(" ".join([ + "with `max_time_window_growth_size` set to", + f"{max_time_window_growth_size:,} bytes" + ])) + logger.info(" ".join([ + "with `log_file_accessing_err` set to", + str(log_file_accessing_err)  # bool -> str: str.join() raises TypeError on non-str items + ])) + + # init watcher + handler = ChangeEventHandler(time_window) + + observer = Observer() + observer.schedule(handler,
watch_dir, recursive=True) + observer.start() + + try: + while True: + time.sleep(1) + except KeyboardInterrupt: + observer.stop() + handler.drop() + finally: + observer.join() + + +if __name__ == "__main__": + main() diff --git a/utilities/earthly-cache-watcher/poetry.lock b/utilities/earthly-cache-watcher/poetry.lock new file mode 100644 index 000000000..7299891c9 --- /dev/null +++ b/utilities/earthly-cache-watcher/poetry.lock @@ -0,0 +1,90 @@ +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. + +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "ruff" +version = "0.1.15" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:5fe8d54df166ecc24106db7dd6a68d44852d14eb0729ea4672bb4d96c320b7df"}, + {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f0bfbb53c4b4de117ac4d6ddfd33aa5fc31beeaa21d23c45c6dd249faf9126f"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d432aec35bfc0d800d4f70eba26e23a352386be3a6cf157083d18f6f5881c8"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9405fa9ac0e97f35aaddf185a1be194a589424b8713e3b97b762336ec79ff807"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c66ec24fe36841636e814b8f90f572a8c0cb0e54d8b5c2d0e300d28a0d7bffec"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6f8ad828f01e8dd32cc58bc28375150171d198491fc901f6f98d2a39ba8e3ff5"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86811954eec63e9ea162af0ffa9f8d09088bab51b7438e8b6488b9401863c25e"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd4025ac5e87d9b80e1f300207eb2fd099ff8200fa2320d7dc066a3f4622dc6b"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b17b93c02cdb6aeb696effecea1095ac93f3884a49a554a9afa76bb125c114c1"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ddb87643be40f034e97e97f5bc2ef7ce39de20e34608f3f829db727a93fb82c5"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:abf4822129ed3a5ce54383d5f0e964e7fef74a41e48eb1dfad404151efc130a2"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6c629cf64bacfd136c07c78ac10a54578ec9d1bd2a9d395efbee0935868bf852"}, + {file = 
"ruff-0.1.15-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1bab866aafb53da39c2cadfb8e1c4550ac5340bb40300083eb8967ba25481447"}, + {file = "ruff-0.1.15-py3-none-win32.whl", hash = "sha256:2417e1cb6e2068389b07e6fa74c306b2810fe3ee3476d5b8a96616633f40d14f"}, + {file = "ruff-0.1.15-py3-none-win_amd64.whl", hash = "sha256:3837ac73d869efc4182d9036b1405ef4c73d9b1f88da2413875e34e0d6919587"}, + {file = "ruff-0.1.15-py3-none-win_arm64.whl", hash = "sha256:9a933dfb1c14ec7a33cceb1e49ec4a16b51ce3c20fd42663198746efc0427360"}, + {file = "ruff-0.1.15.tar.gz", hash = "sha256:f6dfa8c1b21c913c326919056c390966648b680966febcb796cc9d1aaab8564e"}, +] + +[[package]] +name = "watchdog" +version = "4.0.1" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.8" +files = [ + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, + {file = 
"watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = 
"sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, + {file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, + {file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, + {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, + {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + +[metadata] +lock-version = "2.0" +python-versions = "3.12.4" +content-hash = "8a3864832a558787d52a134e1a79a88a2c990a1c3753f7c8f8f1ef8245997ddd" diff --git a/utilities/earthly-cache-watcher/pyproject.toml b/utilities/earthly-cache-watcher/pyproject.toml new file mode 100644 index 000000000..286bc66bc --- /dev/null +++ b/utilities/earthly-cache-watcher/pyproject.toml @@ -0,0 +1,36 @@ +[tool.poetry] +name = "earthly-cache-watcher" +version = "0.1.0" +description = "A file watcher 
to detect changes in file size" +authors = ["Catalyst Team"] +license = "MIT" +readme = "README.md" + + +[tool.poetry.dependencies] +python = "3.12.4" +watchdog = "^4.0.1" +python-dotenv = "^1.0.1" + +[tool.poetry.group.dev.dependencies] +ruff = "^0.1.14" + +[tool.ruff] +line-length = 79 + +[tool.ruff.lint] +select = [ + "E", # pycodestyle + "F", # Pyflakes + "UP", # pyupgrade + "B", # flake8-bugbear + "SIM", # flake8-simplify + "I", # isort +] + +[tool.ruff.lint.pydocstyle] +convention = "google" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/utilities/earthly-cache-watcher/watchdog.service b/utilities/earthly-cache-watcher/watchdog.service new file mode 100644 index 000000000..8965859f2 --- /dev/null +++ b/utilities/earthly-cache-watcher/watchdog.service @@ -0,0 +1,11 @@ +[Unit] +Description=Earthly Cache Watcher + +[Service] +Type=simple +ExecStart=/usr/bin/python3 /root/catalyst-ci/utilities/earthly-cache-watcher/main.py /root/catalyst-ci/utilities/earthly-cache-watcher/default.conf +Restart=always +User=root + +[Install] +WantedBy=multi-user.target \ No newline at end of file