From 3b6cd9ef1126dd380ab30ed46653e47ac1794aa4 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 9 Feb 2026 20:48:38 +0000
Subject: [PATCH 1/2] [pre-commit.ci] pre-commit autoupdate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/astral-sh/ruff-pre-commit: v0.14.11 → v0.15.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.14.11...v0.15.0)
---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8705c89..3fd073d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,7 +17,7 @@ repos:
       - id: check-toml

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: 'v0.14.11'
+    rev: 'v0.15.0'
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix]

From fe098c341493403dfa6ce6fcc6c50296350c1947 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 9 Feb 2026 20:49:23 +0000
Subject: [PATCH 2/2] [pre-commit.ci] Add auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 scripts/data/oc22_s2ef_train_convert.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/scripts/data/oc22_s2ef_train_convert.py b/scripts/data/oc22_s2ef_train_convert.py
index b378ee4..c2a6e5f 100644
--- a/scripts/data/oc22_s2ef_train_convert.py
+++ b/scripts/data/oc22_s2ef_train_convert.py
@@ -21,7 +21,9 @@ def process_data(db_path: str, output_dir: str) -> None:
         "has_formation_energy": [],
     }
     db = LmdbDataset(config={"src": db_path})
-    for j in tqdm(range(len(db)), desc=f"Processing {db_path.split('/')[-1]}"):
+    for j in tqdm(
+        range(len(db)), desc=f"Processing {db_path.rsplit('/', maxsplit=1)[-1]}"
+    ):
         dataset["input_ids"].append(db[j].atomic_numbers.short())
         dataset["coords"].append(db[j].pos)
         dataset["forces"].append(db[j].force)
@@ -29,7 +31,9 @@ def process_data(db_path: str, output_dir: str) -> None:
         dataset["total_energy"].append(np.array(db[j].y).astype("float32"))
         dataset["has_formation_energy"].append(False)
     hf_dataset = Dataset.from_dict(dataset)
-    hf_dataset.save_to_disk(f"{output_dir}/{db_path.split('/')[-1].split('.')[-2]}")
+    hf_dataset.save_to_disk(
+        f"{output_dir}/{db_path.rsplit('/', maxsplit=1)[-1].split('.')[-2]}"
+    )


 def main(args: argparse.Namespace) -> None:
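
Note (illustrative only, not part of the patches above): a minimal Python sketch, using a hypothetical path, showing that the autofixed rsplit('/', maxsplit=1)[-1] returns the same trailing path component as the original split('/')[-1], while splitting only once from the right.

# Minimal sketch; the path below is hypothetical, not taken from the repository.
db_path = "data/oc22/s2ef_train.lmdb"

basename_old = db_path.split("/")[-1]               # splits at every "/"
basename_new = db_path.rsplit("/", maxsplit=1)[-1]  # splits once, from the right

assert basename_old == basename_new == "s2ef_train.lmdb"

# The save_to_disk name strips the extension the same way in either version.
assert basename_new.split(".")[-2] == "s2ef_train"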