diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b04334b..db0499e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -42,7 +42,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install Hatch run: | - python -m pip install hatch==1.15.0 + python -m pip install hatch==1.16.5 - name: static analysis run: hatch fmt --check - name: type checking diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 3ceb9a6..bd35d8b 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -42,7 +42,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install Hatch - run: python -m pip install hatch==1.15.0 + run: python -m pip install hatch==1.16.5 - name: Setup and run Testing SDK working-directory: testing-sdk @@ -102,7 +102,7 @@ jobs: env: AWS_DURABLE_SDK_URL: file://${{ github.workspace }}/language-sdk run: | - pip install hatch==1.15.0 + pip install hatch==1.16.5 python -m pip install -e . 
- name: Get integration examples diff --git a/.github/workflows/notify_slack.yml b/.github/workflows/notify_slack.yml new file mode 100644 index 0000000..c1b2ebb --- /dev/null +++ b/.github/workflows/notify_slack.yml @@ -0,0 +1,39 @@ +name: Slack Notifications + +on: + issues: + types: [opened, reopened, edited] + pull_request_target: + types: [opened, reopened, synchronize] + +permissions: {} + +jobs: + notify: + runs-on: ubuntu-latest + steps: + - name: Send issue notification to Slack + if: github.event_name == 'issues' + uses: slackapi/slack-github-action@v2.1.1 + with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL_ISSUE }} + webhook-type: incoming-webhook + payload: | + { + "action": "${{ github.event.action }}", + "issue_url": "${{ github.event.issue.html_url }}", + "package_name": "${{ github.repository }}" + } + + - name: Send pull request notification to Slack + if: github.event_name == 'pull_request_target' + uses: slackapi/slack-github-action@v2.1.1 + with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL_PR }} + webhook-type: incoming-webhook + payload: | + { + "action": "${{ github.event.action }}", + "pr_url": "${{ github.event.pull_request.html_url }}", + "package_name": "${{ github.repository }}" + } diff --git a/.github/workflows/pypi-publish.yml b/.github/workflows/pypi-publish.yml index aca91f6..83c6869 100644 --- a/.github/workflows/pypi-publish.yml +++ b/.github/workflows/pypi-publish.yml @@ -27,7 +27,7 @@ jobs: python-version: "3.11" - name: Install Hatch run: | - python -m pip install --upgrade hatch==1.15.0 + python -m pip install --upgrade hatch==1.16.5 - name: Build release distributions run: | # NOTE: put your own distribution build steps here.
diff --git a/.github/workflows/sync-package.yml b/.github/workflows/sync-package.yml index 58048ea..9dfc6ff 100644 --- a/.github/workflows/sync-package.yml +++ b/.github/workflows/sync-package.yml @@ -27,7 +27,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install Hatch run: | - python -m pip install --upgrade hatch==1.15.0 + python -m pip install --upgrade hatch==1.16.5 - name: Build distribution run: hatch build - name: configure aws credentials diff --git a/pyproject.toml b/pyproject.toml index 08ef201..4b0e2d8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -62,9 +62,16 @@ exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"] [tool.ruff] line-length = 88 +target-version = "py313" [tool.ruff.lint] preview = true +select = ["TID252"] # Enforce absolute imports (ban relative imports) + +[tool.ruff.lint.isort] +known-first-party = ["aws_durable_execution_sdk_python"] +force-single-line = false +lines-after-imports = 2 [tool.ruff.lint.per-file-ignores] "tests/**" = [ diff --git a/src/aws_durable_execution_sdk_python/serdes.py b/src/aws_durable_execution_sdk_python/serdes.py index b3b704a..2cf4120 100644 --- a/src/aws_durable_execution_sdk_python/serdes.py +++ b/src/aws_durable_execution_sdk_python/serdes.py @@ -316,11 +316,7 @@ def encode(self, obj: Any) -> EncodedValue: def decode(self, tag: TypeTag, value: Any) -> Any: match tag: case ( - TypeTag.NONE - | TypeTag.STR - | TypeTag.BOOL - | TypeTag.INT - | TypeTag.FLOAT + TypeTag.NONE | TypeTag.STR | TypeTag.BOOL | TypeTag.INT | TypeTag.FLOAT ): return self.primitive_codec.decode(tag, value) case TypeTag.BYTES: diff --git a/tests/concurrency_test.py b/tests/concurrency_test.py index 0aeb3ac..aa05f88 100644 --- a/tests/concurrency_test.py +++ b/tests/concurrency_test.py @@ -2833,9 +2833,9 @@ def task_func(ctx, item, idx, items): # With tolerated_failure_count=1, executor stops when failure_count > 1 (at 2 failures) # Executor terminates early rather than executing all 100 
tasks assert executed_count["value"] < 100 - assert ( - result.completion_reason == CompletionReason.FAILURE_TOLERANCE_EXCEEDED - ), executed_count + assert result.completion_reason == CompletionReason.FAILURE_TOLERANCE_EXCEEDED, ( + executed_count + ) assert sum(1 for item in result.all if item.status == BatchItemStatus.FAILED) == 2 assert ( sum(1 for item in result.all if item.status == BatchItemStatus.SUCCEEDED) < 98 @@ -2965,9 +2965,9 @@ def slow_branch(): # Slow branch may or may not have started (depends on thread scheduling) # but it definitely should not have completed - assert ( - operation_tracker.slow_completed.call_count == 0 - ), "Executor should return before slow branch completes" + assert operation_tracker.slow_completed.call_count == 0, ( + "Executor should return before slow branch completes" + ) # Result should show MIN_SUCCESSFUL_REACHED assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED @@ -3019,9 +3019,9 @@ def slow_func(): result = executor.execute(execution_state, executor_context) # Executor should have returned before slow branch completed - assert ( - not slow_branch_mock.completed.called - ), "Executor should return before slow branch completes" + assert not slow_branch_mock.completed.called, ( + "Executor should return before slow branch completes" + ) # Result should show MIN_SUCCESSFUL_REACHED assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED diff --git a/tests/operation/wait_for_condition_test.py b/tests/operation/wait_for_condition_test.py index 84d038f..7ed8dd1 100644 --- a/tests/operation/wait_for_condition_test.py +++ b/tests/operation/wait_for_condition_test.py @@ -662,9 +662,9 @@ def wait_strategy(state, attempt): context_logger=mock_logger, ) - assert ( - captured_attempts[-1] == 2 - ), "After first retry (checkpoint.attempt=1), current attempt should be 2" + assert captured_attempts[-1] == 2, ( + "After first retry (checkpoint.attempt=1), current attempt should be 2" + ) # Test 
3: After second retry (checkpoint has attempt=2) operation = Operation( @@ -684,9 +684,9 @@ def wait_strategy(state, attempt): context_logger=mock_logger, ) - assert ( - captured_attempts[-1] == 3 - ), "After second retry (checkpoint.attempt=2), current attempt should be 3" + assert captured_attempts[-1] == 3, ( + "After second retry (checkpoint.attempt=2), current attempt should be 3" + ) # Test 4: After third retry (checkpoint has attempt=3) operation = Operation( @@ -706,9 +706,9 @@ def wait_strategy(state, attempt): context_logger=mock_logger, ) - assert ( - captured_attempts[-1] == 4 - ), "After third retry (checkpoint.attempt=3), current attempt should be 4" + assert captured_attempts[-1] == 4, ( + "After third retry (checkpoint.attempt=3), current attempt should be 4" + ) # Verify the complete sequence is monotonically increasing assert captured_attempts == [ diff --git a/tests/state_test.py b/tests/state_test.py index 968a3b4..019b001 100644 --- a/tests/state_test.py +++ b/tests/state_test.py @@ -608,9 +608,9 @@ def test_checkpointed_result_is_timed_out_false_for_other_statuses(): status=status, ) result = CheckpointedResult.create_from_operation(operation) - assert ( - result.is_timed_out() is False - ), f"is_timed_out should be False for status {status}" + assert result.is_timed_out() is False, ( + f"is_timed_out should be False for status {status}" + ) def test_fetch_paginated_operations_with_marker(): @@ -2535,9 +2535,9 @@ def test_create_checkpoint_is_sync_false_returns_immediately(): elapsed_time = time.time() - start_time # Verify it returns immediately (should be < 10ms, we allow 50ms for safety) - assert ( - elapsed_time < 0.05 - ), f"Async checkpoint took {elapsed_time:.3f}s, expected < 0.05s" + assert elapsed_time < 0.05, ( + f"Async checkpoint took {elapsed_time:.3f}s, expected < 0.05s" + ) # Verify operation was enqueued assert state._checkpoint_queue.qsize() == 1 @@ -3115,9 +3115,9 @@ def background_processor(): # Verify all calls blocked for 
at least the delay time for i in range(num_callers): elapsed = end_times[i] - start_times[i] - assert ( - elapsed >= 0.15 - ), f"Caller {i} expected blocking for at least 0.15s, got {elapsed}s" + assert elapsed >= 0.15, ( + f"Caller {i} expected blocking for at least 0.15s, got {elapsed}s" + ) def test_create_checkpoint_sync_with_empty_checkpoint():