Diff of the two build logs:

--
--- b1/build.log	2025-01-08 08:35:39.602025225 +0000
+++ b2/build.log	2025-01-08 09:27:40.759956347 +0000
@@ -1,6 +1,6 @@
 I: pbuilder: network access will be disabled during build
-I: Current time: Tue Jan  7 19:12:52 -12 2025
-I: pbuilder-time-stamp: 1736320372
+I: Current time: Wed Feb 11 04:58:43 +14 2026
+I: pbuilder-time-stamp: 1770735523
 I: Building the build Environment
 I: extracting base tarball [/var/cache/pbuilder/unstable-reproducible-base.tgz]
 I: copying local configuration
@@ -44,52 +44,84 @@
 dpkg-source: info: applying dask-matching-interpreter.patch
 I: Not using root during the build.
 I: Installing the build-deps
-I: user script /srv/workspace/pbuilder/1948021/tmp/hooks/D02_print_environment starting
+I: user script /srv/workspace/pbuilder/3233247/tmp/hooks/D01_modify_environment starting
+debug: Running on codethink03-arm64.
+I: Changing host+domainname to test build reproducibility
+I: Adding a custom variable just for the fun of it...
+I: Changing /bin/sh to bash
+'/bin/sh' -> '/bin/bash'
+lrwxrwxrwx 1 root root 9 Feb 10 14:58 /bin/sh -> /bin/bash
+I: Setting pbuilder2's login shell to /bin/bash
+I: Setting pbuilder2's GECOS to second user,second room,second work-phone,second home-phone,second other
+I: user script /srv/workspace/pbuilder/3233247/tmp/hooks/D01_modify_environment finished
+I: user script /srv/workspace/pbuilder/3233247/tmp/hooks/D02_print_environment starting
 I: set
-  BUILDDIR='/build/reproducible-path'
-  BUILDUSERGECOS='first user,first room,first work-phone,first home-phone,first other'
-  BUILDUSERNAME='pbuilder1'
-  BUILD_ARCH='arm64'
-  DEBIAN_FRONTEND='noninteractive'
+  BASH=/bin/sh
+  BASHOPTS=checkwinsize:cmdhist:complete_fullquote:extquote:force_fignore:globasciiranges:globskipdots:hostcomplete:interactive_comments:patsub_replacement:progcomp:promptvars:sourcepath
+  BASH_ALIASES=()
+  BASH_ARGC=()
+  BASH_ARGV=()
+  BASH_CMDS=()
+  BASH_LINENO=([0]="12" [1]="0")
+  BASH_LOADABLES_PATH=/usr/local/lib/bash:/usr/lib/bash:/opt/local/lib/bash:/usr/pkg/lib/bash:/opt/pkg/lib/bash:.
+  BASH_SOURCE=([0]="/tmp/hooks/D02_print_environment" [1]="/tmp/hooks/D02_print_environment")
+  BASH_VERSINFO=([0]="5" [1]="2" [2]="37" [3]="1" [4]="release" [5]="aarch64-unknown-linux-gnu")
+  BASH_VERSION='5.2.37(1)-release'
+  BUILDDIR=/build/reproducible-path
+  BUILDUSERGECOS='second user,second room,second work-phone,second home-phone,second other'
+  BUILDUSERNAME=pbuilder2
+  BUILD_ARCH=arm64
+  DEBIAN_FRONTEND=noninteractive
   DEB_BUILD_OPTIONS='buildinfo=+all reproducible=+all parallel=12 '
-  DISTRIBUTION='unstable'
-  HOME='/root'
-  HOST_ARCH='arm64'
+  DIRSTACK=()
+  DISTRIBUTION=unstable
+  EUID=0
+  FUNCNAME=([0]="Echo" [1]="main")
+  GROUPS=()
+  HOME=/root
+  HOSTNAME=i-capture-the-hostname
+  HOSTTYPE=aarch64
+  HOST_ARCH=arm64
   IFS=' 	
   '
-  INVOCATION_ID='a9610b8a00e54a93b7f99007c14498ba'
-  LANG='C'
-  LANGUAGE='en_US:en'
-  LC_ALL='C'
-  MAIL='/var/mail/root'
-  OPTIND='1'
-  PATH='/usr/sbin:/usr/bin:/sbin:/bin:/usr/games'
-  PBCURRENTCOMMANDLINEOPERATION='build'
-  PBUILDER_OPERATION='build'
-  PBUILDER_PKGDATADIR='/usr/share/pbuilder'
-  PBUILDER_PKGLIBDIR='/usr/lib/pbuilder'
-  PBUILDER_SYSCONFDIR='/etc'
-  PPID='1948021'
-  PS1='# '
-  PS2='> '
+  INVOCATION_ID=607a217235074b56b8e86576abf0f528
+  LANG=C
+  LANGUAGE=nl_BE:nl
+  LC_ALL=C
+  MACHTYPE=aarch64-unknown-linux-gnu
+  MAIL=/var/mail/root
+  OPTERR=1
+  OPTIND=1
+  OSTYPE=linux-gnu
+  PATH=/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/i/capture/the/path
+  PBCURRENTCOMMANDLINEOPERATION=build
+  PBUILDER_OPERATION=build
+  PBUILDER_PKGDATADIR=/usr/share/pbuilder
+  PBUILDER_PKGLIBDIR=/usr/lib/pbuilder
+  PBUILDER_SYSCONFDIR=/etc
+  PIPESTATUS=([0]="0")
+  POSIXLY_CORRECT=y
+  PPID=3233247
   PS4='+ '
-  PWD='/'
-  SHELL='/bin/bash'
-  SHLVL='2'
-  SUDO_COMMAND='/usr/bin/timeout -k 18.1h 18h /usr/bin/ionice -c 3 /usr/bin/nice /usr/sbin/pbuilder --build --configfile /srv/reproducible-results/rbuild-debian/r-b-build.xReI7fGN/pbuilderrc_HeFQ --distribution unstable --hookdir /etc/pbuilder/first-build-hooks --debbuildopts -b --basetgz /var/cache/pbuilder/unstable-reproducible-base.tgz --buildresult /srv/reproducible-results/rbuild-debian/r-b-build.xReI7fGN/b1 --logfile b1/build.log dask.distributed_2024.12.1+ds-1.dsc'
-  SUDO_GID='109'
-  SUDO_UID='104'
-  SUDO_USER='jenkins'
-  TERM='unknown'
-  TZ='/usr/share/zoneinfo/Etc/GMT+12'
-  USER='root'
-  _='/usr/bin/systemd-run'
-  http_proxy='http://192.168.101.4:3128'
+  PWD=/
+  SHELL=/bin/bash
+  SHELLOPTS=braceexpand:errexit:hashall:interactive-comments:posix
+  SHLVL=3
+  SUDO_COMMAND='/usr/bin/timeout -k 24.1h 24h /usr/bin/ionice -c 3 /usr/bin/nice -n 11 /usr/bin/unshare --uts -- /usr/sbin/pbuilder --build --configfile /srv/reproducible-results/rbuild-debian/r-b-build.xReI7fGN/pbuilderrc_xyFI --distribution unstable --hookdir /etc/pbuilder/rebuild-hooks --debbuildopts -b --basetgz /var/cache/pbuilder/unstable-reproducible-base.tgz --buildresult /srv/reproducible-results/rbuild-debian/r-b-build.xReI7fGN/b2 --logfile b2/build.log dask.distributed_2024.12.1+ds-1.dsc'
+  SUDO_GID=109
+  SUDO_UID=104
+  SUDO_USER=jenkins
+  TERM=unknown
+  TZ=/usr/share/zoneinfo/Etc/GMT-14
+  UID=0
+  USER=root
+  _='I: set'
+  http_proxy=http://192.168.101.4:3128
 I: uname -a
-  Linux codethink04-arm64 6.1.0-28-cloud-arm64 #1 SMP Debian 6.1.119-1 (2024-11-22) aarch64 GNU/Linux
+  Linux i-capture-the-hostname 6.1.0-28-cloud-arm64 #1 SMP Debian 6.1.119-1 (2024-11-22) aarch64 GNU/Linux
 I: ls -l /bin
-  lrwxrwxrwx 1 root root 7 Nov 22 14:40 /bin -> usr/bin
-I: user script /srv/workspace/pbuilder/1948021/tmp/hooks/D02_print_environment finished
+  lrwxrwxrwx 1 root root 7 Nov 22  2024 /bin -> usr/bin
+I: user script /srv/workspace/pbuilder/3233247/tmp/hooks/D02_print_environment finished
  -> Attempting to satisfy build-dependencies
  -> Creating pbuilder-satisfydepends-dummy package
 Package: pbuilder-satisfydepends-dummy
@@ -449,7 +481,7 @@
 Get: 236 http://deb.debian.org/debian unstable/main arm64 python3-zict all 3.0.0-2 [29.7 kB]
 Get: 237 http://deb.debian.org/debian unstable/main arm64 tzdata-legacy all 2024b-4 [176 kB]
 Get: 238 http://deb.debian.org/debian unstable/main arm64 uglifyjs all 3.17.4-2 [12.1 kB]
-Fetched 137 MB in 2s (77.2 MB/s)
+Fetched 137 MB in 1s (125 MB/s)
 Preconfiguring packages ...
 Selecting previously unselected package fonts-lato.
 (Reading database ... 
(Reading database ... 5%
(Reading database ... 10%
(Reading database ... 15%
(Reading database ... 20%
(Reading database ... 25%
(Reading database ... 30%
(Reading database ... 35%
(Reading database ... 40%
(Reading database ... 45%
(Reading database ... 50%
(Reading database ... 55%
(Reading database ... 60%
(Reading database ... 65%
(Reading database ... 70%
(Reading database ... 75%
(Reading database ... 80%
(Reading database ... 85%
(Reading database ... 90%
(Reading database ... 95%
(Reading database ... 100%
(Reading database ... 19966 files and directories currently installed.)
@@ -1213,8 +1245,8 @@
 Setting up tzdata (2024b-4) ...
 
 Current default time zone: 'Etc/UTC'
-Local time is now:      Wed Jan  8 07:14:22 UTC 2025.
-Universal Time is now:  Wed Jan  8 07:14:22 UTC 2025.
+Local time is now:      Tue Feb 10 15:00:01 UTC 2026.
+Universal Time is now:  Tue Feb 10 15:00:01 UTC 2026.
 Run 'dpkg-reconfigure tzdata' if you wish to change it.
 
 Setting up libpgm-5.3-0t64:arm64 (5.3.128~dfsg-2.1+b1) ...
@@ -1445,7 +1477,11 @@
 Building tag database...
  -> Finished parsing the build-deps
 I: Building the package
-I: Running cd /build/reproducible-path/dask.distributed-2024.12.1+ds/ && env PATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/games" HOME="/nonexistent/first-build" dpkg-buildpackage -us -uc -b && env PATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/games" HOME="/nonexistent/first-build" dpkg-genchanges -S  > ../dask.distributed_2024.12.1+ds-1_source.changes
+I: user script /srv/workspace/pbuilder/3233247/tmp/hooks/A99_set_merged_usr starting
+Not re-configuring usrmerge for unstable
+I: user script /srv/workspace/pbuilder/3233247/tmp/hooks/A99_set_merged_usr finished
+hostname: Name or service not known
+I: Running cd /build/reproducible-path/dask.distributed-2024.12.1+ds/ && env PATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/i/capture/the/path" HOME="/nonexistent/second-build" dpkg-buildpackage -us -uc -b && env PATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/i/capture/the/path" HOME="/nonexistent/second-build" dpkg-genchanges -S  > ../dask.distributed_2024.12.1+ds-1_source.changes
 dpkg-buildpackage: info: source package dask.distributed
 dpkg-buildpackage: info: source version 2024.12.1+ds-1
 dpkg-buildpackage: info: source distribution unstable
@@ -2822,7 +2858,7 @@
 Copying distributed.egg-info to build/bdist.linux-aarch64/wheel/./distributed-2024.12.1.egg-info
 running install_scripts
 creating build/bdist.linux-aarch64/wheel/distributed-2024.12.1.dist-info/WHEEL
-creating '/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.12_distributed/.tmp-on69p7h6/distributed-2024.12.1-py3-none-any.whl' and adding 'build/bdist.linux-aarch64/wheel' to it
+creating '/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.12_distributed/.tmp-kz_v1w1v/distributed-2024.12.1-py3-none-any.whl' and adding 'build/bdist.linux-aarch64/wheel' to it
 adding 'distributed/__init__.py'
 adding 'distributed/_async_taskgroup.py'
 adding 'distributed/_asyncio.py'
@@ -4133,7 +4169,7 @@
 Copying distributed.egg-info to build/bdist.linux-aarch64/wheel/./distributed-2024.12.1.egg-info
 running install_scripts
 creating build/bdist.linux-aarch64/wheel/distributed-2024.12.1.dist-info/WHEEL
-creating '/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/.tmp-e569_hbi/distributed-2024.12.1-py3-none-any.whl' and adding 'build/bdist.linux-aarch64/wheel' to it
+creating '/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/.tmp-e8jvj30j/distributed-2024.12.1-py3-none-any.whl' and adding 'build/bdist.linux-aarch64/wheel' to it
 adding 'distributed/__init__.py'
 adding 'distributed/_async_taskgroup.py'
 adding 'distributed/_asyncio.py'
@@ -4838,8 +4874,6 @@
 distributed/diagnostics/tests/test_nanny_plugin.py::test_plugin_with_broken_setup_on_new_nanny_logs PASSED [  2%]
 distributed/diagnostics/tests/test_nanny_plugin.py::test_unregister_nanny_plugin_with_broken_teardown_raises PASSED [  3%]
 distributed/diagnostics/tests/test_nanny_plugin.py::test_nanny_plugin_with_broken_teardown_logs_on_close PASSED [  3%]
-distributed/diagnostics/tests/test_progress.py::test_many_Progress RERUN [  3%]
-distributed/diagnostics/tests/test_progress.py::test_many_Progress RERUN [  3%]
 distributed/diagnostics/tests/test_progress.py::test_many_Progress PASSED [  3%]
 distributed/diagnostics/tests/test_progress.py::test_multiprogress PASSED [  3%]
 distributed/diagnostics/tests/test_progress.py::test_multiprogress_cancel PASSED [  3%]
@@ -4886,7 +4920,7 @@
 distributed/diagnostics/tests/test_task_stream.py::test_collect PASSED   [  4%]
 distributed/diagnostics/tests/test_task_stream.py::test_no_startstops PASSED [  4%]
 distributed/diagnostics/tests/test_task_stream.py::test_client PASSED    [  4%]
-distributed/diagnostics/tests/test_task_stream.py::test_client_sync PASSED [  4%]
+distributed/diagnostics/tests/test_task_stream.py::test_client_sync FAILED [  4%]
 distributed/diagnostics/tests/test_task_stream.py::test_get_task_stream_plot SKIPPED [  4%]
 distributed/diagnostics/tests/test_task_stream.py::test_get_task_stream_save SKIPPED [  4%]
 distributed/diagnostics/tests/test_worker_plugin.py::test_create_with_client PASSED [  4%]
@@ -6127,7 +6161,7 @@
 distributed/tests/test_client.py::test_computation_object_code_dask_persist PASSED [ 43%]
 distributed/tests/test_client.py::test_computation_object_code_client_submit_simple PASSED [ 43%]
 distributed/tests/test_client.py::test_computation_object_code_client_submit_list_comp PASSED [ 43%]
-distributed/tests/test_client.py::test_computation_object_code_client_submit_dict_comp FAILED [ 43%]
+distributed/tests/test_client.py::test_computation_object_code_client_submit_dict_comp PASSED [ 43%]
 distributed/tests/test_client.py::test_computation_object_code_client_map PASSED [ 43%]
 distributed/tests/test_client.py::test_computation_object_code_client_compute PASSED [ 43%]
 distributed/tests/test_client.py::test_upload_directory SKIPPED (need
@@ -7979,110 +8013,89 @@
 distributed/tests/test_worker_state_machine.py::test_remove_worker_unknown PASSED [100%]
 
 =================================== FAILURES ===================================
-_____________ test_computation_object_code_client_submit_dict_comp _____________
+_______________________________ test_client_sync _______________________________
 
-c = <Client: No scheduler connected>
-s = <Scheduler 'tcp://127.0.0.1:33669', workers: 0, cores: 0, tasks: 0>
-a = <Worker 'tcp://127.0.0.1:42983', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
-b = <Worker 'tcp://127.0.0.1:43997', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
-
-    @gen_cluster(client=True, config={"distributed.diagnostics.computations.nframes": 2})
-    async def test_computation_object_code_client_submit_dict_comp(c, s, a, b):
-        def func(x):
-            return x
-    
-        futs = {x: c.submit(func, x) for x in range(10)}
-    
-        await c.gather(futs)
+client = <Client: 'tcp://127.0.0.1:43223' processes=2 threads=2, memory=117.51 GiB>
+
+    def test_client_sync(client):
+        with get_task_stream(client=client) as ts:
+            sleep(0.1)  # to smooth over time differences on the scheduler
+            # to smooth over time differences on the scheduler
+            futures = client.map(inc, range(10))
+            wait(futures)
     
-        test_function_code = inspect.getsource(
-            test_computation_object_code_client_submit_dict_comp.__wrapped__
-        )
-        computations = list(s.computations)
->       assert len(computations) == 1
-E       assert 2 == 1
-E        +  where 2 = len([<Computation 8567bce1-2c36-4461-89c4-01ef75bc391e: Tasks: memory: 10>, <Computation 66ebc6b0-a515-4e05-b19d-111903e1a725: Tasks: >])
+>       assert len(ts.data) == 10
+E       AssertionError: assert 2 == 10
+E        +  where 2 = len([{'key': 'inc-bf6e4c654a999b0a0ace1814a611b982', 'metadata': {}, 'nbytes': 28, 'startstops': ({'action': 'compute', 'start': 1770735826.8685853, 'stop': 1770735826.868627},), ...}, {'key': 'inc-12806163c2662b70f178b2541b9ea713', 'metadata': {}, 'nbytes': 28, 'startstops': ({'action': 'compute', 'start': 1770735826.339644, 'stop': 1770735826.3396828},), ...}])
+E        +    where [{'key': 'inc-bf6e4c654a999b0a0ace1814a611b982', 'metadata': {}, 'nbytes': 28, 'startstops': ({'action': 'compute', 'start': 1770735826.8685853, 'stop': 1770735826.868627},), ...}, {'key': 'inc-12806163c2662b70f178b2541b9ea713', 'metadata': {}, 'nbytes': 28, 'startstops': ({'action': 'compute', 'start': 1770735826.339644, 'stop': 1770735826.3396828},), ...}] = <distributed.client.get_task_stream object at 0xffff6f8e4470>.data
 
-distributed/tests/test_client.py:7362: AssertionError
------------------------------ Captured stderr call -----------------------------
-2025-01-07 19:36:42,511 - distributed.scheduler - INFO - State start
-2025-01-07 19:36:42,519 - distributed.scheduler - INFO -   Scheduler at:     tcp://127.0.0.1:33669
-2025-01-07 19:36:42,521 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41709/status
-2025-01-07 19:36:42,522 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 19:36:42,538 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:42983
-2025-01-07 19:36:42,540 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:42983
-2025-01-07 19:36:42,546 - distributed.worker - INFO -           Worker name:                          0
-2025-01-07 19:36:42,547 - distributed.worker - INFO -          dashboard at:            127.0.0.1:38907
-2025-01-07 19:36:42,548 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:33669
-2025-01-07 19:36:42,554 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 19:36:42,555 - distributed.worker - INFO -               Threads:                          1
-2025-01-07 19:36:42,556 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 19:36:42,558 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-i9xbyw3k
-2025-01-07 19:36:42,559 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 19:36:42,569 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:43997
-2025-01-07 19:36:42,571 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:43997
-2025-01-07 19:36:42,572 - distributed.worker - INFO -           Worker name:                          1
-2025-01-07 19:36:42,573 - distributed.worker - INFO -          dashboard at:            127.0.0.1:41123
-2025-01-07 19:36:42,574 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:33669
-2025-01-07 19:36:42,575 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 19:36:42,576 - distributed.worker - INFO -               Threads:                          2
-2025-01-07 19:36:42,582 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 19:36:42,583 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-68deozrp
-2025-01-07 19:36:42,584 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 19:36:42,639 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:42983 name: 0
-2025-01-07 19:36:42,671 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:42983
-2025-01-07 19:36:42,672 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41808
-2025-01-07 19:36:42,673 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:43997 name: 1
-2025-01-07 19:36:42,700 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:43997
-2025-01-07 19:36:42,701 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41814
-2025-01-07 19:36:42,702 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 19:36:42,704 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 19:36:42,711 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:33669
-2025-01-07 19:36:42,712 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 19:36:42,718 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:33669
-2025-01-07 19:36:42,720 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 19:36:42,722 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:33669
-2025-01-07 19:36:42,722 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:33669
-2025-01-07 19:36:42,752 - distributed.scheduler - INFO - Receive client connection: Client-4e2cea21-cd93-11ef-abcc-f1751d17d1b8
-2025-01-07 19:36:42,783 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41830
-2025-01-07 19:36:42,901 - distributed.scheduler - INFO - Remove client Client-4e2cea21-cd93-11ef-abcc-f1751d17d1b8
-2025-01-07 19:36:42,902 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:41830; closing.
-2025-01-07 19:36:42,903 - distributed.scheduler - INFO - Remove client Client-4e2cea21-cd93-11ef-abcc-f1751d17d1b8
-2025-01-07 19:36:42,905 - distributed.scheduler - INFO - Close client connection: Client-4e2cea21-cd93-11ef-abcc-f1751d17d1b8
-2025-01-07 19:36:42,908 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:42983. Reason: worker-close
-2025-01-07 19:36:42,910 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:43997. Reason: worker-close
-2025-01-07 19:36:42,912 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 19:36:42,914 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 19:36:42,917 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:41808; closing.
-2025-01-07 19:36:42,917 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:41814; closing.
-2025-01-07 19:36:42,917 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:42983 name: 0 (stimulus_id='handle-worker-cleanup-1736321802.9178703')
-2025-01-07 19:36:42,919 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:43997 name: 1 (stimulus_id='handle-worker-cleanup-1736321802.919672')
-2025-01-07 19:36:42,921 - distributed.scheduler - INFO - Lost all workers
-2025-01-07 19:36:42,922 - distributed.core - INFO - Connection to tcp://127.0.0.1:33669 has been closed.
-2025-01-07 19:36:42,937 - distributed.core - INFO - Connection to tcp://127.0.0.1:33669 has been closed.
-2025-01-07 19:36:42,965 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
-2025-01-07 19:36:42,967 - distributed.scheduler - INFO - Scheduler closing all comms
+distributed/diagnostics/tests/test_task_stream.py:135: AssertionError
+---------------------------- Captured stderr setup -----------------------------
+2026-02-11 05:03:45,440 - distributed.http.proxy - INFO - To route to workers diagnostics web server please install jupyter-server-proxy: python -m pip install jupyter-server-proxy
+2026-02-11 05:03:45,442 - distributed.scheduler - INFO - State start
+2026-02-11 05:03:45,445 - distributed.scheduler - INFO -   Scheduler at:     tcp://127.0.0.1:43223
+2026-02-11 05:03:45,445 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:8787/status
+2026-02-11 05:03:45,446 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:03:45,466 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:37397
+2026-02-11 05:03:45,467 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:37397
+2026-02-11 05:03:45,467 - distributed.worker - INFO -          dashboard at:            127.0.0.1:37887
+2026-02-11 05:03:45,467 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:43223
+2026-02-11 05:03:45,467 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:03:45,467 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:03:45,467 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:03:45,467 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-jhuvtknx
+2026-02-11 05:03:45,467 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:03:45,484 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:40599
+2026-02-11 05:03:45,484 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:40599
+2026-02-11 05:03:45,484 - distributed.worker - INFO -          dashboard at:            127.0.0.1:42771
+2026-02-11 05:03:45,484 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:43223
+2026-02-11 05:03:45,484 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:03:45,484 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:03:45,484 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:03:45,485 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-if7_nnoo
+2026-02-11 05:03:45,485 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:03:45,728 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:37397 name: tcp://127.0.0.1:37397
+2026-02-11 05:03:46,267 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:37397
+2026-02-11 05:03:46,268 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:42514
+2026-02-11 05:03:46,269 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:40599 name: tcp://127.0.0.1:40599
+2026-02-11 05:03:46,269 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:40599
+2026-02-11 05:03:46,270 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:42516
+2026-02-11 05:03:46,271 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:03:46,271 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:03:46,272 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:43223
+2026-02-11 05:03:46,272 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:43223
+2026-02-11 05:03:46,272 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:03:46,272 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:03:46,287 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43223
+2026-02-11 05:03:46,291 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43223
+2026-02-11 05:03:46,327 - distributed.scheduler - INFO - Receive client connection: Client-b2ae2379-0691-11f1-b9c4-2fe95a9ac685
+2026-02-11 05:03:46,328 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:42536
+--------------------------- Captured stderr teardown ---------------------------
+2026-02-11 05:03:46,516 - distributed.scheduler - INFO - Remove client Client-b2ae2379-0691-11f1-b9c4-2fe95a9ac685
+2026-02-11 05:03:46,517 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:42536; closing.
+2026-02-11 05:03:46,517 - distributed.scheduler - INFO - Remove client Client-b2ae2379-0691-11f1-b9c4-2fe95a9ac685
+2026-02-11 05:03:46,518 - distributed.scheduler - INFO - Close client connection: Client-b2ae2379-0691-11f1-b9c4-2fe95a9ac685
 ============================= slowest 20 durations =============================
-33.93s call     distributed/tests/test_gc.py::test_gc_diagnosis_cpu_time
-14.53s call     distributed/tests/test_nanny.py::test_num_fds
-13.99s call     distributed/shuffle/tests/test_rechunk.py::test_rechunk_method
-10.03s call     distributed/tests/test_utils_test.py::test_popen_timeout
-9.85s call     distributed/shuffle/tests/test_rechunk.py::test_rechunk_with_partially_unknown_dimension[x3-chunks3]
-9.08s call     distributed/tests/test_failed_workers.py::test_restart_sync
-8.86s call     distributed/tests/test_steal.py::test_trivial_workload_should_not_cause_work_stealing
-8.84s call     distributed/tests/test_failed_workers.py::test_worker_doesnt_await_task_completion
-8.58s call     distributed/shuffle/tests/test_rechunk.py::test_rechunk_with_partially_unknown_dimension[x5-chunks5]
-8.49s call     distributed/diagnostics/tests/test_progress.py::test_AllProgress
-8.32s call     distributed/diagnostics/tests/test_progress.py::test_group_timing
-7.75s call     distributed/tests/test_failed_workers.py::test_restart_during_computation
-7.68s call     distributed/shuffle/tests/test_rechunk.py::test_homogeneously_schedule_unpack
-7.48s call     distributed/shuffle/tests/test_rechunk.py::test_rechunk_with_partially_unknown_dimension[x4-chunks4]
-7.48s call     distributed/tests/test_stress.py::test_cancel_stress_sync
-7.34s call     distributed/tests/test_failed_workers.py::test_multiple_clients_restart
-7.15s call     distributed/tests/test_asyncprocess.py::test_num_fds
-6.96s call     distributed/tests/test_nanny.py::test_environ_plugin
-6.70s call     distributed/tests/test_gc.py::test_gc_diagnosis_rss_win
-6.54s call     distributed/cli/tests/test_tls_cli.py::test_nanny
+16.05s call     distributed/tests/test_gc.py::test_gc_diagnosis_cpu_time
+10.04s call     distributed/tests/test_utils_test.py::test_popen_timeout
+9.17s call     distributed/tests/test_stress.py::test_cancel_stress
+8.17s call     distributed/tests/test_stress.py::test_cancel_stress_sync
+7.65s call     distributed/tests/test_nanny.py::test_num_fds
+6.70s call     distributed/tests/test_failed_workers.py::test_worker_doesnt_await_task_completion
+6.02s call     distributed/shuffle/tests/test_rechunk.py::test_rechunk_method
+5.36s call     distributed/shuffle/tests/test_rechunk.py::test_homogeneously_schedule_unpack
+5.24s call     distributed/tests/test_steal.py::test_balance_with_longer_task
+5.14s call     distributed/tests/test_chaos.py::test_KillWorker[sys.exit]
+5.11s call     distributed/shuffle/tests/test_rechunk.py::test_rechunk_with_partially_unknown_dimension[x4-chunks4]
+5.09s call     distributed/shuffle/tests/test_rechunk.py::test_rechunk_with_partially_unknown_dimension[x3-chunks3]
+5.05s call     distributed/diagnostics/tests/test_progress.py::test_group_timing
+4.89s call     distributed/shuffle/tests/test_rechunk.py::test_rechunk_with_partially_unknown_dimension[x5-chunks5]
+4.62s call     distributed/tests/test_failed_workers.py::test_restart_sync
+4.48s call     distributed/tests/test_nanny.py::test_environ_plugin
+4.28s call     distributed/diagnostics/tests/test_progress.py::test_AllProgress
+4.14s call     distributed/tests/test_dask_collections.py::test_dataframe_groupby_tasks
+4.12s call     distributed/tests/test_tls_functional.py::test_retire_workers
+3.93s call     distributed/tests/test_failed_workers.py::test_multiple_clients_restart
 =========================== short test summary info ============================
 SKIPPED [1] distributed/cli/tests/test_dask_ssh.py:9: could not import 'paramiko': No module named 'paramiko'
 SKIPPED [1] distributed/comm/tests/test_ucx.py:15: could not import 'ucp': No module named 'ucp'
@@ -8297,9 +8310,10 @@
 SKIPPED [1] distributed/tests/test_worker_memory.py:871: need --runslow option to run
 SKIPPED [2] distributed/tests/test_worker_memory.py:883: need --runslow option to run
 SKIPPED [1] distributed/tests/test_worker_memory.py:997: need --runslow option to run
-FAILED distributed/tests/test_client.py::test_computation_object_code_client_submit_dict_comp - assert 2 == 1
- +  where 2 = len([<Computation 8567bce1-2c36-4461-89c4-01ef75bc391e: Tasks: memory: 10>, <Computation 66ebc6b0-a515-4e05-b19d-111903e1a725: Tasks: >])
-= 1 failed, 2912 passed, 264 skipped, 222 deselected, 15 xfailed, 8 xpassed, 2 rerun in 2255.35s (0:37:35) =
+FAILED distributed/diagnostics/tests/test_task_stream.py::test_client_sync - AssertionError: assert 2 == 10
+ +  where 2 = len([{'key': 'inc-bf6e4c654a999b0a0ace1814a611b982', 'metadata': {}, 'nbytes': 28, 'startstops': ({'action': 'compute', 'start': 1770735826.8685853, 'stop': 1770735826.868627},), ...}, {'key': 'inc-12806163c2662b70f178b2541b9ea713', 'metadata': {}, 'nbytes': 28, 'startstops': ({'action': 'compute', 'start': 1770735826.339644, 'stop': 1770735826.3396828},), ...}])
+ +    where [{'key': 'inc-bf6e4c654a999b0a0ace1814a611b982', 'metadata': {}, 'nbytes': 28, 'startstops': ({'action': 'compute', 'start': 1770735826.8685853, 'stop': 1770735826.868627},), ...}, {'key': 'inc-12806163c2662b70f178b2541b9ea713', 'metadata': {}, 'nbytes': 28, 'startstops': ({'action': 'compute', 'start': 1770735826.339644, 'stop': 1770735826.3396828},), ...}] = <distributed.client.get_task_stream object at 0xffff6f8e4470>.data
+= 1 failed, 2912 passed, 264 skipped, 222 deselected, 15 xfailed, 8 xpassed in 1278.71s (0:21:18) =
 *** END OF RUN 1: NOT ALL TESTS HAVE YET PASSED/XFAILED ***
 *** STARTING RUN 2: python3.12 -m pytest --pyargs distributed --verbose --color=no --timeout-method=signal --timeout=300 -m not avoid_ci -rfE --last-failed --last-failed-no-failures none --ignore=distributed/comm/tests/test_comms.py --ignore=distributed/comm/tests/test_ws.py --ignore=distributed/deploy/tests/test_adaptive.py --ignore=distributed/deploy/tests/test_local.py --ignore=distributed/deploy/tests/test_slow_adaptive.py --ignore=distributed/deploy/tests/test_spec_cluster.py --deselect=distributed/cli/tests/test_dask_scheduler.py::test_no_dashboard --deselect=distributed/deploy/tests/test_local.py::test_localcluster_get_client --deselect=distributed/deploy/tests/test_old_ssh.py::test_cluster --deselect=distributed/deploy/tests/test_old_ssh.py::test_old_ssh_nprocs_renamed_to_n_workers --deselect=distributed/deploy/tests/test_old_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/deploy/tests/test_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/http/tests/test_core.py::test_prometheus_api_doc --deselect=distributed/tests/test_init.py::test_git_revision --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout_returned --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_jupyter.py::test_shutsdown_cleanly --deselect=distributed/tests/test_profile.py::test_stack_overflow --deselect=distributed/tests/test_pubsub.py::test_client_worker --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_spill.py::test_spillbuffer_oserror --deselect=distributed/tests/test_steal.py::test_steal_twice --deselect=distributed/tests/test_utils_test.py::test_cluster --deselect=distributed/tests/test_variable.py::test_variable_in_task --deselect=distributed/tests/test_worker.py::test_process_executor_kills_process --deselect=distributed/tests/test_worker_memory.py::test_fail_to_pickle_execute_1 --deselect=distributed/tests/test_worker_state_machine.py::test_task_state_instance_are_garbage_collected --deselect=distributed/protocol/tests/test_protocol.py::test_deeply_nested_structures --deselect=distributed/protocol/tests/test_serialize.py::test_deeply_nested_structures --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_spec.py::test_errors --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/dashboard/tests/test_scheduler_bokeh.py::test_counters --deselect=distributed/dashboard/tests/test_worker_bokeh.py::test_counters --deselect=distributed/deploy/tests/test_local.py::test_adapt_then_manual --deselect=distributed/deploy/tests/test_local.py::test_async_with --deselect=distributed/deploy/tests/test_local.py::test_close_twice --deselect=distributed/deploy/tests/test_local.py::test_cluster_info_sync --deselect=distributed/deploy/tests/test_local.py::test_local_tls --deselect=distributed/deploy/tests/test_local.py::test_no_dangling_asyncio_tasks --deselect=distributed/deploy/tests/test_local.py::test_only_local_access --deselect=distributed/deploy/tests/test_local.py::test_remote_access --deselect=distributed/diagnostics/tests/test_progress_widgets.py::test_serializers 
--deselect=distributed/diagnostics/tests/test_scheduler_plugin.py::test_lifecycle --deselect=distributed/http/scheduler/tests/test_missing_bokeh.py::test_missing_bokeh --deselect=distributed/http/scheduler/tests/test_scheduler_http.py::test_metrics_when_prometheus_client_not_installed --deselect=distributed/protocol/tests/test_serialize.py::test_errors --deselect=distributed/tests/test_batched.py::test_BatchedSend --deselect=distributed/tests/test_batched.py::test_close_closed --deselect=distributed/tests/test_batched.py::test_close_twice --deselect=distributed/tests/test_batched.py::test_send_after_stream_start --deselect=distributed/tests/test_batched.py::test_send_before_close --deselect=distributed/tests/test_batched.py::test_send_before_start --deselect=distributed/tests/test_batched.py::test_sending_traffic_jam --deselect=distributed/tests/test_batched.py::test_serializers --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_async_with --deselect=distributed/tests/test_client.py::test_client_is_quiet_cluster_close --deselect=distributed/tests/test_client.py::test_dashboard_link_cluster --deselect=distributed/tests/test_client.py::test_dashboard_link_inproc --deselect=distributed/tests/test_client.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_client.py::test_mixing_clients_different_scheduler --deselect=distributed/tests/test_client.py::test_quiet_client_close --deselect=distributed/tests/test_client.py::test_rebalance_sync --deselect=distributed/tests/test_client.py::test_repr_localcluster --deselect=distributed/tests/test_client.py::test_security_loader --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_shutdown --deselect=distributed/tests/test_client.py::test_shutdown_is_quiet_with_cluster --deselect=distributed/tests/test_client.py::test_shutdown_localcluster --deselect=distributed/tests/test_client.py::test_shutdown_stops_callbacks --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_start_new_loop --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_use_running_loop --deselect=distributed/tests/test_core.py::test_close_fast_without_active_handlers --deselect=distributed/tests/test_core.py::test_close_grace_period_for_handlers --deselect=distributed/tests/test_core.py::test_close_properly --deselect=distributed/tests/test_core.py::test_compression --deselect=distributed/tests/test_core.py::test_connection_pool --deselect=distributed/tests/test_core.py::test_connection_pool_close_while_connecting --deselect=distributed/tests/test_core.py::test_connection_pool_detects_remote_close --deselect=distributed/tests/test_core.py::test_connection_pool_outside_cancellation --deselect=distributed/tests/test_core.py::test_connection_pool_remove --deselect=distributed/tests/test_core.py::test_connection_pool_respects_limit --deselect=distributed/tests/test_core.py::test_connection_pool_tls --deselect=distributed/tests/test_core.py::test_counters --deselect=distributed/tests/test_core.py::test_deserialize_error 
--deselect=distributed/tests/test_core.py::test_errors --deselect=distributed/tests/test_core.py::test_identity_inproc --deselect=distributed/tests/test_core.py::test_identity_tcp --deselect=distributed/tests/test_core.py::test_large_packets_inproc --deselect=distributed/tests/test_core.py::test_messages_are_ordered_bsend --deselect=distributed/tests/test_core.py::test_messages_are_ordered_raw --deselect=distributed/tests/test_core.py::test_ports --deselect=distributed/tests/test_core.py::test_rpc_default --deselect=distributed/tests/test_core.py::test_rpc_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_default --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_tcp --deselect=distributed/tests/test_core.py::test_rpc_serialization --deselect=distributed/tests/test_core.py::test_rpc_tcp --deselect=distributed/tests/test_core.py::test_rpc_tls --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_inproc --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_tcp --deselect=distributed/tests/test_core.py::test_send_recv_args --deselect=distributed/tests/test_core.py::test_send_recv_cancelled --deselect=distributed/tests/test_core.py::test_server --deselect=distributed/tests/test_core.py::test_server_comms_mark_active_handlers --deselect=distributed/tests/test_core.py::test_server_raises_on_blocked_handlers --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_locks.py::test_errors --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_worker_uses_same_host_as_nanny --deselect=distributed/tests/test_preload.py::test_failure_doesnt_crash_scheduler --deselect=distributed/tests/test_preload.py::test_preload_import_time --deselect=distributed/tests/test_preload.py::test_preload_manager_sequence --deselect=distributed/tests/test_preload.py::test_worker_preload_text --deselect=distributed/tests/test_scheduler.py::test_allowed_failures_config --deselect=distributed/tests/test_scheduler.py::test_async_context_manager --deselect=distributed/tests/test_scheduler.py::test_dashboard_host --deselect=distributed/tests/test_scheduler.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_scheduler.py::test_finished --deselect=distributed/tests/test_scheduler.py::test_multiple_listeners --deselect=distributed/tests/test_scheduler.py::test_no_dangling_asyncio_tasks --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_security.py::test_require_encryption --deselect=distributed/tests/test_security.py::test_tls_listen_connect --deselect=distributed/tests/test_security.py::test_tls_temporary_credentials_functional --deselect=distributed/tests/test_semaphore.py::test_threadpoolworkers_pick_correct_ioloop --deselect=distributed/tests/test_tls_functional.py::test_security_dict_input_no_security --deselect=distributed/tests/test_utils_test.py::test_ensure_no_new_clients --deselect=distributed/tests/test_utils_test.py::test_freeze_batched_send 
--deselect=distributed/tests/test_utils_test.py::test_locked_comm_drop_in_replacement --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_read --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_write --deselect=distributed/tests/test_worker.py::test_host_uses_scheduler_protocol --deselect=distributed/tests/test_worker.py::test_plugin_exception --deselect=distributed/tests/test_worker.py::test_plugin_internal_exception --deselect=distributed/tests/test_worker.py::test_plugin_multiple_exceptions --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker_client.py::test_dont_override_default_get --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_allowlist --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_protocols --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers_2 --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command_default --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_config --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_file --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_remote_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_scheduler_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_contact_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_error_during_startup --deselect=distributed/cli/tests/test_dask_worker.py::test_integer_names --deselect=distributed/cli/tests/test_dask_worker.py::test_listen_address_ipv6 --deselect=distributed/cli/tests/test_dask_worker.py::test_local_directory --deselect=distributed/cli/tests/test_dask_worker.py::test_memory_limit --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range_too_many_workers_raises --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_no_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_auto --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_expands_name --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_negative --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_requires_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_preload_config --deselect=distributed/cli/tests/test_dask_worker.py::test_resources 
--deselect=distributed/cli/tests/test_dask_worker.py::test_respect_host_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_address_env --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_restart_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_stagger_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_signal_handling --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_works --deselect=distributed/cli/tests/test_dask_worker.py::test_timeout --deselect=distributed/cli/tests/test_dask_worker.py::test_worker_class --deselect=distributed/tests/test_config.py::test_logging_extended --deselect=distributed/tests/test_config.py::test_logging_file_config --deselect=distributed/tests/test_config.py::test_logging_mutual_exclusive --deselect=distributed/tests/test_config.py::test_logging_simple --deselect=distributed/tests/test_config.py::test_logging_simple_under_distributed --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_variable.py::test_variable_in_task 
 ============================= test session starts ==============================
@@ -8311,16 +8325,16 @@
 timeout: 300.0s
 timeout method: signal
 timeout func_only: False
-collecting ... collected 5 items / 4 deselected / 1 selected
+collecting ... collected 1 item
 run-last-failure: rerun previous 1 failure (skipped 148 files)
 
-distributed/tests/test_client.py::test_computation_object_code_client_submit_dict_comp PASSED [100%]
+distributed/diagnostics/tests/test_task_stream.py::test_client_sync PASSED [100%]
 
 ============================= slowest 20 durations =============================
-0.80s call     distributed/tests/test_client.py::test_computation_object_code_client_submit_dict_comp
-0.00s setup    distributed/tests/test_client.py::test_computation_object_code_client_submit_dict_comp
-0.00s teardown distributed/tests/test_client.py::test_computation_object_code_client_submit_dict_comp
-======================= 1 passed, 4 deselected in 1.05s ========================
+1.70s setup    distributed/diagnostics/tests/test_task_stream.py::test_client_sync
+0.14s call     distributed/diagnostics/tests/test_task_stream.py::test_client_sync
+0.03s teardown distributed/diagnostics/tests/test_task_stream.py::test_client_sync
+============================== 1 passed in 2.01s ===============================
 *** END OF RUN 2: ALL TESTS RUN HAVE NOW PASSED/XFAILED ***
 I: pybuild pybuild:308: mkdir -pv /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/.config && printf '\173 "dataframe": \173 "query-planning": False \175 \175\n' > /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/.config/dask
 mkdir: created directory '/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/.config'
@@ -8434,6 +8448,11 @@
 distributed/diagnostics/tests/test_nanny_plugin.py::test_plugin_with_broken_setup_on_new_nanny_logs PASSED [  2%]
 distributed/diagnostics/tests/test_nanny_plugin.py::test_unregister_nanny_plugin_with_broken_teardown_raises PASSED [  3%]
 distributed/diagnostics/tests/test_nanny_plugin.py::test_nanny_plugin_with_broken_teardown_logs_on_close PASSED [  3%]
+distributed/diagnostics/tests/test_progress.py::test_many_Progress RERUN [  3%]
+distributed/diagnostics/tests/test_progress.py::test_many_Progress RERUN [  3%]
+distributed/diagnostics/tests/test_progress.py::test_many_Progress RERUN [  3%]
+distributed/diagnostics/tests/test_progress.py::test_many_Progress RERUN [  3%]
+distributed/diagnostics/tests/test_progress.py::test_many_Progress RERUN [  3%]
 distributed/diagnostics/tests/test_progress.py::test_many_Progress PASSED [  3%]
 distributed/diagnostics/tests/test_progress.py::test_multiprogress PASSED [  3%]
 distributed/diagnostics/tests/test_progress.py::test_multiprogress_cancel PASSED [  3%]
@@ -8480,7 +8499,7 @@
 distributed/diagnostics/tests/test_task_stream.py::test_collect PASSED   [  4%]
 distributed/diagnostics/tests/test_task_stream.py::test_no_startstops PASSED [  4%]
 distributed/diagnostics/tests/test_task_stream.py::test_client PASSED    [  4%]
-distributed/diagnostics/tests/test_task_stream.py::test_client_sync FAILED [  4%]
+distributed/diagnostics/tests/test_task_stream.py::test_client_sync PASSED [  4%]
 distributed/diagnostics/tests/test_task_stream.py::test_get_task_stream_plot SKIPPED [  4%]
 distributed/diagnostics/tests/test_task_stream.py::test_get_task_stream_save SKIPPED [  4%]
 distributed/diagnostics/tests/test_worker_plugin.py::test_create_with_client PASSED [  4%]
@@ -10070,7 +10089,7 @@
 distributed/tests/test_priorities.py::test_annotate_persist[queue on scheduler] PASSED [ 53%]
 distributed/tests/test_priorities.py::test_repeated_persists_same_priority[queue on worker] PASSED [ 53%]
 distributed/tests/test_priorities.py::test_repeated_persists_same_priority[queue on scheduler] PASSED [ 53%]
-distributed/tests/test_priorities.py::test_last_in_first_out[queue on worker] FAILED [ 53%]
+distributed/tests/test_priorities.py::test_last_in_first_out[queue on worker] PASSED [ 53%]
 distributed/tests/test_priorities.py::test_last_in_first_out[queue on scheduler] PASSED [ 53%]
 distributed/tests/test_profile.py::test_basic PASSED                     [ 53%]
 distributed/tests/test_profile.py::test_basic_low_level SKIPPED (could
@@ -10868,7 +10887,7 @@
 distributed/tests/test_steal.py::test_dont_steal_executing_tasks PASSED  [ 78%]
 distributed/tests/test_steal.py::test_dont_steal_executing_tasks_2 PASSED [ 78%]
 distributed/tests/test_steal.py::test_dont_steal_few_saturated_tasks_many_workers PASSED [ 78%]
-distributed/tests/test_steal.py::test_steal_when_more_tasks PASSED       [ 78%]
+distributed/tests/test_steal.py::test_steal_when_more_tasks FAILED       [ 78%]
 distributed/tests/test_steal.py::test_steal_more_attractive_tasks PASSED [ 78%]
 distributed/tests/test_steal.py::test_balance[don't move unnecessarily] PASSED [ 78%]
 distributed/tests/test_steal.py::test_balance[balance] PASSED            [ 78%]
@@ -10962,7 +10981,7 @@
 distributed/tests/test_tls_functional.py::test_client_submit PASSED      [ 81%]
 distributed/tests/test_tls_functional.py::test_gather PASSED             [ 81%]
 distributed/tests/test_tls_functional.py::test_scatter PASSED            [ 81%]
-distributed/tests/test_tls_functional.py::test_nanny FAILED              [ 81%]
+distributed/tests/test_tls_functional.py::test_nanny PASSED              [ 81%]
 distributed/tests/test_tls_functional.py::test_rebalance PASSED          [ 81%]
 distributed/tests/test_tls_functional.py::test_work_stealing PASSED      [ 81%]
 distributed/tests/test_tls_functional.py::test_worker_client PASSED      [ 81%]
@@ -11319,7 +11338,7 @@
 distributed/tests/test_worker.py::test_missing_released_zombie_tasks PASSED [ 91%]
 distributed/tests/test_worker.py::test_missing_released_zombie_tasks_2 PASSED [ 92%]
 distributed/tests/test_worker.py::test_worker_status_sync PASSED         [ 92%]
-distributed/tests/test_worker.py::test_log_remove_worker FAILED          [ 92%]
+distributed/tests/test_worker.py::test_log_remove_worker PASSED          [ 92%]
 distributed/tests/test_worker.py::test_task_flight_compute_oserror PASSED [ 92%]
 distributed/tests/test_worker.py::test_gather_dep_cancelled_rescheduled PASSED [ 92%]
 distributed/tests/test_worker.py::test_gather_dep_do_not_handle_response_of_not_requested_tasks PASSED [ 92%]
@@ -11376,7 +11395,7 @@
 distributed/tests/test_worker_memory.py::test_workerstate_fail_to_pickle_execute_1[long-running] PASSED [ 93%]
 distributed/tests/test_worker_memory.py::test_workerstate_fail_to_pickle_flight PASSED [ 93%]
 distributed/tests/test_worker_memory.py::test_fail_to_pickle_execute_2 PASSED [ 93%]
-distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill FAILED [ 93%]
+distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill PASSED [ 93%]
 distributed/tests/test_worker_memory.py::test_spill_target_threshold PASSED [ 93%]
 distributed/tests/test_worker_memory.py::test_spill_constrained PASSED   [ 93%]
 distributed/tests/test_worker_memory.py::test_spill_spill_threshold PASSED [ 93%]
@@ -11575,129 +11594,492 @@
 distributed/tests/test_worker_state_machine.py::test_remove_worker_unknown PASSED [100%]
 
 =================================== FAILURES ===================================
-_______________________________ test_client_sync _______________________________
-
-client = <Client: 'tcp://127.0.0.1:42413' processes=2 threads=2, memory=117.51 GiB>
+__________________________ test_steal_when_more_tasks __________________________
 
-    def test_client_sync(client):
-        with get_task_stream(client=client) as ts:
-            sleep(0.1)  # to smooth over time differences on the scheduler
-            # to smooth over time differences on the scheduler
-            futures = client.map(inc, range(10))
-            wait(futures)
+c = <Client: 'tcp://127.0.0.1:41997' processes=10 threads=10, memory=587.56 GiB>
+s = <Scheduler 'tcp://127.0.0.1:41997', workers: 0, cores: 0, tasks: 0>
+a = <Worker 'tcp://127.0.0.1:35875', name: 0, status: closed, stored: 0, running: 1/1, ready: 0, comm: 0, waiting: 0>
+rest = (<Worker 'tcp://127.0.0.1:36757', name: 1, status: closed, stored: 0, running: 0/1, ready: 0, comm: 1, waiting: 0>, <W...<Worker 'tcp://127.0.0.1:43465', name: 6, status: closed, stored: 0, running: 0/1, ready: 0, comm: 1, waiting: 0>, ...)
+x = <Future: cancelled, type: bytes, key: mul-e596ab3137dbb34a8b7a48fc09fce949>
+futures = [<Future: cancelled, type: bytes, key: slowidentity-8e4c933a-6407-4c44-abd2-889066a37e22>, <Future: cancelled, key: sl...52af70e-299f-4d0e-a1f6-56d4239c934e>, <Future: cancelled, key: slowidentity-c6c49dc0-fd42-4c8d-b6f6-09535695def4>, ...]
+
+    @gen_cluster(
+        client=True,
+        nthreads=[("127.0.0.1", 1)] * 10,
+        worker_kwargs={"memory_limit": MEMORY_LIMIT},
+        config={
+            "distributed.scheduler.default-task-durations": {"slowidentity": 0.2},
+            "distributed.scheduler.work-stealing-interval": "20ms",
+        },
+    )
+    async def test_steal_when_more_tasks(c, s, a, *rest):
+        x = c.submit(mul, b"0", 50000000, workers=a.address)  # 50 MB
+        await wait(x)
     
->       assert len(ts.data) == 10
-E       AssertionError: assert 1 == 10
-E        +  where 1 = len([{'key': 'inc-bf6e4c654a999b0a0ace1814a611b982', 'metadata': {}, 'nbytes': 28, 'startstops': ({'action': 'compute', 'start': 1736323136.2691898, 'stop': 1736323136.2698834},), ...}])
-E        +    where [{'key': 'inc-bf6e4c654a999b0a0ace1814a611b982', 'metadata': {}, 'nbytes': 28, 'startstops': ({'action': 'compute', 'start': 1736323136.2691898, 'stop': 1736323136.2698834},), ...}] = <distributed.client.get_task_stream object at 0xffff98392120>.data
-
-distributed/diagnostics/tests/test_task_stream.py:135: AssertionError
----------------------------- Captured stderr setup -----------------------------
-2025-01-07 19:58:54,432 - distributed.http.proxy - INFO - To route to workers diagnostics web server please install jupyter-server-proxy: python -m pip install jupyter-server-proxy
-2025-01-07 19:58:54,438 - distributed.scheduler - INFO - State start
-2025-01-07 19:58:54,445 - distributed.scheduler - INFO -   Scheduler at:     tcp://127.0.0.1:42413
-2025-01-07 19:58:54,446 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:8787/status
-2025-01-07 19:58:54,446 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 19:58:54,493 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:45343
-2025-01-07 19:58:54,494 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:45343
-2025-01-07 19:58:54,494 - distributed.worker - INFO -          dashboard at:            127.0.0.1:41287
-2025-01-07 19:58:54,494 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:42413
-2025-01-07 19:58:54,494 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 19:58:54,494 - distributed.worker - INFO -               Threads:                          1
-2025-01-07 19:58:54,494 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 19:58:54,494 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-rz5wfxmx
-2025-01-07 19:58:54,494 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 19:58:54,795 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:34351
-2025-01-07 19:58:54,795 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:34351
-2025-01-07 19:58:54,795 - distributed.worker - INFO -          dashboard at:            127.0.0.1:42015
-2025-01-07 19:58:54,795 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:42413
-2025-01-07 19:58:54,795 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 19:58:54,795 - distributed.worker - INFO -               Threads:                          1
-2025-01-07 19:58:54,795 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 19:58:54,795 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-4cwvy_81
-2025-01-07 19:58:54,795 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 19:58:54,857 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:45343 name: tcp://127.0.0.1:45343
-2025-01-07 19:58:55,635 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:45343
-2025-01-07 19:58:55,635 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:51854
-2025-01-07 19:58:55,636 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:34351 name: tcp://127.0.0.1:34351
-2025-01-07 19:58:55,637 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 19:58:55,638 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:42413
-2025-01-07 19:58:55,638 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 19:58:55,649 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:34351
-2025-01-07 19:58:55,649 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:51860
-2025-01-07 19:58:55,653 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:42413
-2025-01-07 19:58:55,653 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 19:58:55,654 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:42413
-2025-01-07 19:58:55,654 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 19:58:55,669 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:42413
-2025-01-07 19:58:55,711 - distributed.scheduler - INFO - Receive client connection: Client-68b03092-cd96-11ef-917c-e386edcdc4c0
-2025-01-07 19:58:55,712 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:34682
---------------------------- Captured stderr teardown ---------------------------
-2025-01-07 19:58:55,880 - distributed.scheduler - INFO - Remove client Client-68b03092-cd96-11ef-917c-e386edcdc4c0
-2025-01-07 19:58:55,880 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:34682; closing.
-2025-01-07 19:58:55,880 - distributed.scheduler - INFO - Remove client Client-68b03092-cd96-11ef-917c-e386edcdc4c0
-2025-01-07 19:58:55,881 - distributed.scheduler - INFO - Close client connection: Client-68b03092-cd96-11ef-917c-e386edcdc4c0
-___________________ test_last_in_first_out[queue on worker] ____________________
-
-c = <Client: No scheduler connected>
-s = <Scheduler 'tcp://127.0.0.1:46451', workers: 0, cores: 0, tasks: 0>
-a = <Worker 'tcp://127.0.0.1:41921', name: 0, status: closed, stored: 0, running: 1/1, ready: 0, comm: 0, waiting: 0>
-pause = False
-
-    @gen_blockable_cluster
-    async def test_last_in_first_out(c, s, a, pause):
-        async with block_worker(c, s, a, pause, 15, 5):
-            xs = [c.submit(slowinc, i, delay=0.05, key=f"x{i}") for i in range(5)]
-            ys = [c.submit(slowinc, xs[i], delay=0.05, key=f"y{i}") for i in range(5)]
-            zs = [c.submit(slowinc, ys[i], delay=0.05, key=f"z{i}") for i in range(5)]
+        futures = [c.submit(slowidentity, x, pure=False, delay=0.2) for i in range(20)]
     
-        while not any(s.tasks[z.key].state == "memory" for z in zs):
+        start = time()
+        while not any(w.state.tasks for w in rest):
             await asyncio.sleep(0.01)
->       assert not all(s.tasks[x.key].state == "memory" for x in xs)
-E       assert not True
-E        +  where True = all(<generator object test_last_in_first_out.<locals>.<genexpr> at 0xffff5874db10>)
+>           assert time() < start + 1
+E           assert 1770738045.9156072 < (1770738043.0377011 + 1)
+E            +  where 1770738045.9156072 = time()
 
-distributed/tests/test_priorities.py:234: AssertionError
+distributed/tests/test_steal.py:609: AssertionError
 ----------------------------- Captured stderr call -----------------------------
-2025-01-07 20:16:47,188 - distributed.scheduler - INFO - State start
-2025-01-07 20:16:47,193 - distributed.scheduler - INFO -   Scheduler at:     tcp://127.0.0.1:46451
-2025-01-07 20:16:47,195 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:39919/status
-2025-01-07 20:16:47,197 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:16:47,202 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:41921
-2025-01-07 20:16:47,204 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:41921
-2025-01-07 20:16:47,205 - distributed.worker - INFO -           Worker name:                          0
-2025-01-07 20:16:47,207 - distributed.worker - INFO -          dashboard at:            127.0.0.1:34997
-2025-01-07 20:16:47,208 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:46451
-2025-01-07 20:16:47,209 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:16:47,211 - distributed.worker - INFO -               Threads:                          1
-2025-01-07 20:16:47,212 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:16:47,217 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-5ai0ujuv
-2025-01-07 20:16:47,219 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:16:47,238 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:41921 name: 0
-2025-01-07 20:16:47,259 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:41921
-2025-01-07 20:16:47,260 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:48194
-2025-01-07 20:16:47,261 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:16:47,263 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:46451
-2025-01-07 20:16:47,265 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:16:47,267 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:46451
-2025-01-07 20:16:47,286 - distributed.scheduler - INFO - Receive client connection: Client-e765586a-cd98-11ef-917c-e386edcdc4c0
-2025-01-07 20:16:47,302 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:48206
-2025-01-07 20:16:48,074 - distributed.scheduler - INFO - Remove client Client-e765586a-cd98-11ef-917c-e386edcdc4c0
-2025-01-07 20:16:48,076 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:48206; closing.
-2025-01-07 20:16:48,076 - distributed.scheduler - INFO - Remove client Client-e765586a-cd98-11ef-917c-e386edcdc4c0
-2025-01-07 20:16:48,078 - distributed.scheduler - INFO - Close client connection: Client-e765586a-cd98-11ef-917c-e386edcdc4c0
-2025-01-07 20:16:48,090 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:41921. Reason: worker-close
-2025-01-07 20:16:48,092 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name="execute('y0')" coro=<Worker.execute() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
-2025-01-07 20:16:48,095 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:16:48,102 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:48194; closing.
-2025-01-07 20:16:48,102 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:41921 name: 0 (stimulus_id='handle-worker-cleanup-1736324208.102533')
-2025-01-07 20:16:48,104 - distributed.scheduler - INFO - Lost all workers
-2025-01-07 20:16:48,110 - distributed.core - INFO - Connection to tcp://127.0.0.1:46451 has been closed.
-2025-01-07 20:16:48,133 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
-2025-01-07 20:16:48,135 - distributed.scheduler - INFO - Scheduler closing all comms
-__________________________________ test_nanny __________________________________
+2026-02-11 05:40:41,995 - distributed.scheduler - INFO - State start
+2026-02-11 05:40:42,004 - distributed.scheduler - INFO -   Scheduler at:     tcp://127.0.0.1:41997
+2026-02-11 05:40:42,006 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41037/status
+2026-02-11 05:40:42,016 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:40:42,085 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:35875
+2026-02-11 05:40:42,087 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:35875
+2026-02-11 05:40:42,097 - distributed.worker - INFO -           Worker name:                          0
+2026-02-11 05:40:42,098 - distributed.worker - INFO -          dashboard at:            127.0.0.1:39163
+2026-02-11 05:40:42,104 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,106 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,116 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:40:42,117 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:40:42,119 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-dcmksi_s
+2026-02-11 05:40:42,120 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,133 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:36757
+2026-02-11 05:40:42,135 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:36757
+2026-02-11 05:40:42,137 - distributed.worker - INFO -           Worker name:                          1
+2026-02-11 05:40:42,138 - distributed.worker - INFO -          dashboard at:            127.0.0.1:37007
+2026-02-11 05:40:42,148 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,150 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,155 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:40:42,157 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:40:42,167 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-ypaixr6z
+2026-02-11 05:40:42,169 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,172 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:43321
+2026-02-11 05:40:42,174 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:43321
+2026-02-11 05:40:42,184 - distributed.worker - INFO -           Worker name:                          2
+2026-02-11 05:40:42,185 - distributed.worker - INFO -          dashboard at:            127.0.0.1:45617
+2026-02-11 05:40:42,191 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,193 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,194 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:40:42,204 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:40:42,206 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-uzqiqbmw
+2026-02-11 05:40:42,207 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,210 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:44081
+2026-02-11 05:40:42,220 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:44081
+2026-02-11 05:40:42,222 - distributed.worker - INFO -           Worker name:                          3
+2026-02-11 05:40:42,227 - distributed.worker - INFO -          dashboard at:            127.0.0.1:36899
+2026-02-11 05:40:42,229 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,231 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,241 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:40:42,242 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:40:42,244 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-clkf4ali
+2026-02-11 05:40:42,245 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,256 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:34531
+2026-02-11 05:40:42,258 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:34531
+2026-02-11 05:40:42,263 - distributed.worker - INFO -           Worker name:                          4
+2026-02-11 05:40:42,265 - distributed.worker - INFO -          dashboard at:            127.0.0.1:37947
+2026-02-11 05:40:42,270 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,272 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,274 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:40:42,276 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:40:42,278 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-m9j3ydsl
+2026-02-11 05:40:42,279 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,282 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:42011
+2026-02-11 05:40:42,284 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:42011
+2026-02-11 05:40:42,285 - distributed.worker - INFO -           Worker name:                          5
+2026-02-11 05:40:42,287 - distributed.worker - INFO -          dashboard at:            127.0.0.1:35701
+2026-02-11 05:40:42,288 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,290 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,291 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:40:42,292 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:40:42,294 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-wcxv5elx
+2026-02-11 05:40:42,295 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,298 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:43465
+2026-02-11 05:40:42,300 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:43465
+2026-02-11 05:40:42,302 - distributed.worker - INFO -           Worker name:                          6
+2026-02-11 05:40:42,303 - distributed.worker - INFO -          dashboard at:            127.0.0.1:34405
+2026-02-11 05:40:42,305 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,306 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,308 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:40:42,310 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:40:42,311 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-3flqcn74
+2026-02-11 05:40:42,313 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,315 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:38399
+2026-02-11 05:40:42,317 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:38399
+2026-02-11 05:40:42,319 - distributed.worker - INFO -           Worker name:                          7
+2026-02-11 05:40:42,320 - distributed.worker - INFO -          dashboard at:            127.0.0.1:34533
+2026-02-11 05:40:42,321 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,323 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,324 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:40:42,326 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:40:42,327 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-j0adtsfe
+2026-02-11 05:40:42,328 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,331 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:42455
+2026-02-11 05:40:42,332 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:42455
+2026-02-11 05:40:42,334 - distributed.worker - INFO -           Worker name:                          8
+2026-02-11 05:40:42,335 - distributed.worker - INFO -          dashboard at:            127.0.0.1:37265
+2026-02-11 05:40:42,337 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,338 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,339 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:40:42,341 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:40:42,342 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-4lm2l7_r
+2026-02-11 05:40:42,343 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,346 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:46233
+2026-02-11 05:40:42,348 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:46233
+2026-02-11 05:40:42,350 - distributed.worker - INFO -           Worker name:                          9
+2026-02-11 05:40:42,351 - distributed.worker - INFO -          dashboard at:            127.0.0.1:40899
+2026-02-11 05:40:42,352 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,354 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,355 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:40:42,357 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:40:42,358 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-n076xe05
+2026-02-11 05:40:42,360 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,588 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:35875 name: 0
+2026-02-11 05:40:42,620 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:35875
+2026-02-11 05:40:42,622 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43458
+2026-02-11 05:40:42,624 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:36757 name: 1
+2026-02-11 05:40:42,646 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:36757
+2026-02-11 05:40:42,653 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43474
+2026-02-11 05:40:42,653 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:43321 name: 2
+2026-02-11 05:40:42,694 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:43321
+2026-02-11 05:40:42,696 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43480
+2026-02-11 05:40:42,697 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:44081 name: 3
+2026-02-11 05:40:42,720 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:44081
+2026-02-11 05:40:42,722 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43486
+2026-02-11 05:40:42,723 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:34531 name: 4
+2026-02-11 05:40:42,745 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:34531
+2026-02-11 05:40:42,746 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43498
+2026-02-11 05:40:42,747 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:42011 name: 5
+2026-02-11 05:40:42,770 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:42011
+2026-02-11 05:40:42,772 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43512
+2026-02-11 05:40:42,773 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:43465 name: 6
+2026-02-11 05:40:42,794 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:43465
+2026-02-11 05:40:42,796 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43518
+2026-02-11 05:40:42,797 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:38399 name: 7
+2026-02-11 05:40:42,819 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:38399
+2026-02-11 05:40:42,821 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43520
+2026-02-11 05:40:42,821 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:42455 name: 8
+2026-02-11 05:40:42,843 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:42455
+2026-02-11 05:40:42,845 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43536
+2026-02-11 05:40:42,846 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:46233 name: 9
+2026-02-11 05:40:42,868 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:46233
+2026-02-11 05:40:42,870 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43550
+2026-02-11 05:40:42,872 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:40:42,874 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:40:42,876 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:40:42,878 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:40:42,880 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:40:42,882 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:40:42,884 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:40:42,886 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:40:42,888 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:40:42,890 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:40:42,893 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,895 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,896 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,898 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,900 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,902 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,904 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,905 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,907 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,909 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,910 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,912 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,914 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,916 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,918 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,919 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,921 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,923 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,924 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:41997
+2026-02-11 05:40:42,926 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:40:42,929 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41997
+2026-02-11 05:40:42,929 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41997
+2026-02-11 05:40:42,929 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41997
+2026-02-11 05:40:42,929 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41997
+2026-02-11 05:40:42,929 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41997
+2026-02-11 05:40:42,929 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41997
+2026-02-11 05:40:42,929 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41997
+2026-02-11 05:40:42,929 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41997
+2026-02-11 05:40:42,929 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41997
+2026-02-11 05:40:42,929 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:41997
+2026-02-11 05:40:42,982 - distributed.scheduler - INFO - Receive client connection: Client-dbe560ca-0696-11f1-931f-190d74729cd0
+2026-02-11 05:40:43,004 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43556
+2026-02-11 05:40:45,917 - distributed.core - INFO - Event loop was unresponsive in Worker for 3.02s.  This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability.
+2026-02-11 05:40:45,918 - distributed.core - INFO - Event loop was unresponsive in Worker for 3.02s.  This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability.
+2026-02-11 05:40:45,918 - distributed.core - INFO - Event loop was unresponsive in Worker for 3.01s.  This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability.
+2026-02-11 05:40:45,920 - distributed.core - INFO - Event loop was unresponsive in Worker for 3.01s.  This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability.
+2026-02-11 05:40:45,921 - distributed.core - INFO - Event loop was unresponsive in Worker for 3.01s.  This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability.
+2026-02-11 05:40:45,922 - distributed.core - INFO - Event loop was unresponsive in Worker for 3.01s.  This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability.
+2026-02-11 05:40:45,922 - distributed.core - INFO - Event loop was unresponsive in Worker for 3.00s.  This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability.
+2026-02-11 05:40:45,923 - distributed.core - INFO - Event loop was unresponsive in Worker for 3.00s.  This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability.
+2026-02-11 05:40:46,063 - distributed.scheduler - INFO - Remove client Client-dbe560ca-0696-11f1-931f-190d74729cd0
+2026-02-11 05:40:46,066 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43556; closing.
+2026-02-11 05:40:46,075 - distributed.scheduler - INFO - Remove client Client-dbe560ca-0696-11f1-931f-190d74729cd0
+2026-02-11 05:40:46,101 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:35875. Reason: worker-close
+2026-02-11 05:40:46,104 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:36757. Reason: worker-close
+2026-02-11 05:40:46,105 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:43321. Reason: worker-close
+2026-02-11 05:40:46,108 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:44081. Reason: worker-close
+2026-02-11 05:40:46,110 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:34531. Reason: worker-close
+2026-02-11 05:40:46,112 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:42011. Reason: worker-close
+2026-02-11 05:40:46,113 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:43465. Reason: worker-close
+2026-02-11 05:40:46,123 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:38399. Reason: worker-close
+2026-02-11 05:40:46,125 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:42455. Reason: worker-close
+2026-02-11 05:40:46,131 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:46233. Reason: worker-close
+2026-02-11 05:40:46,161 - distributed.batched - INFO - Batched Comm Closed <TCP (closed) Scheduler->Client local=tcp://127.0.0.1:41997 remote=tcp://127.0.0.1:43556>
+Traceback (most recent call last):
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/batched.py", line 115, in _background_send
+    nbytes = yield coro
+             ^^^^^^^^^^
+  File "/usr/lib/python3/dist-packages/tornado/gen.py", line 766, in run
+    value = future.result()
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 262, in write
+    raise CommClosedError()
+distributed.comm.core.CommClosedError
+2026-02-11 05:40:46,167 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name="execute('slowidentity-a4d72076-d3ed-4066-9cfc-264837e0928c')" coro=<Worker.execute() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
+2026-02-11 05:40:46,169 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name='gather_dep(tcp://127.0.0.1:35875, {mul-e596ab3137dbb34a8b7a48fc09fce949})' coro=<Worker.gather_dep() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
+2026-02-11 05:40:46,172 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name='gather_dep(tcp://127.0.0.1:35875, {mul-e596ab3137dbb34a8b7a48fc09fce949})' coro=<Worker.gather_dep() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
+2026-02-11 05:40:46,174 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name='gather_dep(tcp://127.0.0.1:35875, {mul-e596ab3137dbb34a8b7a48fc09fce949})' coro=<Worker.gather_dep() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
+2026-02-11 05:40:46,176 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name='gather_dep(tcp://127.0.0.1:35875, {mul-e596ab3137dbb34a8b7a48fc09fce949})' coro=<Worker.gather_dep() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
+2026-02-11 05:40:46,179 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name='gather_dep(tcp://127.0.0.1:35875, {mul-e596ab3137dbb34a8b7a48fc09fce949})' coro=<Worker.gather_dep() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
+2026-02-11 05:40:46,181 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name='gather_dep(tcp://127.0.0.1:35875, {mul-e596ab3137dbb34a8b7a48fc09fce949})' coro=<Worker.gather_dep() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
+2026-02-11 05:40:46,185 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name='gather_dep(tcp://127.0.0.1:35875, {mul-e596ab3137dbb34a8b7a48fc09fce949})' coro=<Worker.gather_dep() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
+2026-02-11 05:40:46,187 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name='gather_dep(tcp://127.0.0.1:35875, {mul-e596ab3137dbb34a8b7a48fc09fce949})' coro=<Worker.gather_dep() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
+2026-02-11 05:40:46,190 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name='gather_dep(tcp://127.0.0.1:35875, {mul-e596ab3137dbb34a8b7a48fc09fce949})' coro=<Worker.gather_dep() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
+2026-02-11 05:40:46,192 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:35875 -> tcp://127.0.0.1:43321
+Traceback (most recent call last):
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 962, in _handle_write
+    num_bytes = self.write_to_fd(self._write_buffer.peek(size))
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 1124, in write_to_fd
+    return self.socket.send(data)  # type: ignore
+           ~~~~~~~~~~~~~~~~^^^^^^
+BrokenPipeError: [Errno 32] Broken pipe
+
+The above exception was the direct cause of the following exception:
+
+Traceback (most recent call last):
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1797, in get_data
+    response = await comm.read(deserializers=serializers)
+               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read
+    convert_stream_closed_error(self, e)
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 140, in convert_stream_closed_error
+    raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc
+distributed.comm.core.CommClosedError: in <TCP (closed)  local=tcp://127.0.0.1:35875 remote=tcp://127.0.0.1:47454>: BrokenPipeError: [Errno 32] Broken pipe
+2026-02-11 05:40:46,196 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:35875 -> tcp://127.0.0.1:38399
+Traceback (most recent call last):
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 962, in _handle_write
+    num_bytes = self.write_to_fd(self._write_buffer.peek(size))
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 1124, in write_to_fd
+    return self.socket.send(data)  # type: ignore
+           ~~~~~~~~~~~~~~~~^^^^^^
+BrokenPipeError: [Errno 32] Broken pipe
+
+The above exception was the direct cause of the following exception:
 
-fut = <coroutine object Nanny.start_unsafe at 0xffff00114eb0>, timeout = 0
+Traceback (most recent call last):
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1797, in get_data
+    response = await comm.read(deserializers=serializers)
+               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read
+    convert_stream_closed_error(self, e)
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 140, in convert_stream_closed_error
+    raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc
+distributed.comm.core.CommClosedError: in <TCP (closed)  local=tcp://127.0.0.1:35875 remote=tcp://127.0.0.1:47462>: BrokenPipeError: [Errno 32] Broken pipe
+2026-02-11 05:40:46,199 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:35875 -> tcp://127.0.0.1:42455
+Traceback (most recent call last):
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 962, in _handle_write
+    num_bytes = self.write_to_fd(self._write_buffer.peek(size))
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 1124, in write_to_fd
+    return self.socket.send(data)  # type: ignore
+           ~~~~~~~~~~~~~~~~^^^^^^
+BrokenPipeError: [Errno 32] Broken pipe
+
+The above exception was the direct cause of the following exception:
+
+Traceback (most recent call last):
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1797, in get_data
+    response = await comm.read(deserializers=serializers)
+               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read
+    convert_stream_closed_error(self, e)
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 140, in convert_stream_closed_error
+    raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc
+distributed.comm.core.CommClosedError: in <TCP (closed)  local=tcp://127.0.0.1:35875 remote=tcp://127.0.0.1:47506>: BrokenPipeError: [Errno 32] Broken pipe
+2026-02-11 05:40:46,202 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:35875 -> tcp://127.0.0.1:36757
+Traceback (most recent call last):
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 962, in _handle_write
+    num_bytes = self.write_to_fd(self._write_buffer.peek(size))
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 1124, in write_to_fd
+    return self.socket.send(data)  # type: ignore
+           ~~~~~~~~~~~~~~~~^^^^^^
+BrokenPipeError: [Errno 32] Broken pipe
+
+The above exception was the direct cause of the following exception:
+
+Traceback (most recent call last):
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1797, in get_data
+    response = await comm.read(deserializers=serializers)
+               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read
+    convert_stream_closed_error(self, e)
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 140, in convert_stream_closed_error
+    raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc
+distributed.comm.core.CommClosedError: in <TCP (closed)  local=tcp://127.0.0.1:35875 remote=tcp://127.0.0.1:47502>: BrokenPipeError: [Errno 32] Broken pipe
+2026-02-11 05:40:46,205 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:35875 -> tcp://127.0.0.1:44081
+Traceback (most recent call last):
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 962, in _handle_write
+    num_bytes = self.write_to_fd(self._write_buffer.peek(size))
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 1124, in write_to_fd
+    return self.socket.send(data)  # type: ignore
+           ~~~~~~~~~~~~~~~~^^^^^^
+BrokenPipeError: [Errno 32] Broken pipe
+
+The above exception was the direct cause of the following exception:
+
+Traceback (most recent call last):
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1797, in get_data
+    response = await comm.read(deserializers=serializers)
+               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read
+    convert_stream_closed_error(self, e)
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 140, in convert_stream_closed_error
+    raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc
+distributed.comm.core.CommClosedError: in <TCP (closed)  local=tcp://127.0.0.1:35875 remote=tcp://127.0.0.1:47488>: BrokenPipeError: [Errno 32] Broken pipe
+2026-02-11 05:40:46,208 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:35875 -> tcp://127.0.0.1:42011
+Traceback (most recent call last):
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 962, in _handle_write
+    num_bytes = self.write_to_fd(self._write_buffer.peek(size))
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 1124, in write_to_fd
+    return self.socket.send(data)  # type: ignore
+           ~~~~~~~~~~~~~~~~^^^^^^
+BrokenPipeError: [Errno 32] Broken pipe
+
+The above exception was the direct cause of the following exception:
+
+Traceback (most recent call last):
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1797, in get_data
+    response = await comm.read(deserializers=serializers)
+               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read
+    convert_stream_closed_error(self, e)
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 140, in convert_stream_closed_error
+    raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc
+distributed.comm.core.CommClosedError: in <TCP (closed)  local=tcp://127.0.0.1:35875 remote=tcp://127.0.0.1:47480>: BrokenPipeError: [Errno 32] Broken pipe
+2026-02-11 05:40:46,211 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:35875 -> tcp://127.0.0.1:46233
+Traceback (most recent call last):
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 962, in _handle_write
+    num_bytes = self.write_to_fd(self._write_buffer.peek(size))
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 1124, in write_to_fd
+    return self.socket.send(data)  # type: ignore
+           ~~~~~~~~~~~~~~~~^^^^^^
+BrokenPipeError: [Errno 32] Broken pipe
+
+The above exception was the direct cause of the following exception:
+
+Traceback (most recent call last):
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1797, in get_data
+    response = await comm.read(deserializers=serializers)
+               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read
+    convert_stream_closed_error(self, e)
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 140, in convert_stream_closed_error
+    raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc
+distributed.comm.core.CommClosedError: in <TCP (closed)  local=tcp://127.0.0.1:35875 remote=tcp://127.0.0.1:47476>: BrokenPipeError: [Errno 32] Broken pipe
+2026-02-11 05:40:46,214 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:35875 -> tcp://127.0.0.1:43465
+Traceback (most recent call last):
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 962, in _handle_write
+    num_bytes = self.write_to_fd(self._write_buffer.peek(size))
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 1124, in write_to_fd
+    return self.socket.send(data)  # type: ignore
+           ~~~~~~~~~~~~~~~~^^^^^^
+BrokenPipeError: [Errno 32] Broken pipe
+
+The above exception was the direct cause of the following exception:
+
+Traceback (most recent call last):
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1797, in get_data
+    response = await comm.read(deserializers=serializers)
+               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read
+    convert_stream_closed_error(self, e)
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 140, in convert_stream_closed_error
+    raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc
+distributed.comm.core.CommClosedError: in <TCP (closed)  local=tcp://127.0.0.1:35875 remote=tcp://127.0.0.1:47524>: BrokenPipeError: [Errno 32] Broken pipe
+2026-02-11 05:40:46,217 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:35875 -> tcp://127.0.0.1:34531
+Traceback (most recent call last):
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 962, in _handle_write
+    num_bytes = self.write_to_fd(self._write_buffer.peek(size))
+  File "/usr/lib/python3/dist-packages/tornado/iostream.py", line 1124, in write_to_fd
+    return self.socket.send(data)  # type: ignore
+           ~~~~~~~~~~~~~~~~^^^^^^
+BrokenPipeError: [Errno 32] Broken pipe
+
+The above exception was the direct cause of the following exception:
+
+Traceback (most recent call last):
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1797, in get_data
+    response = await comm.read(deserializers=serializers)
+               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read
+    convert_stream_closed_error(self, e)
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^
+  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 140, in convert_stream_closed_error
+    raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc
+distributed.comm.core.CommClosedError: in <TCP (closed)  local=tcp://127.0.0.1:35875 remote=tcp://127.0.0.1:47512>: BrokenPipeError: [Errno 32] Broken pipe
+2026-02-11 05:40:46,224 - distributed.scheduler - INFO - Close client connection: Client-dbe560ca-0696-11f1-931f-190d74729cd0
+2026-02-11 05:40:46,226 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:40:46,228 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:40:46,230 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:40:46,231 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:40:46,233 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:40:46,234 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:40:46,236 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:40:46,237 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:40:46,238 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:40:46,240 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:40:46,248 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43474; closing.
+2026-02-11 05:40:46,248 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43480; closing.
+2026-02-11 05:40:46,248 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43486; closing.
+2026-02-11 05:40:46,248 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43498; closing.
+2026-02-11 05:40:46,248 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43512; closing.
+2026-02-11 05:40:46,249 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43518; closing.
+2026-02-11 05:40:46,249 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43520; closing.
+2026-02-11 05:40:46,249 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43536; closing.
+2026-02-11 05:40:46,249 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43550; closing.
+2026-02-11 05:40:46,249 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:36757 name: 1 (stimulus_id='handle-worker-cleanup-1770738046.2498937')
+2026-02-11 05:40:46,252 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:43321 name: 2 (stimulus_id='handle-worker-cleanup-1770738046.2522955')
+2026-02-11 05:40:46,254 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:44081 name: 3 (stimulus_id='handle-worker-cleanup-1770738046.254521')
+2026-02-11 05:40:46,258 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:34531 name: 4 (stimulus_id='handle-worker-cleanup-1770738046.2580652')
+2026-02-11 05:40:46,260 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:42011 name: 5 (stimulus_id='handle-worker-cleanup-1770738046.2603815')
+2026-02-11 05:40:46,262 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:43465 name: 6 (stimulus_id='handle-worker-cleanup-1770738046.2626793')
+2026-02-11 05:40:46,264 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:38399 name: 7 (stimulus_id='handle-worker-cleanup-1770738046.2649336')
+2026-02-11 05:40:46,267 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:42455 name: 8 (stimulus_id='handle-worker-cleanup-1770738046.2670844')
+2026-02-11 05:40:46,269 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:46233 name: 9 (stimulus_id='handle-worker-cleanup-1770738046.2693248')
+2026-02-11 05:40:46,271 - distributed.core - INFO - Connection to tcp://127.0.0.1:41997 has been closed.
+2026-02-11 05:40:46,275 - distributed.core - INFO - Connection to tcp://127.0.0.1:41997 has been closed.
+2026-02-11 05:40:46,275 - distributed.core - INFO - Connection to tcp://127.0.0.1:41997 has been closed.
+2026-02-11 05:40:46,290 - distributed.core - INFO - Connection to tcp://127.0.0.1:41997 has been closed.
+2026-02-11 05:40:46,290 - distributed.core - INFO - Connection to tcp://127.0.0.1:41997 has been closed.
+2026-02-11 05:40:46,291 - distributed.core - INFO - Connection to tcp://127.0.0.1:41997 has been closed.
+2026-02-11 05:40:46,293 - distributed.core - INFO - Connection to tcp://127.0.0.1:41997 has been closed.
+2026-02-11 05:40:46,294 - distributed.core - INFO - Connection to tcp://127.0.0.1:41997 has been closed.
+2026-02-11 05:40:46,294 - distributed.core - INFO - Connection to tcp://127.0.0.1:41997 has been closed.
+2026-02-11 05:40:46,294 - distributed.core - INFO - Connection to tcp://127.0.0.1:41997 has been closed.
+2026-02-11 05:40:46,296 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43458; closing.
+2026-02-11 05:40:46,296 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:35875 name: 0 (stimulus_id='handle-worker-cleanup-1770738046.2969232')
+2026-02-11 05:40:46,298 - distributed.scheduler - INFO - Lost all workers
+2026-02-11 05:40:46,309 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
+2026-02-11 05:40:46,312 - distributed.scheduler - INFO - Scheduler closing all comms
+_____________________________ test_retire_workers ______________________________
+
+fut = <coroutine object Nanny.start_unsafe at 0xffff222bd000>, timeout = 0
 
     async def wait_for(fut: Awaitable[T], timeout: float) -> T:
         async with asyncio.timeout(timeout):
@@ -11719,10 +12101,10 @@
     stream = await self.client.connect(
 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
 
-self = <tornado.tcpclient.TCPClient object at 0xffffa614cad0>
-host = '127.0.0.1', port = 33751, af = <AddressFamily.AF_UNSPEC: 0>
-ssl_options = <ssl.SSLContext object at 0xffff0070f890>
-max_buffer_size = 31544635392.0, source_ip = None, source_port = None
+self = <tornado.tcpclient.TCPClient object at 0xffff944fcad0>
+host = '127.0.0.1', port = 36529, af = <AddressFamily.AF_UNSPEC: 0>
+ssl_options = <ssl.SSLContext object at 0xffff449e9fd0>
+max_buffer_size = 31544629248.0, source_ip = None, source_port = None
 timeout = None
 
     async def connect(
@@ -11818,7 +12200,7 @@
 
 self = <Timeout [expired]>
 exc_type = <class 'asyncio.exceptions.CancelledError'>
-exc_val = CancelledError(), exc_tb = <traceback object at 0xffff00157200>
+exc_val = CancelledError(), exc_tb = <traceback object at 0xffff3cddb780>
 
     async def __aexit__(
         self,
@@ -11903,7 +12285,7 @@
 
 During handling of the above exception, another exception occurred:
 
-fut = <coroutine object gen_cluster.<locals>._.<locals>.test_func.<locals>.async_fn at 0xffff5a69e340>
+fut = <coroutine object gen_cluster.<locals>._.<locals>.test_func.<locals>.async_fn at 0xffff47c6a340>
 timeout = 60
 
     async def wait_for(fut: Awaitable[T], timeout: float) -> T:
@@ -11975,7 +12357,7 @@
 
 self = <Timeout [expired]>
 exc_type = <class 'asyncio.exceptions.CancelledError'>
-exc_val = CancelledError(), exc_tb = <traceback object at 0xffff233d0ac0>
+exc_val = CancelledError(), exc_tb = <traceback object at 0xffff22181600>
 
     async def __aexit__(
         self,
@@ -12001,71 +12383,73 @@
 
 /usr/lib/python3.13/asyncio/timeouts.py:116: TimeoutError
 ----------------------------- Captured stderr call -----------------------------
-2025-01-07 20:23:13,478 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:13,493 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:42593
-2025-01-07 20:23:13,495 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:38847/status
-2025-01-07 20:23:13,501 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:13,556 - distributed.nanny - INFO -         Start Nanny at: 'tls://127.0.0.1:42165'
-2025-01-07 20:23:13,564 - distributed.nanny - INFO -         Start Nanny at: 'tls://127.0.0.1:38925'
-2025-01-07 20:23:14,316 - distributed.worker.memory - WARNING - Ignoring provided memory limit 63089270784 due to system memory limit of 58.76 GiB
-2025-01-07 20:23:14,337 - distributed.worker - INFO -       Start worker at:      tls://127.0.0.1:41105
-2025-01-07 20:23:14,337 - distributed.worker - INFO -          Listening to:      tls://127.0.0.1:41105
-2025-01-07 20:23:14,337 - distributed.worker - INFO -           Worker name:                          0
-2025-01-07 20:23:14,337 - distributed.worker - INFO -          dashboard at:            127.0.0.1:36223
-2025-01-07 20:23:14,337 - distributed.worker - INFO - Waiting to connect to:      tls://127.0.0.1:42593
-2025-01-07 20:23:14,337 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:23:14,337 - distributed.worker - INFO -               Threads:                          1
-2025-01-07 20:23:14,337 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:23:14,337 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-ismu5i_d
-2025-01-07 20:23:14,337 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:23:14,619 - distributed.worker.memory - WARNING - Ignoring provided memory limit 63089270784 due to system memory limit of 58.76 GiB
-2025-01-07 20:23:14,638 - distributed.worker - INFO -       Start worker at:      tls://127.0.0.1:41821
-2025-01-07 20:23:14,638 - distributed.worker - INFO -          Listening to:      tls://127.0.0.1:41821
-2025-01-07 20:23:14,638 - distributed.worker - INFO -           Worker name:                          1
-2025-01-07 20:23:14,638 - distributed.worker - INFO -          dashboard at:            127.0.0.1:41457
-2025-01-07 20:23:14,638 - distributed.worker - INFO - Waiting to connect to:      tls://127.0.0.1:42593
-2025-01-07 20:23:14,638 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:23:14,638 - distributed.worker - INFO -               Threads:                          2
-2025-01-07 20:23:14,638 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:23:14,638 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-il16u814
-2025-01-07 20:23:14,638 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:23:14,726 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:41105 name: 0
-2025-01-07 20:23:14,757 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:41105
-2025-01-07 20:23:14,760 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:43628
-2025-01-07 20:23:14,761 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:23:14,762 - distributed.worker - INFO -         Registered to:      tls://127.0.0.1:42593
-2025-01-07 20:23:14,762 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:23:14,785 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:42593
-2025-01-07 20:23:14,977 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:41821 name: 1
-2025-01-07 20:23:15,006 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:41821
-2025-01-07 20:23:15,009 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:43640
-2025-01-07 20:23:15,009 - distributed.core - INFO - Connection to tls://127.0.0.1:43640 has been closed.
-2025-01-07 20:23:15,009 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:23:15,010 - distributed.worker - INFO -         Registered to:      tls://127.0.0.1:42593
-2025-01-07 20:23:15,011 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:23:15,009 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:41821 name: 1 (stimulus_id='handle-worker-cleanup-1736324595.0098538')
-2025-01-07 20:23:15,033 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:42593
-2025-01-07 20:23:15,033 - distributed.core - INFO - Connection to tls://127.0.0.1:42593 has been closed.
-2025-01-07 20:23:15,033 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:41821. Reason: worker-handle-scheduler-connection-broken
-2025-01-07 20:23:15,073 - distributed.nanny - INFO - Closing Nanny gracefully at 'tls://127.0.0.1:38925'. Reason: worker-handle-scheduler-connection-broken
-2025-01-07 20:23:15,077 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:23:15,101 - distributed.nanny - INFO - Worker closed
-2025-01-07 20:23:17,161 - distributed.nanny - ERROR - Worker process died unexpectedly
-2025-01-07 20:23:17,857 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38925'. Reason: nanny-close-gracefully
-2025-01-07 20:23:17,858 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38925' closed.
-2025-01-07 20:23:45,077 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42165'. Reason: nanny-close
-2025-01-07 20:23:45,077 - distributed.nanny - INFO - Nanny asking worker to close. Reason: nanny-close
-2025-01-07 20:23:45,089 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:41105. Reason: nanny-close
-2025-01-07 20:23:45,089 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:23:45,091 - distributed.core - INFO - Connection to tls://127.0.0.1:42593 has been closed.
-2025-01-07 20:23:45,093 - distributed.core - INFO - Received 'close-stream' from tls://127.0.0.1:43628; closing.
-2025-01-07 20:23:45,093 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:41105 name: 0 (stimulus_id='handle-worker-cleanup-1736324625.09354')
-2025-01-07 20:23:45,096 - distributed.scheduler - INFO - Lost all workers
-2025-01-07 20:23:45,113 - distributed.nanny - INFO - Worker closed
-2025-01-07 20:23:45,753 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42165' closed.
-2025-01-07 20:23:45,753 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
-2025-01-07 20:23:45,756 - distributed.scheduler - INFO - Scheduler closing all comms
-2025-01-07 20:23:45,759 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Cluster creation timeout; retrying
+2026-02-11 05:42:35,196 - distributed.scheduler - INFO - State start
+2026-02-11 05:42:35,202 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40015
+2026-02-11 05:42:35,204 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34087/status
+2026-02-11 05:42:35,206 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:42:35,239 - distributed.nanny - INFO -         Start Nanny at: 'tls://127.0.0.1:44863'
+2026-02-11 05:42:35,240 - distributed.nanny - INFO -         Start Nanny at: 'tls://127.0.0.1:35605'
+2026-02-11 05:42:36,025 - distributed.worker.memory - WARNING - Ignoring provided memory limit 63089258496 due to system memory limit of 58.76 GiB
+2026-02-11 05:42:36,045 - distributed.worker - INFO -       Start worker at:      tls://127.0.0.1:40799
+2026-02-11 05:42:36,045 - distributed.worker - INFO -          Listening to:      tls://127.0.0.1:40799
+2026-02-11 05:42:36,045 - distributed.worker - INFO -           Worker name:                          1
+2026-02-11 05:42:36,045 - distributed.worker - INFO -          dashboard at:            127.0.0.1:46335
+2026-02-11 05:42:36,045 - distributed.worker - INFO - Waiting to connect to:      tls://127.0.0.1:40015
+2026-02-11 05:42:36,045 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:42:36,045 - distributed.worker - INFO -               Threads:                          2
+2026-02-11 05:42:36,045 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:42:36,045 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-jh20nkh3
+2026-02-11 05:42:36,045 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:42:36,079 - distributed.worker.memory - WARNING - Ignoring provided memory limit 63089258496 due to system memory limit of 58.76 GiB
+2026-02-11 05:42:36,100 - distributed.worker - INFO -       Start worker at:      tls://127.0.0.1:42549
+2026-02-11 05:42:36,100 - distributed.worker - INFO -          Listening to:      tls://127.0.0.1:42549
+2026-02-11 05:42:36,100 - distributed.worker - INFO -           Worker name:                          0
+2026-02-11 05:42:36,100 - distributed.worker - INFO -          dashboard at:            127.0.0.1:34065
+2026-02-11 05:42:36,100 - distributed.worker - INFO - Waiting to connect to:      tls://127.0.0.1:40015
+2026-02-11 05:42:36,100 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:42:36,100 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:42:36,100 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:42:36,100 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-x3pu39tt
+2026-02-11 05:42:36,100 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:42:36,392 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:40799 name: 1
+2026-02-11 05:42:36,446 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:42:36,447 - distributed.worker - INFO -         Registered to:      tls://127.0.0.1:40015
+2026-02-11 05:42:36,447 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:42:36,448 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:40015
+2026-02-11 05:42:36,445 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:40799
+2026-02-11 05:42:36,448 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:44382
+2026-02-11 05:42:36,456 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:42549 name: 0
+2026-02-11 05:42:36,499 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:42:36,500 - distributed.worker - INFO -         Registered to:      tls://127.0.0.1:40015
+2026-02-11 05:42:36,500 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:42:36,501 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:40015
+2026-02-11 05:42:36,498 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:42549
+2026-02-11 05:42:36,501 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:44392
+2026-02-11 05:42:36,502 - distributed.core - INFO - Connection to tls://127.0.0.1:44392 has been closed.
+2026-02-11 05:42:36,503 - distributed.core - INFO - Connection to tls://127.0.0.1:40015 has been closed.
+2026-02-11 05:42:36,503 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:42549. Reason: worker-handle-scheduler-connection-broken
+2026-02-11 05:42:36,502 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:42549 name: 0 (stimulus_id='handle-worker-cleanup-1770738156.5029278')
+2026-02-11 05:42:36,526 - distributed.nanny - INFO - Closing Nanny gracefully at 'tls://127.0.0.1:44863'. Reason: worker-handle-scheduler-connection-broken
+2026-02-11 05:42:36,527 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:42:36,530 - distributed.nanny - INFO - Worker closed
+2026-02-11 05:42:38,533 - distributed.nanny - ERROR - Worker process died unexpectedly
+2026-02-11 05:42:38,852 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44863'. Reason: nanny-close-gracefully
+2026-02-11 05:42:38,852 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44863' closed.
+2026-02-11 05:43:06,527 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35605'. Reason: nanny-close
+2026-02-11 05:43:06,527 - distributed.nanny - INFO - Nanny asking worker to close. Reason: nanny-close
+2026-02-11 05:43:06,531 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:40799. Reason: nanny-close
+2026-02-11 05:43:06,532 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:43:06,533 - distributed.core - INFO - Connection to tls://127.0.0.1:40015 has been closed.
+2026-02-11 05:43:06,536 - distributed.core - INFO - Received 'close-stream' from tls://127.0.0.1:44382; closing.
+2026-02-11 05:43:06,544 - distributed.nanny - INFO - Worker closed
+2026-02-11 05:43:06,537 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:40799 name: 1 (stimulus_id='handle-worker-cleanup-1770738186.537061')
+2026-02-11 05:43:06,547 - distributed.scheduler - INFO - Lost all workers
+2026-02-11 05:43:07,331 - distributed.nanny - WARNING - Worker process still alive after 0.8 seconds, killing
+2026-02-11 05:43:07,355 - distributed.nanny - INFO - Worker process 3536411 was killed by signal 9
+2026-02-11 05:43:07,356 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35605' closed.
+2026-02-11 05:43:07,356 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
+2026-02-11 05:43:07,363 - distributed.scheduler - INFO - Scheduler closing all comms
+2026-02-11 05:43:07,366 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Cluster creation timeout; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
     s, ws = await start_cluster(
@@ -12076,1834 +12460,17 @@
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 806, in start_cluster
     raise TimeoutError("Cluster creation timeout")
 TimeoutError: Cluster creation timeout
-2025-01-07 20:23:46,767 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:46,785 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:44683
-2025-01-07 20:23:46,788 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:39647/status
-2025-01-07 20:23:46,790 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:46,814 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35833'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:46,814 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35833' closed.
-2025-01-07 20:23:46,815 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41563'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:46,815 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41563' closed.
-2025-01-07 20:23:46,815 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44683': TLS handshake failed with remote 'tls://127.0.0.1:42862': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:46,815 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44683': TLS handshake failed with remote 'tls://127.0.0.1:42876': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:46,815 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:23:47,823 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:47,841 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:41439
-2025-01-07 20:23:47,844 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41073/status
-2025-01-07 20:23:47,855 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:47,882 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37535'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:47,883 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37535' closed.
-2025-01-07 20:23:47,883 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40023'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:47,883 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40023' closed.
-2025-01-07 20:23:47,884 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41439': TLS handshake failed with remote 'tls://127.0.0.1:48386': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:47,884 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41439': TLS handshake failed with remote 'tls://127.0.0.1:48392': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:47,884 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:23:48,911 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:48,926 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:38169
-2025-01-07 20:23:48,928 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37125/status
-2025-01-07 20:23:48,939 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:48,962 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45133'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:48,963 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45133' closed.
-2025-01-07 20:23:48,963 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45409'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:48,963 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45409' closed.
-2025-01-07 20:23:48,963 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38169': TLS handshake failed with remote 'tls://127.0.0.1:34098': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:48,964 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38169': TLS handshake failed with remote 'tls://127.0.0.1:34112': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:48,964 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:23:49,971 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:49,989 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37923
-2025-01-07 20:23:49,992 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:44645/status
-2025-01-07 20:23:50,003 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:50,031 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41391'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:50,031 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41391' closed.
-2025-01-07 20:23:50,031 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34237'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:50,031 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34237' closed.
-2025-01-07 20:23:50,032 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37923': TLS handshake failed with remote 'tls://127.0.0.1:59732': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:50,032 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37923': TLS handshake failed with remote 'tls://127.0.0.1:59742': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:50,032 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:23:51,047 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:51,078 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:42179
-2025-01-07 20:23:51,085 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43947/status
-2025-01-07 20:23:51,087 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:51,124 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45841'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:51,124 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45841' closed.
-2025-01-07 20:23:51,124 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43577'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:51,125 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43577' closed.
-2025-01-07 20:23:51,125 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42179': TLS handshake failed with remote 'tls://127.0.0.1:51364': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:51,125 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42179': TLS handshake failed with remote 'tls://127.0.0.1:51378': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:51,126 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:23:52,131 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:52,141 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37761
-2025-01-07 20:23:52,144 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43577/status
-2025-01-07 20:23:52,155 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:52,175 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33309'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:52,175 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33309' closed.
-2025-01-07 20:23:52,175 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37085'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:52,175 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37085' closed.
-2025-01-07 20:23:52,176 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37761': TLS handshake failed with remote 'tls://127.0.0.1:58104': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:52,176 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37761': TLS handshake failed with remote 'tls://127.0.0.1:58120': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:52,176 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:23:53,183 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:53,217 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37773
-2025-01-07 20:23:53,220 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41755/status
-2025-01-07 20:23:53,222 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:53,254 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42523'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:53,254 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42523' closed.
-2025-01-07 20:23:53,254 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33285'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:53,254 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33285' closed.
-2025-01-07 20:23:53,255 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37773': TLS handshake failed with remote 'tls://127.0.0.1:54306': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:53,255 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37773': TLS handshake failed with remote 'tls://127.0.0.1:54308': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:53,255 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:23:54,263 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:54,271 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33697
-2025-01-07 20:23:54,290 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46625/status
-2025-01-07 20:23:54,297 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:54,318 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38827'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:54,319 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38827' closed.
-2025-01-07 20:23:54,319 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38519'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:54,319 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38519' closed.
-2025-01-07 20:23:54,320 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33697': TLS handshake failed with remote 'tls://127.0.0.1:38050': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:54,320 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33697': TLS handshake failed with remote 'tls://127.0.0.1:38062': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:54,320 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:23:55,331 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:55,346 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37169
-2025-01-07 20:23:55,348 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:44213/status
-2025-01-07 20:23:55,359 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:55,402 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43353'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:55,402 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43353' closed.
-2025-01-07 20:23:55,402 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39159'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:55,402 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39159' closed.
-2025-01-07 20:23:55,403 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37169': TLS handshake failed with remote 'tls://127.0.0.1:58866': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:55,403 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37169': TLS handshake failed with remote 'tls://127.0.0.1:58868': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:55,403 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:23:56,419 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:56,438 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33879
-2025-01-07 20:23:56,440 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35701/status
-2025-01-07 20:23:56,451 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:56,480 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45745'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:56,489 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45745' closed.
-2025-01-07 20:23:56,489 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34483'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:56,489 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34483' closed.
-2025-01-07 20:23:56,490 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33879': TLS handshake failed with remote 'tls://127.0.0.1:36146': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:56,490 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33879': TLS handshake failed with remote 'tls://127.0.0.1:36158': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:56,490 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:23:57,499 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:57,509 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:38073
-2025-01-07 20:23:57,511 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43817/status
-2025-01-07 20:23:57,514 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:57,546 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35523'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:57,546 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35523' closed.
-2025-01-07 20:23:57,546 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39645'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:57,546 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39645' closed.
-2025-01-07 20:23:57,547 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38073': TLS handshake failed with remote 'tls://127.0.0.1:50656': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:57,547 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38073': TLS handshake failed with remote 'tls://127.0.0.1:50668': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:57,547 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:23:58,555 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:58,573 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:39279
-2025-01-07 20:23:58,576 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:39489/status
-2025-01-07 20:23:58,586 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:58,615 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44003'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:58,615 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44003' closed.
-2025-01-07 20:23:58,615 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41243'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:58,615 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41243' closed.
-2025-01-07 20:23:58,616 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39279': TLS handshake failed with remote 'tls://127.0.0.1:56624': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:58,616 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39279': TLS handshake failed with remote 'tls://127.0.0.1:56626': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:58,616 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:23:59,631 - distributed.scheduler - INFO - State start
-2025-01-07 20:23:59,645 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43439
-2025-01-07 20:23:59,648 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36815/status
-2025-01-07 20:23:59,659 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:23:59,698 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44637'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:59,699 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44637' closed.
-2025-01-07 20:23:59,699 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43801'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:23:59,699 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43801' closed.
-2025-01-07 20:23:59,700 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43439': TLS handshake failed with remote 'tls://127.0.0.1:32768': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:59,700 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43439': TLS handshake failed with remote 'tls://127.0.0.1:32774': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:23:59,700 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:00,715 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:00,725 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40827
-2025-01-07 20:24:00,728 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:42115/status
-2025-01-07 20:24:00,730 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:00,754 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45941'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:00,755 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45941' closed.
-2025-01-07 20:24:00,755 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40649'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:00,755 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40649' closed.
-2025-01-07 20:24:00,755 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40827': TLS handshake failed with remote 'tls://127.0.0.1:59886': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:00,756 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40827': TLS handshake failed with remote 'tls://127.0.0.1:59892': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:00,756 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:01,767 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:01,777 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37895
-2025-01-07 20:24:01,780 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36249/status
-2025-01-07 20:24:01,786 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:01,815 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46545'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:01,815 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46545' closed.
-2025-01-07 20:24:01,815 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40959'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:01,815 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40959' closed.
-2025-01-07 20:24:01,816 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37895': TLS handshake failed with remote 'tls://127.0.0.1:41924': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:01,816 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37895': TLS handshake failed with remote 'tls://127.0.0.1:41930': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:01,816 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:02,827 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:02,845 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33749
-2025-01-07 20:24:02,848 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37131/status
-2025-01-07 20:24:02,858 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:02,885 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42417'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:02,885 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42417' closed.
-2025-01-07 20:24:02,885 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40301'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:02,885 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40301' closed.
-2025-01-07 20:24:02,886 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33749': TLS handshake failed with remote 'tls://127.0.0.1:34118': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:02,886 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33749': TLS handshake failed with remote 'tls://127.0.0.1:34124': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:02,886 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:03,895 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:03,913 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:36143
-2025-01-07 20:24:03,916 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43171/status
-2025-01-07 20:24:03,926 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:04,008 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45163'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:04,008 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45163' closed.
-2025-01-07 20:24:04,008 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38295'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:04,017 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38295' closed.
-2025-01-07 20:24:04,018 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36143': TLS handshake failed with remote 'tls://127.0.0.1:48442': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:04,018 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36143': TLS handshake failed with remote 'tls://127.0.0.1:48446': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:04,018 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:05,023 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:05,042 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43621
-2025-01-07 20:24:05,045 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:40477/status
-2025-01-07 20:24:05,047 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:05,085 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46235'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:05,085 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46235' closed.
-2025-01-07 20:24:05,086 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33053'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:05,086 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33053' closed.
-2025-01-07 20:24:05,086 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43621': TLS handshake failed with remote 'tls://127.0.0.1:50082': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:05,086 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43621': TLS handshake failed with remote 'tls://127.0.0.1:50098': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:05,086 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:06,107 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:06,121 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33125
-2025-01-07 20:24:06,124 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35061/status
-2025-01-07 20:24:06,134 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:06,171 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37025'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:06,171 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37025' closed.
-2025-01-07 20:24:06,171 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45713'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:06,171 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45713' closed.
-2025-01-07 20:24:06,172 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33125': TLS handshake failed with remote 'tls://127.0.0.1:39976': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:06,172 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33125': TLS handshake failed with remote 'tls://127.0.0.1:39984': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:06,178 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:07,182 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:07,201 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33829
-2025-01-07 20:24:07,203 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:39643/status
-2025-01-07 20:24:07,214 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:07,238 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40737'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:07,239 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40737' closed.
-2025-01-07 20:24:07,239 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45309'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:07,239 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45309' closed.
-2025-01-07 20:24:07,240 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33829': TLS handshake failed with remote 'tls://127.0.0.1:51054': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:07,245 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33829': TLS handshake failed with remote 'tls://127.0.0.1:51060': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:07,245 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:08,255 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:08,262 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:46167
-2025-01-07 20:24:08,264 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37313/status
-2025-01-07 20:24:08,275 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:08,299 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38261'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:08,299 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38261' closed.
-2025-01-07 20:24:08,299 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39975'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:08,300 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39975' closed.
-2025-01-07 20:24:08,300 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46167': TLS handshake failed with remote 'tls://127.0.0.1:52336': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:08,300 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46167': TLS handshake failed with remote 'tls://127.0.0.1:52340': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:08,305 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:09,309 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:09,324 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37719
-2025-01-07 20:24:09,331 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41255/status
-2025-01-07 20:24:09,333 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:09,366 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39301'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:09,367 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39301' closed.
-2025-01-07 20:24:09,367 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42761'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:09,367 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42761' closed.
-2025-01-07 20:24:09,369 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37719': TLS handshake failed with remote 'tls://127.0.0.1:37550': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:09,369 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37719': TLS handshake failed with remote 'tls://127.0.0.1:37566': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:09,369 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:10,379 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:10,389 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37835
-2025-01-07 20:24:10,392 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:42197/status
-2025-01-07 20:24:10,395 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:10,424 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46681'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:10,424 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46681' closed.
-2025-01-07 20:24:10,424 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37041'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:10,424 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37041' closed.
-2025-01-07 20:24:10,425 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37835': TLS handshake failed with remote 'tls://127.0.0.1:45500': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:10,425 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37835': TLS handshake failed with remote 'tls://127.0.0.1:45510': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:10,425 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:11,431 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:11,441 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33467
-2025-01-07 20:24:11,444 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41591/status
-2025-01-07 20:24:11,454 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:11,477 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44137'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:11,478 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44137' closed.
-2025-01-07 20:24:11,478 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38085'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:11,478 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38085' closed.
-2025-01-07 20:24:11,478 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33467': TLS handshake failed with remote 'tls://127.0.0.1:47188': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:11,479 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33467': TLS handshake failed with remote 'tls://127.0.0.1:47198': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:11,479 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:08,373 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:08,399 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:32857
+2026-02-11 05:43:08,402 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35133/status
+2026-02-11 05:43:08,416 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:08,464 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37697'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:08,464 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37697' closed.
+2026-02-11 05:43:08,464 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40265'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:08,464 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40265' closed.
+2026-02-11 05:43:08,465 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:32857': TLS handshake failed with remote 'tls://127.0.0.1:59104': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:08,465 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:32857': TLS handshake failed with remote 'tls://127.0.0.1:59118': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:08,465 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -13972,17 +12539,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:12,495 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:12,510 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33751
-2025-01-07 20:24:12,512 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:40703/status
-2025-01-07 20:24:12,523 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:12,574 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38127'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:12,574 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38127' closed.
-2025-01-07 20:24:12,575 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44551'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:12,575 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44551' closed.
-2025-01-07 20:24:12,576 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33751': TLS handshake failed with remote 'tls://127.0.0.1:59704': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:12,576 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33751': TLS handshake failed with remote 'tls://127.0.0.1:59720': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:12,576 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:09,489 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:09,511 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:45975
+2026-02-11 05:43:09,513 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:45157/status
+2026-02-11 05:43:09,527 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:09,575 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38849'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:09,576 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38849' closed.
+2026-02-11 05:43:09,576 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39515'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:09,576 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39515' closed.
+2026-02-11 05:43:09,576 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45975': TLS handshake failed with remote 'tls://127.0.0.1:34612': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:09,577 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45975': TLS handshake failed with remote 'tls://127.0.0.1:34622': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:09,577 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -14051,418 +12618,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-_____________________________ test_retire_workers ______________________________
-
-fut = <coroutine object Nanny.start_unsafe at 0xffff00115930>, timeout = 0
-
-    async def wait_for(fut: Awaitable[T], timeout: float) -> T:
-        async with asyncio.timeout(timeout):
->           return await fut
-
-distributed/utils.py:1914: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-distributed/nanny.py:358: in start_unsafe
-    comm = await self.rpc.connect(saddr)
-distributed/core.py:1485: in connect
-    return await self._connect(addr=addr, timeout=timeout)
-distributed/core.py:1429: in _connect
-    comm = await connect(
-distributed/comm/core.py:342: in connect
-    comm = await wait_for(
-distributed/utils.py:1914: in wait_for
-    return await fut
-distributed/comm/tcp.py:546: in connect
-    stream = await self.client.connect(
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = <tornado.tcpclient.TCPClient object at 0xffffa614cad0>
-host = '127.0.0.1', port = 38075, af = <AddressFamily.AF_UNSPEC: 0>
-ssl_options = <ssl.SSLContext object at 0xffff44e33d10>
-max_buffer_size = 31544635392.0, source_ip = None, source_port = None
-timeout = None
-
-    async def connect(
-        self,
-        host: str,
-        port: int,
-        af: socket.AddressFamily = socket.AF_UNSPEC,
-        ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
-        max_buffer_size: Optional[int] = None,
-        source_ip: Optional[str] = None,
-        source_port: Optional[int] = None,
-        timeout: Optional[Union[float, datetime.timedelta]] = None,
-    ) -> IOStream:
-        """Connect to the given host and port.
-    
-        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
-        ``ssl_options`` is not None).
-    
-        Using the ``source_ip`` kwarg, one can specify the source
-        IP address to use when establishing the connection.
-        In case the user needs to resolve and
-        use a specific interface, it has to be handled outside
-        of Tornado as this depends very much on the platform.
-    
-        Raises `TimeoutError` if the input future does not complete before
-        ``timeout``, which may be specified in any form allowed by
-        `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
-        relative to `.IOLoop.time`)
-    
-        Similarly, when the user requires a certain source port, it can
-        be specified using the ``source_port`` arg.
-    
-        .. versionchanged:: 4.5
-           Added the ``source_ip`` and ``source_port`` arguments.
-    
-        .. versionchanged:: 5.0
-           Added the ``timeout`` argument.
-        """
-        if timeout is not None:
-            if isinstance(timeout, numbers.Real):
-                timeout = IOLoop.current().time() + timeout
-            elif isinstance(timeout, datetime.timedelta):
-                timeout = IOLoop.current().time() + timeout.total_seconds()
-            else:
-                raise TypeError("Unsupported timeout %r" % timeout)
-        if timeout is not None:
-            addrinfo = await gen.with_timeout(
-                timeout, self.resolver.resolve(host, port, af)
-            )
-        else:
-            addrinfo = await self.resolver.resolve(host, port, af)
-        connector = _Connector(
-            addrinfo,
-            functools.partial(
-                self._create_stream,
-                max_buffer_size,
-                source_ip=source_ip,
-                source_port=source_port,
-            ),
-        )
->       af, addr, stream = await connector.start(connect_timeout=timeout)
-E       asyncio.exceptions.CancelledError
-
-/usr/lib/python3/dist-packages/tornado/tcpclient.py:279: CancelledError
-
-The above exception was the direct cause of the following exception:
-
-self = <Nanny: None, threads: 1>
-
-    @final
-    async def start(self):
-        async with self._startup_lock:
-            if self.status == Status.failed:
-                assert self.__startup_exc is not None
-                raise self.__startup_exc
-            elif self.status != Status.init:
-                return self
-            timeout = getattr(self, "death_timeout", None)
-    
-            async def _close_on_failure(exc: Exception) -> None:
-                await self.close(reason=f"failure-to-start-{str(type(exc))}")
-                self.status = Status.failed
-                self.__startup_exc = exc
-    
-            try:
->               await wait_for(self.start_unsafe(), timeout=timeout)
-
-distributed/core.py:528: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-distributed/utils.py:1913: in wait_for
-    async with asyncio.timeout(timeout):
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = <Timeout [expired]>
-exc_type = <class 'asyncio.exceptions.CancelledError'>
-exc_val = CancelledError(), exc_tb = <traceback object at 0xfffee14ebfc0>
-
-    async def __aexit__(
-        self,
-        exc_type: Optional[Type[BaseException]],
-        exc_val: Optional[BaseException],
-        exc_tb: Optional[TracebackType],
-    ) -> Optional[bool]:
-        assert self._state in (_State.ENTERED, _State.EXPIRING)
-    
-        if self._timeout_handler is not None:
-            self._timeout_handler.cancel()
-            self._timeout_handler = None
-    
-        if self._state is _State.EXPIRING:
-            self._state = _State.EXPIRED
-    
-            if self._task.uncancel() <= self._cancelling and exc_type is not None:
-                # Since there are no new cancel requests, we're
-                # handling this.
-                if issubclass(exc_type, exceptions.CancelledError):
->                   raise TimeoutError from exc_val
-E                   TimeoutError
-
-/usr/lib/python3.13/asyncio/timeouts.py:116: TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-    @contextlib.asynccontextmanager
-    async def _cluster_factory():
-        workers = []
-        s = None
-        try:
-            for _ in range(60):
-                try:
->                   s, ws = await start_cluster(
-                        nthreads,
-                        scheduler,
-                        security=security,
-                        Worker=Worker,
-                        scheduler_kwargs=scheduler_kwargs,
-                        worker_kwargs=merge(
-                            {"death_timeout": min(15, int(deadline.remaining))},
-                            worker_kwargs,
-                        ),
-                    )
-
-distributed/utils_test.py:974: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-distributed/utils_test.py:791: in start_cluster
-    await asyncio.gather(*workers)
-/usr/lib/python3.13/asyncio/tasks.py:737: in _wrap_awaitable
-    return await awaitable
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = <Nanny: None, threads: 1>
-
-    @final
-    async def start(self):
-        async with self._startup_lock:
-            if self.status == Status.failed:
-                assert self.__startup_exc is not None
-                raise self.__startup_exc
-            elif self.status != Status.init:
-                return self
-            timeout = getattr(self, "death_timeout", None)
-    
-            async def _close_on_failure(exc: Exception) -> None:
-                await self.close(reason=f"failure-to-start-{str(type(exc))}")
-                self.status = Status.failed
-                self.__startup_exc = exc
-    
-            try:
-                await wait_for(self.start_unsafe(), timeout=timeout)
-            except asyncio.TimeoutError as exc:
-                await _close_on_failure(exc)
->               raise asyncio.TimeoutError(
-                    f"{type(self).__name__} start timed out after {timeout}s."
-                ) from exc
-E               TimeoutError: Nanny start timed out after 0s.
-
-distributed/core.py:531: TimeoutError
-
-During handling of the above exception, another exception occurred:
-
-fut = <coroutine object gen_cluster.<locals>._.<locals>.test_func.<locals>.async_fn at 0xffff5a69ea40>
-timeout = 60
-
-    async def wait_for(fut: Awaitable[T], timeout: float) -> T:
-        async with asyncio.timeout(timeout):
->           return await fut
-
-distributed/utils.py:1914: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-distributed/utils_test.py:1007: in async_fn
-    _cluster_factory() as (s, workers),
-/usr/lib/python3.13/contextlib.py:214: in __aenter__
-    return await anext(self.gen)
-distributed/utils_test.py:991: in _cluster_factory
-    await asyncio.sleep(1)
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-delay = 1, result = None
-
-    async def sleep(delay, result=None):
-        """Coroutine that completes after a given time (in seconds)."""
-        if delay <= 0:
-            await __sleep0()
-            return result
-    
-        if math.isnan(delay):
-            raise ValueError("Invalid delay: NaN (not a number)")
-    
-        loop = events.get_running_loop()
-        future = loop.create_future()
-        h = loop.call_later(delay,
-                            futures._set_result_unless_cancelled,
-                            future, result)
-        try:
->           return await future
-E           asyncio.exceptions.CancelledError
-
-/usr/lib/python3.13/asyncio/tasks.py:718: CancelledError
-
-The above exception was the direct cause of the following exception:
-
-args = (), kwds = {}
-
-    @wraps(func)
-    def inner(*args, **kwds):
-        with self._recreate_cm():
->           return func(*args, **kwds)
-
-/usr/lib/python3.13/contextlib.py:85: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-/usr/lib/python3.13/contextlib.py:85: in inner
-    return func(*args, **kwds)
-distributed/utils_test.py:1090: in test_func
-    return _run_and_close_tornado(async_fn_outer)
-distributed/utils_test.py:380: in _run_and_close_tornado
-    return asyncio_run(inner_fn(), loop_factory=get_loop_factory())
-/usr/lib/python3.13/asyncio/runners.py:195: in run
-    return runner.run(main)
-/usr/lib/python3.13/asyncio/runners.py:118: in run
-    return self._loop.run_until_complete(task)
-/usr/lib/python3.13/asyncio/base_events.py:720: in run_until_complete
-    return future.result()
-distributed/utils_test.py:377: in inner_fn
-    return await async_fn(*args, **kwargs)
-distributed/utils_test.py:1087: in async_fn_outer
-    return await utils_wait_for(async_fn(), timeout=timeout * 2)
-distributed/utils.py:1913: in wait_for
-    async with asyncio.timeout(timeout):
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = <Timeout [expired]>
-exc_type = <class 'asyncio.exceptions.CancelledError'>
-exc_val = CancelledError(), exc_tb = <traceback object at 0xffff44d3da40>
-
-    async def __aexit__(
-        self,
-        exc_type: Optional[Type[BaseException]],
-        exc_val: Optional[BaseException],
-        exc_tb: Optional[TracebackType],
-    ) -> Optional[bool]:
-        assert self._state in (_State.ENTERED, _State.EXPIRING)
-    
-        if self._timeout_handler is not None:
-            self._timeout_handler.cancel()
-            self._timeout_handler = None
-    
-        if self._state is _State.EXPIRING:
-            self._state = _State.EXPIRED
-    
-            if self._task.uncancel() <= self._cancelling and exc_type is not None:
-                # Since there are no new cancel requests, we're
-                # handling this.
-                if issubclass(exc_type, exceptions.CancelledError):
->                   raise TimeoutError from exc_val
-E                   TimeoutError
-
-/usr/lib/python3.13/asyncio/timeouts.py:116: TimeoutError
------------------------------ Captured stderr call -----------------------------
-2025-01-07 20:24:20,635 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:20,657 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:35521
-2025-01-07 20:24:20,660 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:44281/status
-2025-01-07 20:24:20,671 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:20,747 - distributed.nanny - INFO -         Start Nanny at: 'tls://127.0.0.1:39313'
-2025-01-07 20:24:20,789 - distributed.nanny - INFO -         Start Nanny at: 'tls://127.0.0.1:39971'
-2025-01-07 20:24:22,073 - distributed.worker.memory - WARNING - Ignoring provided memory limit 63089270784 due to system memory limit of 58.76 GiB
-2025-01-07 20:24:22,101 - distributed.worker - INFO -       Start worker at:      tls://127.0.0.1:33233
-2025-01-07 20:24:22,101 - distributed.worker - INFO -          Listening to:      tls://127.0.0.1:33233
-2025-01-07 20:24:22,101 - distributed.worker - INFO -           Worker name:                          1
-2025-01-07 20:24:22,101 - distributed.worker - INFO -          dashboard at:            127.0.0.1:46691
-2025-01-07 20:24:22,101 - distributed.worker - INFO - Waiting to connect to:      tls://127.0.0.1:35521
-2025-01-07 20:24:22,101 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:24:22,101 - distributed.worker - INFO -               Threads:                          2
-2025-01-07 20:24:22,101 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:24:22,101 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-qj5vzzcx
-2025-01-07 20:24:22,101 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:24:22,237 - distributed.worker.memory - WARNING - Ignoring provided memory limit 63089270784 due to system memory limit of 58.76 GiB
-2025-01-07 20:24:22,285 - distributed.worker - INFO -       Start worker at:      tls://127.0.0.1:45815
-2025-01-07 20:24:22,285 - distributed.worker - INFO -          Listening to:      tls://127.0.0.1:45815
-2025-01-07 20:24:22,285 - distributed.worker - INFO -           Worker name:                          0
-2025-01-07 20:24:22,285 - distributed.worker - INFO -          dashboard at:            127.0.0.1:40903
-2025-01-07 20:24:22,285 - distributed.worker - INFO - Waiting to connect to:      tls://127.0.0.1:35521
-2025-01-07 20:24:22,285 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:24:22,285 - distributed.worker - INFO -               Threads:                          1
-2025-01-07 20:24:22,285 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:24:22,285 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-ducb83hw
-2025-01-07 20:24:22,285 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:24:22,633 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:33233 name: 1
-2025-01-07 20:24:22,701 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:24:22,702 - distributed.worker - INFO -         Registered to:      tls://127.0.0.1:35521
-2025-01-07 20:24:22,702 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:24:22,699 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:33233
-2025-01-07 20:24:22,705 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:51646
-2025-01-07 20:24:22,729 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:35521
-2025-01-07 20:24:22,972 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:45815 name: 0
-2025-01-07 20:24:23,017 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:45815
-2025-01-07 20:24:23,019 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:51654
-2025-01-07 20:24:23,020 - distributed.core - INFO - Connection to tls://127.0.0.1:51654 has been closed.
-2025-01-07 20:24:23,021 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:24:23,022 - distributed.batched - INFO - Batched Comm Closed <TLS (closed) Worker->Scheduler local=tls://127.0.0.1:51654 remote=tls://127.0.0.1:35521>
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 297, in write
-    raise StreamClosedError()
-tornado.iostream.StreamClosedError: Stream is closed
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/batched.py", line 115, in _background_send
-    nbytes = yield coro
-             ^^^^^^^^^^
-  File "/usr/lib/python3/dist-packages/tornado/gen.py", line 766, in run
-    value = future.result()
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 307, in write
-    convert_stream_closed_error(self, e)
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 142, in convert_stream_closed_error
-    raise CommClosedError(f"in {obj}: {exc}") from exc
-distributed.comm.core.CommClosedError: in <TLS (closed) Worker->Scheduler local=tls://127.0.0.1:51654 remote=tls://127.0.0.1:35521>: Stream is closed
-2025-01-07 20:24:23,024 - distributed.worker - INFO -         Registered to:      tls://127.0.0.1:35521
-2025-01-07 20:24:23,024 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:24:23,020 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:45815 name: 0 (stimulus_id='handle-worker-cleanup-1736324663.0201557')
-2025-01-07 20:24:23,027 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:35521
-2025-01-07 20:24:23,028 - distributed.core - INFO - Connection to tls://127.0.0.1:35521 has been closed.
-2025-01-07 20:24:23,028 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:45815. Reason: worker-handle-scheduler-connection-broken
-2025-01-07 20:24:23,080 - distributed.nanny - INFO - Closing Nanny gracefully at 'tls://127.0.0.1:39313'. Reason: worker-handle-scheduler-connection-broken
-2025-01-07 20:24:23,082 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:24:23,101 - distributed.nanny - INFO - Worker closed
-2025-01-07 20:24:25,137 - distributed.nanny - ERROR - Worker process died unexpectedly
-2025-01-07 20:24:25,469 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39313'. Reason: nanny-close-gracefully
-2025-01-07 20:24:25,469 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39313' closed.
-2025-01-07 20:24:53,085 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39971'. Reason: nanny-close
-2025-01-07 20:24:53,085 - distributed.nanny - INFO - Nanny asking worker to close. Reason: nanny-close
-2025-01-07 20:24:53,090 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:33233. Reason: nanny-close
-2025-01-07 20:24:53,091 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:24:53,092 - distributed.core - INFO - Connection to tls://127.0.0.1:35521 has been closed.
-2025-01-07 20:24:53,094 - distributed.core - INFO - Received 'close-stream' from tls://127.0.0.1:51646; closing.
-2025-01-07 20:24:53,094 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:33233 name: 1 (stimulus_id='handle-worker-cleanup-1736324693.0944352')
-2025-01-07 20:24:53,096 - distributed.scheduler - INFO - Lost all workers
-2025-01-07 20:24:53,104 - distributed.nanny - INFO - Worker closed
-2025-01-07 20:24:53,427 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39971' closed.
-2025-01-07 20:24:53,427 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
-2025-01-07 20:24:53,430 - distributed.scheduler - INFO - Scheduler closing all comms
-2025-01-07 20:24:53,432 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Cluster creation timeout; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 806, in start_cluster
-    raise TimeoutError("Cluster creation timeout")
-TimeoutError: Cluster creation timeout
-2025-01-07 20:24:54,439 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:54,457 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:39039
-2025-01-07 20:24:54,460 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:39999/status
-2025-01-07 20:24:54,470 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:54,498 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36043'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:54,498 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36043' closed.
-2025-01-07 20:24:54,498 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36763'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:54,498 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36763' closed.
-2025-01-07 20:24:54,499 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39039': TLS handshake failed with remote 'tls://127.0.0.1:33160': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:54,499 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39039': TLS handshake failed with remote 'tls://127.0.0.1:33162': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:54,499 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:10,585 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:10,599 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43399
+2026-02-11 05:43:10,602 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:44787/status
+2026-02-11 05:43:10,612 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:10,636 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42619'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:10,637 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42619' closed.
+2026-02-11 05:43:10,637 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37501'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:10,637 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37501' closed.
+2026-02-11 05:43:10,638 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43399': TLS handshake failed with remote 'tls://127.0.0.1:48522': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:10,638 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43399': TLS handshake failed with remote 'tls://127.0.0.1:48528': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:10,638 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -14531,17 +12697,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:55,515 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:55,525 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:39189
-2025-01-07 20:24:55,528 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37003/status
-2025-01-07 20:24:55,534 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:55,557 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46263'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:55,558 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46263' closed.
-2025-01-07 20:24:55,558 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33943'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:55,558 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33943' closed.
-2025-01-07 20:24:55,558 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39189': TLS handshake failed with remote 'tls://127.0.0.1:43854': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:55,558 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39189': TLS handshake failed with remote 'tls://127.0.0.1:43868': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:55,558 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:11,645 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:11,667 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:39815
+2026-02-11 05:43:11,670 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:42187/status
+2026-02-11 05:43:11,684 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:11,720 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41809'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:11,720 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41809' closed.
+2026-02-11 05:43:11,720 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33285'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:11,720 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33285' closed.
+2026-02-11 05:43:11,721 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39815': TLS handshake failed with remote 'tls://127.0.0.1:59202': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:11,721 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39815': TLS handshake failed with remote 'tls://127.0.0.1:59218': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:11,721 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -14610,17 +12776,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:56,570 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:56,577 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43095
-2025-01-07 20:24:56,579 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:33013/status
-2025-01-07 20:24:56,581 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:56,592 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44125'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:56,593 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44125' closed.
-2025-01-07 20:24:56,593 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:32821'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:56,593 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:32821' closed.
-2025-01-07 20:24:56,594 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43095': TLS handshake failed with remote 'tls://127.0.0.1:52526': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:56,594 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43095': TLS handshake failed with remote 'tls://127.0.0.1:52532': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:56,594 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:12,741 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:12,758 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40869
+2026-02-11 05:43:12,765 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:44615/status
+2026-02-11 05:43:12,771 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:12,790 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40061'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:12,799 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40061' closed.
+2026-02-11 05:43:12,799 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37269'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:12,799 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37269' closed.
+2026-02-11 05:43:12,800 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40869': TLS handshake failed with remote 'tls://127.0.0.1:40792': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:12,800 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40869': TLS handshake failed with remote 'tls://127.0.0.1:40806': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:12,800 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -14689,17 +12855,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:57,599 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:57,609 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33267
-2025-01-07 20:24:57,612 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36291/status
-2025-01-07 20:24:57,622 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:57,639 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42233'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:57,639 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42233' closed.
-2025-01-07 20:24:57,639 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43955'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:57,639 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43955' closed.
-2025-01-07 20:24:57,640 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33267': TLS handshake failed with remote 'tls://127.0.0.1:39408': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:57,640 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33267': TLS handshake failed with remote 'tls://127.0.0.1:39418': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:57,640 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:13,809 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:13,815 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:42541
+2026-02-11 05:43:13,829 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46431/status
+2026-02-11 05:43:13,839 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:13,866 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41061'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:13,875 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41061' closed.
+2026-02-11 05:43:13,875 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42927'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:13,875 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42927' closed.
+2026-02-11 05:43:13,876 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42541': TLS handshake failed with remote 'tls://127.0.0.1:56294': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:13,876 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42541': TLS handshake failed with remote 'tls://127.0.0.1:56300': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:13,876 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -14768,17 +12934,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:58,651 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:58,669 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:35431
-2025-01-07 20:24:58,671 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34453/status
-2025-01-07 20:24:58,678 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:58,697 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35363'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:58,698 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35363' closed.
-2025-01-07 20:24:58,698 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42615'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:58,698 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42615' closed.
-2025-01-07 20:24:58,698 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35431': TLS handshake failed with remote 'tls://127.0.0.1:58318': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:58,698 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35431': TLS handshake failed with remote 'tls://127.0.0.1:58326': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:58,698 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:14,885 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:14,898 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:41775
+2026-02-11 05:43:14,909 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34437/status
+2026-02-11 05:43:14,911 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:14,940 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45841'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:14,940 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45841' closed.
+2026-02-11 05:43:14,941 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38185'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:14,941 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38185' closed.
+2026-02-11 05:43:14,942 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41775': TLS handshake failed with remote 'tls://127.0.0.1:58828': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:14,942 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41775': TLS handshake failed with remote 'tls://127.0.0.1:58834': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:14,942 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -14847,17 +13013,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:24:59,707 - distributed.scheduler - INFO - State start
-2025-01-07 20:24:59,717 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43627
-2025-01-07 20:24:59,719 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36923/status
-2025-01-07 20:24:59,730 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:24:59,749 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36283'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:59,750 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36283' closed.
-2025-01-07 20:24:59,750 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38937'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:24:59,750 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38937' closed.
-2025-01-07 20:24:59,751 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43627': TLS handshake failed with remote 'tls://127.0.0.1:42612': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:59,751 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43627': TLS handshake failed with remote 'tls://127.0.0.1:42622': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:24:59,751 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:15,949 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:15,963 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:45131
+2026-02-11 05:43:15,965 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:38049/status
+2026-02-11 05:43:15,971 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:15,990 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45557'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:15,995 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45557' closed.
+2026-02-11 05:43:15,995 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43427'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:15,995 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43427' closed.
+2026-02-11 05:43:15,996 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45131': TLS handshake failed with remote 'tls://127.0.0.1:45366': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:15,996 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45131': TLS handshake failed with remote 'tls://127.0.0.1:45378': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:15,996 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -14926,17 +13092,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:00,759 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:00,772 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:42065
-2025-01-07 20:25:00,775 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:33085/status
-2025-01-07 20:25:00,785 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:00,805 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46465'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:00,805 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46465' closed.
-2025-01-07 20:25:00,805 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39893'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:00,805 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39893' closed.
-2025-01-07 20:25:00,806 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42065': TLS handshake failed with remote 'tls://127.0.0.1:37716': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:00,806 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42065': TLS handshake failed with remote 'tls://127.0.0.1:37722': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:00,806 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:17,005 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:17,019 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:41153
+2026-02-11 05:43:17,021 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41521/status
+2026-02-11 05:43:17,023 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:17,055 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43759'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:17,055 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43759' closed.
+2026-02-11 05:43:17,056 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44135'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:17,056 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44135' closed.
+2026-02-11 05:43:17,056 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41153': TLS handshake failed with remote 'tls://127.0.0.1:45534': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:17,056 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41153': TLS handshake failed with remote 'tls://127.0.0.1:45548': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:17,056 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15005,17 +13171,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:01,815 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:01,825 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:38169
-2025-01-07 20:25:01,828 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37573/status
-2025-01-07 20:25:01,830 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:01,854 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41617'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:01,854 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41617' closed.
-2025-01-07 20:25:01,855 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42921'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:01,855 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42921' closed.
-2025-01-07 20:25:01,855 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38169': TLS handshake failed with remote 'tls://127.0.0.1:50772': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:01,855 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38169': TLS handshake failed with remote 'tls://127.0.0.1:50782': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:01,855 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:18,065 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:18,079 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:46579
+2026-02-11 05:43:18,082 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:42767/status
+2026-02-11 05:43:18,084 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:18,112 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40261'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:18,112 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40261' closed.
+2026-02-11 05:43:18,112 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35323'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:18,112 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35323' closed.
+2026-02-11 05:43:18,113 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46579': TLS handshake failed with remote 'tls://127.0.0.1:60884': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:18,113 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46579': TLS handshake failed with remote 'tls://127.0.0.1:60888': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:18,113 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15084,17 +13250,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:02,867 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:02,877 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:39359
-2025-01-07 20:25:02,880 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:42713/status
-2025-01-07 20:25:02,886 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:02,913 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33347'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:02,913 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33347' closed.
-2025-01-07 20:25:02,913 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42451'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:02,914 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42451' closed.
-2025-01-07 20:25:02,914 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39359': TLS handshake failed with remote 'tls://127.0.0.1:57276': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:02,914 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39359': TLS handshake failed with remote 'tls://127.0.0.1:57284': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:02,914 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:19,118 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:19,134 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33659
+2026-02-11 05:43:19,137 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:40275/status
+2026-02-11 05:43:19,147 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:19,170 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44429'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:19,171 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44429' closed.
+2026-02-11 05:43:19,179 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33429'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:19,179 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33429' closed.
+2026-02-11 05:43:19,180 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33659': TLS handshake failed with remote 'tls://127.0.0.1:60086': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:19,180 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33659': TLS handshake failed with remote 'tls://127.0.0.1:60096': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:19,180 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15163,17 +13329,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:03,923 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:03,929 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:38969
-2025-01-07 20:25:03,932 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:44171/status
-2025-01-07 20:25:03,934 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:03,945 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37777'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:03,946 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37777' closed.
-2025-01-07 20:25:03,946 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46449'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:03,946 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46449' closed.
-2025-01-07 20:25:03,947 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38969': TLS handshake failed with remote 'tls://127.0.0.1:60052': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:03,947 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38969': TLS handshake failed with remote 'tls://127.0.0.1:60058': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:03,947 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:20,189 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:20,203 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:45119
+2026-02-11 05:43:20,205 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:45121/status
+2026-02-11 05:43:20,208 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:20,236 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40593'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:20,236 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40593' closed.
+2026-02-11 05:43:20,236 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33973'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:20,236 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33973' closed.
+2026-02-11 05:43:20,237 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45119': TLS handshake failed with remote 'tls://127.0.0.1:55716': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:20,237 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45119': TLS handshake failed with remote 'tls://127.0.0.1:55730': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:20,237 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15242,17 +13408,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:04,955 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:04,965 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:46409
-2025-01-07 20:25:04,968 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:39097/status
-2025-01-07 20:25:04,970 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:05,002 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41487'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:05,003 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41487' closed.
-2025-01-07 20:25:05,003 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44165'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:05,003 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44165' closed.
-2025-01-07 20:25:05,004 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46409': TLS handshake failed with remote 'tls://127.0.0.1:58582': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:05,004 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46409': TLS handshake failed with remote 'tls://127.0.0.1:58596': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:05,004 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:21,245 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:21,251 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:39935
+2026-02-11 05:43:21,261 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:45263/status
+2026-02-11 05:43:21,263 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:21,294 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44089'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:21,294 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44089' closed.
+2026-02-11 05:43:21,294 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34179'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:21,294 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34179' closed.
+2026-02-11 05:43:21,295 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39935': TLS handshake failed with remote 'tls://127.0.0.1:39882': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:21,295 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39935': TLS handshake failed with remote 'tls://127.0.0.1:39892': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:21,295 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15321,17 +13487,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:06,011 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:06,021 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40707
-2025-01-07 20:25:06,024 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34603/status
-2025-01-07 20:25:06,030 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:06,051 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46513'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:06,051 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46513' closed.
-2025-01-07 20:25:06,051 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:32925'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:06,051 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:32925' closed.
-2025-01-07 20:25:06,052 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40707': TLS handshake failed with remote 'tls://127.0.0.1:38318': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:06,052 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40707': TLS handshake failed with remote 'tls://127.0.0.1:38332': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:06,052 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:22,301 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:22,315 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:35079
+2026-02-11 05:43:22,317 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46123/status
+2026-02-11 05:43:22,323 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:22,347 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42295'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:22,347 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42295' closed.
+2026-02-11 05:43:22,348 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:32917'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:22,348 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:32917' closed.
+2026-02-11 05:43:22,349 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35079': TLS handshake failed with remote 'tls://127.0.0.1:34258': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:22,349 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35079': TLS handshake failed with remote 'tls://127.0.0.1:34266': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:22,350 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15400,17 +13566,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:07,062 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:07,081 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40639
-2025-01-07 20:25:07,084 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:42117/status
-2025-01-07 20:25:07,086 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:07,118 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35335'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:07,118 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35335' closed.
-2025-01-07 20:25:07,118 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44795'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:07,119 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44795' closed.
-2025-01-07 20:25:07,119 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40639': TLS handshake failed with remote 'tls://127.0.0.1:34304': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:07,119 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40639': TLS handshake failed with remote 'tls://127.0.0.1:34308': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:07,119 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:23,361 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:23,367 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:32963
+2026-02-11 05:43:23,377 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:45593/status
+2026-02-11 05:43:23,380 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:23,399 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35329'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:23,399 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35329' closed.
+2026-02-11 05:43:23,399 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35023'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:23,399 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35023' closed.
+2026-02-11 05:43:23,400 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:32963': TLS handshake failed with remote 'tls://127.0.0.1:37476': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:23,400 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:32963': TLS handshake failed with remote 'tls://127.0.0.1:37492': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:23,400 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15479,17 +13645,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:08,127 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:08,141 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40577
-2025-01-07 20:25:08,144 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35739/status
-2025-01-07 20:25:08,146 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:08,171 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38541'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:08,171 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38541' closed.
-2025-01-07 20:25:08,171 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43281'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:08,171 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43281' closed.
-2025-01-07 20:25:08,172 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40577': TLS handshake failed with remote 'tls://127.0.0.1:48322': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:08,172 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40577': TLS handshake failed with remote 'tls://127.0.0.1:48336': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:08,172 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:24,405 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:24,420 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43075
+2026-02-11 05:43:24,423 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34579/status
+2026-02-11 05:43:24,425 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:24,459 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36489'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:24,460 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36489' closed.
+2026-02-11 05:43:24,460 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38609'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:24,460 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38609' closed.
+2026-02-11 05:43:24,461 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43075': TLS handshake failed with remote 'tls://127.0.0.1:57194': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:24,461 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43075': TLS handshake failed with remote 'tls://127.0.0.1:57202': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:24,461 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15558,17 +13724,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:09,179 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:09,197 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:46047
-2025-01-07 20:25:09,200 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37059/status
-2025-01-07 20:25:09,206 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:09,284 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46777'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:09,284 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46777' closed.
-2025-01-07 20:25:09,284 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43681'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:09,284 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43681' closed.
-2025-01-07 20:25:09,293 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46047': TLS handshake failed with remote 'tls://127.0.0.1:35874': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:09,293 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46047': TLS handshake failed with remote 'tls://127.0.0.1:35890': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:09,294 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:25,473 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:25,487 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:45895
+2026-02-11 05:43:25,489 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34965/status
+2026-02-11 05:43:25,491 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:25,522 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45771'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:25,522 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45771' closed.
+2026-02-11 05:43:25,522 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44655'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:25,522 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44655' closed.
+2026-02-11 05:43:25,527 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45895': TLS handshake failed with remote 'tls://127.0.0.1:49872': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:25,527 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45895': TLS handshake failed with remote 'tls://127.0.0.1:49886': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:25,528 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15637,17 +13803,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:10,299 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:10,313 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:41267
-2025-01-07 20:25:10,316 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43523/status
-2025-01-07 20:25:10,326 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:10,359 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:32881'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:10,360 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:32881' closed.
-2025-01-07 20:25:10,360 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35161'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:10,360 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35161' closed.
-2025-01-07 20:25:10,365 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41267': TLS handshake failed with remote 'tls://127.0.0.1:40052': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:10,365 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41267': TLS handshake failed with remote 'tls://127.0.0.1:40062': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:10,365 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:26,533 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:26,547 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:44453
+2026-02-11 05:43:26,549 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34273/status
+2026-02-11 05:43:26,556 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:26,581 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43499'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:26,581 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43499' closed.
+2026-02-11 05:43:26,582 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34725'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:26,582 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34725' closed.
+2026-02-11 05:43:26,582 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44453': TLS handshake failed with remote 'tls://127.0.0.1:52090': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:26,583 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44453': TLS handshake failed with remote 'tls://127.0.0.1:52100': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:26,583 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15716,17 +13882,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:11,371 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:11,381 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:38911
-2025-01-07 20:25:11,384 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37215/status
-2025-01-07 20:25:11,394 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:11,418 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40313'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:11,418 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40313' closed.
-2025-01-07 20:25:11,419 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41411'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:11,419 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41411' closed.
-2025-01-07 20:25:11,419 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38911': TLS handshake failed with remote 'tls://127.0.0.1:35868': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:11,419 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38911': TLS handshake failed with remote 'tls://127.0.0.1:35882': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:11,419 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:27,597 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:27,615 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:41127
+2026-02-11 05:43:27,617 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:40617/status
+2026-02-11 05:43:27,627 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:27,676 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38163'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:27,676 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38163' closed.
+2026-02-11 05:43:27,676 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43609'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:27,676 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43609' closed.
+2026-02-11 05:43:27,677 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41127': TLS handshake failed with remote 'tls://127.0.0.1:33000': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:27,677 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41127': TLS handshake failed with remote 'tls://127.0.0.1:33002': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:27,677 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15795,17 +13961,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:12,435 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:12,454 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:39739
-2025-01-07 20:25:12,465 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:40485/status
-2025-01-07 20:25:12,467 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:12,503 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45627'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:12,504 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45627' closed.
-2025-01-07 20:25:12,504 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43361'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:12,504 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43361' closed.
-2025-01-07 20:25:12,504 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39739': TLS handshake failed with remote 'tls://127.0.0.1:60466': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:12,509 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39739': TLS handshake failed with remote 'tls://127.0.0.1:60476': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:12,509 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:28,694 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:28,704 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43731
+2026-02-11 05:43:28,707 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34309/status
+2026-02-11 05:43:28,717 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:28,737 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44139'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:28,737 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44139' closed.
+2026-02-11 05:43:28,737 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37599'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:28,737 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37599' closed.
+2026-02-11 05:43:28,738 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43731': TLS handshake failed with remote 'tls://127.0.0.1:41010': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:28,738 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43731': TLS handshake failed with remote 'tls://127.0.0.1:41012': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:28,738 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15874,17 +14040,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:13,515 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:13,530 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:36339
-2025-01-07 20:25:13,532 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37867/status
-2025-01-07 20:25:13,535 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:13,569 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33529'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:13,569 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33529' closed.
-2025-01-07 20:25:13,570 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39465'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:13,570 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39465' closed.
-2025-01-07 20:25:13,570 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36339': TLS handshake failed with remote 'tls://127.0.0.1:47266': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:13,570 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36339': TLS handshake failed with remote 'tls://127.0.0.1:47274': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:13,570 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:29,753 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:29,767 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:46825
+2026-02-11 05:43:29,769 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41625/status
+2026-02-11 05:43:29,775 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:29,808 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37019'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:29,809 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37019' closed.
+2026-02-11 05:43:29,809 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37659'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:29,809 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37659' closed.
+2026-02-11 05:43:29,809 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46825': TLS handshake failed with remote 'tls://127.0.0.1:34416': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:29,809 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46825': TLS handshake failed with remote 'tls://127.0.0.1:34426': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:29,809 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -15953,17 +14119,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:14,579 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:14,591 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:44773
-2025-01-07 20:25:14,594 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:33035/status
-2025-01-07 20:25:14,601 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:14,627 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35045'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:14,628 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35045' closed.
-2025-01-07 20:25:14,628 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39601'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:14,628 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39601' closed.
-2025-01-07 20:25:14,630 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44773': TLS handshake failed with remote 'tls://127.0.0.1:55240': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:14,630 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44773': TLS handshake failed with remote 'tls://127.0.0.1:55250': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:14,630 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:30,821 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:30,835 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40169
+2026-02-11 05:43:30,837 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36385/status
+2026-02-11 05:43:30,839 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:30,869 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39067'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:30,870 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39067' closed.
+2026-02-11 05:43:30,870 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34855'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:30,870 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34855' closed.
+2026-02-11 05:43:30,871 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40169': TLS handshake failed with remote 'tls://127.0.0.1:38064': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:30,871 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40169': TLS handshake failed with remote 'tls://127.0.0.1:38072': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:30,871 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -16032,17 +14198,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:15,639 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:15,645 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33345
-2025-01-07 20:25:15,648 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:44605/status
-2025-01-07 20:25:15,658 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:15,683 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35821'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:15,684 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35821' closed.
-2025-01-07 20:25:15,684 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46809'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:15,684 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46809' closed.
-2025-01-07 20:25:15,694 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33345': TLS handshake failed with remote 'tls://127.0.0.1:46718': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:15,694 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33345': TLS handshake failed with remote 'tls://127.0.0.1:46724': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:15,694 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:31,877 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:31,891 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:36921
+2026-02-11 05:43:31,893 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43387/status
+2026-02-11 05:43:31,900 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:31,973 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35879'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:31,974 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35879' closed.
+2026-02-11 05:43:31,974 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39231'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:31,974 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39231' closed.
+2026-02-11 05:43:31,979 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36921': TLS handshake failed with remote 'tls://127.0.0.1:34876': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:31,979 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36921': TLS handshake failed with remote 'tls://127.0.0.1:34888': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:31,980 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -16111,17 +14277,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:16,703 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:16,713 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:35749
-2025-01-07 20:25:16,716 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:42417/status
-2025-01-07 20:25:16,718 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:16,749 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39163'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:16,749 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39163' closed.
-2025-01-07 20:25:16,749 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38979'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:16,749 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38979' closed.
-2025-01-07 20:25:16,750 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35749': TLS handshake failed with remote 'tls://127.0.0.1:33208': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:16,750 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35749': TLS handshake failed with remote 'tls://127.0.0.1:33218': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:16,750 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:32,985 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:32,995 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:39789
+2026-02-11 05:43:32,997 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:33947/status
+2026-02-11 05:43:33,003 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:33,033 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40999'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:33,033 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40999' closed.
+2026-02-11 05:43:33,033 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44517'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:33,033 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44517' closed.
+2026-02-11 05:43:33,035 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39789': TLS handshake failed with remote 'tls://127.0.0.1:34228': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:33,035 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39789': TLS handshake failed with remote 'tls://127.0.0.1:34234': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:33,035 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -16190,17 +14356,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:17,755 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:17,765 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:41957
-2025-01-07 20:25:17,768 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46359/status
-2025-01-07 20:25:17,770 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:17,802 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35479'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:17,802 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35479' closed.
-2025-01-07 20:25:17,803 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39149'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:17,803 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39149' closed.
-2025-01-07 20:25:17,803 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41957': TLS handshake failed with remote 'tls://127.0.0.1:48372': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:17,803 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41957': TLS handshake failed with remote 'tls://127.0.0.1:48388': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:17,804 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:34,046 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:34,056 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:44865
+2026-02-11 05:43:34,060 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:33123/status
+2026-02-11 05:43:34,071 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:34,102 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40603'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:34,102 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40603' closed.
+2026-02-11 05:43:34,102 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34305'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:34,102 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34305' closed.
+2026-02-11 05:43:34,107 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44865': TLS handshake failed with remote 'tls://127.0.0.1:59256': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:34,107 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44865': TLS handshake failed with remote 'tls://127.0.0.1:59270': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:34,108 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -16269,17 +14435,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:18,811 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:18,821 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40519
-2025-01-07 20:25:18,824 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43161/status
-2025-01-07 20:25:18,831 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:18,857 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46415'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:18,857 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46415' closed.
-2025-01-07 20:25:18,858 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33217'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:18,858 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33217' closed.
-2025-01-07 20:25:18,858 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40519': TLS handshake failed with remote 'tls://127.0.0.1:54460': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:18,859 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40519': TLS handshake failed with remote 'tls://127.0.0.1:54472': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:18,859 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:43:35,113 - distributed.scheduler - INFO - State start
+2026-02-11 05:43:35,131 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:36529
+2026-02-11 05:43:35,134 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41999/status
+2026-02-11 05:43:35,140 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:43:35,177 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37685'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:35,177 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37685' closed.
+2026-02-11 05:43:35,177 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40339'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:43:35,177 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40339' closed.
+2026-02-11 05:43:35,178 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36529': TLS handshake failed with remote 'tls://127.0.0.1:41988': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:35,178 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36529': TLS handshake failed with remote 'tls://127.0.0.1:42000': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:43:35,179 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -16348,667 +14514,27 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:25:19,871 - distributed.scheduler - INFO - State start
-2025-01-07 20:25:19,878 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:38075
-2025-01-07 20:25:19,880 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:42959/status
-2025-01-07 20:25:19,883 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:25:19,901 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46645'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:19,901 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46645' closed.
-2025-01-07 20:25:19,901 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44911'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:25:19,901 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44911' closed.
-2025-01-07 20:25:19,903 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38075': TLS handshake failed with remote 'tls://127.0.0.1:52840': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:19,903 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38075': TLS handshake failed with remote 'tls://127.0.0.1:52850': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:25:19,903 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-____________________________ test_log_remove_worker ____________________________
-
-c = <Client: No scheduler connected>
-s = <Scheduler 'tcp://127.0.0.1:35511', workers: 0, cores: 0, tasks: 0>
-a = <Worker 'tcp://127.0.0.1:33363', name: 0, status: closed, stored: 2, running: 1/1, ready: 0, comm: 0, waiting: 0>
-b = <Worker 'tcp://127.0.0.1:35233', name: 1, status: closed, stored: 2, running: 1/2, ready: 0, comm: 0, waiting: 0>
-
-    @gen_cluster(client=True)
-    async def test_log_remove_worker(c, s, a, b):
-        # Computed task
-        x = c.submit(inc, 1, key="x", workers=a.address)
-        await x
-        ev = Event()
-        # Processing task
-        y = c.submit(
-            lambda ev: ev.wait(), ev, key="y", workers=a.address, allow_other_workers=True
-        )
-        await wait_for_state("y", "processing", s)
-        # Scattered task
-        z = await c.scatter({"z": 3}, workers=a.address)
-    
-        s._broker.truncate()
-    
-        with captured_logger("distributed.scheduler", level=logging.INFO) as log:
-            # Successful graceful shutdown
-            await s.retire_workers([a.address], stimulus_id="graceful")
-            # Refuse to retire gracefully as there's nowhere to put x and z
-            await s.retire_workers([b.address], stimulus_id="graceful_abort")
-            await asyncio.sleep(0.2)
-            # Ungraceful shutdown
-            await s.remove_worker(b.address, stimulus_id="ungraceful")
-            await asyncio.sleep(0.2)
-        await ev.set()
-    
-        assert log.getvalue().splitlines() == [
-            # Successful graceful
-            f"Retire worker addresses (stimulus_id='graceful') ['{a.address}']",
-            f"Remove worker addr: {a.address} name: {a.name} (stimulus_id='graceful')",
-            f"Retired worker '{a.address}' (stimulus_id='graceful')",
-            # Aborted graceful
-            f"Retire worker addresses (stimulus_id='graceful_abort') ['{b.address}']",
-            f"Could not retire worker '{b.address}': unique data could not be "
-            "moved to any other worker (stimulus_id='graceful_abort')",
-            # Ungraceful
-            f"Remove worker addr: {b.address} name: {b.name} (stimulus_id='ungraceful')",
-            f"Removing worker '{b.address}' caused the cluster to lose already "
-            "computed task(s), which will be recomputed elsewhere: {'x'} "
-            "(stimulus_id='ungraceful')",
-            f"Removing worker '{b.address}' caused the cluster to lose scattered "
-            "data, which can't be recovered: {'z'} (stimulus_id='ungraceful')",
-            "Lost all workers",
-        ]
-    
-        events = {topic: [ev for _, ev in evs] for topic, evs in s.get_events().items()}
-        for evs in events.values():
-            for ev in evs:
-                if ev.get("action", None) == "retire-workers":
-                    for k in ("retired", "could-not-retire"):
-                        ev[k] = {addr: "snip" for addr in ev[k]}
-                if "stimulus_id" in ev:  # Strip timestamp
-                    ev["stimulus_id"] = ev["stimulus_id"].rsplit("-", 1)[0]
-    
->       assert events == {
-            a.address: [
-                {
-                    "action": "worker-status-change",
-                    "prev-status": "running",
-                    "status": "closing_gracefully",
-                    "stimulus_id": "graceful",
-                },
-                {
-                    "action": "remove-worker",
-                    "lost-computed-tasks": set(),
-                    "lost-scattered-tasks": set(),
-                    "processing-tasks": {"y"},
-                    "expected": True,
-                    "stimulus_id": "graceful",
-                },
-                {"action": "retired", "stimulus_id": "graceful"},
-            ],
-            b.address: [
-                {
-                    "action": "worker-status-change",
-                    "prev-status": "running",
-                    "status": "closing_gracefully",
-                    "stimulus_id": "graceful_abort",
-                },
-                {"action": "could-not-retire", "stimulus_id": "graceful_abort"},
-                {
-                    "action": "worker-status-change",
-                    "prev-status": "closing_gracefully",
-                    "status": "running",
-                    "stimulus_id": "worker-status-change",
-                },
-                {
-                    "action": "remove-worker",
-                    "lost-computed-tasks": {"x"},
-                    "lost-scattered-tasks": {"z"},
-                    "processing-tasks": {"y"},
-                    "expected": False,
-                    "stimulus_id": "ungraceful",
-                },
-                {"action": "closing-worker", "reason": "scheduler-remove-worker"},
-            ],
-            "all": [
-                {
-                    "action": "remove-worker",
-                    "lost-computed-tasks": set(),
-                    "lost-scattered-tasks": set(),
-                    "processing-tasks": {"y"},
-                    "expected": True,
-                    "stimulus_id": "graceful",
-                    "worker": a.address,
-                },
-                {
-                    "action": "retire-workers",
-                    "stimulus_id": "graceful",
-                    "retired": {a.address: "snip"},
-                    "could-not-retire": {},
-                },
-                {
-                    "action": "retire-workers",
-                    "stimulus_id": "graceful_abort",
-                    "retired": {},
-                    "could-not-retire": {b.address: "snip"},
-                },
-                {
-                    "action": "remove-worker",
-                    "lost-computed-tasks": {"x"},
-                    "lost-scattered-tasks": {"z"},
-                    "processing-tasks": {"y"},
-                    "expected": False,
-                    "stimulus_id": "ungraceful",
-                    "worker": b.address,
-                },
-            ],
-            "worker-get-client": [{"client": c.id, "timeout": 5, "worker": b.address}],
-        }
-E       AssertionError: assert {'tcp://127.0.0.1:33363': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'all': [{'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True, 'worker': 'tcp://127.0.0.1:33363'}, {'action': 'retire-workers', 'retired': {'tcp://127.0.0.1:33363': 'snip'}, 'could-not-retire': {}, 'stimulus_id': 'graceful'}, {'action': 'retire-workers', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:35233': 'snip'}, 'stimulus_id': 'graceful_abort'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False, 'worker': 'tcp://127.0.0.1:35233'}], 'tcp://127.0.0.1:35233': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'worker-get-client': [{'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0', 'timeout': 5, 'worker': 'tcp://127.0.0.1:33363'}, {'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35233'}]} == {'tcp://127.0.0.1:33363': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful'}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'tcp://127.0.0.1:35233': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful'}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'all': [{'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful', 'worker': 'tcp://127.0.0.1:33363'}, {'action': 'retire-workers', 'stimulus_id': 'graceful', 'retired': {'tcp://127.0.0.1:33363': 'snip'}, 'could-not-retire': {}}, {'action': 'retire-workers', 'stimulus_id': 'graceful_abort', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:35233': 'snip'}}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful', 'worker': 'tcp://127.0.0.1:35233'}], 'worker-get-client': [{'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35233'}]}
-E         
-E         Common items:
-E         {'all': [{'action': 'remove-worker',
-E                   'expected': True,
-E                   'lost-computed-tasks': set(),
-E                   'lost-scattered-tasks': set(),
-E                   'processing-tasks': {'y'},
-E                   'stimulus_id': 'graceful',
-E                   'worker': 'tcp://127.0.0.1:33363'},
-E                  {'action': 'retire-workers',
-E                   'could-not-retire': {},
-E                   'retired': {'tcp://127.0.0.1:33363': 'snip'},
-E                   'stimulus_id': 'graceful'},
-E                  {'action': 'retire-workers',
-E                   'could-not-retire': {'tcp://127.0.0.1:35233': 'snip'},
-E                   'retired': {},
-E                   'stimulus_id': 'graceful_abort'},
-E                  {'action': 'remove-worker',
-E                   'expected': False,
-E                   'lost-computed-tasks': {'x'},
-E                   'lost-scattered-tasks': {'z'},
-E                   'processing-tasks': {'y'},
-E                   'stimulus_id': 'ungraceful',
-E                   'worker': 'tcp://127.0.0.1:35233'}],
-E          'tcp://127.0.0.1:33363': [{'action': 'worker-status-change',
-E                                     'prev-status': 'running',
-E                                     'status': 'closing_gracefully',
-E                                     'stimulus_id': 'graceful'},
-E                                    {'action': 'remove-worker',
-E                                     'expected': True,
-E                                     'lost-computed-tasks': set(),
-E                                     'lost-scattered-tasks': set(),
-E                                     'processing-tasks': {'y'},
-E                                     'stimulus_id': 'graceful'},
-E                                    {'action': 'retired', 'stimulus_id': 'graceful'}],
-E          'tcp://127.0.0.1:35233': [{'action': 'worker-status-change',
-E                                     'prev-status': 'running',
-E                                     'status': 'closing_gracefully',
-E                                     'stimulus_id': 'graceful_abort'},
-E                                    {'action': 'could-not-retire',
-E                                     'stimulus_id': 'graceful_abort'},
-E                                    {'action': 'worker-status-change',
-E                                     'prev-status': 'closing_gracefully',
-E                                     'status': 'running',
-E                                     'stimulus_id': 'worker-status-change'},
-E                                    {'action': 'remove-worker',
-E                                     'expected': False,
-E                                     'lost-computed-tasks': {'x'},
-E                                     'lost-scattered-tasks': {'z'},
-E                                     'processing-tasks': {'y'},
-E                                     'stimulus_id': 'ungraceful'},
-E                                    {'action': 'closing-worker',
-E                                     'reason': 'scheduler-remove-worker'}]}
-E         Differing items:
-E         {'worker-get-client': [{'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0', 'timeout': 5, 'worker': 'tcp://127.0.0.1:33363'}, {'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35233'}]} != {'worker-get-client': [{'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35233'}]}
-E         
-E         Full diff:
-E           {
-E               'all': [
-E                   {
-E                       'action': 'remove-worker',
-E                       'expected': True,
-E                       'lost-computed-tasks': set(),
-E                       'lost-scattered-tasks': set(),
-E                       'processing-tasks': {
-E                           'y',
-E                       },
-E                       'stimulus_id': 'graceful',
-E                       'worker': 'tcp://127.0.0.1:33363',
-E                   },
-E                   {
-E                       'action': 'retire-workers',
-E                       'could-not-retire': {},
-E                       'retired': {
-E                           'tcp://127.0.0.1:33363': 'snip',
-E                       },
-E                       'stimulus_id': 'graceful',
-E                   },
-E                   {
-E                       'action': 'retire-workers',
-E                       'could-not-retire': {
-E                           'tcp://127.0.0.1:35233': 'snip',
-E                       },
-E                       'retired': {},
-E                       'stimulus_id': 'graceful_abort',
-E                   },
-E                   {
-E                       'action': 'remove-worker',
-E                       'expected': False,
-E                       'lost-computed-tasks': {
-E                           'x',
-E                       },
-E                       'lost-scattered-tasks': {
-E                           'z',
-E                       },
-E                       'processing-tasks': {
-E                           'y',
-E                       },
-E                       'stimulus_id': 'ungraceful',
-E                       'worker': 'tcp://127.0.0.1:35233',
-E                   },
-E               ],
-E               'tcp://127.0.0.1:33363': [
-E                   {
-E                       'action': 'worker-status-change',
-E                       'prev-status': 'running',
-E                       'status': 'closing_gracefully',
-E                       'stimulus_id': 'graceful',
-E                   },
-E                   {
-E                       'action': 'remove-worker',
-E                       'expected': True,
-E                       'lost-computed-tasks': set(),
-E                       'lost-scattered-tasks': set(),
-E                       'processing-tasks': {
-E                           'y',
-E                       },
-E                       'stimulus_id': 'graceful',
-E                   },
-E                   {
-E                       'action': 'retired',
-E                       'stimulus_id': 'graceful',
-E                   },
-E               ],
-E               'tcp://127.0.0.1:35233': [
-E                   {
-E                       'action': 'worker-status-change',
-E                       'prev-status': 'running',
-E                       'status': 'closing_gracefully',
-E                       'stimulus_id': 'graceful_abort',
-E                   },
-E                   {
-E                       'action': 'could-not-retire',
-E                       'stimulus_id': 'graceful_abort',
-E                   },
-E                   {
-E                       'action': 'worker-status-change',
-E                       'prev-status': 'closing_gracefully',
-E                       'status': 'running',
-E                       'stimulus_id': 'worker-status-change',
-E                   },
-E                   {
-E                       'action': 'remove-worker',
-E                       'expected': False,
-E                       'lost-computed-tasks': {
-E                           'x',
-E                       },
-E                       'lost-scattered-tasks': {
-E                           'z',
-E                       },
-E                       'processing-tasks': {
-E                           'y',
-E                       },
-E                       'stimulus_id': 'ungraceful',
-E                   },
-E                   {
-E                       'action': 'closing-worker',
-E                       'reason': 'scheduler-remove-worker',
-E                   },
-E               ],
-E               'worker-get-client': [
-E                   {
-E                       'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0',
-E                       'timeout': 5,
-E         +             'worker': 'tcp://127.0.0.1:33363',
-E         +         },
-E         +         {
-E         +             'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0',
-E         +             'timeout': 5,
-E                       'worker': 'tcp://127.0.0.1:35233',
-E                   },
-E               ],
-E           }
-
-distributed/tests/test_worker.py:3016: AssertionError
------------------------------ Captured stderr call -----------------------------
-2025-01-07 20:29:03,849 - distributed.scheduler - INFO - State start
-2025-01-07 20:29:03,871 - distributed.scheduler - INFO -   Scheduler at:     tcp://127.0.0.1:35511
-2025-01-07 20:29:03,874 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:44861/status
-2025-01-07 20:29:03,885 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:29:03,911 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:33363
-2025-01-07 20:29:03,922 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:33363
-2025-01-07 20:29:03,925 - distributed.worker - INFO -           Worker name:                          0
-2025-01-07 20:29:03,928 - distributed.worker - INFO -          dashboard at:            127.0.0.1:46009
-2025-01-07 20:29:03,939 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:35511
-2025-01-07 20:29:03,949 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:29:03,952 - distributed.worker - INFO -               Threads:                          1
-2025-01-07 20:29:03,963 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:29:03,970 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-2tkyxmjy
-2025-01-07 20:29:03,981 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:29:03,989 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:35233
-2025-01-07 20:29:03,991 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:35233
-2025-01-07 20:29:04,002 - distributed.worker - INFO -           Worker name:                          1
-2025-01-07 20:29:04,013 - distributed.worker - INFO -          dashboard at:            127.0.0.1:39171
-2025-01-07 20:29:04,016 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:35511
-2025-01-07 20:29:04,018 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:29:04,029 - distributed.worker - INFO -               Threads:                          2
-2025-01-07 20:29:04,032 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:29:04,043 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-xcrp_x7i
-2025-01-07 20:29:04,054 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:29:04,210 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:33363 name: 0
-2025-01-07 20:29:04,290 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:33363
-2025-01-07 20:29:04,293 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:59650
-2025-01-07 20:29:04,294 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:35233 name: 1
-2025-01-07 20:29:04,371 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:35233
-2025-01-07 20:29:04,382 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:59656
-2025-01-07 20:29:04,383 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:29:04,387 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:29:04,399 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:35511
-2025-01-07 20:29:04,410 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:29:04,421 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:35511
-2025-01-07 20:29:04,424 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:29:04,437 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:35511
-2025-01-07 20:29:04,437 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:35511
-2025-01-07 20:29:04,516 - distributed.scheduler - INFO - Receive client connection: Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0
-2025-01-07 20:29:04,598 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:59670
-2025-01-07 20:29:04,674 - distributed.scheduler - INFO - Retire worker addresses (stimulus_id='graceful') ['tcp://127.0.0.1:33363']
-2025-01-07 20:29:04,674 - distributed.active_memory_manager - INFO - Retiring worker tcp://127.0.0.1:33363; 2 keys are being moved away.
-2025-01-07 20:29:04,709 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:33363 name: 0 (stimulus_id='graceful')
-2025-01-07 20:29:04,709 - distributed.scheduler - INFO - Retired worker 'tcp://127.0.0.1:33363' (stimulus_id='graceful')
-2025-01-07 20:29:04,710 - distributed.scheduler - INFO - Retire worker addresses (stimulus_id='graceful_abort') ['tcp://127.0.0.1:35233']
-2025-01-07 20:29:04,710 - distributed.active_memory_manager - WARNING - Tried retiring worker tcp://127.0.0.1:35233, but 2 tasks could not be moved as there are no suitable workers to receive them. The worker will not be retired.
-2025-01-07 20:29:04,711 - distributed.scheduler - WARNING - Could not retire worker 'tcp://127.0.0.1:35233': unique data could not be moved to any other worker (stimulus_id='graceful_abort')
-2025-01-07 20:29:04,913 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:35233 name: 1 (stimulus_id='ungraceful')
-2025-01-07 20:29:04,913 - distributed.scheduler - WARNING - Removing worker 'tcp://127.0.0.1:35233' caused the cluster to lose already computed task(s), which will be recomputed elsewhere: {'x'} (stimulus_id='ungraceful')
-2025-01-07 20:29:04,913 - distributed.scheduler - ERROR - Removing worker 'tcp://127.0.0.1:35233' caused the cluster to lose scattered data, which can't be recovered: {'z'} (stimulus_id='ungraceful')
-2025-01-07 20:29:04,913 - distributed.scheduler - INFO - Lost all workers
-2025-01-07 20:29:04,914 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:35233. Reason: scheduler-remove-worker
-2025-01-07 20:29:04,922 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name="execute('y')" coro=<Worker.execute() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
-2025-01-07 20:29:04,935 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:29:04,943 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:59656; closing.
-2025-01-07 20:29:05,124 - distributed.scheduler - INFO - Remove client Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0
-2025-01-07 20:29:05,130 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:59670; closing.
-2025-01-07 20:29:05,130 - distributed.scheduler - INFO - Remove client Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0
-2025-01-07 20:29:05,133 - distributed.scheduler - INFO - Close client connection: Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0
-2025-01-07 20:29:05,137 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:33363. Reason: worker-close
-2025-01-07 20:29:05,141 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name="execute('y')" coro=<Worker.execute() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
-2025-01-07 20:29:05,145 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:29:05,150 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:59650; closing.
-2025-01-07 20:29:05,150 - distributed.core - INFO - Connection to tcp://127.0.0.1:35511 has been closed.
-2025-01-07 20:29:05,161 - distributed.core - INFO - Connection to tcp://127.0.0.1:35511 has been closed.
-2025-01-07 20:29:05,173 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
-2025-01-07 20:29:05,176 - distributed.scheduler - INFO - Scheduler closing all comms
-__________________________ test_fail_to_pickle_spill ___________________________
-
-fut = <coroutine object TCP.read at 0xffff033c4d60>, timeout = 5
-
-    async def wait_for(fut: Awaitable[T], timeout: float) -> T:
-        async with asyncio.timeout(timeout):
->           return await fut
-
-distributed/utils.py:1914: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = <TCP (closed) Client->Scheduler local=tcp://127.0.0.1:59178 remote=tcp://127.0.0.1:37561>
-deserializers = None
-
-    async def read(self, deserializers=None):
-        stream = self.stream
-        if stream is None:
-            raise CommClosedError()
-    
-        fmt = "Q"
-        fmt_size = struct.calcsize(fmt)
-    
-        try:
-            # Don't store multiple numpy or parquet buffers into the same buffer, or
-            # none will be released until all are released.
->           frames_nosplit_nbytes_bin = await stream.read_bytes(fmt_size)
-E           asyncio.exceptions.CancelledError
-
-distributed/comm/tcp.py:225: CancelledError
-
-The above exception was the direct cause of the following exception:
-
-args = (), kwds = {}
-
-    @wraps(func)
-    def inner(*args, **kwds):
-        with self._recreate_cm():
->           return func(*args, **kwds)
-
-/usr/lib/python3.13/contextlib.py:85: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-/usr/lib/python3.13/contextlib.py:85: in inner
-    return func(*args, **kwds)
-distributed/utils_test.py:1090: in test_func
-    return _run_and_close_tornado(async_fn_outer)
-distributed/utils_test.py:380: in _run_and_close_tornado
-    return asyncio_run(inner_fn(), loop_factory=get_loop_factory())
-/usr/lib/python3.13/asyncio/runners.py:195: in run
-    return runner.run(main)
-/usr/lib/python3.13/asyncio/runners.py:118: in run
-    return self._loop.run_until_complete(task)
-/usr/lib/python3.13/asyncio/base_events.py:720: in run_until_complete
-    return future.result()
-distributed/utils_test.py:377: in inner_fn
-    return await async_fn(*args, **kwargs)
-distributed/utils_test.py:1087: in async_fn_outer
-    return await utils_wait_for(async_fn(), timeout=timeout * 2)
-distributed/utils.py:1914: in wait_for
-    return await fut
-distributed/utils_test.py:1008: in async_fn
-    _client_factory(s) as c,
-/usr/lib/python3.13/contextlib.py:214: in __aenter__
-    return await anext(self.gen)
-distributed/utils_test.py:957: in _client_factory
-    async with Client(
-distributed/client.py:1700: in __aenter__
-    await self
-distributed/client.py:1512: in _start
-    await self._ensure_connected(timeout=timeout)
-distributed/client.py:1604: in _ensure_connected
-    msg = await wait_for(comm.read(), timeout)
-distributed/utils.py:1913: in wait_for
-    async with asyncio.timeout(timeout):
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = <Timeout [expired]>
-exc_type = <class 'asyncio.exceptions.CancelledError'>
-exc_val = CancelledError(), exc_tb = <traceback object at 0xffff44463900>
-
-    async def __aexit__(
-        self,
-        exc_type: Optional[Type[BaseException]],
-        exc_val: Optional[BaseException],
-        exc_tb: Optional[TracebackType],
-    ) -> Optional[bool]:
-        assert self._state in (_State.ENTERED, _State.EXPIRING)
-    
-        if self._timeout_handler is not None:
-            self._timeout_handler.cancel()
-            self._timeout_handler = None
-    
-        if self._state is _State.EXPIRING:
-            self._state = _State.EXPIRED
-    
-            if self._task.uncancel() <= self._cancelling and exc_type is not None:
-                # Since there are no new cancel requests, we're
-                # handling this.
-                if issubclass(exc_type, exceptions.CancelledError):
->                   raise TimeoutError from exc_val
-E                   TimeoutError
-
-/usr/lib/python3.13/asyncio/timeouts.py:116: TimeoutError
------------------------------ Captured stderr call -----------------------------
-2025-01-07 20:30:12,636 - distributed.scheduler - INFO - State start
-2025-01-07 20:30:12,662 - distributed.scheduler - INFO -   Scheduler at:     tcp://127.0.0.1:37561
-2025-01-07 20:30:12,678 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34229/status
-2025-01-07 20:30:12,680 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:30:12,712 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:45109
-2025-01-07 20:30:12,735 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:45109
-2025-01-07 20:30:12,746 - distributed.worker - INFO -           Worker name:                          0
-2025-01-07 20:30:12,765 - distributed.worker - INFO -          dashboard at:            127.0.0.1:45731
-2025-01-07 20:30:12,768 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:37561
-2025-01-07 20:30:12,783 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:30:12,797 - distributed.worker - INFO -               Threads:                          1
-2025-01-07 20:30:12,800 - distributed.worker - INFO -                Memory:                   0.98 kiB
-2025-01-07 20:30:12,819 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-v3xmy4ct
-2025-01-07 20:30:12,834 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:30:12,985 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:45109 name: 0
-2025-01-07 20:30:13,102 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:45109
-2025-01-07 20:30:13,121 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:59172
-2025-01-07 20:30:13,122 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:30:13,133 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:37561
-2025-01-07 20:30:13,136 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:30:13,156 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:37561
-2025-01-07 20:30:13,283 - distributed.scheduler - INFO - Receive client connection: Client-c7be3546-cd9a-11ef-917c-e386edcdc4c0
-2025-01-07 20:30:13,406 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:59178
-2025-01-07 20:30:20,559 - distributed.worker.memory - WARNING - gc.collect() took 1.911s. This is usually a sign that some tasks handle too many Python objects at the same time. Rechunking the work into smaller tasks might help.
-2025-01-07 20:30:20,570 - distributed.worker.memory - WARNING - Worker is at 101868339% memory usage. Pausing worker.  Process memory: 0.95 GiB -- Worker memory limit: 0.98 kiB
-2025-01-07 20:30:20,578 - distributed.worker.memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 0.95 GiB -- Worker memory limit: 0.98 kiB
-2025-01-07 20:30:20,590 - distributed.core - INFO - Event loop was unresponsive in Worker for 7.44s.  This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability.
-2025-01-07 20:30:20,591 - distributed.core - INFO - Event loop was unresponsive in Scheduler for 7.18s.  This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability.
-2025-01-07 20:30:20,597 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:45109. Reason: worker-close
-2025-01-07 20:30:20,609 - distributed.core - INFO - Connection to tcp://127.0.0.1:59178 has been closed.
-2025-01-07 20:30:20,609 - distributed.scheduler - INFO - Remove client Client-c7be3546-cd9a-11ef-917c-e386edcdc4c0
-2025-01-07 20:30:20,612 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:30:20,637 - distributed.scheduler - INFO - Close client connection: Client-c7be3546-cd9a-11ef-917c-e386edcdc4c0
-2025-01-07 20:30:20,646 - distributed.worker - ERROR - Failed to communicate with scheduler during heartbeat.
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 225, in read
-    frames_nosplit_nbytes_bin = await stream.read_bytes(fmt_size)
-                                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-tornado.iostream.StreamClosedError: Stream is closed
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1269, in heartbeat
-    response = await retry_operation(
-               ^^^^^^^^^^^^^^^^^^^^^^
-    ...<14 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_comm.py", line 441, in retry_operation
-    return await retry(
-           ^^^^^^^^^^^^
-    ...<5 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_comm.py", line 420, in retry
-    return await coro()
-           ^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1259, in send_recv_from_rpc
-    return await send_recv(comm=comm, op=key, **kwargs)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1018, in send_recv
-    response = await comm.read(deserializers=deserializers)
-               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read
-    convert_stream_closed_error(self, e)
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 142, in convert_stream_closed_error
-    raise CommClosedError(f"in {obj}: {exc}") from exc
-distributed.comm.core.CommClosedError: in <TCP (closed) ConnectionPool.heartbeat_worker local=tcp://127.0.0.1:54914 remote=tcp://127.0.0.1:37561>: Stream is closed
-2025-01-07 20:30:20,656 - distributed.core - INFO - Connection to tcp://127.0.0.1:37561 has been closed.
-2025-01-07 20:30:20,673 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:59172; closing.
-2025-01-07 20:30:20,674 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:45109 name: 0 (stimulus_id='handle-worker-cleanup-1736325020.6739438')
-2025-01-07 20:30:20,676 - distributed.scheduler - INFO - Lost all workers
-2025-01-07 20:30:20,689 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
-2025-01-07 20:30:20,692 - distributed.scheduler - INFO - Scheduler closing all comms
 ============================= slowest 20 durations =============================
-60.01s call     distributed/tests/test_tls_functional.py::test_retire_workers
-60.00s call     distributed/tests/test_tls_functional.py::test_nanny
-23.33s call     distributed/tests/test_gc.py::test_gc_diagnosis_cpu_time
-11.02s call     distributed/shuffle/tests/test_rechunk.py::test_rechunk_method
-10.05s call     distributed/tests/test_utils_test.py::test_popen_timeout
-9.96s call     distributed/tests/test_nanny.py::test_num_fds
-9.31s call     distributed/diagnostics/tests/test_progress.py::test_group_timing
-9.01s call     distributed/diagnostics/tests/test_progress.py::test_AllProgress
-8.93s call     distributed/tests/test_worker.py::test_gather_many_small[False]
-8.78s call     distributed/tests/test_stress.py::test_cancel_stress
-8.41s call     distributed/tests/test_nanny.py::test_environ_plugin
-8.24s call     distributed/shuffle/tests/test_rechunk.py::test_homogeneously_schedule_unpack
-8.09s call     distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill
-7.96s call     distributed/tests/test_worker_memory.py::test_pause_executor_with_memory_monitor
-7.89s call     distributed/tests/test_stress.py::test_cancel_stress_sync
-7.70s call     distributed/tests/test_worker_memory.py::test_override_data_vs_memory_monitor
-7.21s setup    distributed/tests/test_worker_client.py::test_sync
-7.05s call     distributed/shuffle/tests/test_rechunk.py::test_rechunk_with_partially_unknown_dimension[x5-chunks5]
-6.93s call     distributed/tests/test_failed_workers.py::test_worker_doesnt_await_task_completion
-6.62s call     distributed/shuffle/tests/test_rechunk.py::test_rechunk_with_partially_unknown_dimension[x3-chunks3]
+60.00s call     distributed/tests/test_tls_functional.py::test_retire_workers
+14.35s call     distributed/tests/test_gc.py::test_gc_diagnosis_cpu_time
+10.03s call     distributed/tests/test_utils_test.py::test_popen_timeout
+8.78s call     distributed/tests/test_stress.py::test_cancel_stress_sync
+7.54s call     distributed/tests/test_stress.py::test_cancel_stress
+6.97s call     distributed/tests/test_nanny.py::test_num_fds
+6.82s call     distributed/tests/test_utils_test.py::test_bare_cluster
+6.75s call     distributed/diagnostics/tests/test_progress.py::test_group_timing
+6.57s call     distributed/tests/test_failed_workers.py::test_worker_doesnt_await_task_completion
+6.14s call     distributed/tests/test_steal.py::test_restart
+6.05s call     distributed/diagnostics/tests/test_progress.py::test_AllProgress
+5.97s call     distributed/tests/test_worker.py::test_worker_descopes_data
+5.68s call     distributed/tests/test_steal.py::test_balance_multiple_to_replica
+5.59s call     distributed/tests/test_steal.py::test_balance_with_longer_task
+5.49s call     distributed/tests/test_steal.py::test_balance_to_larger_dependency
+5.34s call     distributed/shuffle/tests/test_rechunk.py::test_homogeneously_schedule_unpack
+5.25s call     distributed/shuffle/tests/test_rechunk.py::test_rechunk_method
+5.12s call     distributed/tests/test_chaos.py::test_KillWorker[sys.exit]
+5.12s call     distributed/tests/test_steal.py::test_balance_prefers_busier_with_dependency
+4.87s call     distributed/tests/test_steal.py::test_trivial_workload_should_not_cause_work_stealing
 =========================== short test summary info ============================
 SKIPPED [1] distributed/cli/tests/test_dask_ssh.py:9: could not import 'paramiko': No module named 'paramiko'
 SKIPPED [1] distributed/comm/tests/test_ucx.py:15: could not import 'ucp': No module named 'ucp'
@@ -17224,189 +14750,10 @@
 SKIPPED [1] distributed/tests/test_worker_memory.py:871: need --runslow option to run
 SKIPPED [2] distributed/tests/test_worker_memory.py:883: need --runslow option to run
 SKIPPED [1] distributed/tests/test_worker_memory.py:997: need --runslow option to run
-FAILED distributed/diagnostics/tests/test_task_stream.py::test_client_sync - AssertionError: assert 1 == 10
- +  where 1 = len([{'key': 'inc-bf6e4c654a999b0a0ace1814a611b982', 'metadata': {}, 'nbytes': 28, 'startstops': ({'action': 'compute', 'start': 1736323136.2691898, 'stop': 1736323136.2698834},), ...}])
- +    where [{'key': 'inc-bf6e4c654a999b0a0ace1814a611b982', 'metadata': {}, 'nbytes': 28, 'startstops': ({'action': 'compute', 'start': 1736323136.2691898, 'stop': 1736323136.2698834},), ...}] = <distributed.client.get_task_stream object at 0xffff98392120>.data
-FAILED distributed/tests/test_priorities.py::test_last_in_first_out[queue on worker] - assert not True
- +  where True = all(<generator object test_last_in_first_out.<locals>.<genexpr> at 0xffff5874db10>)
-FAILED distributed/tests/test_tls_functional.py::test_nanny - TimeoutError
+FAILED distributed/tests/test_steal.py::test_steal_when_more_tasks - assert 1770738045.9156072 < (1770738043.0377011 + 1)
+ +  where 1770738045.9156072 = time()
 FAILED distributed/tests/test_tls_functional.py::test_retire_workers - TimeoutError
-FAILED distributed/tests/test_worker.py::test_log_remove_worker - AssertionError: assert {'tcp://127.0.0.1:33363': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'all': [{'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True, 'worker': 'tcp://127.0.0.1:33363'}, {'action': 'retire-workers', 'retired': {'tcp://127.0.0.1:33363': 'snip'}, 'could-not-retire': {}, 'stimulus_id': 'graceful'}, {'action': 'retire-workers', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:35233': 'snip'}, 'stimulus_id': 'graceful_abort'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False, 'worker': 'tcp://127.0.0.1:35233'}], 'tcp://127.0.0.1:35233': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'worker-get-client': [{'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0', 'timeout': 5, 'worker': 'tcp://127.0.0.1:33363'}, {'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35233'}]} == {'tcp://127.0.0.1:33363': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful'}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'tcp://127.0.0.1:35233': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful'}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'all': [{'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful', 'worker': 'tcp://127.0.0.1:33363'}, {'action': 'retire-workers', 'stimulus_id': 'graceful', 'retired': {'tcp://127.0.0.1:33363': 'snip'}, 'could-not-retire': {}}, {'action': 'retire-workers', 'stimulus_id': 'graceful_abort', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:35233': 'snip'}}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful', 'worker': 'tcp://127.0.0.1:35233'}], 'worker-get-client': [{'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35233'}]}
-  
-  Common items:
-  {'all': [{'action': 'remove-worker',
-            'expected': True,
-            'lost-computed-tasks': set(),
-            'lost-scattered-tasks': set(),
-            'processing-tasks': {'y'},
-            'stimulus_id': 'graceful',
-            'worker': 'tcp://127.0.0.1:33363'},
-           {'action': 'retire-workers',
-            'could-not-retire': {},
-            'retired': {'tcp://127.0.0.1:33363': 'snip'},
-            'stimulus_id': 'graceful'},
-           {'action': 'retire-workers',
-            'could-not-retire': {'tcp://127.0.0.1:35233': 'snip'},
-            'retired': {},
-            'stimulus_id': 'graceful_abort'},
-           {'action': 'remove-worker',
-            'expected': False,
-            'lost-computed-tasks': {'x'},
-            'lost-scattered-tasks': {'z'},
-            'processing-tasks': {'y'},
-            'stimulus_id': 'ungraceful',
-            'worker': 'tcp://127.0.0.1:35233'}],
-   'tcp://127.0.0.1:33363': [{'action': 'worker-status-change',
-                              'prev-status': 'running',
-                              'status': 'closing_gracefully',
-                              'stimulus_id': 'graceful'},
-                             {'action': 'remove-worker',
-                              'expected': True,
-                              'lost-computed-tasks': set(),
-                              'lost-scattered-tasks': set(),
-                              'processing-tasks': {'y'},
-                              'stimulus_id': 'graceful'},
-                             {'action': 'retired', 'stimulus_id': 'graceful'}],
-   'tcp://127.0.0.1:35233': [{'action': 'worker-status-change',
-                              'prev-status': 'running',
-                              'status': 'closing_gracefully',
-                              'stimulus_id': 'graceful_abort'},
-                             {'action': 'could-not-retire',
-                              'stimulus_id': 'graceful_abort'},
-                             {'action': 'worker-status-change',
-                              'prev-status': 'closing_gracefully',
-                              'status': 'running',
-                              'stimulus_id': 'worker-status-change'},
-                             {'action': 'remove-worker',
-                              'expected': False,
-                              'lost-computed-tasks': {'x'},
-                              'lost-scattered-tasks': {'z'},
-                              'processing-tasks': {'y'},
-                              'stimulus_id': 'ungraceful'},
-                             {'action': 'closing-worker',
-                              'reason': 'scheduler-remove-worker'}]}
-  Differing items:
-  {'worker-get-client': [{'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0', 'timeout': 5, 'worker': 'tcp://127.0.0.1:33363'}, {'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35233'}]} != {'worker-get-client': [{'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35233'}]}
-  
-  Full diff:
-    {
-        'all': [
-            {
-                'action': 'remove-worker',
-                'expected': True,
-                'lost-computed-tasks': set(),
-                'lost-scattered-tasks': set(),
-                'processing-tasks': {
-                    'y',
-                },
-                'stimulus_id': 'graceful',
-                'worker': 'tcp://127.0.0.1:33363',
-            },
-            {
-                'action': 'retire-workers',
-                'could-not-retire': {},
-                'retired': {
-                    'tcp://127.0.0.1:33363': 'snip',
-                },
-                'stimulus_id': 'graceful',
-            },
-            {
-                'action': 'retire-workers',
-                'could-not-retire': {
-                    'tcp://127.0.0.1:35233': 'snip',
-                },
-                'retired': {},
-                'stimulus_id': 'graceful_abort',
-            },
-            {
-                'action': 'remove-worker',
-                'expected': False,
-                'lost-computed-tasks': {
-                    'x',
-                },
-                'lost-scattered-tasks': {
-                    'z',
-                },
-                'processing-tasks': {
-                    'y',
-                },
-                'stimulus_id': 'ungraceful',
-                'worker': 'tcp://127.0.0.1:35233',
-            },
-        ],
-        'tcp://127.0.0.1:33363': [
-            {
-                'action': 'worker-status-change',
-                'prev-status': 'running',
-                'status': 'closing_gracefully',
-                'stimulus_id': 'graceful',
-            },
-            {
-                'action': 'remove-worker',
-                'expected': True,
-                'lost-computed-tasks': set(),
-                'lost-scattered-tasks': set(),
-                'processing-tasks': {
-                    'y',
-                },
-                'stimulus_id': 'graceful',
-            },
-            {
-                'action': 'retired',
-                'stimulus_id': 'graceful',
-            },
-        ],
-        'tcp://127.0.0.1:35233': [
-            {
-                'action': 'worker-status-change',
-                'prev-status': 'running',
-                'status': 'closing_gracefully',
-                'stimulus_id': 'graceful_abort',
-            },
-            {
-                'action': 'could-not-retire',
-                'stimulus_id': 'graceful_abort',
-            },
-            {
-                'action': 'worker-status-change',
-                'prev-status': 'closing_gracefully',
-                'status': 'running',
-                'stimulus_id': 'worker-status-change',
-            },
-            {
-                'action': 'remove-worker',
-                'expected': False,
-                'lost-computed-tasks': {
-                    'x',
-                },
-                'lost-scattered-tasks': {
-                    'z',
-                },
-                'processing-tasks': {
-                    'y',
-                },
-                'stimulus_id': 'ungraceful',
-            },
-            {
-                'action': 'closing-worker',
-                'reason': 'scheduler-remove-worker',
-            },
-        ],
-        'worker-get-client': [
-            {
-                'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0',
-                'timeout': 5,
-  +             'worker': 'tcp://127.0.0.1:33363',
-  +         },
-  +         {
-  +             'client': 'Client-9ec89110-cd9a-11ef-917c-e386edcdc4c0',
-  +             'timeout': 5,
-                'worker': 'tcp://127.0.0.1:35233',
-            },
-        ],
-    }
-FAILED distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill - TimeoutError
-= 6 failed, 2906 passed, 265 skipped, 222 deselected, 15 xfailed, 8 xpassed in 2153.94s (0:35:53) =
+= 2 failed, 2910 passed, 265 skipped, 222 deselected, 15 xfailed, 8 xpassed, 5 rerun in 1509.02s (0:25:09) =
 *** END OF RUN 1: NOT ALL TESTS HAVE YET PASSED/XFAILED ***
 *** STARTING RUN 2: python3.13 -m pytest --pyargs distributed --verbose --color=no --timeout-method=signal --timeout=300 -m not avoid_ci -rfE --last-failed --last-failed-no-failures none --ignore=distributed/comm/tests/test_comms.py --ignore=distributed/comm/tests/test_ws.py --ignore=distributed/deploy/tests/test_adaptive.py --ignore=distributed/deploy/tests/test_local.py --ignore=distributed/deploy/tests/test_slow_adaptive.py --ignore=distributed/deploy/tests/test_spec_cluster.py --deselect=distributed/cli/tests/test_dask_scheduler.py::test_no_dashboard --deselect=distributed/deploy/tests/test_local.py::test_localcluster_get_client --deselect=distributed/deploy/tests/test_old_ssh.py::test_cluster --deselect=distributed/deploy/tests/test_old_ssh.py::test_old_ssh_nprocs_renamed_to_n_workers --deselect=distributed/deploy/tests/test_old_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/deploy/tests/test_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/http/tests/test_core.py::test_prometheus_api_doc --deselect=distributed/tests/test_init.py::test_git_revision --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout_returned --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_jupyter.py::test_shutsdown_cleanly --deselect=distributed/tests/test_profile.py::test_stack_overflow --deselect=distributed/tests/test_pubsub.py::test_client_worker --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_spill.py::test_spillbuffer_oserror --deselect=distributed/tests/test_steal.py::test_steal_twice --deselect=distributed/tests/test_utils_test.py::test_cluster --deselect=distributed/tests/test_variable.py::test_variable_in_task --deselect=distributed/tests/test_worker.py::test_process_executor_kills_process --deselect=distributed/tests/test_worker_memory.py::test_fail_to_pickle_execute_1 --deselect=distributed/tests/test_worker_state_machine.py::test_task_state_instance_are_garbage_collected --deselect=distributed/protocol/tests/test_protocol.py::test_deeply_nested_structures --deselect=distributed/protocol/tests/test_serialize.py::test_deeply_nested_structures --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_spec.py::test_errors --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/dashboard/tests/test_scheduler_bokeh.py::test_counters --deselect=distributed/dashboard/tests/test_worker_bokeh.py::test_counters --deselect=distributed/deploy/tests/test_local.py::test_adapt_then_manual --deselect=distributed/deploy/tests/test_local.py::test_async_with --deselect=distributed/deploy/tests/test_local.py::test_close_twice --deselect=distributed/deploy/tests/test_local.py::test_cluster_info_sync --deselect=distributed/deploy/tests/test_local.py::test_local_tls --deselect=distributed/deploy/tests/test_local.py::test_no_dangling_asyncio_tasks --deselect=distributed/deploy/tests/test_local.py::test_only_local_access --deselect=distributed/deploy/tests/test_local.py::test_remote_access --deselect=distributed/diagnostics/tests/test_progress_widgets.py::test_serializers 
--deselect=distributed/diagnostics/tests/test_scheduler_plugin.py::test_lifecycle --deselect=distributed/http/scheduler/tests/test_missing_bokeh.py::test_missing_bokeh --deselect=distributed/http/scheduler/tests/test_scheduler_http.py::test_metrics_when_prometheus_client_not_installed --deselect=distributed/protocol/tests/test_serialize.py::test_errors --deselect=distributed/tests/test_batched.py::test_BatchedSend --deselect=distributed/tests/test_batched.py::test_close_closed --deselect=distributed/tests/test_batched.py::test_close_twice --deselect=distributed/tests/test_batched.py::test_send_after_stream_start --deselect=distributed/tests/test_batched.py::test_send_before_close --deselect=distributed/tests/test_batched.py::test_send_before_start --deselect=distributed/tests/test_batched.py::test_sending_traffic_jam --deselect=distributed/tests/test_batched.py::test_serializers --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_async_with --deselect=distributed/tests/test_client.py::test_client_is_quiet_cluster_close --deselect=distributed/tests/test_client.py::test_dashboard_link_cluster --deselect=distributed/tests/test_client.py::test_dashboard_link_inproc --deselect=distributed/tests/test_client.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_client.py::test_mixing_clients_different_scheduler --deselect=distributed/tests/test_client.py::test_quiet_client_close --deselect=distributed/tests/test_client.py::test_rebalance_sync --deselect=distributed/tests/test_client.py::test_repr_localcluster --deselect=distributed/tests/test_client.py::test_security_loader --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_shutdown --deselect=distributed/tests/test_client.py::test_shutdown_is_quiet_with_cluster --deselect=distributed/tests/test_client.py::test_shutdown_localcluster --deselect=distributed/tests/test_client.py::test_shutdown_stops_callbacks --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_start_new_loop --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_use_running_loop --deselect=distributed/tests/test_core.py::test_close_fast_without_active_handlers --deselect=distributed/tests/test_core.py::test_close_grace_period_for_handlers --deselect=distributed/tests/test_core.py::test_close_properly --deselect=distributed/tests/test_core.py::test_compression --deselect=distributed/tests/test_core.py::test_connection_pool --deselect=distributed/tests/test_core.py::test_connection_pool_close_while_connecting --deselect=distributed/tests/test_core.py::test_connection_pool_detects_remote_close --deselect=distributed/tests/test_core.py::test_connection_pool_outside_cancellation --deselect=distributed/tests/test_core.py::test_connection_pool_remove --deselect=distributed/tests/test_core.py::test_connection_pool_respects_limit --deselect=distributed/tests/test_core.py::test_connection_pool_tls --deselect=distributed/tests/test_core.py::test_counters --deselect=distributed/tests/test_core.py::test_deserialize_error 
--deselect=distributed/tests/test_core.py::test_errors --deselect=distributed/tests/test_core.py::test_identity_inproc --deselect=distributed/tests/test_core.py::test_identity_tcp --deselect=distributed/tests/test_core.py::test_large_packets_inproc --deselect=distributed/tests/test_core.py::test_messages_are_ordered_bsend --deselect=distributed/tests/test_core.py::test_messages_are_ordered_raw --deselect=distributed/tests/test_core.py::test_ports --deselect=distributed/tests/test_core.py::test_rpc_default --deselect=distributed/tests/test_core.py::test_rpc_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_default --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_tcp --deselect=distributed/tests/test_core.py::test_rpc_serialization --deselect=distributed/tests/test_core.py::test_rpc_tcp --deselect=distributed/tests/test_core.py::test_rpc_tls --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_inproc --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_tcp --deselect=distributed/tests/test_core.py::test_send_recv_args --deselect=distributed/tests/test_core.py::test_send_recv_cancelled --deselect=distributed/tests/test_core.py::test_server --deselect=distributed/tests/test_core.py::test_server_comms_mark_active_handlers --deselect=distributed/tests/test_core.py::test_server_raises_on_blocked_handlers --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_locks.py::test_errors --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_worker_uses_same_host_as_nanny --deselect=distributed/tests/test_preload.py::test_failure_doesnt_crash_scheduler --deselect=distributed/tests/test_preload.py::test_preload_import_time --deselect=distributed/tests/test_preload.py::test_preload_manager_sequence --deselect=distributed/tests/test_preload.py::test_worker_preload_text --deselect=distributed/tests/test_scheduler.py::test_allowed_failures_config --deselect=distributed/tests/test_scheduler.py::test_async_context_manager --deselect=distributed/tests/test_scheduler.py::test_dashboard_host --deselect=distributed/tests/test_scheduler.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_scheduler.py::test_finished --deselect=distributed/tests/test_scheduler.py::test_multiple_listeners --deselect=distributed/tests/test_scheduler.py::test_no_dangling_asyncio_tasks --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_security.py::test_require_encryption --deselect=distributed/tests/test_security.py::test_tls_listen_connect --deselect=distributed/tests/test_security.py::test_tls_temporary_credentials_functional --deselect=distributed/tests/test_semaphore.py::test_threadpoolworkers_pick_correct_ioloop --deselect=distributed/tests/test_tls_functional.py::test_security_dict_input_no_security --deselect=distributed/tests/test_utils_test.py::test_ensure_no_new_clients --deselect=distributed/tests/test_utils_test.py::test_freeze_batched_send 
--deselect=distributed/tests/test_utils_test.py::test_locked_comm_drop_in_replacement --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_read --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_write --deselect=distributed/tests/test_worker.py::test_host_uses_scheduler_protocol --deselect=distributed/tests/test_worker.py::test_plugin_exception --deselect=distributed/tests/test_worker.py::test_plugin_internal_exception --deselect=distributed/tests/test_worker.py::test_plugin_multiple_exceptions --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker_client.py::test_dont_override_default_get --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_allowlist --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_protocols --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers_2 --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command_default --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_config --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_file --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_remote_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_scheduler_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_contact_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_error_during_startup --deselect=distributed/cli/tests/test_dask_worker.py::test_integer_names --deselect=distributed/cli/tests/test_dask_worker.py::test_listen_address_ipv6 --deselect=distributed/cli/tests/test_dask_worker.py::test_local_directory --deselect=distributed/cli/tests/test_dask_worker.py::test_memory_limit --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range_too_many_workers_raises --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_no_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_auto --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_expands_name --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_negative --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_requires_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_preload_config --deselect=distributed/cli/tests/test_dask_worker.py::test_resources 
--deselect=distributed/cli/tests/test_dask_worker.py::test_respect_host_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_address_env --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_restart_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_stagger_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_signal_handling --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_works --deselect=distributed/cli/tests/test_dask_worker.py::test_timeout --deselect=distributed/cli/tests/test_dask_worker.py::test_worker_class --deselect=distributed/tests/test_config.py::test_logging_extended --deselect=distributed/tests/test_config.py::test_logging_file_config --deselect=distributed/tests/test_config.py::test_logging_mutual_exclusive --deselect=distributed/tests/test_config.py::test_logging_simple --deselect=distributed/tests/test_config.py::test_logging_simple_under_distributed --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_variable.py::test_variable_in_task 
 ============================= test session starts ==============================
@@ -17418,20 +14765,16 @@
 timeout: 300.0s
 timeout method: signal
 timeout func_only: False
-collecting ... collected 6 items
-run-last-failure: rerun previous 6 failures (skipped 144 files)
+collecting ... collected 2 items
+run-last-failure: rerun previous 2 failures (skipped 147 files)
 
-distributed/diagnostics/tests/test_task_stream.py::test_client_sync PASSED [ 16%]
-distributed/tests/test_priorities.py::test_last_in_first_out[queue on worker] PASSED [ 33%]
-distributed/tests/test_tls_functional.py::test_nanny FAILED              [ 50%]
-distributed/tests/test_tls_functional.py::test_retire_workers PASSED     [ 66%]
-distributed/tests/test_worker.py::test_log_remove_worker FAILED          [ 83%]
-distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill PASSED [100%]
+distributed/tests/test_steal.py::test_steal_when_more_tasks PASSED       [ 50%]
+distributed/tests/test_tls_functional.py::test_retire_workers FAILED     [100%]
 
 =================================== FAILURES ===================================
-__________________________________ test_nanny __________________________________
+_____________________________ test_retire_workers ______________________________
 
-fut = <coroutine object Nanny.start_unsafe at 0xffff7e16dbd0>, timeout = 0
+fut = <coroutine object Nanny.start_unsafe at 0xffff7ec81690>, timeout = 0
 
     async def wait_for(fut: Awaitable[T], timeout: float) -> T:
         async with asyncio.timeout(timeout):
@@ -17453,10 +14796,10 @@
     stream = await self.client.connect(
 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
 
-self = <tornado.tcpclient.TCPClient object at 0xffff86998590>
-host = '127.0.0.1', port = 40993, af = <AddressFamily.AF_UNSPEC: 0>
-ssl_options = <ssl.SSLContext object at 0xffff7dd81400>
-max_buffer_size = 31544635392.0, source_ip = None, source_port = None
+self = <tornado.tcpclient.TCPClient object at 0xffff86d08590>
+host = '127.0.0.1', port = 44443, af = <AddressFamily.AF_UNSPEC: 0>
+ssl_options = <ssl.SSLContext object at 0xffff7e15aba0>
+max_buffer_size = 31544629248.0, source_ip = None, source_port = None
 timeout = None
 
     async def connect(
@@ -17552,7 +14895,7 @@
 
 self = <Timeout [expired]>
 exc_type = <class 'asyncio.exceptions.CancelledError'>
-exc_val = CancelledError(), exc_tb = <traceback object at 0xffff7dcc5600>
+exc_val = CancelledError(), exc_tb = <traceback object at 0xffff7e039000>
 
     async def __aexit__(
         self,
@@ -17637,7 +14980,7 @@
 
 During handling of the above exception, another exception occurred:
 
-fut = <coroutine object gen_cluster.<locals>._.<locals>.test_func.<locals>.async_fn at 0xffff7eb97680>
+fut = <coroutine object gen_cluster.<locals>._.<locals>.test_func.<locals>.async_fn at 0xffff856443c0>
 timeout = 60
 
     async def wait_for(fut: Awaitable[T], timeout: float) -> T:
@@ -17709,7 +15052,7 @@
 
 self = <Timeout [expired]>
 exc_type = <class 'asyncio.exceptions.CancelledError'>
-exc_val = CancelledError(), exc_tb = <traceback object at 0xffff852bda80>
+exc_val = CancelledError(), exc_tb = <traceback object at 0xffff85417dc0>
 
     async def __aexit__(
         self,
@@ -17735,46 +15078,46 @@
 
 /usr/lib/python3.13/asyncio/timeouts.py:116: TimeoutError
 ----------------------------- Captured stderr call -----------------------------
-2025-01-07 20:32:29,404 - distributed.scheduler - INFO - State start
-2025-01-07 20:32:29,417 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:35493
-2025-01-07 20:32:29,417 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46883/status
-2025-01-07 20:32:29,417 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:32:29,484 - distributed.nanny - INFO -         Start Nanny at: 'tls://127.0.0.1:36443'
-2025-01-07 20:32:29,525 - distributed.nanny - INFO -         Start Nanny at: 'tls://127.0.0.1:45355'
-2025-01-07 20:32:30,383 - distributed.worker - INFO -       Start worker at:      tls://127.0.0.1:39807
-2025-01-07 20:32:30,383 - distributed.worker - INFO -          Listening to:      tls://127.0.0.1:39807
-2025-01-07 20:32:30,383 - distributed.worker - INFO -           Worker name:                          1
-2025-01-07 20:32:30,383 - distributed.worker - INFO -          dashboard at:            127.0.0.1:46223
-2025-01-07 20:32:30,383 - distributed.worker - INFO - Waiting to connect to:      tls://127.0.0.1:35493
-2025-01-07 20:32:30,383 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:32:30,383 - distributed.worker - INFO -               Threads:                          2
-2025-01-07 20:32:30,383 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:32:30,383 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-bsibh5_2
-2025-01-07 20:32:30,383 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:32:30,572 - distributed.worker - INFO -       Start worker at:      tls://127.0.0.1:43515
-2025-01-07 20:32:30,572 - distributed.worker - INFO -          Listening to:      tls://127.0.0.1:43515
-2025-01-07 20:32:30,577 - distributed.worker - INFO -           Worker name:                          0
-2025-01-07 20:32:30,577 - distributed.worker - INFO -          dashboard at:            127.0.0.1:35809
-2025-01-07 20:32:30,577 - distributed.worker - INFO - Waiting to connect to:      tls://127.0.0.1:35493
-2025-01-07 20:32:30,577 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:32:30,577 - distributed.worker - INFO -               Threads:                          1
-2025-01-07 20:32:30,577 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:32:30,577 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-wxeihcg6
-2025-01-07 20:32:30,577 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:32:30,695 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:39807 name: 1
-2025-01-07 20:32:30,696 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:39807
-2025-01-07 20:32:30,696 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:38426
-2025-01-07 20:32:30,697 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:32:30,698 - distributed.worker - INFO -         Registered to:      tls://127.0.0.1:35493
-2025-01-07 20:32:30,698 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:32:30,721 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:35493
-2025-01-07 20:32:31,082 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:43515 name: 0
-2025-01-07 20:32:31,083 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:43515
-2025-01-07 20:32:31,083 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:38438
-2025-01-07 20:32:31,083 - distributed.core - INFO - Connection to tls://127.0.0.1:38438 has been closed.
-2025-01-07 20:32:31,083 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:43515 name: 0 (stimulus_id='handle-worker-cleanup-1736325151.0837178')
-2025-01-07 20:32:31,085 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:32:31,086 - distributed.batched - INFO - Batched Comm Closed <TLS (closed) Worker->Scheduler local=tls://127.0.0.1:38438 remote=tls://127.0.0.1:35493>
+2026-02-11 05:48:59,654 - distributed.scheduler - INFO - State start
+2026-02-11 05:48:59,662 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33105
+2026-02-11 05:48:59,662 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36719/status
+2026-02-11 05:48:59,662 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:48:59,733 - distributed.nanny - INFO -         Start Nanny at: 'tls://127.0.0.1:37585'
+2026-02-11 05:48:59,791 - distributed.nanny - INFO -         Start Nanny at: 'tls://127.0.0.1:40567'
+2026-02-11 05:49:00,425 - distributed.worker - INFO -       Start worker at:      tls://127.0.0.1:45521
+2026-02-11 05:49:00,425 - distributed.worker - INFO -          Listening to:      tls://127.0.0.1:45521
+2026-02-11 05:49:00,425 - distributed.worker - INFO -           Worker name:                          1
+2026-02-11 05:49:00,425 - distributed.worker - INFO -          dashboard at:            127.0.0.1:45483
+2026-02-11 05:49:00,425 - distributed.worker - INFO - Waiting to connect to:      tls://127.0.0.1:33105
+2026-02-11 05:49:00,425 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:49:00,425 - distributed.worker - INFO -               Threads:                          2
+2026-02-11 05:49:00,425 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:49:00,425 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-d28duyfh
+2026-02-11 05:49:00,425 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:49:00,638 - distributed.worker - INFO -       Start worker at:      tls://127.0.0.1:36841
+2026-02-11 05:49:00,639 - distributed.worker - INFO -          Listening to:      tls://127.0.0.1:36841
+2026-02-11 05:49:00,639 - distributed.worker - INFO -           Worker name:                          0
+2026-02-11 05:49:00,639 - distributed.worker - INFO -          dashboard at:            127.0.0.1:37785
+2026-02-11 05:49:00,639 - distributed.worker - INFO - Waiting to connect to:      tls://127.0.0.1:33105
+2026-02-11 05:49:00,639 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:49:00,639 - distributed.worker - INFO -               Threads:                          1
+2026-02-11 05:49:00,639 - distributed.worker - INFO -                Memory:                  58.76 GiB
+2026-02-11 05:49:00,639 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-_og2vmzl
+2026-02-11 05:49:00,639 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:49:00,711 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:45521 name: 1
+2026-02-11 05:49:00,712 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:45521
+2026-02-11 05:49:00,712 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:37812
+2026-02-11 05:49:00,713 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:49:00,714 - distributed.worker - INFO -         Registered to:      tls://127.0.0.1:33105
+2026-02-11 05:49:00,714 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:49:00,722 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:33105
+2026-02-11 05:49:00,926 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:36841 name: 0
+2026-02-11 05:49:00,927 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:36841
+2026-02-11 05:49:00,927 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:37822
+2026-02-11 05:49:00,927 - distributed.core - INFO - Connection to tls://127.0.0.1:37822 has been closed.
+2026-02-11 05:49:00,927 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:36841 name: 0 (stimulus_id='handle-worker-cleanup-1770738540.9278321')
+2026-02-11 05:49:00,928 - distributed.worker - INFO - Starting Worker plugin shuffle
+2026-02-11 05:49:00,929 - distributed.batched - INFO - Batched Comm Closed <TLS (closed) Worker->Scheduler local=tls://127.0.0.1:37822 remote=tls://127.0.0.1:33105>
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 297, in write
     raise StreamClosedError()
@@ -17793,31 +15136,30 @@
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 142, in convert_stream_closed_error
     raise CommClosedError(f"in {obj}: {exc}") from exc
-distributed.comm.core.CommClosedError: in <TLS (closed) Worker->Scheduler local=tls://127.0.0.1:38438 remote=tls://127.0.0.1:35493>: Stream is closed
-2025-01-07 20:32:31,088 - distributed.worker - INFO -         Registered to:      tls://127.0.0.1:35493
-2025-01-07 20:32:31,088 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:32:31,089 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:35493
-2025-01-07 20:32:31,089 - distributed.core - INFO - Connection to tls://127.0.0.1:35493 has been closed.
-2025-01-07 20:32:31,089 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:43515. Reason: worker-handle-scheduler-connection-broken
-2025-01-07 20:32:31,129 - distributed.nanny - INFO - Closing Nanny gracefully at 'tls://127.0.0.1:36443'. Reason: worker-handle-scheduler-connection-broken
-2025-01-07 20:32:31,131 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:32:31,149 - distributed.nanny - INFO - Worker closed
-2025-01-07 20:32:33,166 - distributed.nanny - ERROR - Worker process died unexpectedly
-2025-01-07 20:32:33,585 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36443'. Reason: nanny-close-gracefully
-2025-01-07 20:32:33,585 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36443' closed.
-2025-01-07 20:33:01,141 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45355'. Reason: nanny-close
-2025-01-07 20:33:01,141 - distributed.nanny - INFO - Nanny asking worker to close. Reason: nanny-close
-2025-01-07 20:33:01,145 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:39807. Reason: nanny-close
-2025-01-07 20:33:01,145 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:33:01,147 - distributed.core - INFO - Connection to tls://127.0.0.1:35493 has been closed.
-2025-01-07 20:33:01,148 - distributed.core - INFO - Received 'close-stream' from tls://127.0.0.1:38426; closing.
-2025-01-07 20:33:01,149 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:39807 name: 1 (stimulus_id='handle-worker-cleanup-1736325181.1493616')
-2025-01-07 20:33:01,149 - distributed.scheduler - INFO - Lost all workers
-2025-01-07 20:33:01,153 - distributed.nanny - INFO - Worker closed
-2025-01-07 20:33:01,559 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45355' closed.
-2025-01-07 20:33:01,560 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
-2025-01-07 20:33:01,560 - distributed.scheduler - INFO - Scheduler closing all comms
-2025-01-07 20:33:01,560 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Cluster creation timeout; retrying
+distributed.comm.core.CommClosedError: in <TLS (closed) Worker->Scheduler local=tls://127.0.0.1:37822 remote=tls://127.0.0.1:33105>: Stream is closed
+2026-02-11 05:49:00,931 - distributed.worker - INFO -         Registered to:      tls://127.0.0.1:33105
+2026-02-11 05:49:00,931 - distributed.worker - INFO - -------------------------------------------------
+2026-02-11 05:49:00,933 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:33105
+2026-02-11 05:49:00,933 - distributed.core - INFO - Connection to tls://127.0.0.1:33105 has been closed.
+2026-02-11 05:49:00,933 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:36841. Reason: worker-handle-scheduler-connection-broken
+2026-02-11 05:49:00,984 - distributed.nanny - INFO - Closing Nanny gracefully at 'tls://127.0.0.1:37585'. Reason: worker-handle-scheduler-connection-broken
+2026-02-11 05:49:00,987 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:49:01,013 - distributed.nanny - INFO - Worker closed
+2026-02-11 05:49:03,387 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37585'. Reason: nanny-close-gracefully
+2026-02-11 05:49:03,388 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37585' closed.
+2026-02-11 05:49:30,987 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40567'. Reason: nanny-close
+2026-02-11 05:49:30,987 - distributed.nanny - INFO - Nanny asking worker to close. Reason: nanny-close
+2026-02-11 05:49:30,991 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:45521. Reason: nanny-close
+2026-02-11 05:49:30,992 - distributed.worker - INFO - Removing Worker plugin shuffle
+2026-02-11 05:49:30,993 - distributed.core - INFO - Connection to tls://127.0.0.1:33105 has been closed.
+2026-02-11 05:49:30,995 - distributed.core - INFO - Received 'close-stream' from tls://127.0.0.1:37812; closing.
+2026-02-11 05:49:30,995 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:45521 name: 1 (stimulus_id='handle-worker-cleanup-1770738570.9957998')
+2026-02-11 05:49:30,996 - distributed.scheduler - INFO - Lost all workers
+2026-02-11 05:49:31,004 - distributed.nanny - INFO - Worker closed
+2026-02-11 05:49:31,632 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40567' closed.
+2026-02-11 05:49:31,632 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
+2026-02-11 05:49:31,632 - distributed.scheduler - INFO - Scheduler closing all comms
+2026-02-11 05:49:31,632 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Cluster creation timeout; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
     s, ws = await start_cluster(
@@ -17828,17 +15170,17 @@
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 806, in start_cluster
     raise TimeoutError("Cluster creation timeout")
 TimeoutError: Cluster creation timeout
-2025-01-07 20:33:02,566 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:02,571 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:34627
-2025-01-07 20:33:02,571 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:45053/status
-2025-01-07 20:33:02,571 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:02,588 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45127'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:02,588 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45127' closed.
-2025-01-07 20:33:02,588 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33925'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:02,588 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33925' closed.
-2025-01-07 20:33:02,594 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34627': TLS handshake failed with remote 'tls://127.0.0.1:47624': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:02,594 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34627': TLS handshake failed with remote 'tls://127.0.0.1:47628': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:02,594 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:32,637 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:32,648 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:36737
+2026-02-11 05:49:32,649 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:40195/status
+2026-02-11 05:49:32,649 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:32,665 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40125'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:32,666 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40125' closed.
+2026-02-11 05:49:32,666 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41889'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:32,666 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41889' closed.
+2026-02-11 05:49:32,666 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36737': TLS handshake failed with remote 'tls://127.0.0.1:44666': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:32,666 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36737': TLS handshake failed with remote 'tls://127.0.0.1:44678': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:32,666 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -17907,17 +15249,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:03,611 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:03,614 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37349
-2025-01-07 20:33:03,615 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34537/status
-2025-01-07 20:33:03,615 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:03,624 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35669'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:03,624 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35669' closed.
-2025-01-07 20:33:03,624 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38217'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:03,624 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38217' closed.
-2025-01-07 20:33:03,629 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37349': TLS handshake failed with remote 'tls://127.0.0.1:52246': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:03,629 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37349': TLS handshake failed with remote 'tls://127.0.0.1:52254': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:03,629 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:33,677 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:33,686 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:44819
+2026-02-11 05:49:33,686 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35481/status
+2026-02-11 05:49:33,686 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:33,703 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42699'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:33,703 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42699' closed.
+2026-02-11 05:49:33,703 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42051'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:33,704 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42051' closed.
+2026-02-11 05:49:33,704 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44819': TLS handshake failed with remote 'tls://127.0.0.1:46432': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:33,704 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44819': TLS handshake failed with remote 'tls://127.0.0.1:46436': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:33,704 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -17986,17 +15328,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:04,664 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:04,672 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:36657
-2025-01-07 20:33:04,685 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35959/status
-2025-01-07 20:33:04,685 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:04,710 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46493'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:04,710 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46493' closed.
-2025-01-07 20:33:04,710 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39979'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:04,710 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39979' closed.
-2025-01-07 20:33:04,711 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36657': TLS handshake failed with remote 'tls://127.0.0.1:33748': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:04,711 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36657': TLS handshake failed with remote 'tls://127.0.0.1:33758': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:04,711 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:34,714 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:34,720 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:38459
+2026-02-11 05:49:34,720 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46513/status
+2026-02-11 05:49:34,720 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:34,741 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46043'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:34,741 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46043' closed.
+2026-02-11 05:49:34,741 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38887'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:34,741 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38887' closed.
+2026-02-11 05:49:34,741 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38459': TLS handshake failed with remote 'tls://127.0.0.1:34166': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:34,742 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38459': TLS handshake failed with remote 'tls://127.0.0.1:34176': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:34,742 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -18065,17 +15407,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:05,726 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:05,734 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:44487
-2025-01-07 20:33:05,734 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36853/status
-2025-01-07 20:33:05,734 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:05,771 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40607'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:05,772 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40607' closed.
-2025-01-07 20:33:05,772 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41823'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:05,772 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41823' closed.
-2025-01-07 20:33:05,772 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44487': TLS handshake failed with remote 'tls://127.0.0.1:40232': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:05,777 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44487': TLS handshake failed with remote 'tls://127.0.0.1:40242': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:05,777 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:35,754 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:35,757 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:38903
+2026-02-11 05:49:35,758 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35501/status
+2026-02-11 05:49:35,758 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:35,778 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35427'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:35,778 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35427' closed.
+2026-02-11 05:49:35,778 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35071'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:35,778 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35071' closed.
+2026-02-11 05:49:35,783 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38903': TLS handshake failed with remote 'tls://127.0.0.1:59432': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:35,783 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38903': TLS handshake failed with remote 'tls://127.0.0.1:59440': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:35,783 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -18144,17 +15486,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:06,782 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:06,798 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:38635
-2025-01-07 20:33:06,798 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:38381/status
-2025-01-07 20:33:06,798 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:06,842 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40175'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:06,842 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40175' closed.
-2025-01-07 20:33:06,842 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41299'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:06,843 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41299' closed.
-2025-01-07 20:33:06,845 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38635': TLS handshake failed with remote 'tls://127.0.0.1:52088': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:06,846 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38635': TLS handshake failed with remote 'tls://127.0.0.1:52090': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:06,846 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:36,792 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:36,813 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:41817
+2026-02-11 05:49:36,814 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36149/status
+2026-02-11 05:49:36,814 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:36,830 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46215'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:36,830 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46215' closed.
+2026-02-11 05:49:36,831 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43291'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:36,831 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43291' closed.
+2026-02-11 05:49:36,836 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41817': TLS handshake failed with remote 'tls://127.0.0.1:33818': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:36,836 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41817': TLS handshake failed with remote 'tls://127.0.0.1:33830': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:36,836 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -18223,17 +15565,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:07,855 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:07,858 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:34131
-2025-01-07 20:33:07,859 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35661/status
-2025-01-07 20:33:07,859 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:07,876 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39977'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:07,876 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39977' closed.
-2025-01-07 20:33:07,876 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:32943'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:07,876 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:32943' closed.
-2025-01-07 20:33:07,881 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34131': TLS handshake failed with remote 'tls://127.0.0.1:47584': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:07,881 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34131': TLS handshake failed with remote 'tls://127.0.0.1:47590': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:07,881 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:37,840 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:37,848 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:41317
+2026-02-11 05:49:37,848 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37749/status
+2026-02-11 05:49:37,848 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:37,865 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34729'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:37,865 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34729' closed.
+2026-02-11 05:49:37,865 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33059'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:37,865 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33059' closed.
+2026-02-11 05:49:37,866 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41317': TLS handshake failed with remote 'tls://127.0.0.1:42016': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:37,866 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41317': TLS handshake failed with remote 'tls://127.0.0.1:42032': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:37,866 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -18302,17 +15644,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:08,886 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:08,895 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40645
-2025-01-07 20:33:08,895 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35259/status
-2025-01-07 20:33:08,896 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:08,917 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42127'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:08,917 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42127' closed.
-2025-01-07 20:33:08,917 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36421'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:08,918 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36421' closed.
-2025-01-07 20:33:08,918 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40645': TLS handshake failed with remote 'tls://127.0.0.1:59632': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:08,918 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40645': TLS handshake failed with remote 'tls://127.0.0.1:59638': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:08,918 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:38,876 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:38,883 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:41879
+2026-02-11 05:49:38,883 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:32885/status
+2026-02-11 05:49:38,884 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:38,900 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41485'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:38,900 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41485' closed.
+2026-02-11 05:49:38,900 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36859'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:38,900 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36859' closed.
+2026-02-11 05:49:38,901 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41879': TLS handshake failed with remote 'tls://127.0.0.1:43934': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:38,901 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41879': TLS handshake failed with remote 'tls://127.0.0.1:43950': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:38,901 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -18381,17 +15723,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:09,926 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:09,929 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33721
-2025-01-07 20:33:09,930 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36545/status
-2025-01-07 20:33:09,930 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:09,946 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41673'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:09,947 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41673' closed.
-2025-01-07 20:33:09,947 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44575'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:09,947 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44575' closed.
-2025-01-07 20:33:09,947 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33721': TLS handshake failed with remote 'tls://127.0.0.1:46876': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:09,947 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33721': TLS handshake failed with remote 'tls://127.0.0.1:46888': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:09,947 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:39,917 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:39,924 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:35919
+2026-02-11 05:49:39,925 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:40067/status
+2026-02-11 05:49:39,925 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:39,938 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38619'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:39,938 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38619' closed.
+2026-02-11 05:49:39,939 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42723'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:39,939 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42723' closed.
+2026-02-11 05:49:39,947 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35919': TLS handshake failed with remote 'tls://127.0.0.1:47168': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:39,948 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35919': TLS handshake failed with remote 'tls://127.0.0.1:47176': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:39,948 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -18460,17 +15802,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:10,962 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:10,976 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:35555
-2025-01-07 20:33:10,976 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:39401/status
-2025-01-07 20:33:10,985 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:11,010 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33963'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:11,010 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33963' closed.
-2025-01-07 20:33:11,011 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39863'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:11,011 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39863' closed.
-2025-01-07 20:33:11,011 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35555': TLS handshake failed with remote 'tls://127.0.0.1:39704': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:11,011 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35555': TLS handshake failed with remote 'tls://127.0.0.1:39710': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:11,011 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:40,953 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:40,960 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:35547
+2026-02-11 05:49:40,961 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:39005/status
+2026-02-11 05:49:40,961 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:40,984 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40809'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:40,984 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40809' closed.
+2026-02-11 05:49:40,984 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40521'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:40,984 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40521' closed.
+2026-02-11 05:49:40,985 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35547': TLS handshake failed with remote 'tls://127.0.0.1:50298': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:40,985 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35547': TLS handshake failed with remote 'tls://127.0.0.1:50314': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:40,985 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -18539,17 +15881,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:12,038 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:12,042 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:41443
-2025-01-07 20:33:12,042 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35063/status
-2025-01-07 20:33:12,042 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:12,059 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41747'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:12,059 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41747' closed.
-2025-01-07 20:33:12,059 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45139'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:12,060 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45139' closed.
-2025-01-07 20:33:12,060 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41443': TLS handshake failed with remote 'tls://127.0.0.1:44692': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:12,060 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41443': TLS handshake failed with remote 'tls://127.0.0.1:44704': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:12,060 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:41,997 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:42,000 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:46093
+2026-02-11 05:49:42,000 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46481/status
+2026-02-11 05:49:42,001 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:42,017 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45689'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:42,018 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45689' closed.
+2026-02-11 05:49:42,018 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43625'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:42,018 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43625' closed.
+2026-02-11 05:49:42,018 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46093': TLS handshake failed with remote 'tls://127.0.0.1:57810': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:42,018 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46093': TLS handshake failed with remote 'tls://127.0.0.1:57822': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:42,018 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -18618,17 +15960,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:13,074 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:13,083 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:35729
-2025-01-07 20:33:13,083 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:38815/status
-2025-01-07 20:33:13,084 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:13,103 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37555'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:13,103 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37555' closed.
-2025-01-07 20:33:13,103 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43091'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:13,103 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43091' closed.
-2025-01-07 20:33:13,104 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35729': TLS handshake failed with remote 'tls://127.0.0.1:37614': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:13,104 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35729': TLS handshake failed with remote 'tls://127.0.0.1:37622': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:13,104 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:43,032 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:43,040 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:45055
+2026-02-11 05:49:43,040 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41511/status
+2026-02-11 05:49:43,041 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:43,060 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39879'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:43,060 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39879' closed.
+2026-02-11 05:49:43,060 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36849'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:43,060 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36849' closed.
+2026-02-11 05:49:43,062 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45055': TLS handshake failed with remote 'tls://127.0.0.1:33630': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:43,062 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45055': TLS handshake failed with remote 'tls://127.0.0.1:33636': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:43,062 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -18697,17 +16039,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:14,115 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:14,130 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33487
-2025-01-07 20:33:14,130 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36705/status
-2025-01-07 20:33:14,131 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:14,164 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39285'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:14,181 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39285' closed.
-2025-01-07 20:33:14,181 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46743'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:14,181 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46743' closed.
-2025-01-07 20:33:14,182 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33487': TLS handshake failed with remote 'tls://127.0.0.1:39316': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:14,182 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33487': TLS handshake failed with remote 'tls://127.0.0.1:39324': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:14,182 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:44,070 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:44,076 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:44101
+2026-02-11 05:49:44,077 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41703/status
+2026-02-11 05:49:44,077 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:44,092 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34331'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:44,093 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34331' closed.
+2026-02-11 05:49:44,093 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36457'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:44,093 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36457' closed.
+2026-02-11 05:49:44,094 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44101': TLS handshake failed with remote 'tls://127.0.0.1:49122': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:44,094 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44101': TLS handshake failed with remote 'tls://127.0.0.1:49126': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:44,094 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -18776,17 +16118,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:15,186 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:15,199 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43531
-2025-01-07 20:33:15,200 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43357/status
-2025-01-07 20:33:15,200 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:15,242 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39341'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:15,242 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39341' closed.
-2025-01-07 20:33:15,242 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40169'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:15,242 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40169' closed.
-2025-01-07 20:33:15,244 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43531': TLS handshake failed with remote 'tls://127.0.0.1:59170': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:15,244 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43531': TLS handshake failed with remote 'tls://127.0.0.1:59186': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:15,244 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:45,102 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:45,109 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:42595
+2026-02-11 05:49:45,109 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43067/status
+2026-02-11 05:49:45,110 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:45,127 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44381'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:45,127 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44381' closed.
+2026-02-11 05:49:45,127 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44353'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:45,127 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44353' closed.
+2026-02-11 05:49:45,128 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42595': TLS handshake failed with remote 'tls://127.0.0.1:42322': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:45,129 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42595': TLS handshake failed with remote 'tls://127.0.0.1:42338': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:45,129 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -18855,17 +16197,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:16,262 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:16,282 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:36501
-2025-01-07 20:33:16,282 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41159/status
-2025-01-07 20:33:16,282 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:16,324 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35583'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:16,333 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35583' closed.
-2025-01-07 20:33:16,333 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:32951'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:16,333 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:32951' closed.
-2025-01-07 20:33:16,333 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36501': TLS handshake failed with remote 'tls://127.0.0.1:55224': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:16,334 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36501': TLS handshake failed with remote 'tls://127.0.0.1:55226': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:16,334 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:46,139 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:46,145 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40681
+2026-02-11 05:49:46,146 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43651/status
+2026-02-11 05:49:46,146 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:46,163 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35989'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:46,163 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35989' closed.
+2026-02-11 05:49:46,164 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35273'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:46,164 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35273' closed.
+2026-02-11 05:49:46,165 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40681': TLS handshake failed with remote 'tls://127.0.0.1:45994': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:46,165 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40681': TLS handshake failed with remote 'tls://127.0.0.1:45996': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:46,165 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -18934,17 +16276,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:17,338 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:17,348 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37595
-2025-01-07 20:33:17,348 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46103/status
-2025-01-07 20:33:17,348 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:17,379 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37403'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:17,379 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37403' closed.
-2025-01-07 20:33:17,379 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38805'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:17,379 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38805' closed.
-2025-01-07 20:33:17,380 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37595': TLS handshake failed with remote 'tls://127.0.0.1:49494': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:17,380 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37595': TLS handshake failed with remote 'tls://127.0.0.1:49510': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:17,380 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:47,175 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:47,181 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40483
+2026-02-11 05:49:47,181 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46663/status
+2026-02-11 05:49:47,182 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:47,201 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46031'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:47,201 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46031' closed.
+2026-02-11 05:49:47,201 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42705'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:47,201 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42705' closed.
+2026-02-11 05:49:47,202 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40483': TLS handshake failed with remote 'tls://127.0.0.1:55088': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:47,202 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40483': TLS handshake failed with remote 'tls://127.0.0.1:55094': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:47,203 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -19013,17 +16355,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:18,394 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:18,398 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:44023
-2025-01-07 20:33:18,398 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37955/status
-2025-01-07 20:33:18,398 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:18,424 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40203'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:18,424 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40203' closed.
-2025-01-07 20:33:18,424 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33333'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:18,431 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33333' closed.
-2025-01-07 20:33:18,432 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44023': TLS handshake failed with remote 'tls://127.0.0.1:60076': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:18,432 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44023': TLS handshake failed with remote 'tls://127.0.0.1:60078': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:18,432 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:48,211 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:48,217 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:38109
+2026-02-11 05:49:48,218 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37267/status
+2026-02-11 05:49:48,218 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:48,235 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33175'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:48,236 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33175' closed.
+2026-02-11 05:49:48,236 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45659'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:48,236 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45659' closed.
+2026-02-11 05:49:48,237 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38109': TLS handshake failed with remote 'tls://127.0.0.1:59586': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:48,237 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38109': TLS handshake failed with remote 'tls://127.0.0.1:59596': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:48,237 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -19092,17 +16434,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:19,442 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:19,447 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:42061
-2025-01-07 20:33:19,447 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46319/status
-2025-01-07 20:33:19,447 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:19,473 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37899'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:19,474 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37899' closed.
-2025-01-07 20:33:19,474 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39107'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:19,474 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39107' closed.
-2025-01-07 20:33:19,474 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42061': TLS handshake failed with remote 'tls://127.0.0.1:46212': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:19,474 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42061': TLS handshake failed with remote 'tls://127.0.0.1:46216': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:19,474 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:49,245 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:49,252 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:39431
+2026-02-11 05:49:49,253 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:44689/status
+2026-02-11 05:49:49,253 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:49,276 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37061'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:49,276 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37061' closed.
+2026-02-11 05:49:49,276 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40287'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:49,276 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40287' closed.
+2026-02-11 05:49:49,277 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39431': TLS handshake failed with remote 'tls://127.0.0.1:37918': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:49,277 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39431': TLS handshake failed with remote 'tls://127.0.0.1:37930': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:49,277 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -19171,17 +16513,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:20,490 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:20,501 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33467
-2025-01-07 20:33:20,502 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34137/status
-2025-01-07 20:33:20,503 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:20,528 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41973'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:20,528 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41973' closed.
-2025-01-07 20:33:20,528 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39683'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:20,537 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39683' closed.
-2025-01-07 20:33:20,537 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33467': TLS handshake failed with remote 'tls://127.0.0.1:60344': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:20,537 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33467': TLS handshake failed with remote 'tls://127.0.0.1:60346': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:20,538 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:50,289 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:50,292 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:35629
+2026-02-11 05:49:50,293 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46555/status
+2026-02-11 05:49:50,293 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:50,310 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42425'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:50,310 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42425' closed.
+2026-02-11 05:49:50,310 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34075'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:50,310 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34075' closed.
+2026-02-11 05:49:50,319 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35629': TLS handshake failed with remote 'tls://127.0.0.1:46756': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:50,319 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35629': TLS handshake failed with remote 'tls://127.0.0.1:46760': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:50,320 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -19250,17 +16592,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:21,546 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:21,560 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:39041
-2025-01-07 20:33:21,565 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43233/status
-2025-01-07 20:33:21,566 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:21,584 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43569'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:21,589 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43569' closed.
-2025-01-07 20:33:21,589 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45191'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:21,589 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45191' closed.
-2025-01-07 20:33:21,590 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39041': TLS handshake failed with remote 'tls://127.0.0.1:39206': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:21,590 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39041': TLS handshake failed with remote 'tls://127.0.0.1:39208': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:21,590 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:51,325 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:51,332 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40043
+2026-02-11 05:49:51,333 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43899/status
+2026-02-11 05:49:51,333 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:51,351 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34965'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:51,352 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34965' closed.
+2026-02-11 05:49:51,352 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45033'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:51,352 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45033' closed.
+2026-02-11 05:49:51,355 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40043': TLS handshake failed with remote 'tls://127.0.0.1:42948': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:51,355 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40043': TLS handshake failed with remote 'tls://127.0.0.1:42958': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:51,355 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -19329,17 +16671,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:22,596 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:22,603 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43989
-2025-01-07 20:33:22,604 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36209/status
-2025-01-07 20:33:22,604 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:22,646 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34045'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:22,646 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34045' closed.
-2025-01-07 20:33:22,646 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34909'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:22,646 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34909' closed.
-2025-01-07 20:33:22,647 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43989': TLS handshake failed with remote 'tls://127.0.0.1:60976': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:22,647 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43989': TLS handshake failed with remote 'tls://127.0.0.1:60990': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:22,648 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:52,364 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:52,371 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37619
+2026-02-11 05:49:52,371 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36337/status
+2026-02-11 05:49:52,372 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:52,388 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38643'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:52,388 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38643' closed.
+2026-02-11 05:49:52,388 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33813'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:52,389 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33813' closed.
+2026-02-11 05:49:52,391 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37619': TLS handshake failed with remote 'tls://127.0.0.1:51520': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:52,392 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37619': TLS handshake failed with remote 'tls://127.0.0.1:51530': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:52,392 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -19408,17 +16750,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:23,662 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:23,680 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:46849
-2025-01-07 20:33:23,680 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35489/status
-2025-01-07 20:33:23,680 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:23,704 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41529'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:23,704 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41529' closed.
-2025-01-07 20:33:23,704 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46419'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:23,704 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46419' closed.
-2025-01-07 20:33:23,723 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46849': TLS handshake failed with remote 'tls://127.0.0.1:52862': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:23,724 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46849': TLS handshake failed with remote 'tls://127.0.0.1:52866': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:23,724 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:53,401 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:53,408 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37887
+2026-02-11 05:49:53,408 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:38731/status
+2026-02-11 05:49:53,409 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:53,427 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41319'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:53,427 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41319' closed.
+2026-02-11 05:49:53,427 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40893'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:53,427 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40893' closed.
+2026-02-11 05:49:53,430 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37887': TLS handshake failed with remote 'tls://127.0.0.1:40730': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:53,430 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37887': TLS handshake failed with remote 'tls://127.0.0.1:40738': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:53,430 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -19487,17 +16829,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:24,730 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:24,738 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:45805
-2025-01-07 20:33:24,738 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46493/status
-2025-01-07 20:33:24,739 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:24,773 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41223'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:24,774 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41223' closed.
-2025-01-07 20:33:24,774 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34991'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:24,774 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34991' closed.
-2025-01-07 20:33:24,774 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45805': TLS handshake failed with remote 'tls://127.0.0.1:46760': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:24,774 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45805': TLS handshake failed with remote 'tls://127.0.0.1:46764': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:24,775 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:54,441 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:54,444 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:46805
+2026-02-11 05:49:54,444 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:44209/status
+2026-02-11 05:49:54,445 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:54,470 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42673'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:54,471 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42673' closed.
+2026-02-11 05:49:54,487 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42269'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:54,487 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42269' closed.
+2026-02-11 05:49:54,488 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46805': TLS handshake failed with remote 'tls://127.0.0.1:41594': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:54,488 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46805': TLS handshake failed with remote 'tls://127.0.0.1:41610': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:54,488 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -19566,17 +16908,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:25,787 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:25,796 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37907
-2025-01-07 20:33:25,796 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:38839/status
-2025-01-07 20:33:25,806 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:25,828 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38119'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:25,837 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38119' closed.
-2025-01-07 20:33:25,837 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43919'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:25,837 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43919' closed.
-2025-01-07 20:33:25,838 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37907': TLS handshake failed with remote 'tls://127.0.0.1:36918': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:25,838 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37907': TLS handshake failed with remote 'tls://127.0.0.1:36934': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:25,838 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:55,497 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:55,500 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43975
+2026-02-11 05:49:55,500 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37115/status
+2026-02-11 05:49:55,501 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:55,524 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36871'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:55,524 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36871' closed.
+2026-02-11 05:49:55,524 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39883'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:55,524 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39883' closed.
+2026-02-11 05:49:55,525 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43975': TLS handshake failed with remote 'tls://127.0.0.1:44138': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:55,525 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43975': TLS handshake failed with remote 'tls://127.0.0.1:44144': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:55,525 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -19645,17 +16987,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:26,845 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:26,853 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:32997
-2025-01-07 20:33:26,853 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:38489/status
-2025-01-07 20:33:26,853 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:26,877 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38611'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:26,877 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38611' closed.
-2025-01-07 20:33:26,877 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39061'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:26,877 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39061' closed.
-2025-01-07 20:33:26,878 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:32997': TLS handshake failed with remote 'tls://127.0.0.1:39506': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:26,878 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:32997': TLS handshake failed with remote 'tls://127.0.0.1:39510': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:26,878 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:56,541 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:56,544 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:41807
+2026-02-11 05:49:56,544 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41165/status
+2026-02-11 05:49:56,545 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:56,567 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33403'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:56,568 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33403' closed.
+2026-02-11 05:49:56,568 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34139'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:56,568 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34139' closed.
+2026-02-11 05:49:56,569 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41807': TLS handshake failed with remote 'tls://127.0.0.1:45436': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:56,569 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41807': TLS handshake failed with remote 'tls://127.0.0.1:45444': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:56,569 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -19724,17 +17066,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:27,887 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:27,891 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:39119
-2025-01-07 20:33:27,892 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:33377/status
-2025-01-07 20:33:27,892 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:27,915 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37749'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:27,915 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37749' closed.
-2025-01-07 20:33:27,916 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37581'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:27,916 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37581' closed.
-2025-01-07 20:33:27,916 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39119': TLS handshake failed with remote 'tls://127.0.0.1:54334': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:27,916 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39119': TLS handshake failed with remote 'tls://127.0.0.1:54344': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:27,916 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:57,582 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:57,590 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43277
+2026-02-11 05:49:57,590 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:46365/status
+2026-02-11 05:49:57,590 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:57,613 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38081'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:57,613 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38081' closed.
+2026-02-11 05:49:57,613 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42997'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:57,613 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42997' closed.
+2026-02-11 05:49:57,619 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43277': TLS handshake failed with remote 'tls://127.0.0.1:51256': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:57,619 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43277': TLS handshake failed with remote 'tls://127.0.0.1:51264': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:57,619 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -19803,17 +17145,17 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:33:28,926 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:28,942 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40993
-2025-01-07 20:33:28,943 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35833/status
-2025-01-07 20:33:28,943 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:28,971 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37419'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:28,971 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37419' closed.
-2025-01-07 20:33:28,971 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41617'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:33:28,971 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41617' closed.
-2025-01-07 20:33:28,972 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40993': TLS handshake failed with remote 'tls://127.0.0.1:45782': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:28,972 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40993': TLS handshake failed with remote 'tls://127.0.0.1:45786': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:33:28,972 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
+2026-02-11 05:49:58,625 - distributed.scheduler - INFO - State start
+2026-02-11 05:49:58,633 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:44443
+2026-02-11 05:49:58,633 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41119/status
+2026-02-11 05:49:58,633 - distributed.scheduler - INFO - Registering Worker plugin shuffle
+2026-02-11 05:49:58,656 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45655'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:58,656 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45655' closed.
+2026-02-11 05:49:58,657 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42731'. Reason: failure-to-start-<class 'TimeoutError'>
+2026-02-11 05:49:58,657 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42731' closed.
+2026-02-11 05:49:58,658 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44443': TLS handshake failed with remote 'tls://127.0.0.1:41744': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:58,659 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44443': TLS handshake failed with remote 'tls://127.0.0.1:41760': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
+2026-02-11 05:49:58,659 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
 Traceback (most recent call last):
   File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
     return await fut
@@ -19882,584 +17224,16 @@
         f"{type(self).__name__} start timed out after {timeout}s."
     ) from exc
 TimeoutError: Nanny start timed out after 0s.
-____________________________ test_log_remove_worker ____________________________
-
-c = <Client: No scheduler connected>
-s = <Scheduler 'tcp://127.0.0.1:46071', workers: 0, cores: 0, tasks: 0>
-a = <Worker 'tcp://127.0.0.1:38021', name: 0, status: closed, stored: 2, running: 1/1, ready: 0, comm: 0, waiting: 0>
-b = <Worker 'tcp://127.0.0.1:43361', name: 1, status: closed, stored: 2, running: 1/2, ready: 0, comm: 0, waiting: 0>
-
-    @gen_cluster(client=True)
-    async def test_log_remove_worker(c, s, a, b):
-        # Computed task
-        x = c.submit(inc, 1, key="x", workers=a.address)
-        await x
-        ev = Event()
-        # Processing task
-        y = c.submit(
-            lambda ev: ev.wait(), ev, key="y", workers=a.address, allow_other_workers=True
-        )
-        await wait_for_state("y", "processing", s)
-        # Scattered task
-        z = await c.scatter({"z": 3}, workers=a.address)
-    
-        s._broker.truncate()
-    
-        with captured_logger("distributed.scheduler", level=logging.INFO) as log:
-            # Successful graceful shutdown
-            await s.retire_workers([a.address], stimulus_id="graceful")
-            # Refuse to retire gracefully as there's nowhere to put x and z
-            await s.retire_workers([b.address], stimulus_id="graceful_abort")
-            await asyncio.sleep(0.2)
-            # Ungraceful shutdown
-            await s.remove_worker(b.address, stimulus_id="ungraceful")
-            await asyncio.sleep(0.2)
-        await ev.set()
-    
-        assert log.getvalue().splitlines() == [
-            # Successful graceful
-            f"Retire worker addresses (stimulus_id='graceful') ['{a.address}']",
-            f"Remove worker addr: {a.address} name: {a.name} (stimulus_id='graceful')",
-            f"Retired worker '{a.address}' (stimulus_id='graceful')",
-            # Aborted graceful
-            f"Retire worker addresses (stimulus_id='graceful_abort') ['{b.address}']",
-            f"Could not retire worker '{b.address}': unique data could not be "
-            "moved to any other worker (stimulus_id='graceful_abort')",
-            # Ungraceful
-            f"Remove worker addr: {b.address} name: {b.name} (stimulus_id='ungraceful')",
-            f"Removing worker '{b.address}' caused the cluster to lose already "
-            "computed task(s), which will be recomputed elsewhere: {'x'} "
-            "(stimulus_id='ungraceful')",
-            f"Removing worker '{b.address}' caused the cluster to lose scattered "
-            "data, which can't be recovered: {'z'} (stimulus_id='ungraceful')",
-            "Lost all workers",
-        ]
-    
-        events = {topic: [ev for _, ev in evs] for topic, evs in s.get_events().items()}
-        for evs in events.values():
-            for ev in evs:
-                if ev.get("action", None) == "retire-workers":
-                    for k in ("retired", "could-not-retire"):
-                        ev[k] = {addr: "snip" for addr in ev[k]}
-                if "stimulus_id" in ev:  # Strip timestamp
-                    ev["stimulus_id"] = ev["stimulus_id"].rsplit("-", 1)[0]
-    
->       assert events == {
-            a.address: [
-                {
-                    "action": "worker-status-change",
-                    "prev-status": "running",
-                    "status": "closing_gracefully",
-                    "stimulus_id": "graceful",
-                },
-                {
-                    "action": "remove-worker",
-                    "lost-computed-tasks": set(),
-                    "lost-scattered-tasks": set(),
-                    "processing-tasks": {"y"},
-                    "expected": True,
-                    "stimulus_id": "graceful",
-                },
-                {"action": "retired", "stimulus_id": "graceful"},
-            ],
-            b.address: [
-                {
-                    "action": "worker-status-change",
-                    "prev-status": "running",
-                    "status": "closing_gracefully",
-                    "stimulus_id": "graceful_abort",
-                },
-                {"action": "could-not-retire", "stimulus_id": "graceful_abort"},
-                {
-                    "action": "worker-status-change",
-                    "prev-status": "closing_gracefully",
-                    "status": "running",
-                    "stimulus_id": "worker-status-change",
-                },
-                {
-                    "action": "remove-worker",
-                    "lost-computed-tasks": {"x"},
-                    "lost-scattered-tasks": {"z"},
-                    "processing-tasks": {"y"},
-                    "expected": False,
-                    "stimulus_id": "ungraceful",
-                },
-                {"action": "closing-worker", "reason": "scheduler-remove-worker"},
-            ],
-            "all": [
-                {
-                    "action": "remove-worker",
-                    "lost-computed-tasks": set(),
-                    "lost-scattered-tasks": set(),
-                    "processing-tasks": {"y"},
-                    "expected": True,
-                    "stimulus_id": "graceful",
-                    "worker": a.address,
-                },
-                {
-                    "action": "retire-workers",
-                    "stimulus_id": "graceful",
-                    "retired": {a.address: "snip"},
-                    "could-not-retire": {},
-                },
-                {
-                    "action": "retire-workers",
-                    "stimulus_id": "graceful_abort",
-                    "retired": {},
-                    "could-not-retire": {b.address: "snip"},
-                },
-                {
-                    "action": "remove-worker",
-                    "lost-computed-tasks": {"x"},
-                    "lost-scattered-tasks": {"z"},
-                    "processing-tasks": {"y"},
-                    "expected": False,
-                    "stimulus_id": "ungraceful",
-                    "worker": b.address,
-                },
-            ],
-            "worker-get-client": [{"client": c.id, "timeout": 5, "worker": b.address}],
-        }
-E       AssertionError: assert {'tcp://127.0.0.1:38021': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'all': [{'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True, 'worker': 'tcp://127.0.0.1:38021'}, {'action': 'retire-workers', 'retired': {'tcp://127.0.0.1:38021': 'snip'}, 'could-not-retire': {}, 'stimulus_id': 'graceful'}, {'action': 'retire-workers', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:43361': 'snip'}, 'stimulus_id': 'graceful_abort'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False, 'worker': 'tcp://127.0.0.1:43361'}], 'tcp://127.0.0.1:43361': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'worker-get-client': [{'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377', 'timeout': 5, 'worker': 'tcp://127.0.0.1:38021'}, {'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377', 'timeout': 5, 'worker': 'tcp://127.0.0.1:43361'}]} == {'tcp://127.0.0.1:38021': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful'}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'tcp://127.0.0.1:43361': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful'}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'all': [{'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful', 'worker': 'tcp://127.0.0.1:38021'}, {'action': 'retire-workers', 'stimulus_id': 'graceful', 'retired': {'tcp://127.0.0.1:38021': 'snip'}, 'could-not-retire': {}}, {'action': 'retire-workers', 'stimulus_id': 'graceful_abort', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:43361': 'snip'}}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful', 'worker': 'tcp://127.0.0.1:43361'}], 'worker-get-client': [{'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377', 'timeout': 5, 'worker': 'tcp://127.0.0.1:43361'}]}
-E         
-E         Common items:
-E         {'all': [{'action': 'remove-worker',
-E                   'expected': True,
-E                   'lost-computed-tasks': set(),
-E                   'lost-scattered-tasks': set(),
-E                   'processing-tasks': {'y'},
-E                   'stimulus_id': 'graceful',
-E                   'worker': 'tcp://127.0.0.1:38021'},
-E                  {'action': 'retire-workers',
-E                   'could-not-retire': {},
-E                   'retired': {'tcp://127.0.0.1:38021': 'snip'},
-E                   'stimulus_id': 'graceful'},
-E                  {'action': 'retire-workers',
-E                   'could-not-retire': {'tcp://127.0.0.1:43361': 'snip'},
-E                   'retired': {},
-E                   'stimulus_id': 'graceful_abort'},
-E                  {'action': 'remove-worker',
-E                   'expected': False,
-E                   'lost-computed-tasks': {'x'},
-E                   'lost-scattered-tasks': {'z'},
-E                   'processing-tasks': {'y'},
-E                   'stimulus_id': 'ungraceful',
-E                   'worker': 'tcp://127.0.0.1:43361'}],
-E          'tcp://127.0.0.1:38021': [{'action': 'worker-status-change',
-E                                     'prev-status': 'running',
-E                                     'status': 'closing_gracefully',
-E                                     'stimulus_id': 'graceful'},
-E                                    {'action': 'remove-worker',
-E                                     'expected': True,
-E                                     'lost-computed-tasks': set(),
-E                                     'lost-scattered-tasks': set(),
-E                                     'processing-tasks': {'y'},
-E                                     'stimulus_id': 'graceful'},
-E                                    {'action': 'retired', 'stimulus_id': 'graceful'}],
-E          'tcp://127.0.0.1:43361': [{'action': 'worker-status-change',
-E                                     'prev-status': 'running',
-E                                     'status': 'closing_gracefully',
-E                                     'stimulus_id': 'graceful_abort'},
-E                                    {'action': 'could-not-retire',
-E                                     'stimulus_id': 'graceful_abort'},
-E                                    {'action': 'worker-status-change',
-E                                     'prev-status': 'closing_gracefully',
-E                                     'status': 'running',
-E                                     'stimulus_id': 'worker-status-change'},
-E                                    {'action': 'remove-worker',
-E                                     'expected': False,
-E                                     'lost-computed-tasks': {'x'},
-E                                     'lost-scattered-tasks': {'z'},
-E                                     'processing-tasks': {'y'},
-E                                     'stimulus_id': 'ungraceful'},
-E                                    {'action': 'closing-worker',
-E                                     'reason': 'scheduler-remove-worker'}]}
-E         Differing items:
-E         {'worker-get-client': [{'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377', 'timeout': 5, 'worker': 'tcp://127.0.0.1:38021'}, {'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377', 'timeout': 5, 'worker': 'tcp://127.0.0.1:43361'}]} != {'worker-get-client': [{'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377', 'timeout': 5, 'worker': 'tcp://127.0.0.1:43361'}]}
-E         
-E         Full diff:
-E           {
-E               'all': [
-E                   {
-E                       'action': 'remove-worker',
-E                       'expected': True,
-E                       'lost-computed-tasks': set(),
-E                       'lost-scattered-tasks': set(),
-E                       'processing-tasks': {
-E                           'y',
-E                       },
-E                       'stimulus_id': 'graceful',
-E                       'worker': 'tcp://127.0.0.1:38021',
-E                   },
-E                   {
-E                       'action': 'retire-workers',
-E                       'could-not-retire': {},
-E                       'retired': {
-E                           'tcp://127.0.0.1:38021': 'snip',
-E                       },
-E                       'stimulus_id': 'graceful',
-E                   },
-E                   {
-E                       'action': 'retire-workers',
-E                       'could-not-retire': {
-E                           'tcp://127.0.0.1:43361': 'snip',
-E                       },
-E                       'retired': {},
-E                       'stimulus_id': 'graceful_abort',
-E                   },
-E                   {
-E                       'action': 'remove-worker',
-E                       'expected': False,
-E                       'lost-computed-tasks': {
-E                           'x',
-E                       },
-E                       'lost-scattered-tasks': {
-E                           'z',
-E                       },
-E                       'processing-tasks': {
-E                           'y',
-E                       },
-E                       'stimulus_id': 'ungraceful',
-E                       'worker': 'tcp://127.0.0.1:43361',
-E                   },
-E               ],
-E               'tcp://127.0.0.1:38021': [
-E                   {
-E                       'action': 'worker-status-change',
-E                       'prev-status': 'running',
-E                       'status': 'closing_gracefully',
-E                       'stimulus_id': 'graceful',
-E                   },
-E                   {
-E                       'action': 'remove-worker',
-E                       'expected': True,
-E                       'lost-computed-tasks': set(),
-E                       'lost-scattered-tasks': set(),
-E                       'processing-tasks': {
-E                           'y',
-E                       },
-E                       'stimulus_id': 'graceful',
-E                   },
-E                   {
-E                       'action': 'retired',
-E                       'stimulus_id': 'graceful',
-E                   },
-E               ],
-E               'tcp://127.0.0.1:43361': [
-E                   {
-E                       'action': 'worker-status-change',
-E                       'prev-status': 'running',
-E                       'status': 'closing_gracefully',
-E                       'stimulus_id': 'graceful_abort',
-E                   },
-E                   {
-E                       'action': 'could-not-retire',
-E                       'stimulus_id': 'graceful_abort',
-E                   },
-E                   {
-E                       'action': 'worker-status-change',
-E                       'prev-status': 'closing_gracefully',
-E                       'status': 'running',
-E                       'stimulus_id': 'worker-status-change',
-E                   },
-E                   {
-E                       'action': 'remove-worker',
-E                       'expected': False,
-E                       'lost-computed-tasks': {
-E                           'x',
-E                       },
-E                       'lost-scattered-tasks': {
-E                           'z',
-E                       },
-E                       'processing-tasks': {
-E                           'y',
-E                       },
-E                       'stimulus_id': 'ungraceful',
-E                   },
-E                   {
-E                       'action': 'closing-worker',
-E                       'reason': 'scheduler-remove-worker',
-E                   },
-E               ],
-E               'worker-get-client': [
-E                   {
-E                       'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377',
-E                       'timeout': 5,
-E         +             'worker': 'tcp://127.0.0.1:38021',
-E         +         },
-E         +         {
-E         +             'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377',
-E         +             'timeout': 5,
-E                       'worker': 'tcp://127.0.0.1:43361',
-E                   },
-E               ],
-E           }
-
-distributed/tests/test_worker.py:3016: AssertionError
------------------------------ Captured stderr call -----------------------------
-2025-01-07 20:33:37,851 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:37,858 - distributed.scheduler - INFO -   Scheduler at:     tcp://127.0.0.1:46071
-2025-01-07 20:33:37,859 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:33801/status
-2025-01-07 20:33:37,859 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:37,876 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:38021
-2025-01-07 20:33:37,885 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:38021
-2025-01-07 20:33:37,885 - distributed.worker - INFO -           Worker name:                          0
-2025-01-07 20:33:37,885 - distributed.worker - INFO -          dashboard at:            127.0.0.1:44637
-2025-01-07 20:33:37,885 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:46071
-2025-01-07 20:33:37,885 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:33:37,885 - distributed.worker - INFO -               Threads:                          1
-2025-01-07 20:33:37,885 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:33:37,885 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-48gjzcug
-2025-01-07 20:33:37,885 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:33:37,886 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:43361
-2025-01-07 20:33:37,886 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:43361
-2025-01-07 20:33:37,886 - distributed.worker - INFO -           Worker name:                          1
-2025-01-07 20:33:37,886 - distributed.worker - INFO -          dashboard at:            127.0.0.1:33971
-2025-01-07 20:33:37,886 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:46071
-2025-01-07 20:33:37,886 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:33:37,886 - distributed.worker - INFO -               Threads:                          2
-2025-01-07 20:33:37,886 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:33:37,886 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-dn296n42
-2025-01-07 20:33:37,886 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:33:37,903 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:38021 name: 0
-2025-01-07 20:33:37,903 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:38021
-2025-01-07 20:33:37,903 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:47146
-2025-01-07 20:33:37,904 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:43361 name: 1
-2025-01-07 20:33:37,904 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:43361
-2025-01-07 20:33:37,904 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:47158
-2025-01-07 20:33:37,914 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:33:37,915 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:33:37,915 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:46071
-2025-01-07 20:33:37,915 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:33:37,915 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:46071
-2025-01-07 20:33:37,915 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:33:37,916 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:46071
-2025-01-07 20:33:37,916 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:46071
-2025-01-07 20:33:37,928 - distributed.scheduler - INFO - Receive client connection: Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377
-2025-01-07 20:33:37,937 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:47168
-2025-01-07 20:33:37,998 - distributed.scheduler - INFO - Retire worker addresses (stimulus_id='graceful') ['tcp://127.0.0.1:38021']
-2025-01-07 20:33:37,998 - distributed.active_memory_manager - INFO - Retiring worker tcp://127.0.0.1:38021; 2 keys are being moved away.
-2025-01-07 20:33:38,039 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:38021 name: 0 (stimulus_id='graceful')
-2025-01-07 20:33:38,039 - distributed.scheduler - INFO - Retired worker 'tcp://127.0.0.1:38021' (stimulus_id='graceful')
-2025-01-07 20:33:38,040 - distributed.scheduler - INFO - Retire worker addresses (stimulus_id='graceful_abort') ['tcp://127.0.0.1:43361']
-2025-01-07 20:33:38,040 - distributed.active_memory_manager - WARNING - Tried retiring worker tcp://127.0.0.1:43361, but 2 tasks could not be moved as there are no suitable workers to receive them. The worker will not be retired.
-2025-01-07 20:33:38,041 - distributed.scheduler - WARNING - Could not retire worker 'tcp://127.0.0.1:43361': unique data could not be moved to any other worker (stimulus_id='graceful_abort')
-2025-01-07 20:33:38,243 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:43361 name: 1 (stimulus_id='ungraceful')
-2025-01-07 20:33:38,243 - distributed.scheduler - WARNING - Removing worker 'tcp://127.0.0.1:43361' caused the cluster to lose already computed task(s), which will be recomputed elsewhere: {'x'} (stimulus_id='ungraceful')
-2025-01-07 20:33:38,244 - distributed.scheduler - ERROR - Removing worker 'tcp://127.0.0.1:43361' caused the cluster to lose scattered data, which can't be recovered: {'z'} (stimulus_id='ungraceful')
-2025-01-07 20:33:38,244 - distributed.scheduler - INFO - Lost all workers
-2025-01-07 20:33:38,245 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:43361. Reason: scheduler-remove-worker
-2025-01-07 20:33:38,245 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name="execute('y')" coro=<Worker.execute() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
-2025-01-07 20:33:38,246 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:33:38,257 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:47158; closing.
-2025-01-07 20:33:38,451 - distributed.scheduler - INFO - Remove client Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377
-2025-01-07 20:33:38,451 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:47168; closing.
-2025-01-07 20:33:38,452 - distributed.scheduler - INFO - Remove client Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377
-2025-01-07 20:33:38,452 - distributed.scheduler - INFO - Close client connection: Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377
-2025-01-07 20:33:38,452 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:38021. Reason: worker-close
-2025-01-07 20:33:38,462 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name="execute('y')" coro=<Worker.execute() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
-2025-01-07 20:33:38,463 - distributed.core - INFO - Connection to tcp://127.0.0.1:46071 has been closed.
-2025-01-07 20:33:38,463 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:33:38,464 - distributed.core - INFO - Connection to tcp://127.0.0.1:46071 has been closed.
-2025-01-07 20:33:38,464 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:47146; closing.
-2025-01-07 20:33:38,473 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
-2025-01-07 20:33:38,474 - distributed.scheduler - INFO - Scheduler closing all comms
 ============================= slowest 20 durations =============================
-60.00s call     distributed/tests/test_tls_functional.py::test_nanny
-6.95s call     distributed/tests/test_tls_functional.py::test_retire_workers
-2.36s setup    distributed/diagnostics/tests/test_task_stream.py::test_client_sync
-0.64s call     distributed/tests/test_worker.py::test_log_remove_worker
-0.54s call     distributed/tests/test_priorities.py::test_last_in_first_out[queue on worker]
-0.34s call     distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill
-0.17s call     distributed/diagnostics/tests/test_task_stream.py::test_client_sync
-0.05s teardown distributed/diagnostics/tests/test_task_stream.py::test_client_sync
-0.00s setup    distributed/tests/test_priorities.py::test_last_in_first_out[queue on worker]
+60.01s call     distributed/tests/test_tls_functional.py::test_retire_workers
+0.81s call     distributed/tests/test_steal.py::test_steal_when_more_tasks
+0.00s setup    distributed/tests/test_steal.py::test_steal_when_more_tasks
+0.00s teardown distributed/tests/test_steal.py::test_steal_when_more_tasks
 0.00s teardown distributed/tests/test_tls_functional.py::test_retire_workers
-0.00s teardown distributed/tests/test_priorities.py::test_last_in_first_out[queue on worker]
-0.00s teardown distributed/tests/test_worker.py::test_log_remove_worker
-0.00s setup    distributed/tests/test_worker.py::test_log_remove_worker
-0.00s teardown distributed/tests/test_tls_functional.py::test_nanny
-0.00s teardown distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill
-0.00s setup    distributed/tests/test_tls_functional.py::test_nanny
-0.00s setup    distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill
 0.00s setup    distributed/tests/test_tls_functional.py::test_retire_workers
 =========================== short test summary info ============================
-FAILED distributed/tests/test_tls_functional.py::test_nanny - TimeoutError
-FAILED distributed/tests/test_worker.py::test_log_remove_worker - AssertionError: assert {'tcp://127.0.0.1:38021': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'all': [{'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True, 'worker': 'tcp://127.0.0.1:38021'}, {'action': 'retire-workers', 'retired': {'tcp://127.0.0.1:38021': 'snip'}, 'could-not-retire': {}, 'stimulus_id': 'graceful'}, {'action': 'retire-workers', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:43361': 'snip'}, 'stimulus_id': 'graceful_abort'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False, 'worker': 'tcp://127.0.0.1:43361'}], 'tcp://127.0.0.1:43361': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'worker-get-client': [{'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377', 'timeout': 5, 'worker': 'tcp://127.0.0.1:38021'}, {'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377', 'timeout': 5, 'worker': 'tcp://127.0.0.1:43361'}]} == {'tcp://127.0.0.1:38021': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful'}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'tcp://127.0.0.1:43361': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful'}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'all': [{'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful', 'worker': 'tcp://127.0.0.1:38021'}, {'action': 'retire-workers', 'stimulus_id': 'graceful', 'retired': {'tcp://127.0.0.1:38021': 'snip'}, 'could-not-retire': {}}, {'action': 'retire-workers', 'stimulus_id': 'graceful_abort', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:43361': 'snip'}}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful', 'worker': 'tcp://127.0.0.1:43361'}], 'worker-get-client': [{'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377', 'timeout': 5, 'worker': 'tcp://127.0.0.1:43361'}]}
-  
-  Common items:
-  {'all': [{'action': 'remove-worker',
-            'expected': True,
-            'lost-computed-tasks': set(),
-            'lost-scattered-tasks': set(),
-            'processing-tasks': {'y'},
-            'stimulus_id': 'graceful',
-            'worker': 'tcp://127.0.0.1:38021'},
-           {'action': 'retire-workers',
-            'could-not-retire': {},
-            'retired': {'tcp://127.0.0.1:38021': 'snip'},
-            'stimulus_id': 'graceful'},
-           {'action': 'retire-workers',
-            'could-not-retire': {'tcp://127.0.0.1:43361': 'snip'},
-            'retired': {},
-            'stimulus_id': 'graceful_abort'},
-           {'action': 'remove-worker',
-            'expected': False,
-            'lost-computed-tasks': {'x'},
-            'lost-scattered-tasks': {'z'},
-            'processing-tasks': {'y'},
-            'stimulus_id': 'ungraceful',
-            'worker': 'tcp://127.0.0.1:43361'}],
-   'tcp://127.0.0.1:38021': [{'action': 'worker-status-change',
-                              'prev-status': 'running',
-                              'status': 'closing_gracefully',
-                              'stimulus_id': 'graceful'},
-                             {'action': 'remove-worker',
-                              'expected': True,
-                              'lost-computed-tasks': set(),
-                              'lost-scattered-tasks': set(),
-                              'processing-tasks': {'y'},
-                              'stimulus_id': 'graceful'},
-                             {'action': 'retired', 'stimulus_id': 'graceful'}],
-   'tcp://127.0.0.1:43361': [{'action': 'worker-status-change',
-                              'prev-status': 'running',
-                              'status': 'closing_gracefully',
-                              'stimulus_id': 'graceful_abort'},
-                             {'action': 'could-not-retire',
-                              'stimulus_id': 'graceful_abort'},
-                             {'action': 'worker-status-change',
-                              'prev-status': 'closing_gracefully',
-                              'status': 'running',
-                              'stimulus_id': 'worker-status-change'},
-                             {'action': 'remove-worker',
-                              'expected': False,
-                              'lost-computed-tasks': {'x'},
-                              'lost-scattered-tasks': {'z'},
-                              'processing-tasks': {'y'},
-                              'stimulus_id': 'ungraceful'},
-                             {'action': 'closing-worker',
-                              'reason': 'scheduler-remove-worker'}]}
-  Differing items:
-  {'worker-get-client': [{'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377', 'timeout': 5, 'worker': 'tcp://127.0.0.1:38021'}, {'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377', 'timeout': 5, 'worker': 'tcp://127.0.0.1:43361'}]} != {'worker-get-client': [{'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377', 'timeout': 5, 'worker': 'tcp://127.0.0.1:43361'}]}
-  
-  Full diff:
-    {
-        'all': [
-            {
-                'action': 'remove-worker',
-                'expected': True,
-                'lost-computed-tasks': set(),
-                'lost-scattered-tasks': set(),
-                'processing-tasks': {
-                    'y',
-                },
-                'stimulus_id': 'graceful',
-                'worker': 'tcp://127.0.0.1:38021',
-            },
-            {
-                'action': 'retire-workers',
-                'could-not-retire': {},
-                'retired': {
-                    'tcp://127.0.0.1:38021': 'snip',
-                },
-                'stimulus_id': 'graceful',
-            },
-            {
-                'action': 'retire-workers',
-                'could-not-retire': {
-                    'tcp://127.0.0.1:43361': 'snip',
-                },
-                'retired': {},
-                'stimulus_id': 'graceful_abort',
-            },
-            {
-                'action': 'remove-worker',
-                'expected': False,
-                'lost-computed-tasks': {
-                    'x',
-                },
-                'lost-scattered-tasks': {
-                    'z',
-                },
-                'processing-tasks': {
-                    'y',
-                },
-                'stimulus_id': 'ungraceful',
-                'worker': 'tcp://127.0.0.1:43361',
-            },
-        ],
-        'tcp://127.0.0.1:38021': [
-            {
-                'action': 'worker-status-change',
-                'prev-status': 'running',
-                'status': 'closing_gracefully',
-                'stimulus_id': 'graceful',
-            },
-            {
-                'action': 'remove-worker',
-                'expected': True,
-                'lost-computed-tasks': set(),
-                'lost-scattered-tasks': set(),
-                'processing-tasks': {
-                    'y',
-                },
-                'stimulus_id': 'graceful',
-            },
-            {
-                'action': 'retired',
-                'stimulus_id': 'graceful',
-            },
-        ],
-        'tcp://127.0.0.1:43361': [
-            {
-                'action': 'worker-status-change',
-                'prev-status': 'running',
-                'status': 'closing_gracefully',
-                'stimulus_id': 'graceful_abort',
-            },
-            {
-                'action': 'could-not-retire',
-                'stimulus_id': 'graceful_abort',
-            },
-            {
-                'action': 'worker-status-change',
-                'prev-status': 'closing_gracefully',
-                'status': 'running',
-                'stimulus_id': 'worker-status-change',
-            },
-            {
-                'action': 'remove-worker',
-                'expected': False,
-                'lost-computed-tasks': {
-                    'x',
-                },
-                'lost-scattered-tasks': {
-                    'z',
-                },
-                'processing-tasks': {
-                    'y',
-                },
-                'stimulus_id': 'ungraceful',
-            },
-            {
-                'action': 'closing-worker',
-                'reason': 'scheduler-remove-worker',
-            },
-        ],
-        'worker-get-client': [
-            {
-                'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377',
-                'timeout': 5,
-  +             'worker': 'tcp://127.0.0.1:38021',
-  +         },
-  +         {
-  +             'client': 'Client-41ca24e9-cd9b-11ef-8fa2-a95899f3b377',
-  +             'timeout': 5,
-                'worker': 'tcp://127.0.0.1:43361',
-            },
-        ],
-    }
-==================== 2 failed, 4 passed in 73.06s (0:01:13) ====================
+FAILED distributed/tests/test_tls_functional.py::test_retire_workers - TimeoutError
+==================== 1 failed, 1 passed in 61.71s (0:01:01) ====================
 *** END OF RUN 2: NOT ALL TESTS HAVE YET PASSED/XFAILED ***
 *** STARTING RUN 3: python3.13 -m pytest --pyargs distributed --verbose --color=no --timeout-method=signal --timeout=300 -m not avoid_ci -rfE --last-failed --last-failed-no-failures none --ignore=distributed/comm/tests/test_comms.py --ignore=distributed/comm/tests/test_ws.py --ignore=distributed/deploy/tests/test_adaptive.py --ignore=distributed/deploy/tests/test_local.py --ignore=distributed/deploy/tests/test_slow_adaptive.py --ignore=distributed/deploy/tests/test_spec_cluster.py --deselect=distributed/cli/tests/test_dask_scheduler.py::test_no_dashboard --deselect=distributed/deploy/tests/test_local.py::test_localcluster_get_client --deselect=distributed/deploy/tests/test_old_ssh.py::test_cluster --deselect=distributed/deploy/tests/test_old_ssh.py::test_old_ssh_nprocs_renamed_to_n_workers --deselect=distributed/deploy/tests/test_old_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/deploy/tests/test_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/http/tests/test_core.py::test_prometheus_api_doc --deselect=distributed/tests/test_init.py::test_git_revision --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout_returned --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_jupyter.py::test_shutsdown_cleanly --deselect=distributed/tests/test_profile.py::test_stack_overflow --deselect=distributed/tests/test_pubsub.py::test_client_worker --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_spill.py::test_spillbuffer_oserror --deselect=distributed/tests/test_steal.py::test_steal_twice --deselect=distributed/tests/test_utils_test.py::test_cluster --deselect=distributed/tests/test_variable.py::test_variable_in_task --deselect=distributed/tests/test_worker.py::test_process_executor_kills_process --deselect=distributed/tests/test_worker_memory.py::test_fail_to_pickle_execute_1 --deselect=distributed/tests/test_worker_state_machine.py::test_task_state_instance_are_garbage_collected --deselect=distributed/protocol/tests/test_protocol.py::test_deeply_nested_structures --deselect=distributed/protocol/tests/test_serialize.py::test_deeply_nested_structures --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_spec.py::test_errors --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/dashboard/tests/test_scheduler_bokeh.py::test_counters --deselect=distributed/dashboard/tests/test_worker_bokeh.py::test_counters --deselect=distributed/deploy/tests/test_local.py::test_adapt_then_manual --deselect=distributed/deploy/tests/test_local.py::test_async_with --deselect=distributed/deploy/tests/test_local.py::test_close_twice --deselect=distributed/deploy/tests/test_local.py::test_cluster_info_sync --deselect=distributed/deploy/tests/test_local.py::test_local_tls --deselect=distributed/deploy/tests/test_local.py::test_no_dangling_asyncio_tasks --deselect=distributed/deploy/tests/test_local.py::test_only_local_access --deselect=distributed/deploy/tests/test_local.py::test_remote_access --deselect=distributed/diagnostics/tests/test_progress_widgets.py::test_serializers 
--deselect=distributed/diagnostics/tests/test_scheduler_plugin.py::test_lifecycle --deselect=distributed/http/scheduler/tests/test_missing_bokeh.py::test_missing_bokeh --deselect=distributed/http/scheduler/tests/test_scheduler_http.py::test_metrics_when_prometheus_client_not_installed --deselect=distributed/protocol/tests/test_serialize.py::test_errors --deselect=distributed/tests/test_batched.py::test_BatchedSend --deselect=distributed/tests/test_batched.py::test_close_closed --deselect=distributed/tests/test_batched.py::test_close_twice --deselect=distributed/tests/test_batched.py::test_send_after_stream_start --deselect=distributed/tests/test_batched.py::test_send_before_close --deselect=distributed/tests/test_batched.py::test_send_before_start --deselect=distributed/tests/test_batched.py::test_sending_traffic_jam --deselect=distributed/tests/test_batched.py::test_serializers --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_async_with --deselect=distributed/tests/test_client.py::test_client_is_quiet_cluster_close --deselect=distributed/tests/test_client.py::test_dashboard_link_cluster --deselect=distributed/tests/test_client.py::test_dashboard_link_inproc --deselect=distributed/tests/test_client.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_client.py::test_mixing_clients_different_scheduler --deselect=distributed/tests/test_client.py::test_quiet_client_close --deselect=distributed/tests/test_client.py::test_rebalance_sync --deselect=distributed/tests/test_client.py::test_repr_localcluster --deselect=distributed/tests/test_client.py::test_security_loader --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_shutdown --deselect=distributed/tests/test_client.py::test_shutdown_is_quiet_with_cluster --deselect=distributed/tests/test_client.py::test_shutdown_localcluster --deselect=distributed/tests/test_client.py::test_shutdown_stops_callbacks --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_start_new_loop --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_use_running_loop --deselect=distributed/tests/test_core.py::test_close_fast_without_active_handlers --deselect=distributed/tests/test_core.py::test_close_grace_period_for_handlers --deselect=distributed/tests/test_core.py::test_close_properly --deselect=distributed/tests/test_core.py::test_compression --deselect=distributed/tests/test_core.py::test_connection_pool --deselect=distributed/tests/test_core.py::test_connection_pool_close_while_connecting --deselect=distributed/tests/test_core.py::test_connection_pool_detects_remote_close --deselect=distributed/tests/test_core.py::test_connection_pool_outside_cancellation --deselect=distributed/tests/test_core.py::test_connection_pool_remove --deselect=distributed/tests/test_core.py::test_connection_pool_respects_limit --deselect=distributed/tests/test_core.py::test_connection_pool_tls --deselect=distributed/tests/test_core.py::test_counters --deselect=distributed/tests/test_core.py::test_deserialize_error 
--deselect=distributed/tests/test_core.py::test_errors --deselect=distributed/tests/test_core.py::test_identity_inproc --deselect=distributed/tests/test_core.py::test_identity_tcp --deselect=distributed/tests/test_core.py::test_large_packets_inproc --deselect=distributed/tests/test_core.py::test_messages_are_ordered_bsend --deselect=distributed/tests/test_core.py::test_messages_are_ordered_raw --deselect=distributed/tests/test_core.py::test_ports --deselect=distributed/tests/test_core.py::test_rpc_default --deselect=distributed/tests/test_core.py::test_rpc_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_default --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_tcp --deselect=distributed/tests/test_core.py::test_rpc_serialization --deselect=distributed/tests/test_core.py::test_rpc_tcp --deselect=distributed/tests/test_core.py::test_rpc_tls --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_inproc --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_tcp --deselect=distributed/tests/test_core.py::test_send_recv_args --deselect=distributed/tests/test_core.py::test_send_recv_cancelled --deselect=distributed/tests/test_core.py::test_server --deselect=distributed/tests/test_core.py::test_server_comms_mark_active_handlers --deselect=distributed/tests/test_core.py::test_server_raises_on_blocked_handlers --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_locks.py::test_errors --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_worker_uses_same_host_as_nanny --deselect=distributed/tests/test_preload.py::test_failure_doesnt_crash_scheduler --deselect=distributed/tests/test_preload.py::test_preload_import_time --deselect=distributed/tests/test_preload.py::test_preload_manager_sequence --deselect=distributed/tests/test_preload.py::test_worker_preload_text --deselect=distributed/tests/test_scheduler.py::test_allowed_failures_config --deselect=distributed/tests/test_scheduler.py::test_async_context_manager --deselect=distributed/tests/test_scheduler.py::test_dashboard_host --deselect=distributed/tests/test_scheduler.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_scheduler.py::test_finished --deselect=distributed/tests/test_scheduler.py::test_multiple_listeners --deselect=distributed/tests/test_scheduler.py::test_no_dangling_asyncio_tasks --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_security.py::test_require_encryption --deselect=distributed/tests/test_security.py::test_tls_listen_connect --deselect=distributed/tests/test_security.py::test_tls_temporary_credentials_functional --deselect=distributed/tests/test_semaphore.py::test_threadpoolworkers_pick_correct_ioloop --deselect=distributed/tests/test_tls_functional.py::test_security_dict_input_no_security --deselect=distributed/tests/test_utils_test.py::test_ensure_no_new_clients --deselect=distributed/tests/test_utils_test.py::test_freeze_batched_send 
--deselect=distributed/tests/test_utils_test.py::test_locked_comm_drop_in_replacement --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_read --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_write --deselect=distributed/tests/test_worker.py::test_host_uses_scheduler_protocol --deselect=distributed/tests/test_worker.py::test_plugin_exception --deselect=distributed/tests/test_worker.py::test_plugin_internal_exception --deselect=distributed/tests/test_worker.py::test_plugin_multiple_exceptions --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker_client.py::test_dont_override_default_get --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_allowlist --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_protocols --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers_2 --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command_default --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_config --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_file --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_remote_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_scheduler_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_contact_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_error_during_startup --deselect=distributed/cli/tests/test_dask_worker.py::test_integer_names --deselect=distributed/cli/tests/test_dask_worker.py::test_listen_address_ipv6 --deselect=distributed/cli/tests/test_dask_worker.py::test_local_directory --deselect=distributed/cli/tests/test_dask_worker.py::test_memory_limit --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range_too_many_workers_raises --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_no_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_auto --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_expands_name --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_negative --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_requires_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_preload_config --deselect=distributed/cli/tests/test_dask_worker.py::test_resources 
--deselect=distributed/cli/tests/test_dask_worker.py::test_respect_host_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_address_env --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_restart_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_stagger_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_signal_handling --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_works --deselect=distributed/cli/tests/test_dask_worker.py::test_timeout --deselect=distributed/cli/tests/test_dask_worker.py::test_worker_class --deselect=distributed/tests/test_config.py::test_logging_extended --deselect=distributed/tests/test_config.py::test_logging_file_config --deselect=distributed/tests/test_config.py::test_logging_mutual_exclusive --deselect=distributed/tests/test_config.py::test_logging_simple --deselect=distributed/tests/test_config.py::test_logging_simple_under_distributed --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_variable.py::test_variable_in_task 
 ============================= test session starts ==============================
@@ -20471,2723 +17245,17 @@
 timeout: 300.0s
 timeout method: signal
 timeout func_only: False
-collecting ... collected 2 items
-run-last-failure: rerun previous 2 failures (skipped 147 files)
-
-distributed/tests/test_tls_functional.py::test_nanny FAILED              [ 50%]
-distributed/tests/test_worker.py::test_log_remove_worker FAILED          [100%]
-
-=================================== FAILURES ===================================
-__________________________________ test_nanny __________________________________
-
-fut = <coroutine object Nanny.start_unsafe at 0xffffa7b292a0>, timeout = 0
-
-    async def wait_for(fut: Awaitable[T], timeout: float) -> T:
-        async with asyncio.timeout(timeout):
->           return await fut
-
-distributed/utils.py:1914: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-distributed/nanny.py:358: in start_unsafe
-    comm = await self.rpc.connect(saddr)
-distributed/core.py:1485: in connect
-    return await self._connect(addr=addr, timeout=timeout)
-distributed/core.py:1429: in _connect
-    comm = await connect(
-distributed/comm/core.py:342: in connect
-    comm = await wait_for(
-distributed/utils.py:1914: in wait_for
-    return await fut
-distributed/comm/tcp.py:546: in connect
-    stream = await self.client.connect(
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = <tornado.tcpclient.TCPClient object at 0xffffa9578590>
-host = '127.0.0.1', port = 37895, af = <AddressFamily.AF_UNSPEC: 0>
-ssl_options = <ssl.SSLContext object at 0xffffa625e8d0>
-max_buffer_size = 31544635392.0, source_ip = None, source_port = None
-timeout = None
-
-    async def connect(
-        self,
-        host: str,
-        port: int,
-        af: socket.AddressFamily = socket.AF_UNSPEC,
-        ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
-        max_buffer_size: Optional[int] = None,
-        source_ip: Optional[str] = None,
-        source_port: Optional[int] = None,
-        timeout: Optional[Union[float, datetime.timedelta]] = None,
-    ) -> IOStream:
-        """Connect to the given host and port.
-    
-        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
-        ``ssl_options`` is not None).
-    
-        Using the ``source_ip`` kwarg, one can specify the source
-        IP address to use when establishing the connection.
-        In case the user needs to resolve and
-        use a specific interface, it has to be handled outside
-        of Tornado as this depends very much on the platform.
-    
-        Raises `TimeoutError` if the input future does not complete before
-        ``timeout``, which may be specified in any form allowed by
-        `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
-        relative to `.IOLoop.time`)
-    
-        Similarly, when the user requires a certain source port, it can
-        be specified using the ``source_port`` arg.
-    
-        .. versionchanged:: 4.5
-           Added the ``source_ip`` and ``source_port`` arguments.
-    
-        .. versionchanged:: 5.0
-           Added the ``timeout`` argument.
-        """
-        if timeout is not None:
-            if isinstance(timeout, numbers.Real):
-                timeout = IOLoop.current().time() + timeout
-            elif isinstance(timeout, datetime.timedelta):
-                timeout = IOLoop.current().time() + timeout.total_seconds()
-            else:
-                raise TypeError("Unsupported timeout %r" % timeout)
-        if timeout is not None:
-            addrinfo = await gen.with_timeout(
-                timeout, self.resolver.resolve(host, port, af)
-            )
-        else:
-            addrinfo = await self.resolver.resolve(host, port, af)
-        connector = _Connector(
-            addrinfo,
-            functools.partial(
-                self._create_stream,
-                max_buffer_size,
-                source_ip=source_ip,
-                source_port=source_port,
-            ),
-        )
->       af, addr, stream = await connector.start(connect_timeout=timeout)
-E       asyncio.exceptions.CancelledError
-
-/usr/lib/python3/dist-packages/tornado/tcpclient.py:279: CancelledError
-
-The above exception was the direct cause of the following exception:
-
-self = <Nanny: None, threads: 1>
-
-    @final
-    async def start(self):
-        async with self._startup_lock:
-            if self.status == Status.failed:
-                assert self.__startup_exc is not None
-                raise self.__startup_exc
-            elif self.status != Status.init:
-                return self
-            timeout = getattr(self, "death_timeout", None)
-    
-            async def _close_on_failure(exc: Exception) -> None:
-                await self.close(reason=f"failure-to-start-{str(type(exc))}")
-                self.status = Status.failed
-                self.__startup_exc = exc
-    
-            try:
->               await wait_for(self.start_unsafe(), timeout=timeout)
-
-distributed/core.py:528: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-distributed/utils.py:1913: in wait_for
-    async with asyncio.timeout(timeout):
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = <Timeout [expired]>
-exc_type = <class 'asyncio.exceptions.CancelledError'>
-exc_val = CancelledError(), exc_tb = <traceback object at 0xffffa5ff0780>
-
-    async def __aexit__(
-        self,
-        exc_type: Optional[Type[BaseException]],
-        exc_val: Optional[BaseException],
-        exc_tb: Optional[TracebackType],
-    ) -> Optional[bool]:
-        assert self._state in (_State.ENTERED, _State.EXPIRING)
-    
-        if self._timeout_handler is not None:
-            self._timeout_handler.cancel()
-            self._timeout_handler = None
-    
-        if self._state is _State.EXPIRING:
-            self._state = _State.EXPIRED
-    
-            if self._task.uncancel() <= self._cancelling and exc_type is not None:
-                # Since there are no new cancel requests, we're
-                # handling this.
-                if issubclass(exc_type, exceptions.CancelledError):
->                   raise TimeoutError from exc_val
-E                   TimeoutError
-
-/usr/lib/python3.13/asyncio/timeouts.py:116: TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-    @contextlib.asynccontextmanager
-    async def _cluster_factory():
-        workers = []
-        s = None
-        try:
-            for _ in range(60):
-                try:
->                   s, ws = await start_cluster(
-                        nthreads,
-                        scheduler,
-                        security=security,
-                        Worker=Worker,
-                        scheduler_kwargs=scheduler_kwargs,
-                        worker_kwargs=merge(
-                            {"death_timeout": min(15, int(deadline.remaining))},
-                            worker_kwargs,
-                        ),
-                    )
-
-distributed/utils_test.py:974: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-distributed/utils_test.py:791: in start_cluster
-    await asyncio.gather(*workers)
-/usr/lib/python3.13/asyncio/tasks.py:737: in _wrap_awaitable
-    return await awaitable
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = <Nanny: None, threads: 1>
-
-    @final
-    async def start(self):
-        async with self._startup_lock:
-            if self.status == Status.failed:
-                assert self.__startup_exc is not None
-                raise self.__startup_exc
-            elif self.status != Status.init:
-                return self
-            timeout = getattr(self, "death_timeout", None)
-    
-            async def _close_on_failure(exc: Exception) -> None:
-                await self.close(reason=f"failure-to-start-{str(type(exc))}")
-                self.status = Status.failed
-                self.__startup_exc = exc
-    
-            try:
-                await wait_for(self.start_unsafe(), timeout=timeout)
-            except asyncio.TimeoutError as exc:
-                await _close_on_failure(exc)
->               raise asyncio.TimeoutError(
-                    f"{type(self).__name__} start timed out after {timeout}s."
-                ) from exc
-E               TimeoutError: Nanny start timed out after 0s.
-
-distributed/core.py:531: TimeoutError
-
-During handling of the above exception, another exception occurred:
-
-fut = <coroutine object gen_cluster.<locals>._.<locals>.test_func.<locals>.async_fn at 0xffffa7ee4900>
-timeout = 60
-
-    async def wait_for(fut: Awaitable[T], timeout: float) -> T:
-        async with asyncio.timeout(timeout):
->           return await fut
-
-distributed/utils.py:1914: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-distributed/utils_test.py:1007: in async_fn
-    _cluster_factory() as (s, workers),
-/usr/lib/python3.13/contextlib.py:214: in __aenter__
-    return await anext(self.gen)
-distributed/utils_test.py:991: in _cluster_factory
-    await asyncio.sleep(1)
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-delay = 1, result = None
-
-    async def sleep(delay, result=None):
-        """Coroutine that completes after a given time (in seconds)."""
-        if delay <= 0:
-            await __sleep0()
-            return result
-    
-        if math.isnan(delay):
-            raise ValueError("Invalid delay: NaN (not a number)")
-    
-        loop = events.get_running_loop()
-        future = loop.create_future()
-        h = loop.call_later(delay,
-                            futures._set_result_unless_cancelled,
-                            future, result)
-        try:
->           return await future
-E           asyncio.exceptions.CancelledError
-
-/usr/lib/python3.13/asyncio/tasks.py:718: CancelledError
-
-The above exception was the direct cause of the following exception:
-
-args = (), kwds = {}
-
-    @wraps(func)
-    def inner(*args, **kwds):
-        with self._recreate_cm():
->           return func(*args, **kwds)
-
-/usr/lib/python3.13/contextlib.py:85: 
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-/usr/lib/python3.13/contextlib.py:85: in inner
-    return func(*args, **kwds)
-distributed/utils_test.py:1090: in test_func
-    return _run_and_close_tornado(async_fn_outer)
-distributed/utils_test.py:380: in _run_and_close_tornado
-    return asyncio_run(inner_fn(), loop_factory=get_loop_factory())
-/usr/lib/python3.13/asyncio/runners.py:195: in run
-    return runner.run(main)
-/usr/lib/python3.13/asyncio/runners.py:118: in run
-    return self._loop.run_until_complete(task)
-/usr/lib/python3.13/asyncio/base_events.py:720: in run_until_complete
-    return future.result()
-distributed/utils_test.py:377: in inner_fn
-    return await async_fn(*args, **kwargs)
-distributed/utils_test.py:1087: in async_fn_outer
-    return await utils_wait_for(async_fn(), timeout=timeout * 2)
-distributed/utils.py:1913: in wait_for
-    async with asyncio.timeout(timeout):
-_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-
-self = <Timeout [expired]>
-exc_type = <class 'asyncio.exceptions.CancelledError'>
-exc_val = CancelledError(), exc_tb = <traceback object at 0xffffa60d2bc0>
-
-    async def __aexit__(
-        self,
-        exc_type: Optional[Type[BaseException]],
-        exc_val: Optional[BaseException],
-        exc_tb: Optional[TracebackType],
-    ) -> Optional[bool]:
-        assert self._state in (_State.ENTERED, _State.EXPIRING)
-    
-        if self._timeout_handler is not None:
-            self._timeout_handler.cancel()
-            self._timeout_handler = None
-    
-        if self._state is _State.EXPIRING:
-            self._state = _State.EXPIRED
-    
-            if self._task.uncancel() <= self._cancelling and exc_type is not None:
-                # Since there are no new cancel requests, we're
-                # handling this.
-                if issubclass(exc_type, exceptions.CancelledError):
->                   raise TimeoutError from exc_val
-E                   TimeoutError
-
-/usr/lib/python3.13/asyncio/timeouts.py:116: TimeoutError
------------------------------ Captured stderr call -----------------------------
-2025-01-07 20:33:44,685 - distributed.http.proxy - INFO - To route to workers diagnostics web server please install jupyter-server-proxy: python -m pip install jupyter-server-proxy
-2025-01-07 20:33:44,687 - distributed.scheduler - INFO - State start
-2025-01-07 20:33:44,700 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:42073
-2025-01-07 20:33:44,700 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:40259/status
-2025-01-07 20:33:44,705 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:33:44,791 - distributed.nanny - INFO -         Start Nanny at: 'tls://127.0.0.1:39309'
-2025-01-07 20:33:44,885 - distributed.nanny - INFO -         Start Nanny at: 'tls://127.0.0.1:39059'
-2025-01-07 20:33:47,270 - distributed.worker - INFO -       Start worker at:      tls://127.0.0.1:37991
-2025-01-07 20:33:47,270 - distributed.worker - INFO -          Listening to:      tls://127.0.0.1:37991
-2025-01-07 20:33:47,270 - distributed.worker - INFO -           Worker name:                          0
-2025-01-07 20:33:47,270 - distributed.worker - INFO -          dashboard at:            127.0.0.1:43431
-2025-01-07 20:33:47,270 - distributed.worker - INFO - Waiting to connect to:      tls://127.0.0.1:42073
-2025-01-07 20:33:47,270 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:33:47,270 - distributed.worker - INFO -               Threads:                          1
-2025-01-07 20:33:47,270 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:33:47,270 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-0wuchnrg
-2025-01-07 20:33:47,270 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:33:47,699 - distributed.worker - INFO -       Start worker at:      tls://127.0.0.1:35157
-2025-01-07 20:33:47,699 - distributed.worker - INFO -          Listening to:      tls://127.0.0.1:35157
-2025-01-07 20:33:47,699 - distributed.worker - INFO -           Worker name:                          1
-2025-01-07 20:33:47,699 - distributed.worker - INFO -          dashboard at:            127.0.0.1:34913
-2025-01-07 20:33:47,699 - distributed.worker - INFO - Waiting to connect to:      tls://127.0.0.1:42073
-2025-01-07 20:33:47,699 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:33:47,699 - distributed.worker - INFO -               Threads:                          2
-2025-01-07 20:33:47,699 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:33:47,699 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-xotwq2cy
-2025-01-07 20:33:47,700 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:33:48,221 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:37991 name: 0
-2025-01-07 20:33:49,445 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:33:49,446 - distributed.worker - INFO -         Registered to:      tls://127.0.0.1:42073
-2025-01-07 20:33:49,446 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:33:49,444 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:37991
-2025-01-07 20:33:49,457 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:46394
-2025-01-07 20:33:49,458 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:35157 name: 1
-2025-01-07 20:33:49,459 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:35157
-2025-01-07 20:33:49,459 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:46406
-2025-01-07 20:33:49,461 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:42073
-2025-01-07 20:33:49,465 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:33:49,466 - distributed.worker - INFO -         Registered to:      tls://127.0.0.1:42073
-2025-01-07 20:33:49,466 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:33:49,470 - distributed.core - INFO - Connection to tls://127.0.0.1:46406 has been closed.
-2025-01-07 20:33:49,471 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:35157 name: 1 (stimulus_id='handle-worker-cleanup-1736325229.470986')
-2025-01-07 20:33:49,481 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:42073
-2025-01-07 20:33:49,481 - distributed.core - INFO - Connection to tls://127.0.0.1:42073 has been closed.
-2025-01-07 20:33:49,482 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:35157. Reason: worker-handle-scheduler-connection-broken
-2025-01-07 20:33:49,521 - distributed.nanny - INFO - Closing Nanny gracefully at 'tls://127.0.0.1:39059'. Reason: worker-handle-scheduler-connection-broken
-2025-01-07 20:33:49,522 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:33:49,545 - distributed.nanny - INFO - Worker closed
-2025-01-07 20:33:52,901 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39059'. Reason: nanny-close-gracefully
-2025-01-07 20:33:52,902 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39059' closed.
-2025-01-07 20:34:19,521 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39309'. Reason: nanny-close
-2025-01-07 20:34:19,521 - distributed.nanny - INFO - Nanny asking worker to close. Reason: nanny-close
-2025-01-07 20:34:19,529 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:37991. Reason: nanny-close
-2025-01-07 20:34:19,529 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:34:19,531 - distributed.core - INFO - Connection to tls://127.0.0.1:42073 has been closed.
-2025-01-07 20:34:19,532 - distributed.core - INFO - Received 'close-stream' from tls://127.0.0.1:46394; closing.
-2025-01-07 20:34:19,533 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:37991 name: 0 (stimulus_id='handle-worker-cleanup-1736325259.533298')
-2025-01-07 20:34:19,533 - distributed.scheduler - INFO - Lost all workers
-2025-01-07 20:34:19,541 - distributed.nanny - INFO - Worker closed
-2025-01-07 20:34:20,325 - distributed.nanny - WARNING - Worker process still alive after 0.8 seconds, killing
-2025-01-07 20:34:20,337 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39309' closed.
-2025-01-07 20:34:20,338 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
-2025-01-07 20:34:20,338 - distributed.scheduler - INFO - Scheduler closing all comms
-2025-01-07 20:34:20,338 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Cluster creation timeout; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 806, in start_cluster
-    raise TimeoutError("Cluster creation timeout")
-TimeoutError: Cluster creation timeout
-2025-01-07 20:34:21,346 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:21,358 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:38277
-2025-01-07 20:34:21,359 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41045/status
-2025-01-07 20:34:21,359 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:21,401 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45661'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:21,401 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45661' closed.
-2025-01-07 20:34:21,401 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41247'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:21,401 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41247' closed.
-2025-01-07 20:34:21,402 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38277': TLS handshake failed with remote 'tls://127.0.0.1:38838': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:21,402 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38277': TLS handshake failed with remote 'tls://127.0.0.1:38840': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:21,402 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:22,418 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:22,431 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33753
-2025-01-07 20:34:22,431 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:38821/status
-2025-01-07 20:34:22,431 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:22,462 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40547'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:22,462 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40547' closed.
-2025-01-07 20:34:22,462 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46491'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:22,463 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46491' closed.
-2025-01-07 20:34:22,463 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33753': TLS handshake failed with remote 'tls://127.0.0.1:58092': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:22,463 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33753': TLS handshake failed with remote 'tls://127.0.0.1:58102': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:22,465 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:23,470 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:23,486 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:39763
-2025-01-07 20:34:23,486 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:39695/status
-2025-01-07 20:34:23,486 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:23,524 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33609'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:23,524 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33609' closed.
-2025-01-07 20:34:23,524 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33587'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:23,524 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33587' closed.
-2025-01-07 20:34:23,541 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39763': TLS handshake failed with remote 'tls://127.0.0.1:57718': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:23,541 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39763': TLS handshake failed with remote 'tls://127.0.0.1:57722': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:23,541 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:24,550 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:24,555 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:40585
-2025-01-07 20:34:24,555 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43369/status
-2025-01-07 20:34:24,555 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:24,569 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33631'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:24,569 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33631' closed.
-2025-01-07 20:34:24,569 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42137'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:24,569 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42137' closed.
-2025-01-07 20:34:24,570 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40585': TLS handshake failed with remote 'tls://127.0.0.1:49596': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:24,570 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40585': TLS handshake failed with remote 'tls://127.0.0.1:49612': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:24,570 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:25,578 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:25,582 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:37129
-2025-01-07 20:34:25,583 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34225/status
-2025-01-07 20:34:25,583 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:25,601 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42143'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:25,601 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42143' closed.
-2025-01-07 20:34:25,601 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39489'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:25,601 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39489' closed.
-2025-01-07 20:34:25,602 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37129': TLS handshake failed with remote 'tls://127.0.0.1:34614': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:25,602 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37129': TLS handshake failed with remote 'tls://127.0.0.1:34630': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:25,602 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:26,610 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:26,614 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:44867
-2025-01-07 20:34:26,614 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:39415/status
-2025-01-07 20:34:26,614 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:26,637 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45457'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:26,637 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45457' closed.
-2025-01-07 20:34:26,637 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45273'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:26,637 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45273' closed.
-2025-01-07 20:34:26,638 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44867': TLS handshake failed with remote 'tls://127.0.0.1:51734': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:26,638 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44867': TLS handshake failed with remote 'tls://127.0.0.1:51748': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:26,638 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:27,646 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:27,650 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:33531
-2025-01-07 20:34:27,650 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:37057/status
-2025-01-07 20:34:27,650 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:27,680 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43065'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:27,681 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43065' closed.
-2025-01-07 20:34:27,681 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35719'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:27,681 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35719' closed.
-2025-01-07 20:34:27,681 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33531': TLS handshake failed with remote 'tls://127.0.0.1:38870': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:27,681 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33531': TLS handshake failed with remote 'tls://127.0.0.1:38876': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:27,682 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:28,687 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:28,694 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43579
-2025-01-07 20:34:28,695 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:35751/status
-2025-01-07 20:34:28,695 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:28,720 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33769'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:28,720 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33769' closed.
-2025-01-07 20:34:28,720 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37209'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:28,720 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37209' closed.
-2025-01-07 20:34:28,725 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43579': TLS handshake failed with remote 'tls://127.0.0.1:40008': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:28,725 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43579': TLS handshake failed with remote 'tls://127.0.0.1:40022': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:28,725 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:29,730 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:29,738 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:45717
-2025-01-07 20:34:29,738 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36843/status
-2025-01-07 20:34:29,738 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:29,760 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43455'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:29,760 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43455' closed.
-2025-01-07 20:34:29,760 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40051'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:29,760 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40051' closed.
-2025-01-07 20:34:29,761 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45717': TLS handshake failed with remote 'tls://127.0.0.1:53226': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:29,761 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45717': TLS handshake failed with remote 'tls://127.0.0.1:53240': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:29,761 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:30,774 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:30,778 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:32883
-2025-01-07 20:34:30,778 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:43041/status
-2025-01-07 20:34:30,778 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:30,787 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41759'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:30,787 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41759' closed.
-2025-01-07 20:34:30,787 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:32793'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:30,787 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:32793' closed.
-2025-01-07 20:34:30,788 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:32883': TLS handshake failed with remote 'tls://127.0.0.1:57298': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:30,788 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:32883': TLS handshake failed with remote 'tls://127.0.0.1:57312': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:30,788 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:31,794 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:31,797 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:43347
-2025-01-07 20:34:31,797 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:38023/status
-2025-01-07 20:34:31,797 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:31,834 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33975'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:31,834 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33975' closed.
-2025-01-07 20:34:31,834 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37347'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:31,834 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37347' closed.
-2025-01-07 20:34:31,835 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43347': TLS handshake failed with remote 'tls://127.0.0.1:54294': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:31,835 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43347': TLS handshake failed with remote 'tls://127.0.0.1:54310': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:31,835 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:32,854 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:32,860 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:42965
-2025-01-07 20:34:32,860 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:40249/status
-2025-01-07 20:34:32,860 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:32,887 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46785'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:32,887 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46785' closed.
-2025-01-07 20:34:32,887 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45719'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:32,887 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45719' closed.
-2025-01-07 20:34:32,888 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42965': TLS handshake failed with remote 'tls://127.0.0.1:60064': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:32,888 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42965': TLS handshake failed with remote 'tls://127.0.0.1:60070': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:32,888 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:33,918 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:33,921 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:36597
-2025-01-07 20:34:33,921 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:41211/status
-2025-01-07 20:34:33,922 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:33,956 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39907'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:33,956 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39907' closed.
-2025-01-07 20:34:33,956 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39041'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:33,956 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39041' closed.
-2025-01-07 20:34:33,965 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36597': TLS handshake failed with remote 'tls://127.0.0.1:49808': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:33,966 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36597': TLS handshake failed with remote 'tls://127.0.0.1:49820': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:33,966 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
-2025-01-07 20:34:34,974 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:34,978 - distributed.scheduler - INFO -   Scheduler at:     tls://127.0.0.1:44573
-2025-01-07 20:34:34,978 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:36249/status
-2025-01-07 20:34:34,979 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:35,013 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33999'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:35,013 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33999' closed.
-2025-01-07 20:34:35,013 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44399'. Reason: failure-to-start-<class 'TimeoutError'>
-2025-01-07 20:34:35,013 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44399' closed.
-2025-01-07 20:34:35,014 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44573': TLS handshake failed with remote 'tls://127.0.0.1:55158': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:35,014 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44573': TLS handshake failed with remote 'tls://127.0.0.1:55166': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029)
-2025-01-07 20:34:35,014 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe
-    comm = await self.rpc.connect(saddr)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect
-    return await self._connect(addr=addr, timeout=timeout)
-           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect
-    comm = await connect(
-           ^^^^^^^^^^^^^^
-    ...<4 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect
-    comm = await wait_for(
-           ^^^^^^^^^^^^^^^
-    ...<2 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for
-    return await fut
-           ^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect
-    stream = await self.client.connect(
-             ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-  File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect
-    af, addr, stream = await connector.start(connect_timeout=timeout)
-                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-asyncio.exceptions.CancelledError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start
-    await wait_for(self.start_unsafe(), timeout=timeout)
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for
-    async with asyncio.timeout(timeout):
-               ~~~~~~~~~~~~~~~^^^^^^^^^
-  File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__
-    raise TimeoutError from exc_val
-TimeoutError
-
-The above exception was the direct cause of the following exception:
-
-Traceback (most recent call last):
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory
-    s, ws = await start_cluster(
-            ^^^^^^^^^^^^^^^^^^^^
-    ...<9 lines>...
-    )
-    ^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster
-    await asyncio.gather(*workers)
-  File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable
-    return await awaitable
-           ^^^^^^^^^^^^^^^
-  File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start
-    raise asyncio.TimeoutError(
-        f"{type(self).__name__} start timed out after {timeout}s."
-    ) from exc
-TimeoutError: Nanny start timed out after 0s.
[… the identical "Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying" cycle repeats eight more times between 20:34:36 and 20:34:43, with the same scheduler/nanny log lines, TLS handshake warnings and tracebacks; only the timestamps and port numbers change …]
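The chained tracebacks above all follow the same pattern: distributed's wait_for() helper awaits the startup coroutine inside asyncio.timeout(), the expired deadline cancels that await, and the resulting CancelledError is converted to TimeoutError on exit of the context manager, which the caller then re-raises as "Nanny start timed out after 0s.". A minimal, self-contained sketch of that pattern (simplified names, not distributed's actual code) is:

```python
import asyncio


async def wait_for(fut, timeout):
    # Same shape as the wait_for() frames in the tracebacks above:
    # asyncio.timeout() cancels the awaited coroutine when the deadline
    # passes and re-raises the CancelledError as TimeoutError on exit.
    async with asyncio.timeout(timeout):
        return await fut


async def start(name, start_unsafe, timeout):
    # Same shape as the core.py start() frames: the TimeoutError coming out
    # of wait_for() is chained into a more descriptive "start timed out" error.
    try:
        await wait_for(start_unsafe(), timeout=timeout)
    except asyncio.TimeoutError as exc:
        raise asyncio.TimeoutError(f"{name} start timed out after {timeout}s.") from exc


async def main():
    async def start_unsafe():
        await asyncio.sleep(1)  # stands in for connecting to the worker process

    try:
        # A 0s budget makes even a trivial startup step fail, reproducing the
        # "Nanny start timed out after 0s." message seen above.
        await start("Nanny", start_unsafe, timeout=0)
    except TimeoutError as exc:
        print(exc)


if __name__ == "__main__":
    asyncio.run(main())
```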
-____________________________ test_log_remove_worker ____________________________
-
-c = <Client: No scheduler connected>
-s = <Scheduler 'tcp://127.0.0.1:37529', workers: 0, cores: 0, tasks: 0>
-a = <Worker 'tcp://127.0.0.1:36441', name: 0, status: closed, stored: 2, running: 1/1, ready: 0, comm: 0, waiting: 0>
-b = <Worker 'tcp://127.0.0.1:35141', name: 1, status: closed, stored: 2, running: 1/2, ready: 0, comm: 0, waiting: 0>
-
-    @gen_cluster(client=True)
-    async def test_log_remove_worker(c, s, a, b):
-        # Computed task
-        x = c.submit(inc, 1, key="x", workers=a.address)
-        await x
-        ev = Event()
-        # Processing task
-        y = c.submit(
-            lambda ev: ev.wait(), ev, key="y", workers=a.address, allow_other_workers=True
-        )
-        await wait_for_state("y", "processing", s)
-        # Scattered task
-        z = await c.scatter({"z": 3}, workers=a.address)
-    
-        s._broker.truncate()
-    
-        with captured_logger("distributed.scheduler", level=logging.INFO) as log:
-            # Successful graceful shutdown
-            await s.retire_workers([a.address], stimulus_id="graceful")
-            # Refuse to retire gracefully as there's nowhere to put x and z
-            await s.retire_workers([b.address], stimulus_id="graceful_abort")
-            await asyncio.sleep(0.2)
-            # Ungraceful shutdown
-            await s.remove_worker(b.address, stimulus_id="ungraceful")
-            await asyncio.sleep(0.2)
-        await ev.set()
-    
-        assert log.getvalue().splitlines() == [
-            # Successful graceful
-            f"Retire worker addresses (stimulus_id='graceful') ['{a.address}']",
-            f"Remove worker addr: {a.address} name: {a.name} (stimulus_id='graceful')",
-            f"Retired worker '{a.address}' (stimulus_id='graceful')",
-            # Aborted graceful
-            f"Retire worker addresses (stimulus_id='graceful_abort') ['{b.address}']",
-            f"Could not retire worker '{b.address}': unique data could not be "
-            "moved to any other worker (stimulus_id='graceful_abort')",
-            # Ungraceful
-            f"Remove worker addr: {b.address} name: {b.name} (stimulus_id='ungraceful')",
-            f"Removing worker '{b.address}' caused the cluster to lose already "
-            "computed task(s), which will be recomputed elsewhere: {'x'} "
-            "(stimulus_id='ungraceful')",
-            f"Removing worker '{b.address}' caused the cluster to lose scattered "
-            "data, which can't be recovered: {'z'} (stimulus_id='ungraceful')",
-            "Lost all workers",
-        ]
-    
-        events = {topic: [ev for _, ev in evs] for topic, evs in s.get_events().items()}
-        for evs in events.values():
-            for ev in evs:
-                if ev.get("action", None) == "retire-workers":
-                    for k in ("retired", "could-not-retire"):
-                        ev[k] = {addr: "snip" for addr in ev[k]}
-                if "stimulus_id" in ev:  # Strip timestamp
-                    ev["stimulus_id"] = ev["stimulus_id"].rsplit("-", 1)[0]
-    
->       assert events == {
-            a.address: [
-                {
-                    "action": "worker-status-change",
-                    "prev-status": "running",
-                    "status": "closing_gracefully",
-                    "stimulus_id": "graceful",
-                },
-                {
-                    "action": "remove-worker",
-                    "lost-computed-tasks": set(),
-                    "lost-scattered-tasks": set(),
-                    "processing-tasks": {"y"},
-                    "expected": True,
-                    "stimulus_id": "graceful",
-                },
-                {"action": "retired", "stimulus_id": "graceful"},
-            ],
-            b.address: [
-                {
-                    "action": "worker-status-change",
-                    "prev-status": "running",
-                    "status": "closing_gracefully",
-                    "stimulus_id": "graceful_abort",
-                },
-                {"action": "could-not-retire", "stimulus_id": "graceful_abort"},
-                {
-                    "action": "worker-status-change",
-                    "prev-status": "closing_gracefully",
-                    "status": "running",
-                    "stimulus_id": "worker-status-change",
-                },
-                {
-                    "action": "remove-worker",
-                    "lost-computed-tasks": {"x"},
-                    "lost-scattered-tasks": {"z"},
-                    "processing-tasks": {"y"},
-                    "expected": False,
-                    "stimulus_id": "ungraceful",
-                },
-                {"action": "closing-worker", "reason": "scheduler-remove-worker"},
-            ],
-            "all": [
-                {
-                    "action": "remove-worker",
-                    "lost-computed-tasks": set(),
-                    "lost-scattered-tasks": set(),
-                    "processing-tasks": {"y"},
-                    "expected": True,
-                    "stimulus_id": "graceful",
-                    "worker": a.address,
-                },
-                {
-                    "action": "retire-workers",
-                    "stimulus_id": "graceful",
-                    "retired": {a.address: "snip"},
-                    "could-not-retire": {},
-                },
-                {
-                    "action": "retire-workers",
-                    "stimulus_id": "graceful_abort",
-                    "retired": {},
-                    "could-not-retire": {b.address: "snip"},
-                },
-                {
-                    "action": "remove-worker",
-                    "lost-computed-tasks": {"x"},
-                    "lost-scattered-tasks": {"z"},
-                    "processing-tasks": {"y"},
-                    "expected": False,
-                    "stimulus_id": "ungraceful",
-                    "worker": b.address,
-                },
-            ],
-            "worker-get-client": [{"client": c.id, "timeout": 5, "worker": b.address}],
-        }
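The assertion output that follows is large, but the only differing key is 'worker-get-client': the captured events contain an entry for both workers, while the test expects one for worker b only. A small, hypothetical illustration of that mismatch (the client id and worker addresses are copied from the failure output below; this snippet is not part of the test suite):

```python
# Hypothetical illustration of the single differing key in the assertion
# failure below.
client_id = "Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60"
a_addr = "tcp://127.0.0.1:36441"  # worker a
b_addr = "tcp://127.0.0.1:35141"  # worker b

observed = [
    {"client": client_id, "timeout": 5, "worker": a_addr},
    {"client": client_id, "timeout": 5, "worker": b_addr},
]
expected = [
    {"client": client_id, "timeout": 5, "worker": b_addr},
]

# The surplus event is the one attributed to worker a.
extra = [event for event in observed if event not in expected]
print(extra)  # [{'client': 'Client-…', 'timeout': 5, 'worker': 'tcp://127.0.0.1:36441'}]
```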
-E       AssertionError: assert {'tcp://127.0.0.1:36441': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'all': [{'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True, 'worker': 'tcp://127.0.0.1:36441'}, {'action': 'retire-workers', 'retired': {'tcp://127.0.0.1:36441': 'snip'}, 'could-not-retire': {}, 'stimulus_id': 'graceful'}, {'action': 'retire-workers', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:35141': 'snip'}, 'stimulus_id': 'graceful_abort'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False, 'worker': 'tcp://127.0.0.1:35141'}], 'tcp://127.0.0.1:35141': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'worker-get-client': [{'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60', 'timeout': 5, 'worker': 'tcp://127.0.0.1:36441'}, {'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35141'}]} == {'tcp://127.0.0.1:36441': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful'}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'tcp://127.0.0.1:35141': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful'}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'all': [{'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful', 'worker': 'tcp://127.0.0.1:36441'}, {'action': 'retire-workers', 'stimulus_id': 'graceful', 'retired': {'tcp://127.0.0.1:36441': 'snip'}, 'could-not-retire': {}}, {'action': 'retire-workers', 'stimulus_id': 'graceful_abort', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:35141': 'snip'}}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful', 'worker': 'tcp://127.0.0.1:35141'}], 'worker-get-client': [{'client': 
'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35141'}]}
-E         
-E         Common items:
-E         {'all': [{'action': 'remove-worker',
-E                   'expected': True,
-E                   'lost-computed-tasks': set(),
-E                   'lost-scattered-tasks': set(),
-E                   'processing-tasks': {'y'},
-E                   'stimulus_id': 'graceful',
-E                   'worker': 'tcp://127.0.0.1:36441'},
-E                  {'action': 'retire-workers',
-E                   'could-not-retire': {},
-E                   'retired': {'tcp://127.0.0.1:36441': 'snip'},
-E                   'stimulus_id': 'graceful'},
-E                  {'action': 'retire-workers',
-E                   'could-not-retire': {'tcp://127.0.0.1:35141': 'snip'},
-E                   'retired': {},
-E                   'stimulus_id': 'graceful_abort'},
-E                  {'action': 'remove-worker',
-E                   'expected': False,
-E                   'lost-computed-tasks': {'x'},
-E                   'lost-scattered-tasks': {'z'},
-E                   'processing-tasks': {'y'},
-E                   'stimulus_id': 'ungraceful',
-E                   'worker': 'tcp://127.0.0.1:35141'}],
-E          'tcp://127.0.0.1:35141': [{'action': 'worker-status-change',
-E                                     'prev-status': 'running',
-E                                     'status': 'closing_gracefully',
-E                                     'stimulus_id': 'graceful_abort'},
-E                                    {'action': 'could-not-retire',
-E                                     'stimulus_id': 'graceful_abort'},
-E                                    {'action': 'worker-status-change',
-E                                     'prev-status': 'closing_gracefully',
-E                                     'status': 'running',
-E                                     'stimulus_id': 'worker-status-change'},
-E                                    {'action': 'remove-worker',
-E                                     'expected': False,
-E                                     'lost-computed-tasks': {'x'},
-E                                     'lost-scattered-tasks': {'z'},
-E                                     'processing-tasks': {'y'},
-E                                     'stimulus_id': 'ungraceful'},
-E                                    {'action': 'closing-worker',
-E                                     'reason': 'scheduler-remove-worker'}],
-E          'tcp://127.0.0.1:36441': [{'action': 'worker-status-change',
-E                                     'prev-status': 'running',
-E                                     'status': 'closing_gracefully',
-E                                     'stimulus_id': 'graceful'},
-E                                    {'action': 'remove-worker',
-E                                     'expected': True,
-E                                     'lost-computed-tasks': set(),
-E                                     'lost-scattered-tasks': set(),
-E                                     'processing-tasks': {'y'},
-E                                     'stimulus_id': 'graceful'},
-E                                    {'action': 'retired', 'stimulus_id': 'graceful'}]}
-E         Differing items:
-E         {'worker-get-client': [{'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60', 'timeout': 5, 'worker': 'tcp://127.0.0.1:36441'}, {'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35141'}]} != {'worker-get-client': [{'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35141'}]}
-E         
-E         Full diff:
-E           {
-E               'all': [
-E                   {
-E                       'action': 'remove-worker',
-E                       'expected': True,
-E                       'lost-computed-tasks': set(),
-E                       'lost-scattered-tasks': set(),
-E                       'processing-tasks': {
-E                           'y',
-E                       },
-E                       'stimulus_id': 'graceful',
-E                       'worker': 'tcp://127.0.0.1:36441',
-E                   },
-E                   {
-E                       'action': 'retire-workers',
-E                       'could-not-retire': {},
-E                       'retired': {
-E                           'tcp://127.0.0.1:36441': 'snip',
-E                       },
-E                       'stimulus_id': 'graceful',
-E                   },
-E                   {
-E                       'action': 'retire-workers',
-E                       'could-not-retire': {
-E                           'tcp://127.0.0.1:35141': 'snip',
-E                       },
-E                       'retired': {},
-E                       'stimulus_id': 'graceful_abort',
-E                   },
-E                   {
-E                       'action': 'remove-worker',
-E                       'expected': False,
-E                       'lost-computed-tasks': {
-E                           'x',
-E                       },
-E                       'lost-scattered-tasks': {
-E                           'z',
-E                       },
-E                       'processing-tasks': {
-E                           'y',
-E                       },
-E                       'stimulus_id': 'ungraceful',
-E                       'worker': 'tcp://127.0.0.1:35141',
-E                   },
-E               ],
-E               'tcp://127.0.0.1:35141': [
-E                   {
-E                       'action': 'worker-status-change',
-E                       'prev-status': 'running',
-E                       'status': 'closing_gracefully',
-E                       'stimulus_id': 'graceful_abort',
-E                   },
-E                   {
-E                       'action': 'could-not-retire',
-E                       'stimulus_id': 'graceful_abort',
-E                   },
-E                   {
-E                       'action': 'worker-status-change',
-E                       'prev-status': 'closing_gracefully',
-E                       'status': 'running',
-E                       'stimulus_id': 'worker-status-change',
-E                   },
-E                   {
-E                       'action': 'remove-worker',
-E                       'expected': False,
-E                       'lost-computed-tasks': {
-E                           'x',
-E                       },
-E                       'lost-scattered-tasks': {
-E                           'z',
-E                       },
-E                       'processing-tasks': {
-E                           'y',
-E                       },
-E                       'stimulus_id': 'ungraceful',
-E                   },
-E                   {
-E                       'action': 'closing-worker',
-E                       'reason': 'scheduler-remove-worker',
-E                   },
-E               ],
-E               'tcp://127.0.0.1:36441': [
-E                   {
-E                       'action': 'worker-status-change',
-E                       'prev-status': 'running',
-E                       'status': 'closing_gracefully',
-E                       'stimulus_id': 'graceful',
-E                   },
-E                   {
-E                       'action': 'remove-worker',
-E                       'expected': True,
-E                       'lost-computed-tasks': set(),
-E                       'lost-scattered-tasks': set(),
-E                       'processing-tasks': {
-E                           'y',
-E                       },
-E                       'stimulus_id': 'graceful',
-E                   },
-E                   {
-E                       'action': 'retired',
-E                       'stimulus_id': 'graceful',
-E                   },
-E               ],
-E               'worker-get-client': [
-E                   {
-E                       'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60',
-E                       'timeout': 5,
-E         +             'worker': 'tcp://127.0.0.1:36441',
-E         +         },
-E         +         {
-E         +             'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60',
-E         +             'timeout': 5,
-E                       'worker': 'tcp://127.0.0.1:35141',
-E                   },
-E               ],
-E           }
-
-distributed/tests/test_worker.py:3016: AssertionError
------------------------------ Captured stderr call -----------------------------
-2025-01-07 20:34:46,165 - distributed.scheduler - INFO - State start
-2025-01-07 20:34:46,169 - distributed.scheduler - INFO -   Scheduler at:     tcp://127.0.0.1:37529
-2025-01-07 20:34:46,169 - distributed.scheduler - INFO -   dashboard at:  http://127.0.0.1:34625/status
-2025-01-07 20:34:46,169 - distributed.scheduler - INFO - Registering Worker plugin shuffle
-2025-01-07 20:34:46,198 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:36441
-2025-01-07 20:34:46,198 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:36441
-2025-01-07 20:34:46,198 - distributed.worker - INFO -           Worker name:                          0
-2025-01-07 20:34:46,198 - distributed.worker - INFO -          dashboard at:            127.0.0.1:34245
-2025-01-07 20:34:46,198 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:37529
-2025-01-07 20:34:46,198 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:34:46,198 - distributed.worker - INFO -               Threads:                          1
-2025-01-07 20:34:46,198 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:34:46,198 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-oki27w81
-2025-01-07 20:34:46,199 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:34:46,200 - distributed.worker - INFO -       Start worker at:      tcp://127.0.0.1:35141
-2025-01-07 20:34:46,200 - distributed.worker - INFO -          Listening to:      tcp://127.0.0.1:35141
-2025-01-07 20:34:46,200 - distributed.worker - INFO -           Worker name:                          1
-2025-01-07 20:34:46,200 - distributed.worker - INFO -          dashboard at:            127.0.0.1:41687
-2025-01-07 20:34:46,200 - distributed.worker - INFO - Waiting to connect to:      tcp://127.0.0.1:37529
-2025-01-07 20:34:46,200 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:34:46,205 - distributed.worker - INFO -               Threads:                          2
-2025-01-07 20:34:46,205 - distributed.worker - INFO -                Memory:                  58.76 GiB
-2025-01-07 20:34:46,205 - distributed.worker - INFO -       Local Directory: /tmp/dask-scratch-space/worker-x3h7f3vr
-2025-01-07 20:34:46,205 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:34:46,218 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:36441 name: 0
-2025-01-07 20:34:46,219 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:36441
-2025-01-07 20:34:46,219 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:45744
-2025-01-07 20:34:46,219 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:35141 name: 1
-2025-01-07 20:34:46,220 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:35141
-2025-01-07 20:34:46,220 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:45750
-2025-01-07 20:34:46,221 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:34:46,222 - distributed.worker - INFO - Starting Worker plugin shuffle
-2025-01-07 20:34:46,222 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:37529
-2025-01-07 20:34:46,222 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:34:46,222 - distributed.worker - INFO -         Registered to:      tcp://127.0.0.1:37529
-2025-01-07 20:34:46,222 - distributed.worker - INFO - -------------------------------------------------
-2025-01-07 20:34:46,223 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:37529
-2025-01-07 20:34:46,223 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:37529
-2025-01-07 20:34:46,229 - distributed.scheduler - INFO - Receive client connection: Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60
-2025-01-07 20:34:46,231 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:45762
-2025-01-07 20:34:46,310 - distributed.scheduler - INFO - Retire worker addresses (stimulus_id='graceful') ['tcp://127.0.0.1:36441']
-2025-01-07 20:34:46,310 - distributed.active_memory_manager - INFO - Retiring worker tcp://127.0.0.1:36441; 2 keys are being moved away.
-2025-01-07 20:34:46,357 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:36441 name: 0 (stimulus_id='graceful')
-2025-01-07 20:34:46,357 - distributed.scheduler - INFO - Retired worker 'tcp://127.0.0.1:36441' (stimulus_id='graceful')
-2025-01-07 20:34:46,358 - distributed.scheduler - INFO - Retire worker addresses (stimulus_id='graceful_abort') ['tcp://127.0.0.1:35141']
-2025-01-07 20:34:46,359 - distributed.active_memory_manager - WARNING - Tried retiring worker tcp://127.0.0.1:35141, but 2 tasks could not be moved as there are no suitable workers to receive them. The worker will not be retired.
-2025-01-07 20:34:46,359 - distributed.scheduler - WARNING - Could not retire worker 'tcp://127.0.0.1:35141': unique data could not be moved to any other worker (stimulus_id='graceful_abort')
-2025-01-07 20:34:46,565 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:35141 name: 1 (stimulus_id='ungraceful')
-2025-01-07 20:34:46,565 - distributed.scheduler - WARNING - Removing worker 'tcp://127.0.0.1:35141' caused the cluster to lose already computed task(s), which will be recomputed elsewhere: {'x'} (stimulus_id='ungraceful')
-2025-01-07 20:34:46,565 - distributed.scheduler - ERROR - Removing worker 'tcp://127.0.0.1:35141' caused the cluster to lose scattered data, which can't be recovered: {'z'} (stimulus_id='ungraceful')
-2025-01-07 20:34:46,565 - distributed.scheduler - INFO - Lost all workers
-2025-01-07 20:34:46,566 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:35141. Reason: scheduler-remove-worker
-2025-01-07 20:34:46,567 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name="execute('y')" coro=<Worker.execute() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
-2025-01-07 20:34:46,568 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:34:46,589 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:45750; closing.
-2025-01-07 20:34:46,779 - distributed.scheduler - INFO - Remove client Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60
-2025-01-07 20:34:46,780 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:45762; closing.
-2025-01-07 20:34:46,780 - distributed.scheduler - INFO - Remove client Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60
-2025-01-07 20:34:46,780 - distributed.scheduler - INFO - Close client connection: Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60
-2025-01-07 20:34:46,805 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:36441. Reason: worker-close
-2025-01-07 20:34:46,806 - distributed.core - INFO - Connection to tcp://127.0.0.1:37529 has been closed.
-2025-01-07 20:34:46,806 - distributed.worker.state_machine - WARNING - Async instruction for <Task cancelled name="execute('y')" coro=<Worker.execute() done, defined at /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker_state_machine.py:3606>> ended with CancelledError
-2025-01-07 20:34:46,807 - distributed.worker - INFO - Removing Worker plugin shuffle
-2025-01-07 20:34:46,808 - distributed.core - INFO - Connection to tcp://127.0.0.1:37529 has been closed.
-2025-01-07 20:34:46,808 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:45744; closing.
-2025-01-07 20:34:46,824 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown
-2025-01-07 20:34:46,825 - distributed.scheduler - INFO - Scheduler closing all comms
-============================= slowest 20 durations =============================
-60.01s call     distributed/tests/test_tls_functional.py::test_nanny
-0.67s call     distributed/tests/test_worker.py::test_log_remove_worker
-0.00s setup    distributed/tests/test_tls_functional.py::test_nanny
-0.00s teardown distributed/tests/test_tls_functional.py::test_nanny
-0.00s teardown distributed/tests/test_worker.py::test_log_remove_worker
-0.00s setup    distributed/tests/test_worker.py::test_log_remove_worker
-=========================== short test summary info ============================
-FAILED distributed/tests/test_tls_functional.py::test_nanny - TimeoutError
-FAILED distributed/tests/test_worker.py::test_log_remove_worker - AssertionError: assert {'tcp://127.0.0.1:36441': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'all': [{'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True, 'worker': 'tcp://127.0.0.1:36441'}, {'action': 'retire-workers', 'retired': {'tcp://127.0.0.1:36441': 'snip'}, 'could-not-retire': {}, 'stimulus_id': 'graceful'}, {'action': 'retire-workers', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:35141': 'snip'}, 'stimulus_id': 'graceful_abort'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False, 'worker': 'tcp://127.0.0.1:35141'}], 'tcp://127.0.0.1:35141': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'worker-get-client': [{'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60', 'timeout': 5, 'worker': 'tcp://127.0.0.1:36441'}, {'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35141'}]} == {'tcp://127.0.0.1:36441': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful'}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'tcp://127.0.0.1:35141': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful'}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'all': [{'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful', 'worker': 'tcp://127.0.0.1:36441'}, {'action': 'retire-workers', 'stimulus_id': 'graceful', 'retired': {'tcp://127.0.0.1:36441': 'snip'}, 'could-not-retire': {}}, {'action': 'retire-workers', 'stimulus_id': 'graceful_abort', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:35141': 'snip'}}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful', 'worker': 'tcp://127.0.0.1:35141'}], 'worker-get-client': [{'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35141'}]}
-  
-  Common items:
-  {'all': [{'action': 'remove-worker',
-            'expected': True,
-            'lost-computed-tasks': set(),
-            'lost-scattered-tasks': set(),
-            'processing-tasks': {'y'},
-            'stimulus_id': 'graceful',
-            'worker': 'tcp://127.0.0.1:36441'},
-           {'action': 'retire-workers',
-            'could-not-retire': {},
-            'retired': {'tcp://127.0.0.1:36441': 'snip'},
-            'stimulus_id': 'graceful'},
-           {'action': 'retire-workers',
-            'could-not-retire': {'tcp://127.0.0.1:35141': 'snip'},
-            'retired': {},
-            'stimulus_id': 'graceful_abort'},
-           {'action': 'remove-worker',
-            'expected': False,
-            'lost-computed-tasks': {'x'},
-            'lost-scattered-tasks': {'z'},
-            'processing-tasks': {'y'},
-            'stimulus_id': 'ungraceful',
-            'worker': 'tcp://127.0.0.1:35141'}],
-   'tcp://127.0.0.1:35141': [{'action': 'worker-status-change',
-                              'prev-status': 'running',
-                              'status': 'closing_gracefully',
-                              'stimulus_id': 'graceful_abort'},
-                             {'action': 'could-not-retire',
-                              'stimulus_id': 'graceful_abort'},
-                             {'action': 'worker-status-change',
-                              'prev-status': 'closing_gracefully',
-                              'status': 'running',
-                              'stimulus_id': 'worker-status-change'},
-                             {'action': 'remove-worker',
-                              'expected': False,
-                              'lost-computed-tasks': {'x'},
-                              'lost-scattered-tasks': {'z'},
-                              'processing-tasks': {'y'},
-                              'stimulus_id': 'ungraceful'},
-                             {'action': 'closing-worker',
-                              'reason': 'scheduler-remove-worker'}],
-   'tcp://127.0.0.1:36441': [{'action': 'worker-status-change',
-                              'prev-status': 'running',
-                              'status': 'closing_gracefully',
-                              'stimulus_id': 'graceful'},
-                             {'action': 'remove-worker',
-                              'expected': True,
-                              'lost-computed-tasks': set(),
-                              'lost-scattered-tasks': set(),
-                              'processing-tasks': {'y'},
-                              'stimulus_id': 'graceful'},
-                             {'action': 'retired', 'stimulus_id': 'graceful'}]}
-  Differing items:
-  {'worker-get-client': [{'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60', 'timeout': 5, 'worker': 'tcp://127.0.0.1:36441'}, {'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35141'}]} != {'worker-get-client': [{'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60', 'timeout': 5, 'worker': 'tcp://127.0.0.1:35141'}]}
-  
-  Full diff:
-    {
-        'all': [
-            {
-                'action': 'remove-worker',
-                'expected': True,
-                'lost-computed-tasks': set(),
-                'lost-scattered-tasks': set(),
-                'processing-tasks': {
-                    'y',
-                },
-                'stimulus_id': 'graceful',
-                'worker': 'tcp://127.0.0.1:36441',
-            },
-            {
-                'action': 'retire-workers',
-                'could-not-retire': {},
-                'retired': {
-                    'tcp://127.0.0.1:36441': 'snip',
-                },
-                'stimulus_id': 'graceful',
-            },
-            {
-                'action': 'retire-workers',
-                'could-not-retire': {
-                    'tcp://127.0.0.1:35141': 'snip',
-                },
-                'retired': {},
-                'stimulus_id': 'graceful_abort',
-            },
-            {
-                'action': 'remove-worker',
-                'expected': False,
-                'lost-computed-tasks': {
-                    'x',
-                },
-                'lost-scattered-tasks': {
-                    'z',
-                },
-                'processing-tasks': {
-                    'y',
-                },
-                'stimulus_id': 'ungraceful',
-                'worker': 'tcp://127.0.0.1:35141',
-            },
-        ],
-        'tcp://127.0.0.1:35141': [
-            {
-                'action': 'worker-status-change',
-                'prev-status': 'running',
-                'status': 'closing_gracefully',
-                'stimulus_id': 'graceful_abort',
-            },
-            {
-                'action': 'could-not-retire',
-                'stimulus_id': 'graceful_abort',
-            },
-            {
-                'action': 'worker-status-change',
-                'prev-status': 'closing_gracefully',
-                'status': 'running',
-                'stimulus_id': 'worker-status-change',
-            },
-            {
-                'action': 'remove-worker',
-                'expected': False,
-                'lost-computed-tasks': {
-                    'x',
-                },
-                'lost-scattered-tasks': {
-                    'z',
-                },
-                'processing-tasks': {
-                    'y',
-                },
-                'stimulus_id': 'ungraceful',
-            },
-            {
-                'action': 'closing-worker',
-                'reason': 'scheduler-remove-worker',
-            },
-        ],
-        'tcp://127.0.0.1:36441': [
-            {
-                'action': 'worker-status-change',
-                'prev-status': 'running',
-                'status': 'closing_gracefully',
-                'stimulus_id': 'graceful',
-            },
-            {
-                'action': 'remove-worker',
-                'expected': True,
-                'lost-computed-tasks': set(),
-                'lost-scattered-tasks': set(),
-                'processing-tasks': {
-                    'y',
-                },
-                'stimulus_id': 'graceful',
-            },
-            {
-                'action': 'retired',
-                'stimulus_id': 'graceful',
-            },
-        ],
-        'worker-get-client': [
-            {
-                'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60',
-                'timeout': 5,
-  +             'worker': 'tcp://127.0.0.1:36441',
-  +         },
-  +         {
-  +             'client': 'Client-6a80f264-cd9b-11ef-b3a3-214f47e67d60',
-  +             'timeout': 5,
-                'worker': 'tcp://127.0.0.1:35141',
-            },
-        ],
-    }
-========================= 2 failed in 63.07s (0:01:03) =========================
-*** END OF RUN 3: NOT ALL TESTS HAVE YET PASSED/XFAILED ***
-*** STARTING RUN 4: python3.13 -m pytest --pyargs distributed --verbose --color=no --timeout-method=signal --timeout=300 -m not avoid_ci -rfE --last-failed --last-failed-no-failures none --ignore=distributed/comm/tests/test_comms.py --ignore=distributed/comm/tests/test_ws.py --ignore=distributed/deploy/tests/test_adaptive.py --ignore=distributed/deploy/tests/test_local.py --ignore=distributed/deploy/tests/test_slow_adaptive.py --ignore=distributed/deploy/tests/test_spec_cluster.py --deselect=distributed/cli/tests/test_dask_scheduler.py::test_no_dashboard --deselect=distributed/deploy/tests/test_local.py::test_localcluster_get_client --deselect=distributed/deploy/tests/test_old_ssh.py::test_cluster --deselect=distributed/deploy/tests/test_old_ssh.py::test_old_ssh_nprocs_renamed_to_n_workers --deselect=distributed/deploy/tests/test_old_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/deploy/tests/test_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/http/tests/test_core.py::test_prometheus_api_doc --deselect=distributed/tests/test_init.py::test_git_revision --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout_returned --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_jupyter.py::test_shutsdown_cleanly --deselect=distributed/tests/test_profile.py::test_stack_overflow --deselect=distributed/tests/test_pubsub.py::test_client_worker --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_spill.py::test_spillbuffer_oserror --deselect=distributed/tests/test_steal.py::test_steal_twice --deselect=distributed/tests/test_utils_test.py::test_cluster --deselect=distributed/tests/test_variable.py::test_variable_in_task --deselect=distributed/tests/test_worker.py::test_process_executor_kills_process --deselect=distributed/tests/test_worker_memory.py::test_fail_to_pickle_execute_1 --deselect=distributed/tests/test_worker_state_machine.py::test_task_state_instance_are_garbage_collected --deselect=distributed/protocol/tests/test_protocol.py::test_deeply_nested_structures --deselect=distributed/protocol/tests/test_serialize.py::test_deeply_nested_structures --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_spec.py::test_errors --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/dashboard/tests/test_scheduler_bokeh.py::test_counters --deselect=distributed/dashboard/tests/test_worker_bokeh.py::test_counters --deselect=distributed/deploy/tests/test_local.py::test_adapt_then_manual --deselect=distributed/deploy/tests/test_local.py::test_async_with --deselect=distributed/deploy/tests/test_local.py::test_close_twice --deselect=distributed/deploy/tests/test_local.py::test_cluster_info_sync --deselect=distributed/deploy/tests/test_local.py::test_local_tls --deselect=distributed/deploy/tests/test_local.py::test_no_dangling_asyncio_tasks --deselect=distributed/deploy/tests/test_local.py::test_only_local_access --deselect=distributed/deploy/tests/test_local.py::test_remote_access --deselect=distributed/diagnostics/tests/test_progress_widgets.py::test_serializers 
--deselect=distributed/diagnostics/tests/test_scheduler_plugin.py::test_lifecycle --deselect=distributed/http/scheduler/tests/test_missing_bokeh.py::test_missing_bokeh --deselect=distributed/http/scheduler/tests/test_scheduler_http.py::test_metrics_when_prometheus_client_not_installed --deselect=distributed/protocol/tests/test_serialize.py::test_errors --deselect=distributed/tests/test_batched.py::test_BatchedSend --deselect=distributed/tests/test_batched.py::test_close_closed --deselect=distributed/tests/test_batched.py::test_close_twice --deselect=distributed/tests/test_batched.py::test_send_after_stream_start --deselect=distributed/tests/test_batched.py::test_send_before_close --deselect=distributed/tests/test_batched.py::test_send_before_start --deselect=distributed/tests/test_batched.py::test_sending_traffic_jam --deselect=distributed/tests/test_batched.py::test_serializers --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_async_with --deselect=distributed/tests/test_client.py::test_client_is_quiet_cluster_close --deselect=distributed/tests/test_client.py::test_dashboard_link_cluster --deselect=distributed/tests/test_client.py::test_dashboard_link_inproc --deselect=distributed/tests/test_client.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_client.py::test_mixing_clients_different_scheduler --deselect=distributed/tests/test_client.py::test_quiet_client_close --deselect=distributed/tests/test_client.py::test_rebalance_sync --deselect=distributed/tests/test_client.py::test_repr_localcluster --deselect=distributed/tests/test_client.py::test_security_loader --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_shutdown --deselect=distributed/tests/test_client.py::test_shutdown_is_quiet_with_cluster --deselect=distributed/tests/test_client.py::test_shutdown_localcluster --deselect=distributed/tests/test_client.py::test_shutdown_stops_callbacks --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_start_new_loop --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_use_running_loop --deselect=distributed/tests/test_core.py::test_close_fast_without_active_handlers --deselect=distributed/tests/test_core.py::test_close_grace_period_for_handlers --deselect=distributed/tests/test_core.py::test_close_properly --deselect=distributed/tests/test_core.py::test_compression --deselect=distributed/tests/test_core.py::test_connection_pool --deselect=distributed/tests/test_core.py::test_connection_pool_close_while_connecting --deselect=distributed/tests/test_core.py::test_connection_pool_detects_remote_close --deselect=distributed/tests/test_core.py::test_connection_pool_outside_cancellation --deselect=distributed/tests/test_core.py::test_connection_pool_remove --deselect=distributed/tests/test_core.py::test_connection_pool_respects_limit --deselect=distributed/tests/test_core.py::test_connection_pool_tls --deselect=distributed/tests/test_core.py::test_counters --deselect=distributed/tests/test_core.py::test_deserialize_error 
--deselect=distributed/tests/test_core.py::test_errors --deselect=distributed/tests/test_core.py::test_identity_inproc --deselect=distributed/tests/test_core.py::test_identity_tcp --deselect=distributed/tests/test_core.py::test_large_packets_inproc --deselect=distributed/tests/test_core.py::test_messages_are_ordered_bsend --deselect=distributed/tests/test_core.py::test_messages_are_ordered_raw --deselect=distributed/tests/test_core.py::test_ports --deselect=distributed/tests/test_core.py::test_rpc_default --deselect=distributed/tests/test_core.py::test_rpc_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_default --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_tcp --deselect=distributed/tests/test_core.py::test_rpc_serialization --deselect=distributed/tests/test_core.py::test_rpc_tcp --deselect=distributed/tests/test_core.py::test_rpc_tls --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_inproc --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_tcp --deselect=distributed/tests/test_core.py::test_send_recv_args --deselect=distributed/tests/test_core.py::test_send_recv_cancelled --deselect=distributed/tests/test_core.py::test_server --deselect=distributed/tests/test_core.py::test_server_comms_mark_active_handlers --deselect=distributed/tests/test_core.py::test_server_raises_on_blocked_handlers --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_locks.py::test_errors --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_worker_uses_same_host_as_nanny --deselect=distributed/tests/test_preload.py::test_failure_doesnt_crash_scheduler --deselect=distributed/tests/test_preload.py::test_preload_import_time --deselect=distributed/tests/test_preload.py::test_preload_manager_sequence --deselect=distributed/tests/test_preload.py::test_worker_preload_text --deselect=distributed/tests/test_scheduler.py::test_allowed_failures_config --deselect=distributed/tests/test_scheduler.py::test_async_context_manager --deselect=distributed/tests/test_scheduler.py::test_dashboard_host --deselect=distributed/tests/test_scheduler.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_scheduler.py::test_finished --deselect=distributed/tests/test_scheduler.py::test_multiple_listeners --deselect=distributed/tests/test_scheduler.py::test_no_dangling_asyncio_tasks --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_security.py::test_require_encryption --deselect=distributed/tests/test_security.py::test_tls_listen_connect --deselect=distributed/tests/test_security.py::test_tls_temporary_credentials_functional --deselect=distributed/tests/test_semaphore.py::test_threadpoolworkers_pick_correct_ioloop --deselect=distributed/tests/test_tls_functional.py::test_security_dict_input_no_security --deselect=distributed/tests/test_utils_test.py::test_ensure_no_new_clients --deselect=distributed/tests/test_utils_test.py::test_freeze_batched_send 
--deselect=distributed/tests/test_utils_test.py::test_locked_comm_drop_in_replacement --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_read --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_write --deselect=distributed/tests/test_worker.py::test_host_uses_scheduler_protocol --deselect=distributed/tests/test_worker.py::test_plugin_exception --deselect=distributed/tests/test_worker.py::test_plugin_internal_exception --deselect=distributed/tests/test_worker.py::test_plugin_multiple_exceptions --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker_client.py::test_dont_override_default_get --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_allowlist --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_protocols --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers_2 --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command_default --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_config --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_file --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_remote_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_scheduler_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_contact_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_error_during_startup --deselect=distributed/cli/tests/test_dask_worker.py::test_integer_names --deselect=distributed/cli/tests/test_dask_worker.py::test_listen_address_ipv6 --deselect=distributed/cli/tests/test_dask_worker.py::test_local_directory --deselect=distributed/cli/tests/test_dask_worker.py::test_memory_limit --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range_too_many_workers_raises --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_no_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_auto --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_expands_name --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_negative --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_requires_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_preload_config --deselect=distributed/cli/tests/test_dask_worker.py::test_resources 
--deselect=distributed/cli/tests/test_dask_worker.py::test_respect_host_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_address_env --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_restart_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_stagger_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_signal_handling --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_works --deselect=distributed/cli/tests/test_dask_worker.py::test_timeout --deselect=distributed/cli/tests/test_dask_worker.py::test_worker_class --deselect=distributed/tests/test_config.py::test_logging_extended --deselect=distributed/tests/test_config.py::test_logging_file_config --deselect=distributed/tests/test_config.py::test_logging_mutual_exclusive --deselect=distributed/tests/test_config.py::test_logging_simple --deselect=distributed/tests/test_config.py::test_logging_simple_under_distributed --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_variable.py::test_variable_in_task 
-============================= test session starts ==============================
-platform linux -- Python 3.13.1, pytest-8.3.4, pluggy-1.5.0 -- /usr/bin/python3.13
-cachedir: .pytest_cache
-rootdir: /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build
-configfile: pyproject.toml
-plugins: timeout-2.3.1, rerunfailures-14.0, anyio-4.7.0, typeguard-4.4.1
-timeout: 300.0s
-timeout method: signal
-timeout func_only: False
-collecting ... collected 2 items
-run-last-failure: rerun previous 2 failures (skipped 147 files)
+collecting ... collected 1 item
+run-last-failure: rerun previous 1 failure (skipped 148 files)
 
-distributed/tests/test_tls_functional.py::test_nanny PASSED              [ 50%]
-distributed/tests/test_worker.py::test_log_remove_worker PASSED          [100%]
+distributed/tests/test_tls_functional.py::test_retire_workers PASSED     [100%]
 
 ============================= slowest 20 durations =============================
-5.95s call     distributed/tests/test_tls_functional.py::test_nanny
-0.64s call     distributed/tests/test_worker.py::test_log_remove_worker
-0.00s setup    distributed/tests/test_worker.py::test_log_remove_worker
-0.00s setup    distributed/tests/test_tls_functional.py::test_nanny
-0.00s teardown distributed/tests/test_tls_functional.py::test_nanny
-0.00s teardown distributed/tests/test_worker.py::test_log_remove_worker
-============================== 2 passed in 6.84s ===============================
-*** END OF RUN 4: ALL TESTS RUN HAVE NOW PASSED/XFAILED ***
+5.71s call     distributed/tests/test_tls_functional.py::test_retire_workers
+0.00s setup    distributed/tests/test_tls_functional.py::test_retire_workers
+0.00s teardown distributed/tests/test_tls_functional.py::test_retire_workers
+============================== 1 passed in 5.90s ===============================
+*** END OF RUN 3: ALL TESTS RUN HAVE NOW PASSED/XFAILED ***
    create-stamp debian/debhelper-build-stamp
    dh_testroot -O--buildsystem=pybuild
    dh_prep -O--buildsystem=pybuild
@@ -23229,8 +17297,8 @@
    dh_gencontrol -O--buildsystem=pybuild
    dh_md5sums -O--buildsystem=pybuild
    dh_builddeb -O--buildsystem=pybuild
-dpkg-deb: building package 'python3-distributed' in '../python3-distributed_2024.12.1+ds-1_all.deb'.
 dpkg-deb: building package 'python-distributed-doc' in '../python-distributed-doc_2024.12.1+ds-1_all.deb'.
+dpkg-deb: building package 'python3-distributed' in '../python3-distributed_2024.12.1+ds-1_all.deb'.
  dpkg-genbuildinfo --build=binary -O../dask.distributed_2024.12.1+ds-1_arm64.buildinfo
  dpkg-genchanges --build=binary -O../dask.distributed_2024.12.1+ds-1_arm64.changes
 dpkg-genchanges: info: binary-only upload (no source code included)
@@ -23239,12 +17307,14 @@
 dpkg-buildpackage: info: binary-only upload (no source included)
 dpkg-genchanges: info: including full source code in upload
 I: copying local configuration
+I: user script /srv/workspace/pbuilder/3233247/tmp/hooks/B01_cleanup starting
+I: user script /srv/workspace/pbuilder/3233247/tmp/hooks/B01_cleanup finished
 I: unmounting dev/ptmx filesystem
 I: unmounting dev/pts filesystem
 I: unmounting dev/shm filesystem
 I: unmounting proc filesystem
 I: unmounting sys filesystem
 I: cleaning the build env 
-I: removing directory /srv/workspace/pbuilder/1948021 and its subdirectories
-I: Current time: Tue Jan  7 20:35:38 -12 2025
-I: pbuilder-time-stamp: 1736325338
+I: removing directory /srv/workspace/pbuilder/3233247 and its subdirectories
+I: Current time: Wed Feb 11 05:50:39 +14 2026
+I: pbuilder-time-stamp: 1770738639