Diff of the two buildlogs: -- --- b1/build.log 2022-03-27 17:58:01.956464188 +0000 +++ b2/build.log 2022-03-27 18:37:33.942644492 +0000 @@ -1,6 +1,6 @@ I: pbuilder: network access will be disabled during build -I: Current time: Sat Apr 29 11:55:19 -12 2023 -I: pbuilder-time-stamp: 1682812519 +I: Current time: Mon Mar 28 07:58:05 +14 2022 +I: pbuilder-time-stamp: 1648403885 I: Building the build Environment I: extracting base tarball [/var/cache/pbuilder/buster-reproducible-base.tgz] I: copying local configuration @@ -18,7 +18,7 @@ I: Extracting source gpgv: unknown type of key resource 'trustedkeys.kbx' gpgv: keyblock resource '/root/.gnupg/trustedkeys.kbx': General error -gpgv: Signature made Wed Feb 16 09:55:51 2022 -12 +gpgv: Signature made Thu Feb 17 11:55:51 2022 +14 gpgv: using RSA key C2FE4BD271C139B86C533E461E953E27D4311E58 gpgv: Can't check signature: No public key dpkg-source: warning: failed to verify signature on ./redis_5.0.14-1+deb10u2.dsc @@ -36,138 +36,172 @@ dpkg-source: info: applying debian-packaging/0008-Ensure-we-use-the-modules-for-third-party-libraries.patch I: using fakeroot in build. I: Installing the build-deps -I: user script /srv/workspace/pbuilder/8321/tmp/hooks/D02_print_environment starting +I: user script /srv/workspace/pbuilder/2597/tmp/hooks/D01_modify_environment starting +debug: Running on ionos2-i386. +I: Changing host+domainname to test build reproducibility +I: Adding a custom variable just for the fun of it... +I: Changing /bin/sh to bash +Removing 'diversion of /bin/sh to /bin/sh.distrib by dash' +Adding 'diversion of /bin/sh to /bin/sh.distrib by bash' +Removing 'diversion of /usr/share/man/man1/sh.1.gz to /usr/share/man/man1/sh.distrib.1.gz by dash' +Adding 'diversion of /usr/share/man/man1/sh.1.gz to /usr/share/man/man1/sh.distrib.1.gz by bash' +I: Setting pbuilder2's login shell to /bin/bash +I: Setting pbuilder2's GECOS to second user,second room,second work-phone,second home-phone,second other +I: user script /srv/workspace/pbuilder/2597/tmp/hooks/D01_modify_environment finished +I: user script /srv/workspace/pbuilder/2597/tmp/hooks/D02_print_environment starting I: set - BUILDDIR='/build' - BUILDUSERGECOS='first user,first room,first work-phone,first home-phone,first other' - BUILDUSERNAME='pbuilder1' - BUILD_ARCH='i386' - DEBIAN_FRONTEND='noninteractive' - DEB_BUILD_OPTIONS='buildinfo=+all reproducible=+all parallel=16' - DISTRIBUTION='' - HOME='/root' - HOST_ARCH='i386' + BASH=/bin/sh + BASHOPTS=checkwinsize:cmdhist:complete_fullquote:extquote:force_fignore:globasciiranges:hostcomplete:interactive_comments:progcomp:promptvars:sourcepath + BASH_ALIASES=() + BASH_ARGC=() + BASH_ARGV=() + BASH_CMDS=() + BASH_LINENO=([0]="12" [1]="0") + BASH_SOURCE=([0]="/tmp/hooks/D02_print_environment" [1]="/tmp/hooks/D02_print_environment") + BASH_VERSINFO=([0]="5" [1]="0" [2]="3" [3]="1" [4]="release" [5]="i686-pc-linux-gnu") + BASH_VERSION='5.0.3(1)-release' + BUILDDIR=/build + BUILDUSERGECOS='second user,second room,second work-phone,second home-phone,second other' + BUILDUSERNAME=pbuilder2 + BUILD_ARCH=i386 + DEBIAN_FRONTEND=noninteractive + DEB_BUILD_OPTIONS='buildinfo=+all reproducible=+all parallel=7' + DIRSTACK=() + DISTRIBUTION= + EUID=0 + FUNCNAME=([0]="Echo" [1]="main") + GROUPS=() + HOME=/root + HOSTNAME=i-capture-the-hostname + HOSTTYPE=i686 + HOST_ARCH=i386 IFS=' ' - INVOCATION_ID='5c0f9187b3b24345bfd965b93b1a379d' - LANG='C' - LANGUAGE='en_US:en' - LC_ALL='C' - LD_LIBRARY_PATH='/usr/lib/libeatmydata' - LD_PRELOAD='libeatmydata.so' - 
MAIL='/var/mail/root' - OPTIND='1' - PATH='/usr/sbin:/usr/bin:/sbin:/bin:/usr/games' - PBCURRENTCOMMANDLINEOPERATION='build' - PBUILDER_OPERATION='build' - PBUILDER_PKGDATADIR='/usr/share/pbuilder' - PBUILDER_PKGLIBDIR='/usr/lib/pbuilder' - PBUILDER_SYSCONFDIR='/etc' - PPID='8321' - PS1='# ' - PS2='> ' + INVOCATION_ID=733d84e1b90d445a9539dcd223f5e41d + LANG=C + LANGUAGE=de_CH:de + LC_ALL=C + LD_LIBRARY_PATH=/usr/lib/libeatmydata + LD_PRELOAD=libeatmydata.so + MACHTYPE=i686-pc-linux-gnu + MAIL=/var/mail/root + OPTERR=1 + OPTIND=1 + OSTYPE=linux-gnu + PATH=/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/i/capture/the/path + PBCURRENTCOMMANDLINEOPERATION=build + PBUILDER_OPERATION=build + PBUILDER_PKGDATADIR=/usr/share/pbuilder + PBUILDER_PKGLIBDIR=/usr/lib/pbuilder + PBUILDER_SYSCONFDIR=/etc + PIPESTATUS=([0]="0") + POSIXLY_CORRECT=y + PPID=2597 PS4='+ ' - PWD='/' - SHELL='/bin/bash' - SHLVL='2' - SUDO_COMMAND='/usr/bin/timeout -k 18.1h 18h /usr/bin/ionice -c 3 /usr/bin/nice /usr/sbin/pbuilder --build --configfile /srv/reproducible-results/rbuild-debian/tmp.s41oCvkXQ1/pbuilderrc_WvAz --hookdir /etc/pbuilder/first-build-hooks --debbuildopts -b --basetgz /var/cache/pbuilder/buster-reproducible-base.tgz --buildresult /srv/reproducible-results/rbuild-debian/tmp.s41oCvkXQ1/b1 --logfile b1/build.log redis_5.0.14-1+deb10u2.dsc' - SUDO_GID='112' - SUDO_UID='107' - SUDO_USER='jenkins' - TERM='unknown' - TZ='/usr/share/zoneinfo/Etc/GMT+12' - USER='root' - _='/usr/bin/systemd-run' - http_proxy='http://85.184.249.68:3128' + PWD=/ + SHELL=/bin/bash + SHELLOPTS=braceexpand:errexit:hashall:interactive-comments:posix + SHLVL=3 + SUDO_COMMAND='/usr/bin/timeout -k 24.1h 24h /usr/bin/ionice -c 3 /usr/bin/nice -n 11 /usr/bin/unshare --uts -- /usr/sbin/pbuilder --build --configfile /srv/reproducible-results/rbuild-debian/tmp.s41oCvkXQ1/pbuilderrc_BSES --hookdir /etc/pbuilder/rebuild-hooks --debbuildopts -b --basetgz /var/cache/pbuilder/buster-reproducible-base.tgz --buildresult /srv/reproducible-results/rbuild-debian/tmp.s41oCvkXQ1/b2 --logfile b2/build.log redis_5.0.14-1+deb10u2.dsc' + SUDO_GID=112 + SUDO_UID=107 + SUDO_USER=jenkins + TERM=unknown + TZ=/usr/share/zoneinfo/Etc/GMT-14 + UID=0 + USER=root + _='I: set' + http_proxy=http://78.137.99.97:3128 I: uname -a - Linux ionos16-i386 5.10.0-12-amd64 #1 SMP Debian 5.10.103-1 (2022-03-07) x86_64 GNU/Linux + Linux i-capture-the-hostname 5.10.0-12-686-pae #1 SMP Debian 5.10.103-1 (2022-03-07) i686 GNU/Linux I: ls -l /bin total 5476 - -rwxr-xr-x 1 root root 1302248 Apr 17 2019 bash - -rwxr-xr-x 3 root root 38280 Jul 10 2019 bunzip2 - -rwxr-xr-x 3 root root 38280 Jul 10 2019 bzcat - lrwxrwxrwx 1 root root 6 Jul 10 2019 bzcmp -> bzdiff - -rwxr-xr-x 1 root root 2227 Jul 10 2019 bzdiff - lrwxrwxrwx 1 root root 6 Jul 10 2019 bzegrep -> bzgrep - -rwxr-xr-x 1 root root 4877 Jun 24 2019 bzexe - lrwxrwxrwx 1 root root 6 Jul 10 2019 bzfgrep -> bzgrep - -rwxr-xr-x 1 root root 3641 Jul 10 2019 bzgrep - -rwxr-xr-x 3 root root 38280 Jul 10 2019 bzip2 - -rwxr-xr-x 1 root root 17768 Jul 10 2019 bzip2recover - lrwxrwxrwx 1 root root 6 Jul 10 2019 bzless -> bzmore - -rwxr-xr-x 1 root root 1297 Jul 10 2019 bzmore - -rwxr-xr-x 1 root root 38692 Feb 28 2019 cat - -rwxr-xr-x 1 root root 75588 Feb 28 2019 chgrp - -rwxr-xr-x 1 root root 63268 Feb 28 2019 chmod - -rwxr-xr-x 1 root root 75588 Feb 28 2019 chown - -rwxr-xr-x 1 root root 153732 Feb 28 2019 cp - -rwxr-xr-x 1 root root 132820 Jan 17 2019 dash - -rwxr-xr-x 1 root root 120676 Feb 28 2019 date - -rwxr-xr-x 1 root root 92040 Feb 28 2019 
dd - -rwxr-xr-x 1 root root 100620 Feb 28 2019 df - -rwxr-xr-x 1 root root 149736 Feb 28 2019 dir - -rwxr-xr-x 1 root root 79412 Jan 9 2019 dmesg - lrwxrwxrwx 1 root root 8 Sep 26 2018 dnsdomainname -> hostname - lrwxrwxrwx 1 root root 8 Sep 26 2018 domainname -> hostname - -rwxr-xr-x 1 root root 34532 Feb 28 2019 echo - -rwxr-xr-x 1 root root 28 Jan 7 2019 egrep - -rwxr-xr-x 1 root root 34532 Feb 28 2019 false - -rwxr-xr-x 1 root root 28 Jan 7 2019 fgrep - -rwxr-xr-x 1 root root 67700 Jan 9 2019 findmnt - -rwsr-xr-x 1 root root 30112 Apr 22 2020 fusermount - -rwxr-xr-x 1 root root 206392 Jan 7 2019 grep - -rwxr-xr-x 2 root root 2345 Jan 5 2019 gunzip - -rwxr-xr-x 1 root root 6375 Jan 5 2019 gzexe - -rwxr-xr-x 1 root root 100952 Jan 5 2019 gzip - -rwxr-xr-x 1 root root 21916 Sep 26 2018 hostname - -rwxr-xr-x 1 root root 79752 Feb 28 2019 ln - -rwxr-xr-x 1 root root 55400 Jul 26 2018 login - -rwxr-xr-x 1 root root 149736 Feb 28 2019 ls - -rwxr-xr-x 1 root root 112032 Jan 9 2019 lsblk - -rwxr-xr-x 1 root root 87972 Feb 28 2019 mkdir - -rwxr-xr-x 1 root root 79748 Feb 28 2019 mknod - -rwxr-xr-x 1 root root 46916 Feb 28 2019 mktemp - -rwxr-xr-x 1 root root 42348 Jan 9 2019 more - -rwsr-xr-x 1 root root 50592 Jan 9 2019 mount - -rwxr-xr-x 1 root root 13724 Jan 9 2019 mountpoint - -rwxr-xr-x 1 root root 157832 Feb 28 2019 mv - lrwxrwxrwx 1 root root 8 Sep 26 2018 nisdomainname -> hostname - lrwxrwxrwx 1 root root 14 Feb 14 2019 pidof -> /sbin/killall5 - -rwxr-xr-x 1 root root 38660 Feb 28 2019 pwd - lrwxrwxrwx 1 root root 4 Apr 17 2019 rbash -> bash - -rwxr-xr-x 1 root root 46852 Feb 28 2019 readlink - -rwxr-xr-x 1 root root 75588 Feb 28 2019 rm - -rwxr-xr-x 1 root root 42756 Feb 28 2019 rmdir - -rwxr-xr-x 1 root root 22276 Jan 21 2019 run-parts - -rwxr-xr-x 1 root root 125036 Dec 22 2018 sed - lrwxrwxrwx 1 root root 4 Apr 9 02:46 sh -> dash - -rwxr-xr-x 1 root root 34532 Feb 28 2019 sleep - -rwxr-xr-x 1 root root 79652 Feb 28 2019 stty - -rwsr-xr-x 1 root root 71072 Jan 9 2019 su - -rwxr-xr-x 1 root root 34564 Feb 28 2019 sync - -rwxr-xr-x 1 root root 504024 Apr 23 2019 tar - -rwxr-xr-x 1 root root 13860 Jan 21 2019 tempfile - -rwxr-xr-x 1 root root 104292 Feb 28 2019 touch - -rwxr-xr-x 1 root root 34532 Feb 28 2019 true - -rwxr-xr-x 1 root root 17768 Apr 22 2020 ulockmgr_server - -rwsr-xr-x 1 root root 30108 Jan 9 2019 umount - -rwxr-xr-x 1 root root 34532 Feb 28 2019 uname - -rwxr-xr-x 2 root root 2345 Jan 5 2019 uncompress - -rwxr-xr-x 1 root root 149736 Feb 28 2019 vdir - -rwxr-xr-x 1 root root 34208 Jan 9 2019 wdctl - -rwxr-xr-x 1 root root 946 Jan 21 2019 which - lrwxrwxrwx 1 root root 8 Sep 26 2018 ypdomainname -> hostname - -rwxr-xr-x 1 root root 1983 Jan 5 2019 zcat - -rwxr-xr-x 1 root root 1677 Jan 5 2019 zcmp - -rwxr-xr-x 1 root root 5879 Jan 5 2019 zdiff - -rwxr-xr-x 1 root root 29 Jan 5 2019 zegrep - -rwxr-xr-x 1 root root 29 Jan 5 2019 zfgrep - -rwxr-xr-x 1 root root 2080 Jan 5 2019 zforce - -rwxr-xr-x 1 root root 7584 Jan 5 2019 zgrep - -rwxr-xr-x 1 root root 2205 Jan 5 2019 zless - -rwxr-xr-x 1 root root 1841 Jan 5 2019 zmore - -rwxr-xr-x 1 root root 4552 Jan 5 2019 znew -I: user script /srv/workspace/pbuilder/8321/tmp/hooks/D02_print_environment finished + -rwxr-xr-x 1 root root 1302248 Apr 18 2019 bash + -rwxr-xr-x 3 root root 38280 Jul 11 2019 bunzip2 + -rwxr-xr-x 3 root root 38280 Jul 11 2019 bzcat + lrwxrwxrwx 1 root root 6 Jul 11 2019 bzcmp -> bzdiff + -rwxr-xr-x 1 root root 2227 Jul 11 2019 bzdiff + lrwxrwxrwx 1 root root 6 Jul 11 2019 bzegrep -> bzgrep + -rwxr-xr-x 1 
root root 4877 Jun 25 2019 bzexe + lrwxrwxrwx 1 root root 6 Jul 11 2019 bzfgrep -> bzgrep + -rwxr-xr-x 1 root root 3641 Jul 11 2019 bzgrep + -rwxr-xr-x 3 root root 38280 Jul 11 2019 bzip2 + -rwxr-xr-x 1 root root 17768 Jul 11 2019 bzip2recover + lrwxrwxrwx 1 root root 6 Jul 11 2019 bzless -> bzmore + -rwxr-xr-x 1 root root 1297 Jul 11 2019 bzmore + -rwxr-xr-x 1 root root 38692 Mar 1 2019 cat + -rwxr-xr-x 1 root root 75588 Mar 1 2019 chgrp + -rwxr-xr-x 1 root root 63268 Mar 1 2019 chmod + -rwxr-xr-x 1 root root 75588 Mar 1 2019 chown + -rwxr-xr-x 1 root root 153732 Mar 1 2019 cp + -rwxr-xr-x 1 root root 132820 Jan 18 2019 dash + -rwxr-xr-x 1 root root 120676 Mar 1 2019 date + -rwxr-xr-x 1 root root 92040 Mar 1 2019 dd + -rwxr-xr-x 1 root root 100620 Mar 1 2019 df + -rwxr-xr-x 1 root root 149736 Mar 1 2019 dir + -rwxr-xr-x 1 root root 79412 Jan 10 2019 dmesg + lrwxrwxrwx 1 root root 8 Sep 27 2018 dnsdomainname -> hostname + lrwxrwxrwx 1 root root 8 Sep 27 2018 domainname -> hostname + -rwxr-xr-x 1 root root 34532 Mar 1 2019 echo + -rwxr-xr-x 1 root root 28 Jan 8 2019 egrep + -rwxr-xr-x 1 root root 34532 Mar 1 2019 false + -rwxr-xr-x 1 root root 28 Jan 8 2019 fgrep + -rwxr-xr-x 1 root root 67700 Jan 10 2019 findmnt + -rwsr-xr-x 1 root root 30112 Apr 23 2020 fusermount + -rwxr-xr-x 1 root root 206392 Jan 8 2019 grep + -rwxr-xr-x 2 root root 2345 Jan 6 2019 gunzip + -rwxr-xr-x 1 root root 6375 Jan 6 2019 gzexe + -rwxr-xr-x 1 root root 100952 Jan 6 2019 gzip + -rwxr-xr-x 1 root root 21916 Sep 27 2018 hostname + -rwxr-xr-x 1 root root 79752 Mar 1 2019 ln + -rwxr-xr-x 1 root root 55400 Jul 27 2018 login + -rwxr-xr-x 1 root root 149736 Mar 1 2019 ls + -rwxr-xr-x 1 root root 112032 Jan 10 2019 lsblk + -rwxr-xr-x 1 root root 87972 Mar 1 2019 mkdir + -rwxr-xr-x 1 root root 79748 Mar 1 2019 mknod + -rwxr-xr-x 1 root root 46916 Mar 1 2019 mktemp + -rwxr-xr-x 1 root root 42348 Jan 10 2019 more + -rwsr-xr-x 1 root root 50592 Jan 10 2019 mount + -rwxr-xr-x 1 root root 13724 Jan 10 2019 mountpoint + -rwxr-xr-x 1 root root 157832 Mar 1 2019 mv + lrwxrwxrwx 1 root root 8 Sep 27 2018 nisdomainname -> hostname + lrwxrwxrwx 1 root root 14 Feb 15 2019 pidof -> /sbin/killall5 + -rwxr-xr-x 1 root root 38660 Mar 1 2019 pwd + lrwxrwxrwx 1 root root 4 Apr 18 2019 rbash -> bash + -rwxr-xr-x 1 root root 46852 Mar 1 2019 readlink + -rwxr-xr-x 1 root root 75588 Mar 1 2019 rm + -rwxr-xr-x 1 root root 42756 Mar 1 2019 rmdir + -rwxr-xr-x 1 root root 22276 Jan 22 2019 run-parts + -rwxr-xr-x 1 root root 125036 Dec 23 2018 sed + lrwxrwxrwx 1 root root 4 Mar 28 07:58 sh -> bash + lrwxrwxrwx 1 root root 4 Mar 8 00:12 sh.distrib -> dash + -rwxr-xr-x 1 root root 34532 Mar 1 2019 sleep + -rwxr-xr-x 1 root root 79652 Mar 1 2019 stty + -rwsr-xr-x 1 root root 71072 Jan 10 2019 su + -rwxr-xr-x 1 root root 34564 Mar 1 2019 sync + -rwxr-xr-x 1 root root 504024 Apr 24 2019 tar + -rwxr-xr-x 1 root root 13860 Jan 22 2019 tempfile + -rwxr-xr-x 1 root root 104292 Mar 1 2019 touch + -rwxr-xr-x 1 root root 34532 Mar 1 2019 true + -rwxr-xr-x 1 root root 17768 Apr 23 2020 ulockmgr_server + -rwsr-xr-x 1 root root 30108 Jan 10 2019 umount + -rwxr-xr-x 1 root root 34532 Mar 1 2019 uname + -rwxr-xr-x 2 root root 2345 Jan 6 2019 uncompress + -rwxr-xr-x 1 root root 149736 Mar 1 2019 vdir + -rwxr-xr-x 1 root root 34208 Jan 10 2019 wdctl + -rwxr-xr-x 1 root root 946 Jan 22 2019 which + lrwxrwxrwx 1 root root 8 Sep 27 2018 ypdomainname -> hostname + -rwxr-xr-x 1 root root 1983 Jan 6 2019 zcat + -rwxr-xr-x 1 root root 1677 Jan 6 2019 zcmp + -rwxr-xr-x 1 
root root 5879 Jan 6 2019 zdiff + -rwxr-xr-x 1 root root 29 Jan 6 2019 zegrep + -rwxr-xr-x 1 root root 29 Jan 6 2019 zfgrep + -rwxr-xr-x 1 root root 2080 Jan 6 2019 zforce + -rwxr-xr-x 1 root root 7584 Jan 6 2019 zgrep + -rwxr-xr-x 1 root root 2205 Jan 6 2019 zless + -rwxr-xr-x 1 root root 1841 Jan 6 2019 zmore + -rwxr-xr-x 1 root root 4552 Jan 6 2019 znew +I: user script /srv/workspace/pbuilder/2597/tmp/hooks/D02_print_environment finished -> Attempting to satisfy build-dependencies -> Creating pbuilder-satisfydepends-dummy package Package: pbuilder-satisfydepends-dummy @@ -270,7 +304,7 @@ Get: 51 http://deb.debian.org/debian buster/main i386 lua-cjson-dev i386 2.1.0+dfsg-2.1 [31.7 kB] Get: 52 http://deb.debian.org/debian buster/main i386 tcl8.6 i386 8.6.9+dfsg-2 [123 kB] Get: 53 http://deb.debian.org/debian buster/main i386 tcl i386 8.6.9+1 [5636 B] -Fetched 22.7 MB in 1s (39.7 MB/s) +Fetched 22.7 MB in 1s (20.8 MB/s) debconf: delaying package configuration, since apt-utils is not installed Selecting previously unselected package libbsd0:i386. (Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 19234 files and directories currently installed.) @@ -506,7 +540,8 @@ fakeroot is already the newest version (1.23-1). 0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. 
I: Building the package -I: Running cd /build/redis-5.0.14/ && env PATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/games" HOME="/nonexistent/first-build" dpkg-buildpackage -us -uc -b && env PATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/games" HOME="/nonexistent/first-build" dpkg-genchanges -S > ../redis_5.0.14-1+deb10u2_source.changes +hostname: Name or service not known +I: Running cd /build/redis-5.0.14/ && env PATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/i/capture/the/path" HOME="/nonexistent/second-build" dpkg-buildpackage -us -uc -b && env PATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/i/capture/the/path" HOME="/nonexistent/second-build" dpkg-genchanges -S > ../redis_5.0.14-1+deb10u2_source.changes dpkg-buildpackage: info: source package redis dpkg-buildpackage: info: source version 5:5.0.14-1+deb10u2 dpkg-buildpackage: info: source distribution buster-security @@ -519,7 +554,7 @@ debian/rules override_dh_auto_clean make[1]: Entering directory '/build/redis-5.0.14' dh_auto_clean - make -j16 distclean + make -j7 distclean make[2]: Entering directory '/build/redis-5.0.14' cd src && make distclean make[3]: Entering directory '/build/redis-5.0.14/src' @@ -554,13 +589,13 @@ echo 'luaL_dostring(lua, "module = nil; require = nil; package = nil");' >>debian/lua_libs_debian.c # Build the non-bundled Lua libraries dh_auto_build --sourcedirectory=deps/lua/src -- lua_struct.o lua_cmsgpack.o - cd deps/lua/src && make -j16 "INSTALL=install --strip-program=true" lua_struct.o lua_cmsgpack.o + cd deps/lua/src && make -j7 "INSTALL=install --strip-program=true" lua_struct.o lua_cmsgpack.o make[2]: Entering directory '/build/redis-5.0.14/deps/lua/src' cc -O2 -Wall -Wdate-time -D_FORTIFY_SOURCE=2 -c -o lua_struct.o lua_struct.c cc -O2 -Wall -Wdate-time -D_FORTIFY_SOURCE=2 -c -o lua_cmsgpack.o lua_cmsgpack.c make[2]: Leaving directory '/build/redis-5.0.14/deps/lua/src' dh_auto_build --parallel -- V=1 USE_SYSTEM_JEMALLOC=yes USE_SYSTEM_LUA=yes USE_SYSTEM_HIREDIS=yes - make -j16 "INSTALL=install --strip-program=true" V=1 USE_SYSTEM_JEMALLOC=yes USE_SYSTEM_LUA=yes USE_SYSTEM_HIREDIS=yes + make -j7 "INSTALL=install --strip-program=true" V=1 USE_SYSTEM_JEMALLOC=yes USE_SYSTEM_LUA=yes USE_SYSTEM_HIREDIS=yes make[2]: Entering directory '/build/redis-5.0.14' cd src && make all make[3]: Entering directory '/build/redis-5.0.14/src' @@ -636,8 +671,6 @@ cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c bio.c cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c rio.c cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. 
-fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c rand.c -cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c memtest.c -cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c crc64.c In file included from scripting.c:36: ../debian/lua_libs_debian.c: In function 'luaLoadLibraries': /usr/include/lua5.1/lauxlib.h:115:25: warning: value computed is not used [-Wunused-value] @@ -646,6 +679,8 @@ ../debian/lua_libs_debian.c:7:1: note: in expansion of macro 'luaL_dostring' luaL_dostring(lua, "module = nil; require = nil; package = nil"); ^~~~~~~~~~~~~ +cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c memtest.c +cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c crc64.c cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c bitops.c cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c sentinel.c cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c notify.c @@ -669,8 +704,6 @@ cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. 
-fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c rax.c cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c t_stream.c cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c listpack.c -cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c localtime.c -cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c lolwut.c listpack.c: In function 'lpSeek': listpack.c:771:19: warning: comparison of integer expressions of different signedness: 'long int' and 'uint32_t' {aka 'unsigned int'} [-Wsign-compare] if (index >= numele) return NULL; /* Out of range the other side. */ @@ -678,6 +711,8 @@ listpack.c:774:19: warning: comparison of integer expressions of different signedness: 'long int' and 'uint32_t' {aka 'unsigned int'} [-Wsign-compare] if (index > numele/2) { ^ +cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c localtime.c +cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c lolwut.c cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c lolwut5.c cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. 
-fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c redis-cli.c cc -std=c99 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb -g -O2 -ffile-prefix-map=/build/redis-5.0.14=. -fstack-protector-strong -Wformat -Werror=format-security -I../deps/linenoise -DUSE_JEMALLOC -I/usr/include/jemalloc/include -I/usr/include/lua5.1 -I/usr/include/hiredis -Wdate-time -D_FORTIFY_SOURCE=2 -c redis-benchmark.c @@ -685,9 +720,9 @@ cc -Wl,-z,relro -Wl,-z,now -Wl,-no-as-needed -ldl -latomic -llua5.1-cjson -llua5.1-bitop ../deps/lua/src/lua_struct.o ../deps/lua/src/lua_cmsgpack.o -g -ggdb -rdynamic -o redis-server adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o -llua5.1 -ljemalloc -lm -ldl -pthread -lrt -lhiredis install --strip-program=true redis-server redis-sentinel install --strip-program=true redis-server redis-check-rdb -install --strip-program=true redis-server redis-check-aof install: WARNING: ignoring --strip-program option as -s option was not specified install: WARNING: ignoring --strip-program option as -s option was not specified +install --strip-program=true redis-server redis-check-aof install: WARNING: ignoring --strip-program option as -s option was not specified cc -Wl,-z,relro -Wl,-z,now -Wl,-no-as-needed -ldl -latomic -llua5.1-cjson -llua5.1-bitop ../deps/lua/src/lua_struct.o ../deps/lua/src/lua_cmsgpack.o -g -ggdb -rdynamic -o redis-cli anet.o adlist.o dict.o redis-cli.o zmalloc.o release.o ae.o crc64.o siphash.o crc16.o ../deps/linenoise/linenoise.o -llua5.1 -ljemalloc -lm -ldl -pthread -lrt -lhiredis @@ -702,7 +737,7 @@ ./runtest --clients 1 || true Cleanup: may take some time... 
OK Starting test server at port 11111 -[ready]: 10158 +[ready]: 27862 Testing unit/printver Testing Redis version 5.0.14 (00000000) [1/50 done]: unit/printver (1 seconds) @@ -733,7 +768,7 @@ [ok]: MIGRATE with multiple keys: stress command rewriting [ok]: MIGRATE with multiple keys: delete just ack keys [ok]: MIGRATE AUTH: correct and wrong password cases -[2/50 done]: unit/dump (24 seconds) +[2/50 done]: unit/dump (32 seconds) Testing unit/auth [ok]: AUTH fails if there is no password configured server side [ok]: AUTH fails when a wrong password is given @@ -757,7 +792,7 @@ [ok]: Protocol desync regression test #2 [ok]: Protocol desync regression test #3 [ok]: Regression for a crash with blocking ops and pipelining -[4/50 done]: unit/protocol (0 seconds) +[4/50 done]: unit/protocol (1 seconds) Testing unit/keyspace [ok]: DEL against a single item [ok]: Vararg DEL @@ -812,7 +847,7 @@ [ok]: ZSCAN with PATTERN [ok]: ZSCAN scores: regression test for issue #2175 [ok]: SCAN regression test for issue #4906 -[6/50 done]: unit/scan (8 seconds) +[6/50 done]: unit/scan (16 seconds) Testing unit/type/string [ok]: SET and GET an item [ok]: SET and GET an empty item @@ -862,7 +897,7 @@ [ok]: Extended SET PX option [ok]: Extended SET using multiple options at once [ok]: GETRANGE with huge ranges, Github issue #1844 -[7/50 done]: unit/type/string (7 seconds) +[7/50 done]: unit/type/string (18 seconds) Testing unit/type/incr [ok]: INCR against non existing key [ok]: INCR against key created by incr itself @@ -1004,18 +1039,18 @@ [ok]: LREM starting from tail with negative count (2) - ziplist [ok]: LREM deleting objects that may be int encoded - ziplist [ok]: Regression for bug 593 - chaining BRPOPLPUSH with other blocking cmds -[9/50 done]: unit/type/list (12 seconds) +[9/50 done]: unit/type/list (15 seconds) Testing unit/type/list-2 [ok]: LTRIM stress testing - linkedlist [ok]: LTRIM stress testing - ziplist -[10/50 done]: unit/type/list-2 (14 seconds) +[10/50 done]: unit/type/list-2 (30 seconds) Testing unit/type/list-3 [ok]: Explicit regression for a list bug [ok]: Regression for quicklist #3343 bug [ok]: Stress tester for #3343-alike bugs [ok]: ziplist implementation: value encoding and backlink [ok]: ziplist implementation: encoding stress testing -[11/50 done]: unit/type/list-3 (20 seconds) +[11/50 done]: unit/type/list-3 (40 seconds) Testing unit/type/set [ok]: SADD, SCARD, SISMEMBER, SMEMBERS basics - regular set [ok]: SADD, SCARD, SISMEMBER, SMEMBERS basics - intset @@ -1088,7 +1123,7 @@ [ok]: SMOVE wrong dst key type [ok]: SMOVE with identical source and destination [ok]: intsets implementation stress testing -[12/50 done]: unit/type/set (6 seconds) +[12/50 done]: unit/type/set (13 seconds) Testing unit/type/zset [ok]: Check encoding - ziplist [ok]: ZSET basic ZADD and score update - ziplist @@ -1254,7 +1289,7 @@ [ok]: BZPOPMIN with variadic ZADD [ok]: BZPOPMIN with zero timeout should block indefinitely [ok]: ZSET skiplist order consistency when elements are moved -[13/50 done]: unit/type/zset (11 seconds) +[13/50 done]: unit/type/zset (27 seconds) Testing unit/type/hash [ok]: HSET/HLEN - Small hash creation [ok]: Is the small hash encoded with a ziplist? 
@@ -1313,8 +1348,7 @@ [ok]: Hash fuzzing #1 - 512 fields [ok]: Hash fuzzing #2 - 512 fields [ok]: Stress test the hash ziplist -> hashtable encoding conversion -[ok]: Test HINCRBYFLOAT for correct float representation (issue #2846) -[14/50 done]: unit/type/hash (5 seconds) +[14/50 done]: unit/type/hash (12 seconds) Testing unit/type/stream [ok]: XADD can add entries into a stream that XRANGE can fetch [ok]: XADD IDs are incremental @@ -1353,7 +1387,7 @@ [ok]: XSETID cannot SETID on non-existent key [ok]: Empty stream can be rewrite into AOF correctly [ok]: Stream can be rewrite into AOF correctly after XDEL lastid -[15/50 done]: unit/type/stream (17 seconds) +[15/50 done]: unit/type/stream (45 seconds) Testing unit/type/stream-cgroups [ok]: XGROUP CREATE: creation and duplicate group name detection [ok]: XGROUP CREATE: automatic stream creation fails without MKSTREAM @@ -1423,7 +1457,7 @@ [ok]: SORT speed, 100 element list BY hash field, 100 times [ok]: SORT speed, 100 element list directly, 100 times [ok]: SORT speed, 100 element list BY , 100 times -[17/50 done]: unit/sort (7 seconds) +[17/50 done]: unit/sort (19 seconds) Testing unit/expire [ok]: EXPIRE - set timeouts multiple times [ok]: EXPIRE - It should be still possible to read 'x' @@ -1449,7 +1483,7 @@ [ok]: 5 keys in, 5 keys out [ok]: EXPIRE with empty string as TTL should report an error [ok]: SET - use EX/PX option, TTL should not be reseted after loadaof -[18/50 done]: unit/expire (14 seconds) +[18/50 done]: unit/expire (15 seconds) Testing unit/other [ok]: SAVE - make sure there are all the types as values [ok]: FUZZ stresser with data model binary @@ -1465,7 +1499,7 @@ [ok]: APPEND fuzzing [ok]: FLUSHDB [ok]: Perform a final SAVE to leave a clean DB on disk -[19/50 done]: unit/other (8 seconds) +[19/50 done]: unit/other (18 seconds) Testing unit/multi [ok]: MUTLI / EXEC basics [ok]: DISCARD @@ -1496,7 +1530,7 @@ [ok]: MULTI / EXEC is propagated correctly (empty transaction) [ok]: MULTI / EXEC is propagated correctly (read-only commands) [ok]: MULTI / EXEC is propagated correctly (write command, no effect) -[20/50 done]: unit/multi (2 seconds) +[20/50 done]: unit/multi (3 seconds) Testing unit/quit [ok]: QUIT returns OK [ok]: Pipelined commands after QUIT must not be executed @@ -1505,8 +1539,6 @@ Testing unit/aofrw [ok]: AOF rewrite during write load: RDB preamble=yes [ok]: AOF rewrite during write load: RDB preamble=no -Waiting for process 15201 to exit... -Waiting for process 15201 to exit... 
[ok]: Turning off AOF kills the background writing child if any [ok]: AOF rewrite of list with quicklist encoding, string data [ok]: AOF rewrite of list with quicklist encoding, int data @@ -1524,11 +1556,11 @@ [ok]: AOF rewrite of zset with skiplist encoding, int data [ok]: BGREWRITEAOF is delayed if BGSAVE is in progress [ok]: BGREWRITEAOF is refused if already in progress -[22/50 done]: unit/aofrw (122 seconds) +[22/50 done]: unit/aofrw (87 seconds) Testing integration/block-repl [ok]: First server should have role slave after SLAVEOF [ok]: Test replication with blocking lists and sorted sets operations -[23/50 done]: integration/block-repl (26 seconds) +[23/50 done]: integration/block-repl (29 seconds) Testing integration/replication [ok]: Slave enters handshake [ok]: Slave is able to detect timeout during handshake @@ -1546,14 +1578,9 @@ [ok]: ROLE in master reports master with a slave [ok]: ROLE in slave reports slave in connected state [ok]: Connect multiple replicas at the same time (issue #141), diskless=no -Waiting for process 16225 to exit... [ok]: Connect multiple replicas at the same time (issue #141), diskless=yes -Waiting for process 16833 to exit... -Waiting for process 16824 to exit... -Waiting for process 16815 to exit... -Waiting for process 16801 to exit... [ok]: Master stream is correctly processed while the replica has a script in -BUSY state -[24/50 done]: integration/replication (187 seconds) +[24/50 done]: integration/replication (199 seconds) Testing integration/replication-2 [ok]: First server should have role slave after SLAVEOF [ok]: If min-slaves-to-write is honored, write is accepted @@ -1562,7 +1589,7 @@ [ok]: No write if min-slaves-max-lag is > of the slave lag [ok]: min-slaves-to-write is ignored by slaves [ok]: MASTER and SLAVE dataset should be identical after complex ops -[25/50 done]: integration/replication-2 (14 seconds) +[25/50 done]: integration/replication-2 (17 seconds) Testing integration/replication-3 [ok]: First server should have role slave after SLAVEOF [ok]: MASTER and SLAVE consistency with expire @@ -1570,7 +1597,7 @@ [ok]: First server should have role slave after SLAVEOF [ok]: MASTER and SLAVE consistency with EVALSHA replication [ok]: SLAVE can reload "lua" AUX RDB fields of duplicated scripts -[26/50 done]: integration/replication-3 (27 seconds) +[26/50 done]: integration/replication-3 (39 seconds) Testing integration/replication-4 [ok]: First server should have role slave after SLAVEOF [ok]: Test replication with parallel clients writing in differnet DBs @@ -1581,7 +1608,7 @@ [ok]: First server should have role slave after SLAVEOF [ok]: Replication: commands with many arguments (issue #1221) [ok]: Replication of SPOP command -- alsoPropagate() API -[27/50 done]: integration/replication-4 (24 seconds) +[27/50 done]: integration/replication-4 (37 seconds) Testing integration/replication-psync [ok]: Slave should be able to synchronize with the master [ok]: Detect write load to master @@ -1613,7 +1640,7 @@ [ok]: Slave should be able to synchronize with the master [ok]: Detect write load to master [ok]: Test replication partial resync: backlog expired (diskless: yes, reconnect: 1) -[28/50 done]: integration/replication-psync (103 seconds) +[28/50 done]: integration/replication-psync (99 seconds) Testing integration/aof [ok]: Unfinished MULTI: Server should start if load-truncated is yes [ok]: Short read: Server should start if load-truncated is yes @@ -1635,7 +1662,7 @@ [ok]: AOF+EXPIRE: Server should have been started [ok]: AOF+EXPIRE: 
List should be empty [ok]: Redis should not try to convert DEL into EXPIREAT for EXPIRE -1 -[29/50 done]: integration/aof (3 seconds) +[29/50 done]: integration/aof (4 seconds) Testing integration/rdb [ok]: RDB encoding loading test [ok]: Server started empty with non-existing RDB file @@ -1654,34 +1681,34 @@ [32/50 done]: integration/logging (1 seconds) Testing integration/psync2 [ok]: PSYNC2: --- CYCLE 1 --- -[ok]: PSYNC2: [NEW LAYOUT] Set #2 as master -[ok]: PSYNC2: Set #4 to replicate from #2 +[ok]: PSYNC2: [NEW LAYOUT] Set #4 as master [ok]: PSYNC2: Set #1 to replicate from #4 -[ok]: PSYNC2: Set #0 to replicate from #4 +[ok]: PSYNC2: Set #2 to replicate from #1 [ok]: PSYNC2: Set #3 to replicate from #1 +[ok]: PSYNC2: Set #0 to replicate from #4 [ok]: PSYNC2: cluster is consistent after failover [ok]: PSYNC2: generate load while killing replication links -[ok]: PSYNC2: cluster is consistent after load (x = 69559) +[ok]: PSYNC2: cluster is consistent after load (x = 28442) [ok]: PSYNC2: total sum of full synchronizations is exactly 4 [ok]: PSYNC2: --- CYCLE 2 --- -[ok]: PSYNC2: [NEW LAYOUT] Set #3 as master -[ok]: PSYNC2: Set #4 to replicate from #3 +[ok]: PSYNC2: [NEW LAYOUT] Set #4 as master +[ok]: PSYNC2: Set #2 to replicate from #4 [ok]: PSYNC2: Set #0 to replicate from #4 -[ok]: PSYNC2: Set #2 to replicate from #0 +[ok]: PSYNC2: Set #3 to replicate from #4 [ok]: PSYNC2: Set #1 to replicate from #2 [ok]: PSYNC2: cluster is consistent after failover [ok]: PSYNC2: generate load while killing replication links -[ok]: PSYNC2: cluster is consistent after load (x = 156276) +[ok]: PSYNC2: cluster is consistent after load (x = 54118) [ok]: PSYNC2: total sum of full synchronizations is exactly 4 [ok]: PSYNC2: --- CYCLE 3 --- [ok]: PSYNC2: [NEW LAYOUT] Set #4 as master -[ok]: PSYNC2: Set #1 to replicate from #4 [ok]: PSYNC2: Set #0 to replicate from #4 +[ok]: PSYNC2: Set #1 to replicate from #0 [ok]: PSYNC2: Set #2 to replicate from #1 -[ok]: PSYNC2: Set #3 to replicate from #2 +[ok]: PSYNC2: Set #3 to replicate from #0 [ok]: PSYNC2: cluster is consistent after failover [ok]: PSYNC2: generate load while killing replication links -[ok]: PSYNC2: cluster is consistent after load (x = 225533) +[ok]: PSYNC2: cluster is consistent after load (x = 84301) [ok]: PSYNC2: total sum of full synchronizations is exactly 4 [ok]: PSYNC2: Bring the master back again for next test [ok]: PSYNC2: Partial resync after restart using RDB aux fields @@ -1696,18 +1723,12 @@ [ok]: PSYNC2 #3899 regression: kill first replica [ok]: PSYNC2 #3899 regression: kill first replica [ok]: PSYNC2 #3899 regression: kill chained replica -[ok]: PSYNC2 #3899 regression: kill first replica -[ok]: PSYNC2 #3899 regression: kill first replica -[ok]: PSYNC2 #3899 regression: kill first replica [ok]: PSYNC2 #3899 regression: kill chained replica -[ok]: PSYNC2 #3899 regression: kill first replica -[ok]: PSYNC2 #3899 regression: kill first replica -[ok]: PSYNC2 #3899 regression: kill first replica -[ok]: PSYNC2 #3899 regression: kill first replica [ok]: PSYNC2 #3899 regression: kill chained replica [ok]: PSYNC2 #3899 regression: kill chained replica +[ok]: PSYNC2 #3899 regression: kill first replica [ok]: PSYNC2 #3899 regression: verify consistency -[34/50 done]: integration/psync2-reg (21 seconds) +[34/50 done]: integration/psync2-reg (22 seconds) Testing unit/pubsub [ok]: Pub/Sub PING [ok]: PUBLISH/SUBSCRIBE basics @@ -1835,7 +1856,7 @@ [ok]: PRNG is seeded randomly for command replication [ok]: Using side effects is not a problem with 
command replication [ok]: Test scripting debug protocol parsing -[37/50 done]: unit/scripting (6 seconds) +[37/50 done]: unit/scripting (7 seconds) Testing unit/maxmemory [ok]: Without maxmemory small integers are shared [ok]: With maxmemory and non-LRU policy integers are still shared @@ -1858,7 +1879,7 @@ [ok]: maxmemory - policy volatile-ttl should only remove volatile keys. [ok]: slave buffer are counted correctly [ok]: replica buffer don't induce eviction -[38/50 done]: unit/maxmemory (28 seconds) +[38/50 done]: unit/maxmemory (57 seconds) Testing unit/introspection [ok]: CLIENT LIST [ok]: MONITOR can log executed commands @@ -1879,15 +1900,15 @@ [ok]: command stats for BRPOP [ok]: command stats for MULTI [ok]: command stats for scripts -[40/50 done]: unit/introspection-2 (6 seconds) +[40/50 done]: unit/introspection-2 (7 seconds) Testing unit/limits [ok]: Check if maxclients works refusing connections -[41/50 done]: unit/limits (2 seconds) +[41/50 done]: unit/limits (1 seconds) Testing unit/obuf-limits [ok]: Client output buffer hard limit is enforced [ok]: Client output buffer soft limit is not enforced if time is not overreached [ok]: Client output buffer soft limit is enforced if time is overreached -[42/50 done]: unit/obuf-limits (24 seconds) +[42/50 done]: unit/obuf-limits (35 seconds) Testing unit/bitops [ok]: BITCOUNT returns 0 against non existing key [ok]: BITCOUNT returns 0 with out of range indexes @@ -1931,7 +1952,7 @@ [ok]: BITPOS bit=0 changes behavior if end is given [ok]: BITPOS bit=1 fuzzy testing using SETBIT [ok]: BITPOS bit=0 fuzzy testing using SETBIT -[43/50 done]: unit/bitops (1 seconds) +[43/50 done]: unit/bitops (4 seconds) Testing unit/bitfield [ok]: BITFIELD signed SET and GET basics [ok]: BITFIELD unsigned SET and GET basics @@ -1946,7 +1967,7 @@ [ok]: BITFIELD overflow wrap fuzzing [ok]: BITFIELD regression for #3221 [ok]: BITFIELD regression for #3564 -[44/50 done]: unit/bitfield (1 seconds) +[44/50 done]: unit/bitfield (2 seconds) Testing unit/geo [ok]: GEOADD create [ok]: GEOADD update @@ -1972,7 +1993,7 @@ [ok]: GEORANGE STOREDIST option: plain usage [ok]: GEORANGE STOREDIST option: COUNT ASC and DESC [ok]: GEOADD + GEORANGE randomized test -[45/50 done]: unit/geo (12 seconds) +[45/50 done]: unit/geo (24 seconds) Testing unit/memefficiency [ok]: Memory efficiency with values in range 32 [ok]: Memory efficiency with values in range 64 @@ -1981,7 +2002,7 @@ [ok]: Memory efficiency with values in range 16384 [ok]: Active defrag [ok]: Active defrag big keys -[46/50 done]: unit/memefficiency (21 seconds) +[46/50 done]: unit/memefficiency (60 seconds) Testing unit/hyperloglog [ok]: HyperLogLog self test passes [ok]: PFADD without arguments creates an HLL value @@ -2003,11 +2024,11 @@ [ok]: PFCOUNT multiple-keys merge returns cardinality of union #2 [ok]: PFDEBUG GETREG returns the HyperLogLog raw registers [ok]: PFADD / PFCOUNT cache invalidation works -[47/50 done]: unit/hyperloglog (29 seconds) +[47/50 done]: unit/hyperloglog (51 seconds) Testing unit/lazyfree [ok]: UNLINK can reclaim memory in background [ok]: FLUSHDB ASYNC can reclaim memory in background -[48/50 done]: unit/lazyfree (1 seconds) +[48/50 done]: unit/lazyfree (2 seconds) Testing unit/wait [ok]: Setup slave [ok]: WAIT should acknowledge 1 additional copy of the data @@ -2016,61 +2037,61 @@ [49/50 done]: unit/wait (6 seconds) Testing unit/pendingquerybuf [ok]: pending querybuf: check size of pending_querybuf after set a big value -[50/50 done]: unit/pendingquerybuf (5 seconds) +[50/50 
done]: unit/pendingquerybuf (9 seconds) The End Execution time of different units: 1 seconds - unit/printver - 24 seconds - unit/dump + 32 seconds - unit/dump 1 seconds - unit/auth - 0 seconds - unit/protocol + 1 seconds - unit/protocol 2 seconds - unit/keyspace - 8 seconds - unit/scan - 7 seconds - unit/type/string + 16 seconds - unit/scan + 18 seconds - unit/type/string 0 seconds - unit/type/incr - 12 seconds - unit/type/list - 14 seconds - unit/type/list-2 - 20 seconds - unit/type/list-3 - 6 seconds - unit/type/set - 11 seconds - unit/type/zset - 5 seconds - unit/type/hash - 17 seconds - unit/type/stream + 15 seconds - unit/type/list + 30 seconds - unit/type/list-2 + 40 seconds - unit/type/list-3 + 13 seconds - unit/type/set + 27 seconds - unit/type/zset + 12 seconds - unit/type/hash + 45 seconds - unit/type/stream 4 seconds - unit/type/stream-cgroups - 7 seconds - unit/sort - 14 seconds - unit/expire - 8 seconds - unit/other - 2 seconds - unit/multi + 19 seconds - unit/sort + 15 seconds - unit/expire + 18 seconds - unit/other + 3 seconds - unit/multi 0 seconds - unit/quit - 122 seconds - unit/aofrw - 26 seconds - integration/block-repl - 187 seconds - integration/replication - 14 seconds - integration/replication-2 - 27 seconds - integration/replication-3 - 24 seconds - integration/replication-4 - 103 seconds - integration/replication-psync - 3 seconds - integration/aof + 87 seconds - unit/aofrw + 29 seconds - integration/block-repl + 199 seconds - integration/replication + 17 seconds - integration/replication-2 + 39 seconds - integration/replication-3 + 37 seconds - integration/replication-4 + 99 seconds - integration/replication-psync + 4 seconds - integration/aof 1 seconds - integration/rdb 1 seconds - integration/convert-zipmap-hash-on-load 1 seconds - integration/logging 27 seconds - integration/psync2 - 21 seconds - integration/psync2-reg + 22 seconds - integration/psync2-reg 1 seconds - unit/pubsub 1 seconds - unit/slowlog - 6 seconds - unit/scripting - 28 seconds - unit/maxmemory + 7 seconds - unit/scripting + 57 seconds - unit/maxmemory 0 seconds - unit/introspection - 6 seconds - unit/introspection-2 - 2 seconds - unit/limits - 24 seconds - unit/obuf-limits - 1 seconds - unit/bitops - 1 seconds - unit/bitfield - 12 seconds - unit/geo - 21 seconds - unit/memefficiency - 29 seconds - unit/hyperloglog - 1 seconds - unit/lazyfree + 7 seconds - unit/introspection-2 + 1 seconds - unit/limits + 35 seconds - unit/obuf-limits + 4 seconds - unit/bitops + 2 seconds - unit/bitfield + 24 seconds - unit/geo + 60 seconds - unit/memefficiency + 51 seconds - unit/hyperloglog + 2 seconds - unit/lazyfree 6 seconds - unit/wait - 5 seconds - unit/pendingquerybuf + 9 seconds - unit/pendingquerybuf \o/ All tests passed without errors! 
@@ -2097,480 +2118,480 @@ Starting redis #18 at port 30018 Starting redis #19 at port 30019 Testing unit: 00-base.tcl -12:10:08> (init) Restart killed instances: OK -12:10:08> Cluster nodes are reachable: OK -12:10:08> Cluster nodes hard reset: OK -12:10:08> Cluster Join and auto-discovery test: OK -12:10:12> Before slots allocation, all nodes report cluster failure: OK -12:10:12> Different nodes have different IDs: OK -12:10:12> It is possible to perform slot allocation: OK -12:10:12> After the join, every node gets a different config epoch: OK -12:10:12> Nodes should report cluster_state is ok now: OK -12:10:15> It is possible to write and read from the cluster: OK +08:18:39> (init) Restart killed instances: OK +08:18:39> Cluster nodes are reachable: OK +08:18:39> Cluster nodes hard reset: OK +08:18:39> Cluster Join and auto-discovery test: OK +08:18:43> Before slots allocation, all nodes report cluster failure: OK +08:18:44> Different nodes have different IDs: OK +08:18:44> It is possible to perform slot allocation: OK +08:18:44> After the join, every node gets a different config epoch: OK +08:18:45> Nodes should report cluster_state is ok now: OK +08:18:48> It is possible to write and read from the cluster: OK Testing unit: 01-faildet.tcl -12:10:15> (init) Restart killed instances: OK -12:10:15> Cluster nodes are reachable: OK -12:10:15> Cluster nodes hard reset: OK -12:10:15> Cluster Join and auto-discovery test: OK -12:10:19> Before slots allocation, all nodes report cluster failure: OK -12:10:19> Create a 5 nodes cluster: OK -12:10:22> Cluster should start ok: OK -12:10:22> Killing two slave nodes: OK -12:10:22> Cluster should be still up: OK -12:10:22> Killing one master node: OK -12:10:22> Cluster should be down now: OK -12:10:26> Restarting master node: OK -12:10:26> Cluster should be up again: OK +08:18:48> (init) Restart killed instances: OK +08:18:48> Cluster nodes are reachable: OK +08:18:48> Cluster nodes hard reset: OK +08:18:48> Cluster Join and auto-discovery test: OK +08:18:52> Before slots allocation, all nodes report cluster failure: OK +08:18:52> Create a 5 nodes cluster: OK +08:18:56> Cluster should start ok: OK +08:18:56> Killing two slave nodes: OK +08:18:56> Cluster should be still up: OK +08:18:56> Killing one master node: OK +08:18:57> Cluster should be down now: OK +08:19:00> Restarting master node: OK +08:19:00> Cluster should be up again: OK Testing unit: 02-failover.tcl -12:10:32> (init) Restart killed instances: redis/5 redis/6 OK -12:10:32> Cluster nodes are reachable: OK -12:10:32> Cluster nodes hard reset: OK -12:10:32> Cluster Join and auto-discovery test: OK -12:10:36> Before slots allocation, all nodes report cluster failure: OK -12:10:36> Create a 5 nodes cluster: OK -12:10:40> Cluster is up: OK -12:10:40> Cluster is writable: OK -12:10:40> Instance #5 is a slave: OK -12:10:40> Instance #5 synced with the master: OK -12:10:40> Killing one master node: OK -12:10:40> Wait for failover: OK -12:10:44> Cluster should eventually be up again: OK -12:10:44> Cluster is writable: OK -12:10:45> Instance #5 is now a master: OK -12:10:45> Restarting the previously killed master node: OK -12:10:45> Instance #0 gets converted into a slave: OK +08:19:07> (init) Restart killed instances: redis/5 redis/6 OK +08:19:07> Cluster nodes are reachable: OK +08:19:07> Cluster nodes hard reset: OK +08:19:07> Cluster Join and auto-discovery test: OK +08:19:10> Before slots allocation, all nodes report cluster failure: OK +08:19:10> Create a 5 nodes cluster: OK +08:19:14> 
Cluster is up: OK +08:19:14> Cluster is writable: OK +08:19:14> Instance #5 is a slave: OK +08:19:14> Instance #5 synced with the master: OK +08:19:14> Killing one master node: OK +08:19:14> Wait for failover: OK +08:19:18> Cluster should eventually be up again: OK +08:19:18> Cluster is writable: OK +08:19:19> Instance #5 is now a master: OK +08:19:19> Restarting the previously killed master node: OK +08:19:19> Instance #0 gets converted into a slave: OK Testing unit: 03-failover-loop.tcl -12:10:45> (init) Restart killed instances: OK -12:10:45> Cluster nodes are reachable: OK -12:10:45> Cluster nodes hard reset: OK -12:10:45> Cluster Join and auto-discovery test: OK -12:10:48> Before slots allocation, all nodes report cluster failure: OK -12:10:48> Create a 5 nodes cluster: OK -12:10:52> Cluster is up: OK +08:19:19> (init) Restart killed instances: OK +08:19:19> Cluster nodes are reachable: OK +08:19:19> Cluster nodes hard reset: OK +08:19:19> Cluster Join and auto-discovery test: OK +08:19:23> Before slots allocation, all nodes report cluster failure: OK +08:19:23> Create a 5 nodes cluster: OK +08:19:27> Cluster is up: OK --- Iteration 19 --- -12:10:52> Wait for slave of #3 to sync: OK -12:10:52> Cluster is writable before failover: OK -12:10:52> Killing node #3: OK -12:10:52> Wait failover by #8 with old epoch 4: OK -12:10:57> Cluster should eventually be up again: OK -12:10:57> Cluster is writable again: OK -12:10:57> Restarting node #3: OK -12:10:57> Instance #3 is now a slave: OK -12:10:57> We can read back the value we set before: OK +08:19:27> Wait for slave of #0 to sync: OK +08:19:27> Cluster is writable before failover: OK +08:19:27> Killing node #0: OK +08:19:28> Wait failover by #5 with old epoch 1: OK +08:19:32> Cluster should eventually be up again: OK +08:19:32> Cluster is writable again: OK +08:19:32> Restarting node #0: OK +08:19:32> Instance #0 is now a slave: OK +08:19:32> We can read back the value we set before: OK --- Iteration 18 --- -12:10:57> Cluster is writable before failover: OK -12:10:57> Killing node #3: OK -12:10:57> Cluster should eventually be up again: OK -12:10:57> Cluster is writable again: OK -12:10:57> Restarting node #3: OK -12:10:57> Instance #3 is now a slave: OK -12:10:57> We can read back the value we set before: OK +08:19:32> Wait for slave of #3 to sync: OK +08:19:32> Cluster is writable before failover: OK +08:19:32> Killing node #3: OK +08:19:32> Wait failover by #8 with old epoch 4: OK +08:19:37> Cluster should eventually be up again: OK +08:19:37> Cluster is writable again: OK +08:19:37> Restarting node #3: OK +08:19:37> Instance #3 is now a slave: OK +08:19:37> We can read back the value we set before: OK --- Iteration 17 --- -12:10:57> Cluster is writable before failover: OK -12:10:57> Killing node #5: OK -12:10:57> Cluster should eventually be up again: OK -12:10:57> Cluster is writable again: OK -12:10:57> Restarting node #5: OK -12:10:57> Instance #5 is now a slave: OK -12:10:57> We can read back the value we set before: OK +08:19:37> Cluster is writable before failover: OK +08:19:37> Killing node #6: OK +08:19:37> Cluster should eventually be up again: OK +08:19:37> Cluster is writable again: OK +08:19:37> Restarting node #6: OK +08:19:37> Instance #6 is now a slave: OK +08:19:37> We can read back the value we set before: OK --- Iteration 16 --- -12:10:57> Cluster is writable before failover: OK -12:10:57> Killing node #6: OK -12:10:57> Cluster should eventually be up again: OK -12:10:57> Cluster is writable again: OK -12:10:57> 
Restarting node #6: OK -12:10:57> Instance #6 is now a slave: OK -12:10:57> We can read back the value we set before: OK +08:19:37> Wait for slave of #8 to sync: OK +08:19:38> Cluster is writable before failover: OK +08:19:38> Killing node #8: OK +08:19:38> Wait failover by #3 with old epoch 22: OK +08:19:42> Cluster should eventually be up again: OK +08:19:42> Cluster is writable again: OK +08:19:43> Restarting node #8: OK +08:19:44> Instance #8 is now a slave: OK +08:19:44> We can read back the value we set before: OK --- Iteration 15 --- -12:10:57> Cluster is writable before failover: OK -12:10:57> Killing node #6: OK -12:10:57> Cluster should eventually be up again: OK -12:10:57> Cluster is writable again: OK -12:10:57> Restarting node #6: OK -12:10:57> Instance #6 is now a slave: OK -12:10:57> We can read back the value we set before: OK +08:19:46> Wait for slave of #5 to sync: OK +08:19:46> Cluster is writable before failover: OK +08:19:46> Killing node #5: OK +08:19:46> Wait failover by #0 with old epoch 21: OK +08:19:50> Cluster should eventually be up again: OK +08:19:51> Cluster is writable again: OK +08:19:51> Restarting node #5: OK +08:19:52> Instance #5 is now a slave: OK +08:19:52> We can read back the value we set before: OK --- Iteration 14 --- -12:10:58> Wait for slave of #0 to sync: OK -12:10:58> Cluster is writable before failover: OK -12:10:58> Killing node #0: OK -12:10:58> Wait failover by #5 with old epoch 1: OK -12:11:03> Cluster should eventually be up again: OK -12:11:03> Cluster is writable again: OK -12:11:03> Restarting node #0: OK -12:11:03> Instance #0 is now a slave: OK -12:11:03> We can read back the value we set before: OK +08:19:53> Wait for slave of #0 to sync: OK +08:19:53> Cluster is writable before failover: OK +08:19:54> Killing node #0: OK +08:19:54> Wait failover by #5 with old epoch 24: OK +08:19:58> Cluster should eventually be up again: OK +08:19:59> Cluster is writable again: OK +08:20:00> Restarting node #0: OK +08:20:00> Instance #0 is now a slave: OK +08:20:00> We can read back the value we set before: OK --- Iteration 13 --- -12:11:03> Cluster is writable before failover: OK -12:11:03> Killing node #0: OK -12:11:03> Cluster should eventually be up again: OK -12:11:03> Cluster is writable again: OK -12:11:03> Restarting node #0: OK -12:11:03> Instance #0 is now a slave: OK -12:11:03> We can read back the value we set before: OK +08:20:02> Wait for slave of #4 to sync: OK +08:20:02> Cluster is writable before failover: OK +08:20:02> Killing node #4: OK +08:20:02> Wait failover by #9 with old epoch 5: OK +08:20:06> Cluster should eventually be up again: OK +08:20:07> Cluster is writable again: OK +08:20:08> Restarting node #4: OK +08:20:08> Instance #4 is now a slave: OK +08:20:08> We can read back the value we set before: OK --- Iteration 12 --- -12:11:03> Wait for slave of #4 to sync: OK -12:11:03> Cluster is writable before failover: OK -12:11:03> Killing node #4: OK -12:11:03> Wait failover by #9 with old epoch 5: OK -12:11:07> Cluster should eventually be up again: OK -12:11:07> Cluster is writable again: OK -12:11:07> Restarting node #4: OK -12:11:07> Instance #4 is now a slave: OK -12:11:07> We can read back the value we set before: OK +08:20:09> Cluster is writable before failover: OK +08:20:10> Killing node #0: OK +08:20:10> Cluster should eventually be up again: OK +08:20:10> Cluster is writable again: OK +08:20:11> Restarting node #0: OK +08:20:11> Instance #0 is now a slave: OK +08:20:11> We can read back the value we set before: 
OK --- Iteration 11 --- -12:11:07> Wait for slave of #1 to sync: OK -12:11:07> Cluster is writable before failover: OK -12:11:07> Killing node #1: OK -12:11:07> Wait failover by #6 with old epoch 2: OK -12:11:11> Cluster should eventually be up again: OK -12:11:11> Cluster is writable again: OK -12:11:11> Restarting node #1: OK -12:11:11> Instance #1 is now a slave: OK -12:11:11> We can read back the value we set before: OK +08:20:13> Wait for slave of #5 to sync: OK +08:20:13> Cluster is writable before failover: OK +08:20:14> Killing node #5: OK +08:20:14> Wait failover by #0 with old epoch 25: OK +08:20:18> Cluster should eventually be up again: OK +08:20:18> Cluster is writable again: OK +08:20:19> Restarting node #5: OK +08:20:19> Instance #5 is now a slave: OK +08:20:19> We can read back the value we set before: OK --- Iteration 10 --- -12:11:11> Cluster is writable before failover: OK -12:11:11> Killing node #0: OK -12:11:12> Cluster should eventually be up again: OK -12:11:12> Cluster is writable again: OK -12:11:12> Restarting node #0: OK -12:11:12> Instance #0 is now a slave: OK -12:11:12> We can read back the value we set before: OK +08:20:21> Cluster is writable before failover: OK +08:20:21> Killing node #8: OK +08:20:21> Cluster should eventually be up again: OK +08:20:21> Cluster is writable again: OK +08:20:22> Restarting node #8: OK +08:20:22> Instance #8 is now a slave: OK +08:20:22> We can read back the value we set before: OK --- Iteration 9 --- -12:11:12> Cluster is writable before failover: OK -12:11:12> Killing node #3: OK -12:11:12> Cluster should eventually be up again: OK -12:11:12> Cluster is writable again: OK -12:11:12> Restarting node #3: OK -12:11:12> Instance #3 is now a slave: OK -12:11:12> We can read back the value we set before: OK +08:20:23> Wait for slave of #1 to sync: OK +08:20:23> Cluster is writable before failover: OK +08:20:24> Killing node #1: OK +08:20:24> Wait failover by #6 with old epoch 2: OK +08:20:28> Cluster should eventually be up again: OK +08:20:28> Cluster is writable again: OK +08:20:29> Restarting node #1: OK +08:20:29> Instance #1 is now a slave: OK +08:20:29> We can read back the value we set before: OK --- Iteration 8 --- -12:11:12> Wait for slave of #6 to sync: OK -12:11:12> Cluster is writable before failover: OK -12:11:12> Killing node #6: OK -12:11:13> Wait failover by #1 with old epoch 24: OK -12:11:17> Cluster should eventually be up again: OK -12:11:17> Cluster is writable again: OK -12:11:17> Restarting node #6: OK -12:11:17> Instance #6 is now a slave: OK -12:11:17> We can read back the value we set before: OK +08:20:30> Cluster is writable before failover: OK +08:20:31> Killing node #5: OK +08:20:31> Cluster should eventually be up again: OK +08:20:31> Cluster is writable again: OK +08:20:31> Restarting node #5: OK +08:20:32> Instance #5 is now a slave: OK +08:20:32> We can read back the value we set before: OK --- Iteration 7 --- -12:11:17> Wait for slave of #5 to sync: OK -12:11:17> Cluster is writable before failover: OK -12:11:17> Killing node #5: OK -12:11:17> Wait failover by #0 with old epoch 22: OK -12:11:21> Cluster should eventually be up again: OK -12:11:21> Cluster is writable again: OK -12:11:21> Restarting node #5: OK -12:11:21> Instance #5 is now a slave: OK -12:11:21> We can read back the value we set before: OK +08:20:33> Cluster is writable before failover: OK +08:20:33> Killing node #5: OK +08:20:33> Cluster should eventually be up again: OK +08:20:34> Cluster is writable again: OK +08:20:34> 
Restarting node #5: OK +08:20:34> Instance #5 is now a slave: OK +08:20:34> We can read back the value we set before: OK --- Iteration 6 --- -12:11:21> Cluster is writable before failover: OK -12:11:21> Killing node #4: OK -12:11:21> Cluster should eventually be up again: OK -12:11:21> Cluster is writable again: OK -12:11:21> Restarting node #4: OK -12:11:21> Instance #4 is now a slave: OK -12:11:21> We can read back the value we set before: OK +08:20:36> Cluster is writable before failover: OK +08:20:36> Killing node #4: OK +08:20:37> Cluster should eventually be up again: OK +08:20:37> Cluster is writable again: OK +08:20:37> Restarting node #4: OK +08:20:37> Instance #4 is now a slave: OK +08:20:37> We can read back the value we set before: OK --- Iteration 5 --- -12:11:22> Wait for slave of #8 to sync: OK -12:11:22> Cluster is writable before failover: OK -12:11:22> Killing node #8: OK -12:11:22> Wait failover by #3 with old epoch 21: OK -12:11:26> Cluster should eventually be up again: OK -12:11:26> Cluster is writable again: OK -12:11:26> Restarting node #8: OK -12:11:26> Instance #8 is now a slave: OK -12:11:26> We can read back the value we set before: OK +08:20:39> Wait for slave of #2 to sync: OK +08:20:39> Cluster is writable before failover: OK +08:20:40> Killing node #2: OK +08:20:40> Wait failover by #7 with old epoch 3: OK +08:20:45> Cluster should eventually be up again: OK +08:20:45> Cluster is writable again: OK +08:20:46> Restarting node #2: OK +08:20:46> Instance #2 is now a slave: OK +08:20:46> We can read back the value we set before: OK --- Iteration 4 --- -12:11:26> Cluster is writable before failover: OK -12:11:26> Killing node #5: OK -12:11:26> Cluster should eventually be up again: OK -12:11:26> Cluster is writable again: OK -12:11:26> Restarting node #5: OK -12:11:26> Instance #5 is now a slave: OK -12:11:26> We can read back the value we set before: OK +08:20:47> Cluster is writable before failover: OK +08:20:48> Killing node #8: OK +08:20:48> Cluster should eventually be up again: OK +08:20:48> Cluster is writable again: OK +08:20:48> Restarting node #8: OK +08:20:48> Instance #8 is now a slave: OK +08:20:48> We can read back the value we set before: OK --- Iteration 3 --- -12:11:26> Cluster is writable before failover: OK -12:11:26> Killing node #6: OK -12:11:26> Cluster should eventually be up again: OK -12:11:26> Cluster is writable again: OK -12:11:26> Restarting node #6: OK -12:11:26> Instance #6 is now a slave: OK -12:11:26> We can read back the value we set before: OK +08:20:49> Cluster is writable before failover: OK +08:20:50> Killing node #2: OK +08:20:50> Cluster should eventually be up again: OK +08:20:50> Cluster is writable again: OK +08:20:51> Restarting node #2: OK +08:20:51> Instance #2 is now a slave: OK +08:20:51> We can read back the value we set before: OK --- Iteration 2 --- -12:11:26> Wait for slave of #9 to sync: OK -12:11:26> Cluster is writable before failover: OK -12:11:26> Killing node #9: OK -12:11:26> Wait failover by #4 with old epoch 23: OK -12:11:31> Cluster should eventually be up again: OK -12:11:31> Cluster is writable again: OK -12:11:31> Restarting node #9: OK -12:11:31> Instance #9 is now a slave: OK -12:11:31> We can read back the value we set before: OK +08:20:52> Cluster is writable before failover: OK +08:20:53> Killing node #5: OK +08:20:53> Cluster should eventually be up again: OK +08:20:53> Cluster is writable again: OK +08:20:54> Restarting node #5: OK +08:20:54> Instance #5 is now a slave: OK +08:20:54> We can 
read back the value we set before: OK --- Iteration 1 --- -12:11:31> Wait for slave of #1 to sync: OK -12:11:31> Cluster is writable before failover: OK -12:11:31> Killing node #1: OK -12:11:31> Wait failover by #6 with old epoch 25: OK -12:11:35> Cluster should eventually be up again: OK -12:11:35> Cluster is writable again: OK -12:11:35> Restarting node #1: OK -12:11:35> Instance #1 is now a slave: OK -12:11:35> We can read back the value we set before: OK -12:11:35> Post condition: current_epoch >= my_epoch everywhere: OK +08:20:55> Cluster is writable before failover: OK +08:20:56> Killing node #2: OK +08:20:56> Cluster should eventually be up again: OK +08:20:56> Cluster is writable again: OK +08:20:57> Restarting node #2: OK +08:20:57> Instance #2 is now a slave: OK +08:20:57> We can read back the value we set before: OK +08:20:58> Post condition: current_epoch >= my_epoch everywhere: OK Testing unit: 04-resharding.tcl -12:11:35> (init) Restart killed instances: OK -12:11:35> Cluster nodes are reachable: OK -12:11:35> Cluster nodes hard reset: OK -12:11:35> Cluster Join and auto-discovery test: OK -12:11:39> Before slots allocation, all nodes report cluster failure: OK -12:11:39> Create a 5 nodes cluster: OK -12:11:42> Cluster is up: OK -12:11:42> Enable AOF in all the instances: OK -12:11:42> Cluster consistency during live resharding: WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW...Starting resharding...W.................................W................................................W.............................................................................W.......................................W...................................W.....................................W..................................................W................................................W.....................................W...............................W...............................................W........................................................W.........................................W..........................................W.......................................W..........................................W.............................W.................................W...................................................W................................W...........................W...............W.....................................W..................................W..........WWWWW...Starting resharding...W.................................W............................................................W........................................................W......................................................W................................W.................................................W...........................................W....................................W............................................W...............................W.....................................W.........................................................W....................................................W............................................W..........................................W...................................W...............................................W....................................................W.............................W.........................................................W.............................................W................................W.......................WWWWWWW...Starting 
resharding...W.................................W................................................W....................................................................W......................................................W..........................................W..................................................W...........................................W...............................................W...............................................W.............................................W..................................W.........................................W....................................W.........................................W.....................................W....................................................W...........................................................W............................................W.......................................W.......................................W..............................W.............................................W................WWWWWWW...Starting resharding...W...............................................................W......................................................W............................................................W..........................................W...........................................W..............................................W.....................................W..................................W..............................................W...................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................OK -12:12:24> Verify 50000 keys for consistency with logical content: OK -12:12:31> Crash and restart all the instances: OK -12:12:35> Cluster should eventually be up again: OK -12:12:37> Verify 50000 keys after the crash & restart: OK -12:12:44> Disable AOF in all the instances: OK -12:12:44> Verify slaves consistency: OK +08:20:58> (init) Restart killed instances: OK +08:20:58> Cluster nodes are reachable: OK +08:20:58> Cluster nodes hard reset: OK +08:21:00> Cluster Join and auto-discovery test: OK +08:21:04> Before slots allocation, all nodes report cluster failure: OK +08:21:04> Create a 5 nodes cluster: OK +08:21:08> Cluster is up: OK +08:21:08> Enable AOF in all the instances: OK +08:21:09> Cluster consistency during live resharding: WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW...Starting 
resharding...W...................................................W...............................................................................................W...............................................................................W.................W................................W...........................................................................W.....................................................W.................................................................................W................................................................................W....................................................................................W.......................................................................................................W.............................................................................................W.........................................................................W.............................................................WWWWWW...Starting resharding...W...................................................W.............................................................................................W.....................................................................W...................................................W...................................................W..............................................................W.....................................................W.....................................................................W....................................................................................W..........................................................W.................................................W.................................................................W..................................................................W...................................................................W................................................................................................WWWWW...Starting resharding...W.............................................W........................................................................W.............................................................................................W.........................................................................W...............................................................................................W...............................................................W...........................................................................................................W................................................................................................W............................................................W......................................................................W......................................................................W.......................................W............................................................W.............................................................WWWWWW...Starting 
resharding...W.....................................................................................................................................W..................................................................................................W....................................................................................................W....................................................W.......................................................W.........................................................................W..................................................W......................................................W.............................................W...............................................W..............................................................W...........................................................W....................................W.............................................................W......................................................W....................WWWW...Starting resharding...W............................................................................................................W...........................................................................................W........................................................W...............................................................................................W..............................................................................W..................................................................................................W.............................................................................................W................................................................................................W....................................................................................................W...............................................................W..........................................................W...............................................................W...................WWWWWWWOK +08:26:15> Verify 50000 keys for consistency with logical content: OK +08:26:28> Crash and restart all the instances: OK +08:26:32> Cluster should eventually be up again: OK +08:26:34> Verify 50000 keys after the crash & restart: OK +08:26:47> Disable AOF in all the instances: OK +08:26:47> Verify slaves consistency: OK Testing unit: 05-slave-selection.tcl -12:12:45> (init) Restart killed instances: OK -12:12:45> Cluster nodes are reachable: OK -12:12:45> Cluster nodes hard reset: OK -12:12:45> Cluster Join and auto-discovery test: OK -12:12:50> Before slots allocation, all nodes report cluster failure: OK -12:12:50> Create a 5 nodes cluster: OK -12:12:54> Cluster is up: OK -12:12:54> The first master has actually two slaves: OK -12:12:54> Slaves of #0 are instance #5 and #10 as expected: OK -12:12:54> Instance #5 and #10 synced with the master: OK -12:12:54> Slaves are both able to receive and acknowledge writes: OK -12:12:54> Write data while slave #10 is paused and can't receive it: OK -12:13:04> Wait for instance #5 (and not #10) to turn into a master: OK -12:13:08> Wait for the node #10 to return alive before ending the test: OK -12:13:08> Cluster should eventually be up again: OK -12:13:08> Node #10 should eventually replicate node #5: OK -12:13:09> (init) Restart killed instances: redis/0 OK -12:13:09> Cluster nodes are reachable: OK -12:13:09> Cluster nodes hard reset: OK -12:13:09> Cluster Join and auto-discovery test: OK 
-12:13:14> Before slots allocation, all nodes report cluster failure: OK -12:13:14> Create a 3 nodes cluster: OK -12:13:17> Cluster is up: OK -12:13:17> The first master has actually 5 slaves: OK -12:13:17> Slaves of #0 are instance #3, #6, #9, #12 and #15 as expected: OK -12:13:17> Instance #3, #6, #9, #12 and #15 synced with the master: OK -12:13:17> New Master down consecutively: OK +08:26:50> (init) Restart killed instances: OK +08:26:50> Cluster nodes are reachable: OK +08:26:50> Cluster nodes hard reset: OK +08:26:50> Cluster Join and auto-discovery test: OK +08:26:54> Before slots allocation, all nodes report cluster failure: OK +08:26:54> Create a 5 nodes cluster: OK +08:26:57> Cluster is up: OK +08:26:57> The first master has actually two slaves: OK +08:26:57> Slaves of #0 are instance #5 and #10 as expected: OK +08:26:57> Instance #5 and #10 synced with the master: OK +08:26:58> Slaves are both able to receive and acknowledge writes: OK +08:26:58> Write data while slave #10 is paused and can't receive it: OK +08:27:08> Wait for instance #5 (and not #10) to turn into a master: OK +08:27:12> Wait for the node #10 to return alive before ending the test: OK +08:27:12> Cluster should eventually be up again: OK +08:27:12> Node #10 should eventually replicate node #5: OK +08:27:13> (init) Restart killed instances: redis/0 OK +08:27:13> Cluster nodes are reachable: OK +08:27:13> Cluster nodes hard reset: OK +08:27:13> Cluster Join and auto-discovery test: OK +08:27:17> Before slots allocation, all nodes report cluster failure: OK +08:27:17> Create a 3 nodes cluster: OK +08:27:21> Cluster is up: OK +08:27:21> The first master has actually 5 slaves: OK +08:27:21> Slaves of #0 are instance #3, #6, #9, #12 and #15 as expected: OK +08:27:21> Instance #3, #6, #9, #12 and #15 synced with the master: OK +08:27:21> New Master down consecutively: OK Testing unit: 06-slave-stop-cond.tcl -12:13:47> (init) Restart killed instances: redis/0 redis/3 redis/9 redis/12 redis/15 OK -12:13:47> Cluster nodes are reachable: OK -12:13:47> Cluster nodes hard reset: OK -12:13:48> Cluster Join and auto-discovery test: OK -12:13:51> Before slots allocation, all nodes report cluster failure: OK -12:13:51> Create a 5 nodes cluster: OK -12:13:54> Cluster is up: OK -12:13:54> The first master has actually one slave: OK -12:13:54> Slaves of #0 is instance #5 as expected: OK -12:13:54> Instance #5 synced with the master: OK -12:13:54> Lower the slave validity factor of #5 to the value of 2: OK -12:13:54> Break master-slave link and prevent further reconnections: OK -12:14:14> Slave #5 is reachable and alive: OK -12:14:14> Slave #5 should not be able to failover: OK -12:14:24> Cluster should be down: OK +08:27:51> (init) Restart killed instances: redis/0 redis/3 redis/9 redis/12 redis/15 OK +08:27:51> Cluster nodes are reachable: OK +08:27:51> Cluster nodes hard reset: OK +08:27:52> Cluster Join and auto-discovery test: OK +08:27:56> Before slots allocation, all nodes report cluster failure: OK +08:27:56> Create a 5 nodes cluster: OK +08:27:59> Cluster is up: OK +08:27:59> The first master has actually one slave: OK +08:27:59> Slaves of #0 is instance #5 as expected: OK +08:27:59> Instance #5 synced with the master: OK +08:27:59> Lower the slave validity factor of #5 to the value of 2: OK +08:27:59> Break master-slave link and prevent further reconnections: OK +08:28:19> Slave #5 is reachable and alive: OK +08:28:19> Slave #5 should not be able to failover: OK +08:28:29> Cluster should be down: OK Testing unit: 
07-replica-migration.tcl -12:14:24> (init) Restart killed instances: redis/0 OK -12:14:24> Cluster nodes are reachable: OK -12:14:24> Cluster nodes hard reset: OK -12:14:24> Cluster Join and auto-discovery test: OK -12:14:27> Before slots allocation, all nodes report cluster failure: OK -12:14:27> Create a 5 nodes cluster: OK -12:14:31> Cluster is up: OK -12:14:31> Each master should have two replicas attached: OK -12:14:31> Killing all the slaves of master #0 and #1: OK -12:14:36> Master #0 should have at least one replica: OK -12:14:41> Master #1 should have at least one replica: OK -12:14:41> Master #2 should have at least one replica: OK -12:14:41> Master #3 should have at least one replica: OK -12:14:41> Master #4 should have at least one replica: OK -12:14:41> (init) Restart killed instances: redis/5 redis/6 redis/10 redis/11 OK -12:14:41> Cluster nodes are reachable: OK -12:14:41> Cluster nodes hard reset: OK -12:14:41> Cluster Join and auto-discovery test: OK -12:14:44> Before slots allocation, all nodes report cluster failure: OK -12:14:44> Create a 5 nodes cluster: OK -12:14:48> Cluster is up: OK -12:14:48> Kill slave #7 of master #2. Only slave left is #12 now: OK -12:14:48> Killing master node #2, #12 should failover: OK -12:14:48> Wait for failover: OK -12:14:52> Cluster should eventually be up again: OK -12:14:52> Cluster is writable: OK -12:14:53> Instance 12 is now a master without slaves: OK -12:14:53> Master #12 should get at least one migrated replica: OK +08:28:29> (init) Restart killed instances: redis/0 OK +08:28:29> Cluster nodes are reachable: OK +08:28:29> Cluster nodes hard reset: OK +08:28:30> Cluster Join and auto-discovery test: OK +08:28:33> Before slots allocation, all nodes report cluster failure: OK +08:28:33> Create a 5 nodes cluster: OK +08:28:37> Cluster is up: OK +08:28:37> Each master should have two replicas attached: OK +08:28:37> Killing all the slaves of master #0 and #1: OK +08:28:41> Master #0 should have at least one replica: OK +08:28:46> Master #1 should have at least one replica: OK +08:28:46> Master #2 should have at least one replica: OK +08:28:46> Master #3 should have at least one replica: OK +08:28:46> Master #4 should have at least one replica: OK +08:28:46> (init) Restart killed instances: redis/5 redis/6 redis/10 redis/11 OK +08:28:46> Cluster nodes are reachable: OK +08:28:47> Cluster nodes hard reset: OK +08:28:47> Cluster Join and auto-discovery test: OK +08:28:52> Before slots allocation, all nodes report cluster failure: OK +08:28:52> Create a 5 nodes cluster: OK +08:28:56> Cluster is up: OK +08:28:56> Kill slave #7 of master #2. 
Only slave left is #12 now: OK +08:28:56> Killing master node #2, #12 should failover: OK +08:28:56> Wait for failover: OK +08:29:00> Cluster should eventually be up again: OK +08:29:00> Cluster is writable: OK +08:29:00> Instance 12 is now a master without slaves: OK +08:29:00> Master #12 should get at least one migrated replica: OK Testing unit: 08-update-msg.tcl -12:14:58> (init) Restart killed instances: redis/2 redis/7 OK -12:14:58> Cluster nodes are reachable: OK -12:14:58> Cluster nodes hard reset: OK -12:14:58> Cluster Join and auto-discovery test: OK -12:15:01> Before slots allocation, all nodes report cluster failure: OK -12:15:01> Create a 5 nodes cluster: OK -12:15:05> Cluster is up: OK -12:15:05> Cluster is writable: OK -12:15:05> Instance #5 is a slave: OK -12:15:05> Instance #5 synced with the master: OK -12:15:05> Killing one master node: OK -12:15:05> Wait for failover: OK -12:15:09> Cluster should eventually be up again: OK -12:15:09> Cluster is writable: OK -12:15:09> Instance #5 is now a master: OK -12:15:09> Killing the new master #5: OK -12:15:09> Cluster should be down now: OK -12:15:13> Restarting the old master node: OK -12:15:13> Instance #0 gets converted into a slave: OK -12:15:13> Restarting the new master node: OK -12:15:13> Cluster is up again: OK +08:29:06> (init) Restart killed instances: redis/2 redis/7 OK +08:29:06> Cluster nodes are reachable: OK +08:29:06> Cluster nodes hard reset: OK +08:29:06> Cluster Join and auto-discovery test: OK +08:29:10> Before slots allocation, all nodes report cluster failure: OK +08:29:10> Create a 5 nodes cluster: OK +08:29:13> Cluster is up: OK +08:29:13> Cluster is writable: OK +08:29:14> Instance #5 is a slave: OK +08:29:14> Instance #5 synced with the master: OK +08:29:14> Killing one master node: OK +08:29:14> Wait for failover: OK +08:29:18> Cluster should eventually be up again: OK +08:29:18> Cluster is writable: OK +08:29:18> Instance #5 is now a master: OK +08:29:18> Killing the new master #5: OK +08:29:18> Cluster should be down now: OK +08:29:22> Restarting the old master node: OK +08:29:22> Instance #0 gets converted into a slave: OK +08:29:22> Restarting the new master node: OK +08:29:22> Cluster is up again: OK Testing unit: 09-pubsub.tcl -12:15:19> (init) Restart killed instances: OK -12:15:19> Cluster nodes are reachable: OK -12:15:19> Cluster nodes hard reset: OK -12:15:19> Cluster Join and auto-discovery test: OK -12:15:23> Before slots allocation, all nodes report cluster failure: OK -12:15:23> Create a 5 nodes cluster: OK -12:15:27> Test publishing to master: OK -12:15:27> Test publishing to slave: OK +08:29:29> (init) Restart killed instances: OK +08:29:29> Cluster nodes are reachable: OK +08:29:29> Cluster nodes hard reset: OK +08:29:29> Cluster Join and auto-discovery test: OK +08:29:33> Before slots allocation, all nodes report cluster failure: OK +08:29:33> Create a 5 nodes cluster: OK +08:29:37> Test publishing to master: OK +08:29:37> Test publishing to slave: OK Testing unit: 10-manual-failover.tcl -12:15:27> (init) Restart killed instances: OK -12:15:27> Cluster nodes are reachable: OK -12:15:27> Cluster nodes hard reset: OK -12:15:27> Cluster Join and auto-discovery test: OK -12:15:32> Before slots allocation, all nodes report cluster failure: OK -12:15:32> Create a 5 nodes cluster: OK -12:15:36> Cluster is up: OK -12:15:36> Cluster is writable: OK -12:15:36> Instance #5 is a slave: OK -12:15:36> Instance #5 synced with the master: OK -12:15:36> Send CLUSTER FAILOVER to #5, during load: 
WWWWWWWWWWOK -12:15:39> Wait for failover: OK -12:15:39> Cluster should eventually be up again: OK -12:15:39> Cluster is writable: OK -12:15:39> Instance #5 is now a master: OK -12:15:39> Verify 50000 keys for consistency with logical content: OK -12:15:40> Instance #0 gets converted into a slave: OK -12:15:40> (init) Restart killed instances: OK -12:15:40> Cluster nodes are reachable: OK -12:15:40> Cluster nodes hard reset: OK -12:15:40> Cluster Join and auto-discovery test: OK -12:15:43> Before slots allocation, all nodes report cluster failure: OK -12:15:43> Create a 5 nodes cluster: OK -12:15:47> Cluster is up: OK -12:15:47> Cluster is writable: OK -12:15:47> Instance #5 is a slave: OK -12:15:47> Instance #5 synced with the master: OK -12:15:47> Make instance #0 unreachable without killing it: OK -12:15:47> Send CLUSTER FAILOVER to instance #5: OK -12:15:47> Instance #5 is still a slave after some time (no failover): OK -12:15:52> Wait for instance #0 to return back alive: OK -12:15:57> (init) Restart killed instances: OK -12:15:57> Cluster nodes are reachable: OK -12:15:57> Cluster nodes hard reset: OK -12:15:57> Cluster Join and auto-discovery test: OK -12:16:01> Before slots allocation, all nodes report cluster failure: OK -12:16:01> Create a 5 nodes cluster: OK -12:16:04> Cluster is up: OK -12:16:04> Cluster is writable: OK -12:16:04> Instance #5 is a slave: OK -12:16:04> Instance #5 synced with the master: OK -12:16:04> Make instance #0 unreachable without killing it: OK -12:16:04> Send CLUSTER FAILOVER to instance #5: OK -12:16:04> Instance #5 is a master after some time: OK -12:16:04> Wait for instance #0 to return back alive: OK +08:29:37> (init) Restart killed instances: OK +08:29:37> Cluster nodes are reachable: OK +08:29:37> Cluster nodes hard reset: OK +08:29:37> Cluster Join and auto-discovery test: OK +08:29:41> Before slots allocation, all nodes report cluster failure: OK +08:29:41> Create a 5 nodes cluster: OK +08:29:45> Cluster is up: OK +08:29:45> Cluster is writable: OK +08:29:45> Instance #5 is a slave: OK +08:29:45> Instance #5 synced with the master: OK +08:29:46> Send CLUSTER FAILOVER to #5, during load: WWWWWWWWWWOK +08:29:50> Wait for failover: OK +08:29:50> Cluster should eventually be up again: OK +08:29:50> Cluster is writable: OK +08:29:50> Instance #5 is now a master: OK +08:29:50> Verify 50000 keys for consistency with logical content: OK +08:29:53> Instance #0 gets converted into a slave: OK +08:29:53> (init) Restart killed instances: OK +08:29:53> Cluster nodes are reachable: OK +08:29:53> Cluster nodes hard reset: OK +08:29:53> Cluster Join and auto-discovery test: OK +08:29:57> Before slots allocation, all nodes report cluster failure: OK +08:29:57> Create a 5 nodes cluster: OK +08:30:02> Cluster is up: OK +08:30:02> Cluster is writable: OK +08:30:02> Instance #5 is a slave: OK +08:30:02> Instance #5 synced with the master: OK +08:30:02> Make instance #0 unreachable without killing it: OK +08:30:02> Send CLUSTER FAILOVER to instance #5: OK +08:30:02> Instance #5 is still a slave after some time (no failover): OK +08:30:07> Wait for instance #0 to return back alive: OK +08:30:12> (init) Restart killed instances: OK +08:30:12> Cluster nodes are reachable: OK +08:30:12> Cluster nodes hard reset: OK +08:30:12> Cluster Join and auto-discovery test: OK +08:30:17> Before slots allocation, all nodes report cluster failure: OK +08:30:17> Create a 5 nodes cluster: OK +08:30:20> Cluster is up: OK +08:30:20> Cluster is writable: OK +08:30:20> Instance #5 is a 
slave: OK +08:30:20> Instance #5 synced with the master: OK +08:30:20> Make instance #0 unreachable without killing it: OK +08:30:20> Send CLUSTER FAILOVER to instance #5: OK +08:30:20> Instance #5 is a master after some time: OK +08:30:20> Wait for instance #0 to return back alive: OK Testing unit: 11-manual-takeover.tcl -12:16:14> (init) Restart killed instances: OK -12:16:14> Cluster nodes are reachable: OK -12:16:14> Cluster nodes hard reset: OK -12:16:14> Cluster Join and auto-discovery test: OK -12:16:19> Before slots allocation, all nodes report cluster failure: OK -12:16:19> Create a 5 nodes cluster: OK -12:16:23> Cluster is up: OK -12:16:23> Cluster is writable: OK -12:16:23> Killing majority of master nodes: OK -12:16:23> Cluster should eventually be down: OK -12:16:26> Use takeover to bring slaves back: OK -12:16:27> Cluster should eventually be up again: OK -12:16:30> Cluster is writable: OK -12:16:30> Instance #5, #6, #7 are now masters: OK -12:16:30> Restarting the previously killed master nodes: OK -12:16:30> Instance #0, #1, #2 gets converted into a slaves: OK +08:30:30> (init) Restart killed instances: OK +08:30:30> Cluster nodes are reachable: OK +08:30:30> Cluster nodes hard reset: OK +08:30:30> Cluster Join and auto-discovery test: OK +08:30:34> Before slots allocation, all nodes report cluster failure: OK +08:30:34> Create a 5 nodes cluster: OK +08:30:38> Cluster is up: OK +08:30:38> Cluster is writable: OK +08:30:38> Killing majority of master nodes: OK +08:30:39> Cluster should eventually be down: OK +08:30:42> Use takeover to bring slaves back: OK +08:30:42> Cluster should eventually be up again: OK +08:30:45> Cluster is writable: OK +08:30:45> Instance #5, #6, #7 are now masters: OK +08:30:45> Restarting the previously killed master nodes: OK +08:30:46> Instance #0, #1, #2 gets converted into a slaves: OK Testing unit: 12-replica-migration-2.tcl -12:16:30> (init) Restart killed instances: OK -12:16:30> Cluster nodes are reachable: OK -12:16:30> Cluster nodes hard reset: OK -12:16:30> Cluster Join and auto-discovery test: OK -12:16:33> Before slots allocation, all nodes report cluster failure: OK -12:16:33> Create a 5 nodes cluster: OK -12:16:36> Cluster is up: OK -12:16:36> Each master should have at least two replicas attached: OK -12:16:36> Resharding all the master #0 slots away from it: >>> Performing Cluster Check (using node 127.0.0.1:30000) +08:30:46> (init) Restart killed instances: OK +08:30:46> Cluster nodes are reachable: OK +08:30:46> Cluster nodes hard reset: OK +08:30:47> Cluster Join and auto-discovery test: OK +08:30:50> Before slots allocation, all nodes report cluster failure: OK +08:30:50> Create a 5 nodes cluster: OK +08:30:54> Cluster is up: OK +08:30:55> Each master should have at least two replicas attached: OK +08:30:55> Resharding all the master #0 slots away from it: >>> Performing Cluster Check (using node 127.0.0.1:30000) [OK] All nodes agree about slots configuration. >>> Check for open slots... >>> Check slots coverage... [OK] All 16384 slots covered. >>> Rebalancing across 5 nodes. 
Total weight = 4.00 -Moving 909 slots from 127.0.0.1:30000 to 127.0.0.1:30002 -############################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################# -Moving 850 slots from 127.0.0.1:30000 to 127.0.0.1:30004 -################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################## -Moving 755 slots from 127.0.0.1:30000 to 127.0.0.1:30003 -################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################### -Moving 745 slots from 127.0.0.1:30000 to 127.0.0.1:30001 -######################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################### +Moving 871 slots from 
127.0.0.1:30000 to 127.0.0.1:30003 +####################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################### +Moving 863 slots from 127.0.0.1:30000 to 127.0.0.1:30004 +############################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################### +Moving 744 slots from 127.0.0.1:30000 to 127.0.0.1:30001 +######################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################## +Moving 742 slots from 127.0.0.1:30000 to 127.0.0.1:30002 +###################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################### OK -12:17:38> Master #0 should lose its replicas: OK -12:17:39> Resharding back some slot to master #0: >>> 
Performing Cluster Check (using node 127.0.0.1:30000) +08:33:12> Master #0 should lose its replicas: OK +08:33:13> Resharding back some slot to master #0: >>> Performing Cluster Check (using node 127.0.0.1:30000) [OK] All nodes agree about slots configuration. >>> Check for open slots... >>> Check slots coverage... [OK] All 16384 slots covered. >>> Rebalancing across 5 nodes. Total weight = 4.01 -Moving 11 slots from 127.0.0.1:30002 to 127.0.0.1:30000 -########### Moving 11 slots from 127.0.0.1:30003 to 127.0.0.1:30000 ########### Moving 11 slots from 127.0.0.1:30001 to 127.0.0.1:30000 ########### Moving 11 slots from 127.0.0.1:30004 to 127.0.0.1:30000 ########### +Moving 11 slots from 127.0.0.1:30002 to 127.0.0.1:30000 +########### OK -12:17:50> Master #0 should re-acquire one or more replicas: OK +08:33:25> Master #0 should re-acquire one or more replicas: OK Testing unit: 13-no-failover-option.tcl -12:17:56> (init) Restart killed instances: OK -12:17:56> Cluster nodes are reachable: OK -12:17:56> Cluster nodes hard reset: OK -12:17:56> Cluster Join and auto-discovery test: OK -12:17:59> Before slots allocation, all nodes report cluster failure: OK -12:17:59> Create a 5 nodes cluster: OK -12:18:03> Cluster is up: OK -12:18:03> Cluster is writable: OK -12:18:03> Instance #5 is a slave: OK -12:18:03> Instance #5 synced with the master: OK -12:18:03> The nofailover flag is propagated: OK -12:18:04> Killing one master node: OK -12:18:04> Cluster should be still down after some time: OK -12:18:14> Instance #5 is still a slave: OK -12:18:14> Restarting the previously killed master node: OK +08:33:30> (init) Restart killed instances: OK +08:33:30> Cluster nodes are reachable: OK +08:33:30> Cluster nodes hard reset: OK +08:33:30> Cluster Join and auto-discovery test: OK +08:33:34> Before slots allocation, all nodes report cluster failure: OK +08:33:34> Create a 5 nodes cluster: OK +08:33:38> Cluster is up: OK +08:33:38> Cluster is writable: OK +08:33:38> Instance #5 is a slave: OK +08:33:38> Instance #5 synced with the master: OK +08:33:38> The nofailover flag is propagated: OK +08:33:40> Killing one master node: OK +08:33:40> Cluster should be still down after some time: OK +08:33:50> Instance #5 is still a slave: OK +08:33:50> Restarting the previously killed master node: OK Testing unit: 16-transactions-on-replica.tcl -12:18:14> (init) Restart killed instances: OK -12:18:14> Cluster nodes are reachable: OK -12:18:14> Cluster nodes hard reset: OK -12:18:14> Cluster Join and auto-discovery test: OK -12:18:18> Before slots allocation, all nodes report cluster failure: OK -12:18:18> Create a primary with a replica: OK -12:18:23> Cluster should start ok: OK -12:18:23> Cant read from replica without READONLY: OK -12:18:23> Can read from replica after READONLY: OK -12:18:23> Can preform HSET primary and HGET from replica: OK -12:18:23> MULTI-EXEC with write operations is MOVED: OK -12:18:23> read-only blocking operations from replica: OK +08:33:50> (init) Restart killed instances: OK +08:33:50> Cluster nodes are reachable: OK +08:33:50> Cluster nodes hard reset: OK +08:33:50> Cluster Join and auto-discovery test: OK +08:33:54> Before slots allocation, all nodes report cluster failure: OK +08:33:54> Create a primary with a replica: OK +08:33:59> Cluster should start ok: OK +08:33:59> Cant read from replica without READONLY: OK +08:33:59> Can read from replica after READONLY: OK +08:33:59> Can preform HSET primary and HGET from replica: OK +08:33:59> MULTI-EXEC with write operations is MOVED: OK 
+08:33:59> read-only blocking operations from replica: OK Cleaning up... GOOD! No errors. ./runtest-sentinel || true @@ -2585,88 +2606,88 @@ Starting redis #3 at port 30003 Starting redis #4 at port 30004 Testing unit: 00-base.tcl -12:18:23> (init) Restart killed instances: OK -12:18:23> (init) Remove old master entry from sentinels: OK -12:18:23> (init) Create a master-slaves cluster of 5 instances: OK -12:18:24> (init) Sentinels can start monitoring a master: OK -12:18:24> (init) Sentinels can talk with the master: OK -12:18:24> (init) Sentinels are able to auto-discover other sentinels: OK -12:18:26> (init) Sentinels are able to auto-discover slaves: OK -12:18:26> Basic failover works if the master is down: OK -12:18:30> New master 127.0.0.1:30002 role matches: OK -12:18:30> All the other slaves now point to the new master: OK -12:18:30> The old master eventually gets reconfigured as a slave: OK -12:18:40> ODOWN is not possible without N (quorum) Sentinels reports: OK -12:18:40> Failover is not possible without majority agreement: OK -12:18:40> Failover works if we configure for absolute agreement: OK -12:18:45> New master 127.0.0.1:30004 role matches: OK +08:34:00> (init) Restart killed instances: OK +08:34:00> (init) Remove old master entry from sentinels: OK +08:34:00> (init) Create a master-slaves cluster of 5 instances: OK +08:34:01> (init) Sentinels can start monitoring a master: OK +08:34:01> (init) Sentinels can talk with the master: OK +08:34:01> (init) Sentinels are able to auto-discover other sentinels: OK +08:34:03> (init) Sentinels are able to auto-discover slaves: OK +08:34:03> Basic failover works if the master is down: OK +08:34:06> New master 127.0.0.1:30004 role matches: OK +08:34:06> All the other slaves now point to the new master: OK +08:34:06> The old master eventually gets reconfigured as a slave: OK +08:34:17> ODOWN is not possible without N (quorum) Sentinels reports: OK +08:34:17> Failover is not possible without majority agreement: OK +08:34:18> Failover works if we configure for absolute agreement: OK +08:34:22> New master 127.0.0.1:30002 role matches: OK Testing unit: 01-conf-update.tcl -12:18:45> (init) Restart killed instances: OK -12:18:45> (init) Remove old master entry from sentinels: OK -12:18:45> (init) Create a master-slaves cluster of 5 instances: OK -12:18:46> (init) Sentinels can start monitoring a master: OK -12:18:46> (init) Sentinels can talk with the master: OK -12:18:46> (init) Sentinels are able to auto-discover other sentinels: OK -12:18:48> (init) Sentinels are able to auto-discover slaves: OK -12:18:48> We can failover with Sentinel 1 crashed: OK -12:18:51> After Sentinel 1 is restarted, its config gets updated: OK -12:18:51> New master 127.0.0.1:30004 role matches: OK +08:34:22> (init) Restart killed instances: OK +08:34:22> (init) Remove old master entry from sentinels: OK +08:34:22> (init) Create a master-slaves cluster of 5 instances: OK +08:34:23> (init) Sentinels can start monitoring a master: OK +08:34:23> (init) Sentinels can talk with the master: OK +08:34:23> (init) Sentinels are able to auto-discover other sentinels: OK +08:34:25> (init) Sentinels are able to auto-discover slaves: OK +08:34:25> We can failover with Sentinel 1 crashed: OK +08:34:29> After Sentinel 1 is restarted, its config gets updated: OK +08:34:29> New master 127.0.0.1:30002 role matches: OK Testing unit: 02-slaves-reconf.tcl -12:18:51> (init) Restart killed instances: OK -12:18:51> (init) Remove old master entry from sentinels: OK -12:18:51> (init) Create a 
master-slaves cluster of 5 instances: OK -12:18:52> (init) Sentinels can start monitoring a master: OK -12:18:53> (init) Sentinels can talk with the master: OK -12:18:53> (init) Sentinels are able to auto-discover other sentinels: OK -12:18:55> (init) Sentinels are able to auto-discover slaves: OK -12:18:55> Check that slaves replicate from current master: OK -12:18:55> Crash the master and force a failover: OK -12:18:58> Check that slaves replicate from current master: OK -12:18:59> Kill a slave instance: OK -12:18:59> Crash the master and force a failover: OK -12:19:02> Check that slaves replicate from current master: OK -12:19:13> Wait for failover to end: OK -12:19:13> Restart killed slave and test replication of slaves again...: OK -12:19:13> Check that slaves replicate from current master: OK +08:34:29> (init) Restart killed instances: OK +08:34:29> (init) Remove old master entry from sentinels: OK +08:34:29> (init) Create a master-slaves cluster of 5 instances: OK +08:34:30> (init) Sentinels can start monitoring a master: OK +08:34:30> (init) Sentinels can talk with the master: OK +08:34:30> (init) Sentinels are able to auto-discover other sentinels: OK +08:34:32> (init) Sentinels are able to auto-discover slaves: OK +08:34:32> Check that slaves replicate from current master: OK +08:34:32> Crash the master and force a failover: OK +08:34:35> Check that slaves replicate from current master: OK +08:34:36> Kill a slave instance: OK +08:34:36> Crash the master and force a failover: OK +08:35:18> Check that slaves replicate from current master: OK +08:35:29> Wait for failover to end: OK +08:35:29> Restart killed slave and test replication of slaves again...: OK +08:35:29> Check that slaves replicate from current master: OK Testing unit: 03-runtime-reconf.tcl Testing unit: 04-slave-selection.tcl Testing unit: 05-manual.tcl -12:19:34> (init) Restart killed instances: OK -12:19:34> (init) Remove old master entry from sentinels: OK -12:19:34> (init) Create a master-slaves cluster of 5 instances: OK -12:19:35> (init) Sentinels can start monitoring a master: OK -12:19:35> (init) Sentinels can talk with the master: OK -12:19:35> (init) Sentinels are able to auto-discover other sentinels: OK -12:19:37> (init) Sentinels are able to auto-discover slaves: OK -12:19:37> Manual failover works: OK -12:19:38> New master 127.0.0.1:30001 role matches: OK -12:19:38> All the other slaves now point to the new master: OK -12:19:38> The old master eventually gets reconfigured as a slave: OK +08:35:50> (init) Restart killed instances: OK +08:35:50> (init) Remove old master entry from sentinels: OK +08:35:50> (init) Create a master-slaves cluster of 5 instances: OK +08:35:51> (init) Sentinels can start monitoring a master: OK +08:35:51> (init) Sentinels can talk with the master: OK +08:35:51> (init) Sentinels are able to auto-discover other sentinels: OK +08:35:53> (init) Sentinels are able to auto-discover slaves: OK +08:35:53> Manual failover works: OK +08:35:54> New master 127.0.0.1:30001 role matches: OK +08:35:54> All the other slaves now point to the new master: OK +08:35:54> The old master eventually gets reconfigured as a slave: OK Testing unit: 06-ckquorum.tcl -12:19:48> (init) Restart killed instances: OK -12:19:48> (init) Remove old master entry from sentinels: OK -12:19:49> (init) Create a master-slaves cluster of 5 instances: OK -12:19:49> (init) Sentinels can start monitoring a master: OK -12:19:49> (init) Sentinels can talk with the master: OK -12:19:49> (init) Sentinels are able to auto-discover 
other sentinels: OK -12:19:51> (init) Sentinels are able to auto-discover slaves: OK -12:19:51> CKQUORUM reports OK and the right amount of Sentinels: OK -12:19:51> CKQUORUM detects quorum cannot be reached: OK -12:19:51> CKQUORUM detects failover authorization cannot be reached: OK +08:36:05> (init) Restart killed instances: OK +08:36:05> (init) Remove old master entry from sentinels: OK +08:36:05> (init) Create a master-slaves cluster of 5 instances: OK +08:36:06> (init) Sentinels can start monitoring a master: OK +08:36:06> (init) Sentinels can talk with the master: OK +08:36:06> (init) Sentinels are able to auto-discover other sentinels: OK +08:36:08> (init) Sentinels are able to auto-discover slaves: OK +08:36:08> CKQUORUM reports OK and the right amount of Sentinels: OK +08:36:08> CKQUORUM detects quorum cannot be reached: OK +08:36:08> CKQUORUM detects failover authorization cannot be reached: OK Testing unit: 07-down-conditions.tcl -12:19:57> (init) Restart killed instances: OK -12:19:57> (init) Remove old master entry from sentinels: OK -12:19:57> (init) Create a master-slaves cluster of 5 instances: OK -12:19:57> (init) Sentinels can start monitoring a master: OK -12:19:57> (init) Sentinels can talk with the master: OK -12:19:57> (init) Sentinels are able to auto-discover other sentinels: OK -12:19:59> (init) Sentinels are able to auto-discover slaves: OK -12:19:59> Crash the majority of Sentinels to prevent failovers for this unit: OK -12:19:59> SDOWN is triggered by non-responding but not crashed instance: OK -12:20:09> SDOWN is triggered by crashed instance: OK -12:20:12> SDOWN is triggered by masters advertising as slaves: OK -12:20:49> SDOWN is triggered by misconfigured instance repling with errors: OK -12:20:52> SDOWN is triggered if we rename PING to PONG: OK +08:36:13> (init) Restart killed instances: OK +08:36:13> (init) Remove old master entry from sentinels: OK +08:36:13> (init) Create a master-slaves cluster of 5 instances: OK +08:36:13> (init) Sentinels can start monitoring a master: OK +08:36:13> (init) Sentinels can talk with the master: OK +08:36:13> (init) Sentinels are able to auto-discover other sentinels: OK +08:36:15> (init) Sentinels are able to auto-discover slaves: OK +08:36:15> Crash the majority of Sentinels to prevent failovers for this unit: OK +08:36:15> SDOWN is triggered by non-responding but not crashed instance: OK +08:36:25> SDOWN is triggered by crashed instance: OK +08:36:28> SDOWN is triggered by masters advertising as slaves: OK +08:37:06> SDOWN is triggered by misconfigured instance repling with errors: OK +08:37:08> SDOWN is triggered if we rename PING to PONG: OK Cleaning up... GOOD! No errors. 
make[1]: Leaving directory '/build/redis-5.0.14'
@@ -2703,8 +2724,8 @@
dh_strip
dh_makeshlibs
dh_shlibdeps
-dpkg-shlibdeps: warning: package could avoid a useless dependency if debian/redis-tools/usr/bin/redis-check-aof debian/redis-tools/usr/bin/redis-check-rdb debian/redis-tools/usr/bin/redis-benchmark debian/redis-tools/usr/bin/redis-cli were not linked against libatomic.so.1 (they use none of the library's symbols)
-dpkg-shlibdeps: warning: package could avoid a useless dependency if debian/redis-tools/usr/bin/redis-check-aof debian/redis-tools/usr/bin/redis-check-rdb debian/redis-tools/usr/bin/redis-benchmark debian/redis-tools/usr/bin/redis-cli were not linked against librt.so.1 (they use none of the library's symbols)
+dpkg-shlibdeps: warning: package could avoid a useless dependency if debian/redis-tools/usr/bin/redis-check-rdb debian/redis-tools/usr/bin/redis-check-aof debian/redis-tools/usr/bin/redis-benchmark debian/redis-tools/usr/bin/redis-cli were not linked against libatomic.so.1 (they use none of the library's symbols)
+dpkg-shlibdeps: warning: package could avoid a useless dependency if debian/redis-tools/usr/bin/redis-check-rdb debian/redis-tools/usr/bin/redis-check-aof debian/redis-tools/usr/bin/redis-benchmark debian/redis-tools/usr/bin/redis-cli were not linked against librt.so.1 (they use none of the library's symbols)
dh_installdeb
dh_gencontrol
dh_md5sums
@@ -2712,8 +2733,8 @@
dpkg-deb: building package 'redis' in '../redis_5.0.14-1+deb10u2_all.deb'.
dpkg-deb: building package 'redis-tools-dbgsym' in '../redis-tools-dbgsym_5.0.14-1+deb10u2_i386.deb'.
dpkg-deb: building package 'redis-tools' in '../redis-tools_5.0.14-1+deb10u2_i386.deb'.
-dpkg-deb: building package 'redis-server' in '../redis-server_5.0.14-1+deb10u2_i386.deb'.
dpkg-deb: building package 'redis-sentinel' in '../redis-sentinel_5.0.14-1+deb10u2_i386.deb'.
+dpkg-deb: building package 'redis-server' in '../redis-server_5.0.14-1+deb10u2_i386.deb'.
dpkg-genbuildinfo --build=binary
dpkg-genchanges --build=binary >../redis_5.0.14-1+deb10u2_i386.changes
dpkg-genchanges: info: binary-only upload (no source code included)
@@ -2722,12 +2743,14 @@
dpkg-buildpackage: info: binary-only upload (no source included)
dpkg-genchanges: info: not including original source code in upload
I: copying local configuration
+I: user script /srv/workspace/pbuilder/2597/tmp/hooks/B01_cleanup starting
+I: user script /srv/workspace/pbuilder/2597/tmp/hooks/B01_cleanup finished
I: unmounting dev/ptmx filesystem
I: unmounting dev/pts filesystem
I: unmounting dev/shm filesystem
I: unmounting proc filesystem
I: unmounting sys filesystem
I: cleaning the build env
-I: removing directory /srv/workspace/pbuilder/8321 and its subdirectories
-I: Current time: Sat Apr 29 12:21:01 -12 2023
-I: pbuilder-time-stamp: 1682814061
+I: removing directory /srv/workspace/pbuilder/2597 and its subdirectories
+I: Current time: Mon Mar 28 08:37:32 +14 2022
+I: pbuilder-time-stamp: 1648406252