--- /srv/reproducible-results/rbuild-debian/r-b-build.obZgKhvV/b1/libpqxx_7.10.0-2_amd64.changes +++ /srv/reproducible-results/rbuild-debian/r-b-build.obZgKhvV/b2/libpqxx_7.10.0-2_amd64.changes ├── Files │ @@ -1,5 +1,5 @@ │ │ ff621924c9939d6d80bdb58203b23dca 2520444 debug optional libpqxx-7.10-dbgsym_7.10.0-2_amd64.deb │ 0d7d87ebe02b84408d81502b64d620ba 190540 libs optional libpqxx-7.10_7.10.0-2_amd64.deb │ 85054eee2d1c27be0a3c838e5e3b55df 358800 libdevel optional libpqxx-dev_7.10.0-2_amd64.deb │ - 0b73cd773750de8dc2235299eacd7ea4 2637444 doc optional libpqxx-doc_7.10.0-2_all.deb │ + 0f35862c635e64e40620a320705d5eee 2637384 doc optional libpqxx-doc_7.10.0-2_all.deb ├── libpqxx-doc_7.10.0-2_all.deb │ ├── file list │ │ @@ -1,3 +1,3 @@ │ │ -rw-r--r-- 0 0 0 4 2025-01-23 16:15:05.000000 debian-binary │ │ --rw-r--r-- 0 0 0 32392 2025-01-23 16:15:05.000000 control.tar.xz │ │ --rw-r--r-- 0 0 0 2604860 2025-01-23 16:15:05.000000 data.tar.xz │ │ +-rw-r--r-- 0 0 0 32420 2025-01-23 16:15:05.000000 control.tar.xz │ │ +-rw-r--r-- 0 0 0 2604772 2025-01-23 16:15:05.000000 data.tar.xz │ ├── control.tar.xz │ │ ├── control.tar │ │ │ ├── ./md5sums │ │ │ │ ├── ./md5sums │ │ │ │ │┄ Files differ │ ├── data.tar.xz │ │ ├── data.tar │ │ │ ├── file list │ │ │ │ @@ -2,15 +2,15 @@ │ │ │ │ drwxr-xr-x 0 root (0) root (0) 0 2025-01-23 16:15:05.000000 ./usr/ │ │ │ │ drwxr-xr-x 0 root (0) root (0) 0 2025-01-23 16:15:05.000000 ./usr/share/ │ │ │ │ drwxr-xr-x 0 root (0) root (0) 0 2025-01-23 16:15:05.000000 ./usr/share/doc/ │ │ │ │ drwxr-xr-x 0 root (0) root (0) 0 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/ │ │ │ │ -rw-r--r-- 0 root (0) root (0) 1322 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/changelog.Debian.gz │ │ │ │ -rw-r--r-- 0 root (0) root (0) 2807 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/copyright │ │ │ │ drwxr-xr-x 0 root (0) root (0) 0 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/ │ │ │ │ --rw-r--r-- 0 root (0) root (0) 18206 
2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/accessing-results.html │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 18203 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/accessing-results.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 81499 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/annotated.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 22300 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/annotated_dup.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 63073 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/array-composite_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 87535 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/array_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 674 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/bc_s.png │ │ │ │ -rw-r--r-- 0 root (0) root (0) 634 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/bc_sd.png │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8304 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/binary.html │ │ │ │ @@ -829,71 +829,71 @@ │ │ │ │ -rw-r--r-- 0 root (0) root (0) 168 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/nav_fd.png │ │ │ │ -rw-r--r-- 0 root (0) root (0) 95 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/nav_g.png │ │ │ │ -rw-r--r-- 0 root (0) root (0) 98 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/nav_h.png │ │ │ │ -rw-r--r-- 0 root (0) root (0) 111 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/nav_hd.png │ │ │ │ -rw-r--r-- 0 root (0) root (0) 2167 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtree.css │ │ │ │ -rw-r--r-- 0 root (0) root (0) 15935 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtree.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 5968 2025-01-23 16:15:05.000000 
./usr/share/doc/libpqxx-doc/doxygen-html/navtreedata.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 19107 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex0.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 19104 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex0.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 21285 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex1.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 19510 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex2.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 15193 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex3.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 17784 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex4.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 18016 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex5.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 18019 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex5.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 2477 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex6.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 13256 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/nontransaction_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 16353 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/notification_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 122 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/open.png │ │ │ │ -rw-r--r-- 0 root (0) root (0) 6420 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/pages.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 12962 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/parameters.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 52695 2025-01-23 16:15:05.000000 
./usr/share/doc/libpqxx-doc/doxygen-html/params_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 5810 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/performance.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 29855 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/pipeline_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 696 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/plus.svg │ │ │ │ -rw-r--r-- 0 root (0) root (0) 696 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/plusd.svg │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8893 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/pqxx-source_8hxx_source.html │ │ │ │ --rw-r--r-- 0 root (0) root (0) 14798 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/prepared.html │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 14801 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/prepared.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 9364 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/prepared__statement_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 101844 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/range_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 5685 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/resize.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8660 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/result-connection_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 11425 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/result-creation_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8756 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/result-pipeline_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8591 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/result-sql__cursor_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root 
(0) 63743 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/result_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 29407 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/result__iter_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 62185 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/result__iterator_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 19467 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/robusttransaction_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 91997 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/row_8hxx_source.html │ │ │ │ drwxr-xr-x 0 root (0) root (0) 0 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/ │ │ │ │ --rw-r--r-- 0 root (0) root (0) 7544 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_0.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 4728 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_1.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 6981 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_10.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 21619 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_11.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 7545 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_0.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 4729 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_1.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 6978 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_10.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 21620 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_11.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 7537 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_12.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 
2857 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_13.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 906 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_14.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 1395 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_15.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 1394 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_15.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 95 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_16.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 1206 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_17.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 1207 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_17.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 319 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_18.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 15545 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_2.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 4604 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_3.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 4603 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_3.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 9409 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_4.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 7171 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_5.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 5454 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_6.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 1038 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_7.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 5821 2025-01-23 16:15:05.000000 
./usr/share/doc/libpqxx-doc/doxygen-html/search/all_8.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 147 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_9.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 2895 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_a.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 1394 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_b.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 9602 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_c.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 9163 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_d.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 6426 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_e.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 4664 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_f.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 1393 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_b.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 9603 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_c.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 9162 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_d.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 6428 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_e.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 4663 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_f.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 875 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_0.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 1086 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_1.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 975 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_10.js │ 
│ │ │ -rw-r--r-- 0 root (0) root (0) 835 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_11.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 157 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_12.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 85 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_13.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 3820 2025-01-23 16:15:05.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_2.js │ │ │ ├── ./usr/share/doc/libpqxx-doc/doxygen-html/accessing-results.html │ │ │ │ @@ -93,38 +93,38 @@ │ │ │ │
A query produces a result set consisting of rows, and each row consists of fields. There are several ways to receive this data.
│ │ │ │The fields are "untyped." That is to say, libpqxx has no opinion on what their types are. The database sends the data in a very flexible textual format. When you read a field, you specify what type you want it to be, and libpqxx converts the text format to that type for you.
│ │ │ │If a value does not conform to the format for the type you specify, the conversion fails. For example, if you have strings that all happen to contain numbers, you can read them as int
. But if any of the values is empty, or it's null (for a type that doesn't support null), or it's some string that does not look like an integer, or it's too large, you can't convert it to int
.
So usually, reading result data from the database means not just retrieving the data; it also means converting it to some target type.
│ │ │ │ -The simplest way to query rows of data is to call one of a transaction's "query" functions, passing as template arguments the types of columns you want to get back (e.g. int
, std::string
, double
, and so on) and as a regular argument the query itself.
You can then iterate over the result to go over the rows of data:
│ │ │ │The "query" functions execute your query, load the complete result data from the database, and then as you iterate, convert each row it received to a tuple of C++ types that you indicated.
│ │ │ │There are different query functions for querying any number of rows (query()
); querying just one row of data as a std::tuple
and throwing an error if there's more than one row (query1()
); or querying
There's another way to go through the rows coming out of a query. It's usually easier and faster if there are a lot of rows, but there are drawbacks.
│ │ │ │One, you start getting rows before all the data has come in from the database. That speeds things up, but what happens if you lose your network connection while transferring the data? Your application may already have processed some of the data before finding out that the rest isn't coming. If that is a problem for your application, streaming may not be the right choice.
│ │ │ │Two, streaming only works for some types of query. The stream()
function wraps your query in a PostgreSQL COPY
command, and COPY
only supports a few commands: SELECT
, VALUES
, or an INSERT
, UPDATE
, or DELETE
with a RETURNING
clause. See the COPY
documentation here: [ https://www.postgresql.org/docs/current/sql-copy.html ](https://www.postgresql.org/docs/current/sql-copy.html).
Three, when you convert a field to a "view" type (such as std::string_view
or pqxx::bytes_view
), the view points to underlying data which only stays valid until you iterate to the next row or exit the loop. So if you want to use that data for longer than a single iteration of the streaming loop, you'll have to store it somewhere yourself.
Now for the good news. Streaming does make it very easy to query data and loop over it, and often faster than with the "query" or "exec" functions:
│ │ │ │The conversion to C++ types (here int
, std::string_view
, and two float
s) is built into the function. You never even see row
objects, field
objects, iterators, or conversion methods. You just put in your query and you receive your data.
Sometimes you want more from a query result than just rows of data. You may need to know right away how many rows of result data you received, or how many rows your UPDATE
statement has affected, or the names of the columns, etc.
For that, use the transaction's "exec" query execution functions. Apart from a few exceptions, these return a pqxx::result
object. A result
is a container of pqxx::row
objects, so you can iterate them as normal, or index them like you would index an array. Each row
in turn is a container of pqxx::field
, Each field
holds a value, but doesn't know its type. You specify the type when you read the value.
For example, your code might do:
│ │ │ │The other takes a pointer and a size:
│ │ │ │There are some restrictions on binary_cast
that you must be aware of.
First, your data must of a type that gives us bytes. So: char
, unsigned char
, signed char
, int8_t
, uint8_t
, or of course std::byte
. You can't feed in a vector of double
, or anything like that.
Second, the data must be laid out as a contiguous block in memory. If there's no std::data()
implementation for your type, it's not suitable.
Third, binary_cast
only constructs something like a std::string_view
. It does not make a copy of your actual data. So, make sure that your data remains alive and in the same place while you're using it.
Low-level parser for C++ arrays.
│ │ │ │ -Clunky old API for parsing SQL arrays.
│ │ │ │) as the separator between array elements. All built-in SQL types use comma, except for
box` which uses semicolon. However some custom types may not work.The input is a C-style string containing the textual representation of an array, as returned by the database. The parser reads this representation on the fly. The string must remain in memory until parsing is done.
│ │ │ │Parse the array by making calls to get_next until it returns a juncture of done
. The juncture tells you what the parser found in that step: did the array "nest" to a deeper level, or "un-nest" back up?
Input stream that gets its data from a result field.
│ │ │ │ -int
, use the field's as<...>()
member function. To read a field efficiently just as a string, use its c_str()
or its as<std::string_vview>()
.int
, use the field's as<...>()
member function. To read a field efficiently just as a string, use its c_str()
or its as<std::string_vview>()
.Works like any other istream to read data from a field. It supports all formatting and streaming operations of std::istream
. For convenience there is a fieldstream alias, which defines a basic_fieldstream for char
. This is similar to how e.g. std::ifstream
relates to std::basic_ifstream
.
This class has only been tested for the char type (and its default traits).
│ │ │ │Input stream that gets its data from a large object.
│ │ │ │ -This class worked like any other istream, but to read data from a large object. It supported all formatting and streaming operations of std::istream
.
This functionality was considered too fragile and complex, so it has been replaced with a single, much simpler class.
│ │ │ │Stream that reads and writes a large object.
│ │ │ │ -This worked like a std::iostream, but to read data from, or write data to, a large object. It supported all formatting and streaming operations of std::iostream
.
This functionality was considered too fragile and complex, so it has been replaced with a single, much simpler class.
│ │ │ │Output stream that writes data back to a large object.
│ │ │ │ -This worked like any other ostream, but to write data to a large object. It supported all formatting and streaming operations of std::ostream
.
This functionality was considered too fragile and complex, so it has been replaced with a single, much simpler class.
│ │ │ │Read up to std::size(buf)
bytes from the object.
std::span<std::byte>
.std::span<std::byte>
.Retrieves bytes from the blob, at the current position, until buf
is full (i.e. its current size is reached), or there are no more bytes to read, whichever comes first.
This function will not change either the size or the capacity of buf
, only its contents.
Returns the filled portion of buf
. This may be empty.
The entries are ordered from oldest to newest handler.
│ │ │ │The pointers point to the real errorhandlers. The container it returns however is a copy of the one internal to the connection, not a reference.
│ │ │ │ │ │ │ │Set session variable, using SQL's SET
command.
SET
command.SET
command.This method of handling errors is obsolete. Use a "notice handler" instead.
│ │ │ │Identity of a large object.
│ │ │ │ -Encapsulates the identity of a large object.
│ │ │ │A largeobject must be accessed only from within a backend transaction, but the object's identity remains valid as long as the object exists.
│ │ │ │Streambuf to use large objects in standard I/O streams.
│ │ │ │ -The standard streambuf classes provide uniform access to data storage such as files or string buffers, so they can be accessed using standard input or output streams. This streambuf implementation provided similar access to large objects, so they could be read and written using the same stream classes.
│ │ │ │This functionality was considered too fragile and complex, so it has been replaced with a single, much simpler class.
│ │ │ │Accessor for large object's contents.
│ │ │ │ -blob
class instead. blob
class instead. ( | │ │ │ │binarystring const & | │ │ │ │value | ) | │ │ │ │& | │ │ │ │ │ │ │ │
The binarystring must stay valid for as long as the params
remains active.
│ │ │ │ | ) | │ │ │ │const | │ │ │ │ │ │ │ │
Produce a slice of this row, containing the given range of columns.
│ │ │ │ -The slice runs from the range's starting column to the range's end column, exclusive. It looks just like a normal result row, except slices can be empty.
│ │ │ │ │ │ │ │Stream data from the database.
│ │ │ │ - │ │ │ │ + │ │ │ │For larger data sets, retrieving data this way is likely to be faster than executing a query and then iterating and converting the rows fields. You will also be able to start processing before all of the data has come in.
│ │ │ │There are also downsides. Not all kinds of query will work in a stream. But straightforward SELECT
and UPDATE ... RETURNING
queries should work. This function makes use of pqxx::stream_from, which in turn uses PostgreSQL's COPY
command, so see the documentation for those to get the full details.
There are other downsides. If there stream encounters an error, it may leave the entire connection in an unusable state, so you'll have to give the whole thing up. Finally, opening a stream puts the connection in a special state, so you won't be able to do many other things with the connection or the transaction while the stream is open.
│ │ │ │There are two ways of starting a stream: you stream either all rows in a table (using one of the factories, table()
or raw_table()
), or the results of a query (using the query()
factory).
Usually you'll want the stream
convenience wrapper in transaction_base, * so you don't need to deal with this class directly.
Execute query, and stream over the results.
│ │ │ │ -Stream all rows in table, all columns.
│ │ │ │ -Stream given columns from all rows in table.
│ │ │ │ -Stream given columns from all rows in table.
│ │ │ │ -Create a stream, without specifying columns.
│ │ │ │ -Fields will be inserted in whatever order the columns have in the database.
│ │ │ │You'll probably want to specify the columns, so that the mapping between your data fields and the table is explicit in your code, and not hidden in an "implicit contract" between your code and your schema.
│ │ │ │ │ │ │ │Create a stream, specifying column names as a container of strings.
│ │ │ │ -Communication with the database mostly happens in a text format. When you include an integer value in a query, either you use to_string
to convert it to that text format, or under the bonnet, libpqxx does it for you. When you get a query result field "as a float," libpqxx converts from the text format to a floating-point type. These conversions are everywhere in libpqxx.
The conversion system supports many built-in types, but it is also extensible. You can "teach" libpqxx (in the scope of your own application) to convert additional types of values to and from PostgreSQL's string format.
│ │ │ │This is massively useful, but it's not for the faint of heart. You'll need to specialise some templates. And, the API for doing this can change with any major libpqxx release.
│ │ │ │If that happens, your code may fail to compile with the newer libpqxx version, and you'll have to go through the NEWS
file to find the API changes. Usually it'll be a small change, like an additional function you need to implement, or a constant you need to define.
In your application, a conversion is driven entirely by a C++ type you specify. The value's SQL type on the database side has nothing to do with it. Nor is there anything in the string that would identify its type. Your code says "convert to this type" and libpqxx does it.
│ │ │ │So, if you've SELECTed a 64-bit integer from the database, and you try to convert it to a C++ short,
one of two things will happen: either the number is small enough to fit in your short
and it just works, or else it throws a conversion exception. Similarly, if you try to read a 32-bit SQL int
as a C++ 32-bit unsigned int
, that'll work fine, unless the value happens to be negative. In such cases the conversion will throw a conversion_error
.
Or, your database table might have a text column, but a given field may contain a string that looks just like a number. You can convert that value to an integer type just fine. Or to a floating-point type. All that matters to the conversion is the actual value, and the type your code specifies.
│ │ │ │In some cases the templates for these conversions can tell the type from the arguments you pass them:
│ │ │ │In other cases you may need to instantiate template explicitly:
│ │ │ │Let's say you have some other SQL type which you want to be able to store in, or retrieve from, the database. What would it take to support that?
│ │ │ │Sometimes you do not need complete support. You might need a conversion to a string but not from a string, for example. You write out the conversion at compile time, so don't be too afraid to be incomplete. If you leave out one of these steps, it's not going to crash at run time or mess up your data. The worst that can happen is that your code won't build.
│ │ │ │So what do you need for a complete conversion?
│ │ │ │First off, of course, you need a C++ type. It may be your own, but it doesn't have to be. It could be a type from a third-party library, or even one from the standard library that libpqxx does not yet support.
│ │ │ │First thing to do is specialise the pqxx::type_name
variable to give the type a human-readable name. That allows libpqxx error messages and such to talk about the type. If you don't define a name, libpqxx will try to figure one out with some help from the compiler, but it may not always be easy to read.
Then, does your type have a built-in null value? For example, a char *
can be null on the C++ side. Or some types are always null, such as nullptr
. You specialise the pqxx::nullness
template to specify the details.
Finally, you specialise the pqxx::string_traits
template. This is where you define the actual conversions.
Let's go through these steps one by one.
│ │ │ │ -You'll need a type for which the conversions are not yet defined, because the C++ type is what determines the right conversion. One type, one set of conversions.
│ │ │ │The type doesn't have to be one that you create. The conversion logic was designed such that you can build it around any type. So you can just as easily build a conversion for a type that's defined somewhere else. There's no need to include any special methods or other members inside the type itself. That's also why libpqxx can convert built-in types like int
.
By the way, if the type is an enum, you don't need to do any of this. Just invoke the preprocessor macro PQXX_DECLARE_ENUM_CONVERSION
, from the global namespace near the top of your translation unit, and pass the type as an argument.
The library also provides specialisations for std::optional<T>
, std::shared_ptr<T>
, and std::unique_ptr<T>
. If you have conversions for T
, you'll also automatically have conversions for those.
When errors happen during conversion, libpqxx will compose error messages for the user. Sometimes these will include the name of the type that's being converted.
│ │ │ │To tell libpqxx the name of each type, there's a template variable called pqxx::type_name
. For any given type T
, it should have a specialisation that provides that T
's human-readable name:
(Yes, this means that you need to define something inside the pqxx namespace. Future versions of libpqxx may move this into a separate namespace.)
│ │ │ │Define this early on in your translation unit, before any code that might cause libpqxx to need the name. That way, the libpqxx code which needs to know the type's name can see your definition.
│ │ │ │ -A struct template pqxx::nullness
defines whether your type has a natural "null value" built in. If so, it also provides member functions for producing and recognising null values.
The simplest scenario is also the most common: most types don't have a null value built in. There is no "null `int`" in C++. In that kind of case, just derive your nullness traits from pqxx::no_null
as a shorthand:
int
, use the field's as<...>()
member function. To read a field efficiently just as a string, use its c_str()
or its as<std::string_vview>()
. int
, use the field's as<...>()
member function. To read a field efficiently just as a string, use its c_str()
or its as<std::string_vview>()
. bytes
and bytes_view
for binary data. In C++20 or better, any contiguous_range
of std::byte
will do. bytes
and bytes_view
for binary data. In C++20 or better, any contiguous_range
of std::byte
will do. std::span<std::byte>
. std::span<std::byte>
. SET
command. SET
command. field::as<...>()
or field::c_str()
. field::as<...>()
or field::c_str()
. blob
class instead. blob
class instead. SET
command. To set a session variable, use the connection's set_session_var function.SET
command. To set a session variable, use the connection's set_session_var function.Write a result field to any type of stream.
│ │ │ │ -This can be convenient when writing a field to an output stream. More importantly, it lets you write a field to e.g. a stringstream
which you can then use to read, format and convert the field in ways that to() does not support.
Example: parse a field into a variable of the nonstandard long long
type.
Pass this to a stream_from
constructor to stream query results.
Pass this to a stream_from
constructor to stream table contents.
Inserting the 101
in there is awkward and even dangerous. We'll get to that in a moment. Here's how you do it better, using parameters:
That second argument to exec()
, the {101}
, constructs a pqxx::params
object. The exec()
call will fill this value in where the query says $1
.
Doing this saves you work. If you don't use statement parameters, you'll need to quote and escape your values (see connection::quote()
and friends) as you insert them into your query as literal values.
Or if you forget to do that, you leave yourself open to horrible SQL injection attacks. Trust me, I was born in a town whose name started with an apostrophe!
│ │ │ │With parameters you can pass your values as they are, and they will go across the wire to the database in a safe format.
│ │ │ │In some cases it may even be faster! When a parameter represents binary data (as in the SQL BYTEA
type), libpqxx will send it directly as binary, which is a bit more efficient than the standard textual format in which the data normally gets sent to the database. If you insert the binary data directly in your query text, your CPU will have some extra work to do, converting the data into a text format, escaping it, and adding quotes; and the data will take up more bytes, which take time to transmit.
The pqxx::params
class is quite flexible. It can contain any number of parameter values, of many different types.
You can pass them in while constructing the params
object:
Or you can add them one by one:
│ │ │ │You can also combine the two, passing some values in the constructor and adding the rest later. You can even insert a params
into a params
:
Each of these examples will produce the same list of parameters.
│ │ │ │ -If your code gets particularly complex, it may sometimes happen that it becomes hard to track which parameter value belongs with which placeholder. Did you intend to pass this numeric value as $7
, or as $8
? The answer may depend on an if
that happened earlier in a different function.
(Generally if things get that complex, it's a good idea to look for simpler solutions. But especially when performance matters, sometimes you can't avoid complexity like that.)
│ │ │ │There's a little helper class called placeholders
. You can use it as a counter which produces those placeholder strings, $1
, $2
, $3
, et cetera. When you start generating a complex statement, you can create both a params
and a placeholders
:
Prepared statements are SQL queries that you define once and then invoke as many times as you like, typically with varying parameters. It's a lot like a function that you can define ad hoc, within the scope of one connection.
│ │ │ │If you have an SQL statement that you're going to execute many times in quick succession, it may (but see below!) be more efficient to prepare it once and reuse it. This saves the database backend the effort of parsing the SQL and figuring out an efficient execution plan.
│ │ │ │ -You create a prepared statement by preparing it on the connection (using the pqxx::connection::prepare
functions), passing an identifying name for the statement, and its SQL text.
The statement's name should consist of ASCII letters, digits, and underscores only, and start with an ASCII letter. The name is case-sensitive.
│ │ │ │You can pass parameters to a prepared statement, just like you can with a regular statement. The query text can contain $1
, $2
etc. as placeholders for parameter values that you will provide when you invoke the prepared statement.
See Statement parameters for more about this. And here's a simple example of preparing a statement and invoking it with parameters:
│ │ │ │This example looks up the prepared statement "find," passes name
and min_salary
as parameters, and invokes the statement with those values:
There is one special case: the nameless prepared statement. You may prepare a statement without a name, i.e. whose name is an empty string. The unnamed statement can be redefined at any time, without un-preparing it first.
│ │ │ │ -Don't assume that using prepared statements will speed up your application. There are cases where prepared statements are actually slower than plain SQL.
│ │ │ │The reason is that the backend can often produce a better execution plan when it knows the statement's actual parameter values.
│ │ │ │For example, say you've got a web application and you're querying for users with status "inactive" who have email addresses in a given domain name X. If X is a very popular provider, the best way for the database engine to plan the query may be to list the inactive users first and then filter for the email addresses you're looking for. But in other cases, it may be much faster to find matching email addresses first and then see which of their owners are "inactive." A prepared statement must be planned to fit either case, but a direct query will be optimised based on table statistics, partial indexes, etc.
│ │ │ │So, as with any optimisation... measure where your real performance problems are before you start making changes, and then afterwards, measure whether your changes actually helped. Don't complicate your code unless it solves a real problem. Knuth's Law applies.
│ │ │ │ -Since libpqxx is a wrapper around libpq, the C-level client library, most strings you pass to the library should be compatible with C-style strings. So they must end with a single byte with value 0, and the text within them cannot contain any such zero bytes.
│ │ │ │(The pqxx::zview
type exists specifically to tell libpqxx: "this is a
│ │ │ │ C-compatible string, containing no zero bytes but ending in a zero byte.")
One example is prepared statement names. But the same also goes for the parameters values. Any string you pass as a parameter will end at the first char with value zero. If you pass a string that contains a zero byte, the last byte in the value will be the one just before the zero.
│ │ │ │So, if you need a zero byte in a string, consider that it's really a binary string, which is not the same thing as a text string. SQL represents binary data as the BYTEA
type, or in binary large objects ("blobs").
Most of the time it's fine to retrieve data from the database using SELECT
queries, and store data using INSERT
. But for those cases where efficiency matters, there are two data streaming mechanisms to help you do this more efficiently: "streaming queries," for reading query results from the database; and the pqxx::stream_to class, for writing data from the client into a table.
These are less flexible than SQL queries. Also, depending on your needs, it may be a problem to lose your connection while you're in mid-stream, not knowing that the query may not complete. But, you get some scalability and memory efficiencies in return.
│ │ │ │Just like regular querying, these streaming mechanisms do data conversion for you. You deal with the C++ data types, and the database deals with the SQL data types.
│ │ │ │ -So how do you deal with nulls? It depends on the C++ type you're using. Some types may have a built-in null value. For instance, if you have a char const *
value and you convert it to an SQL string, then converting a nullptr
will produce a NULL SQL value.
But what do you do about C++ types which don't have a built-in null value, such as int
? The trick is to wrap it in std::optional
. The difference between int
and std::optional<int>
is that the former always has an int
value, and the latter doesn't have to.
Actually it's not just std::optional
. You can do the same thing with std::unique_ptr
or std::shared_ptr
. A smart pointer is less efficient than std::optional
in most situations because they allocate their value on the heap, but sometimes that's what you want in order to save moving or copying large values around.
This part is not generic though. It won't work with just any smart-pointer type, just the ones which are explicitly supported: shared_ptr
and unique_ptr
. If you really need to, you can build support for additional wrappers and smart pointers by copying the implementation patterns from the existing smart-pointer support.
Use transaction_base::stream to read large amounts of data directly from the database. In terms of API it works just like transaction_base::query, but it's faster than the exec
and query
functions for larger data sets. Also, you won't need to keep your full result set in memory. That can really matter with larger data sets.
Another performance advantage is that with a streaming query, you can start processing your data right after the first row of data comes in from the server. With exec()
or query()
you need to wait to receive all data, and only then can you begin processing. With streaming queries you can be processing data on the client side while the server is still sending you the rest.
Not all kinds of queries will work in a stream. Internally the streams make use of PostgreSQL's COPY
command, so see the PostgreSQL documentation for COPY
for the exact limitations. Basic SELECT
and UPDATE ... RETURNING
queries will just work, but fancier constructs may not.
As you read a row, the stream converts its fields to a tuple type containing the value types you ask for:
│ │ │ │On each iteration, the stream gives you a std::tuple
of the column types you specify. It converts the row's fields (which internally arrive at the client in text format) to your chosen types.
The auto [name, score]
in the example is a structured binding which unpacks the tuple's fields into separate variables. If you prefer, you can choose to receive the tuple instead: for (std::tuple<int, std::string_view> row : ...)
.
Here are the things you need to be aware of when deciding whether to stream a query, or just execute it normally.
│ │ │ │First, when you stream a query, there is no metadata describing how many rows it returned, what the columns are called, and so on. With a regular query you get a result object which contains this metadata as well as the data itself. If you absolutely need this metadata for a particular query, then that means you can't stream the query.
│ │ │ │Second, under the bonnet, streaming from a query uses a PostgreSQL-specific SQL command COPY (...) TO STDOUT
. There are some limitations on what kinds of queries this command can handle. These limitations may change over time, so I won't describe them here. Instead, see PostgreSQL's COPY documentation for the details. (Look for the TO
variant, with a query as the data source.)
Third: when you stream a query, you start receiving and processing data before you even know whether you will receive all of the data. If you lose your connection to the database halfway through, you will have processed half your data, unaware that the query may never execute to completion. If this is a problem for your application, don't stream that query!
│ │ │ │The fourth and final factor is performance. If you're interested in streaming, obviously you care about this one.
│ │ │ │I can't tell you a priori whether streaming will make your query faster. It depends on how many rows you're retrieving, how much data there is in those rows, the speed of your network connection to the database, your client encoding, how much processing you do per row, and the details of the client-side system: hardware speed, CPU load, and available memory.
│ │ │ │Ultimately, no amount of theory beats real-world measurement for your specific situation so... if it really matters, measure. (And as per Knuth's Law: if it doesn't really matter, don't optimise.)
│ │ │ │That said, here are a few data points from some toy benchmarks:
│ │ │ │If your query returns e.g. a hundred small rows, it's not likely to make up a significant portion of your application's run time. Streaming is likely to be slower than regular querying, but most likely the difference just won't matter.
│ │ │ │If your query returns a thousand small rows, streaming is probably still going to be a bit slower than regular querying, though "your mileage may vary."
│ │ │ │If you're querying ten thousand small rows, however, it becomes more likely that streaming will speed it up. The advantage increases as the number of rows increases.
│ │ │ │That's for small rows, based on a test where each row consisted of just one integer number. If your query returns larger rows, with more columns, I find that streaming seems to become more attractive. In a simple test with 4 columns (two integers and two strings), streaming even just a thousand rows was considerably faster than a regular query.
│ │ │ │If your network connection to the database is slow, however, that may make streaming a bit less efficient. There is a bit more communication back and forth between the client and the database to set up a stream. This overhead takes a more or less constant amount of time, so for larger data sets it will tend to become insignificant compared to the other performance costs.
│ │ │ │ -Use stream_to
to write data directly to a database table. This saves you having to perform an INSERT
for every row, and so it can be significantly faster if you want to insert more than just one or two rows at a time.
As with stream_from
, you can specify the table and the columns, and not much else. You insert tuple-like objects of your choice: