--- /srv/reproducible-results/rbuild-debian/r-b-build.WK3hGIqL/b1/libpqxx_7.9.1-2_amd64.changes +++ /srv/reproducible-results/rbuild-debian/r-b-build.WK3hGIqL/b2/libpqxx_7.9.1-2_amd64.changes ├── Files │ @@ -1,5 +1,5 @@ │ │ 889b50df09b411dd73df7bc3702138e8 2283580 debug optional libpqxx-7.9-dbgsym_7.9.1-2_amd64.deb │ 5794ab68423219a23be4ffc4c8723215 183556 libs optional libpqxx-7.9_7.9.1-2_amd64.deb │ 573fdea12c1698ba6a1e8fa2b20ca530 346832 libdevel optional libpqxx-dev_7.9.1-2_amd64.deb │ - 0b770c7afad3d1e6b54cd890354f9398 2602980 doc optional libpqxx-doc_7.9.1-2_all.deb │ + 5270499e3eeb00f4faab3591a1023c7b 2602916 doc optional libpqxx-doc_7.9.1-2_all.deb ├── libpqxx-doc_7.9.1-2_all.deb │ ├── file list │ │ @@ -1,3 +1,3 @@ │ │ -rw-r--r-- 0 0 0 4 2024-07-10 18:27:49.000000 debian-binary │ │ --rw-r--r-- 0 0 0 31940 2024-07-10 18:27:49.000000 control.tar.xz │ │ --rw-r--r-- 0 0 0 2570848 2024-07-10 18:27:49.000000 data.tar.xz │ │ +-rw-r--r-- 0 0 0 31984 2024-07-10 18:27:49.000000 control.tar.xz │ │ +-rw-r--r-- 0 0 0 2570740 2024-07-10 18:27:49.000000 data.tar.xz │ ├── control.tar.xz │ │ ├── control.tar │ │ │ ├── ./md5sums │ │ │ │ ├── ./md5sums │ │ │ │ │┄ Files differ │ ├── data.tar.xz │ │ ├── data.tar │ │ │ ├── file list │ │ │ │ @@ -385,15 +385,15 @@ │ │ │ │ -rw-r--r-- 0 root (0) root (0) 9688 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/connection-sql__cursor_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8828 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/connection-stream__from_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8758 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/connection-stream__to_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 14179 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/connection-transaction_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 128127 2024-07-10 18:27:49.000000 
./usr/share/doc/libpqxx-doc/doxygen-html/connection_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 257528 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/conversions_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 59526 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/cursor_8hxx_source.html │ │ │ │ --rw-r--r-- 0 root (0) root (0) 36025 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/datatypes.html │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 36027 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/datatypes.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 13979 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/dbtransaction_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 19448 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/deprecated.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 5220 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/dir_09456df80b5baeba1147d2b9ef5f002c.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 397 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/dir_09456df80b5baeba1147d2b9ef5f002c_dep.map │ │ │ │ -rw-r--r-- 0 root (0) root (0) 1857 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/dir_09456df80b5baeba1147d2b9ef5f002c_dep.png │ │ │ │ -rw-r--r-- 0 root (0) root (0) 11099 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/dir_3abbb4e2076021b5d2239498be5fcb30.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 1544 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/dir_3abbb4e2076021b5d2239498be5fcb30.js │ │ │ │ @@ -821,30 +821,30 @@ │ │ │ │ -rw-r--r-- 0 root (0) root (0) 111 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/nav_hd.png │ │ │ │ -rw-r--r-- 0 root (0) root (0) 2167 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtree.css │ │ │ │ -rw-r--r-- 0 root (0) root (0) 15935 2024-07-10 
18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtree.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 5932 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreedata.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 19176 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex0.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 21143 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex1.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 19444 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex2.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 15186 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex3.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 15188 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex3.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 18077 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex4.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 17721 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex5.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 17719 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex5.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 271 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/navtreeindex6.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 13252 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/nontransaction_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 15955 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/notification_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 122 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/open.png │ │ │ │ -rw-r--r-- 0 root (0) root (0) 6420 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/pages.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 10844 2024-07-10 18:27:49.000000 
./usr/share/doc/libpqxx-doc/doxygen-html/parameters.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 53070 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/params_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 5810 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/performance.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 29854 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/pipeline_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 696 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/plus.svg │ │ │ │ -rw-r--r-- 0 root (0) root (0) 696 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/plusd.svg │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8892 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/pqxx-source_8hxx_source.html │ │ │ │ --rw-r--r-- 0 root (0) root (0) 13311 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/prepared.html │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 13309 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/prepared.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 4859 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/prepared__statement_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 101843 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/range_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 5685 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/resize.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8659 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/result-connection_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 10652 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/result-creation_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8755 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/result-pipeline_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8590 
2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/result-sql__cursor_8hxx_source.html │ │ │ │ @@ -854,34 +854,34 @@ │ │ │ │ -rw-r--r-- 0 root (0) root (0) 19458 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/robusttransaction_8hxx_source.html │ │ │ │ -rw-r--r-- 0 root (0) root (0) 90886 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/row_8hxx_source.html │ │ │ │ drwxr-xr-x 0 root (0) root (0) 0 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/ │ │ │ │ -rw-r--r-- 0 root (0) root (0) 7418 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_0.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 4742 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_1.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 6978 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_10.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 21004 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_11.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 7438 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_12.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 7440 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_12.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 2857 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_13.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 906 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_14.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 1394 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_15.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 95 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_16.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 1207 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_17.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 319 
2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_18.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 15180 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_2.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 15181 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_2.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 4706 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_3.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8727 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_4.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 6995 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_5.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 5454 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_6.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 1038 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_7.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 5821 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_8.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 147 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_9.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 2860 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_a.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 1297 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_b.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 8995 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_c.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 8996 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_c.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 8813 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_d.js │ │ │ │ --rw-r--r-- 0 root (0) root (0) 6247 2024-07-10 18:27:49.000000 
./usr/share/doc/libpqxx-doc/doxygen-html/search/all_e.js │ │ │ │ +-rw-r--r-- 0 root (0) root (0) 6245 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_e.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 4374 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/all_f.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 875 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_0.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 1086 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_1.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 975 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_10.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 835 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_11.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 157 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_12.js │ │ │ │ -rw-r--r-- 0 root (0) root (0) 85 2024-07-10 18:27:49.000000 ./usr/share/doc/libpqxx-doc/doxygen-html/search/classes_13.js │ │ │ ├── ./usr/share/doc/libpqxx-doc/doxygen-html/accessing-results.html │ │ │ │ @@ -93,38 +93,38 @@ │ │ │ │
A query produces a result set consisting of rows, and each row consists of fields. There are several ways to receive this data.
│ │ │ │The fields are "untyped." That is to say, libpqxx has no opinion on what their types are. The database sends the data in a very flexible textual format. When you read a field, you specify what type you want it to be, and libpqxx converts the text format to that type for you.
│ │ │ │If a value does not conform to the format for the type you specify, the conversion fails. For example, if you have strings that all happen to contain numbers, you can read them as int
. But if any of the values is empty, or it's null (for a type that doesn't support null), or it's some string that does not look like an integer, or it's too large, you can't convert it to int
.
So usually, reading result data from the database means not just retrieving the data; it also means converting it to some target type.
│ │ │ │ -The simplest way to query rows of data is to call one of a transaction's "query" functions, passing as template arguments the types of columns you want to get back (e.g. int
, std::string
, double
, and so on) and as a regular argument the query itself.
You can then iterate over the result to go over the rows of data:
│ │ │ │The "query" functions execute your query, load the complete result data from the database, and then as you iterate, convert each row it received to a tuple of C++ types that you indicated.
│ │ │ │There are different query functions for querying any number of rows (query()
); querying just one row of data as a std::tuple
and throwing an error if there's more than one row (query1()
); or querying
There's another way to go through the rows coming out of a query. It's usually easier and faster if there are a lot of rows, but there are drawbacks.
│ │ │ │One, you start getting rows before all the data has come in from the database. That speeds things up, but what happens if you lose your network connection while transferring the data? Your application may already have processed some of the data before finding out that the rest isn't coming. If that is a problem for your application, streaming may not be the right choice.
│ │ │ │Two, streaming only works for some types of query. The stream()
function wraps your query in a PostgreSQL COPY
command, and COPY
only supports a few commands: SELECT
, VALUES
, or an INSERT
, UPDATE
, or DELETE
with a RETURNING
clause. See the COPY
documentation here: [ https://www.postgresql.org/docs/current/sql-copy.html ](https://www.postgresql.org/docs/current/sql-copy.html).
Three, when you convert a field to a "view" type (such as std::string_view
or pqxx::bytes_view
), the view points to underlying data which only stays valid until you iterate to the next row or exit the loop. So if you want to use that data for longer than a single iteration of the streaming loop, you'll have to store it somewhere yourself.
Now for the good news. Streaming does make it very easy to query data and loop over it, and often faster than with the "query" or "exec" functions:
│ │ │ │The conversion to C++ types (here int
, std::string_view
, and two float
s) is built into the function. You never even see row
objects, field
objects, iterators, or conversion methods. You just put in your query and you receive your data.
Sometimes you want more from a query result than just rows of data. You may need to know right away how many rows of result data you received, or how many rows your UPDATE
statement has affected, or the names of the columns, etc.
For that, use the transaction's "exec" query execution functions. Apart from a few exceptions, these return a pqxx::result
object. A result
is a container of pqxx::row
objects, so you can iterate them as normal, or index them like you would index an array. Each row
in turn is a container of pqxx::field
, Each field
holds a value, but doesn't know its type. You specify the type when you read the value.
For example, your code might do:
│ │ │ │The other takes a pointer and a size:
│ │ │ │There are some restrictions on binary_cast
that you must be aware of.
First, your data must of a type that gives us bytes. So: char
, unsigned char
, signed char
, int8_t
, uint8_t
, or of course std::byte
. You can't feed in a vector of double
, or anything like that.
Second, the data must be laid out as a contiguous block in memory. If there's no std::data()
implementation for your type, it's not suitable.
Third, binary_cast
only constructs something like a std::string_view
. It does not make a copy of your actual data. So, make sure that your data remains alive and in the same place while you're using it.
Input stream that gets its data from a large object.
│ │ │ │ -This class worked like any other istream, but to read data from a large object. It supported all formatting and streaming operations of std::istream
.
This functionality was considered too fragile and complex, so it has been replaced with a single, much simpler class.
│ │ │ │Stream that reads and writes a large object.
│ │ │ │ -This worked like a std::iostream, but to read data from, or write data to, a large object. It supported all formatting and streaming operations of std::iostream
.
This functionality was considered too fragile and complex, so it has been replaced with a single, much simpler class.
│ │ │ │Output stream that writes data back to a large object.
│ │ │ │ -This worked like any other ostream, but to write data to a large object. It supported all formatting and streaming operations of std::ostream
.
This functionality was considered too fragile and complex, so it has been replaced with a single, much simpler class.
│ │ │ │Set session variable, using SQL's SET
command.
SET
command.SET
command.var | Variable to set. |
value | New value for Var. This can be any SQL expression. If it's a string, be sure that it's properly escaped and quoted. |
│ │ │ │ PQXX_PRIVATE std::string | reason (connection const &, int err) const |
Identity of a large object.
│ │ │ │ -Encapsulates the identity of a large object.
│ │ │ │A largeobject must be accessed only from within a backend transaction, but the object's identity remains valid as long as the object exists.
│ │ │ │Streambuf to use large objects in standard I/O streams.
│ │ │ │ -The standard streambuf classes provide uniform access to data storage such as files or string buffers, so they can be accessed using standard input or output streams. This streambuf implementation provided similar access to large objects, so they could be read and written using the same stream classes.
│ │ │ │This functionality was considered too fragile and complex, so it has been replaced with a single, much simpler class.
│ │ │ │Accessor for large object's contents.
│ │ │ │ -blob
class instead. blob
class instead. ( | │ │ │ │binarystring const & | │ │ │ │value | ) | │ │ │ │& | │ │ │ │ │ │ │ │
The binarystring must stay valid for as long as the params
remains active.
Produce a slice of this row, containing the given range of columns.
│ │ │ │ -The slice runs from the range's starting column to the range's end column, exclusive. It looks just like a normal result row, except slices can be empty.
│ │ │ │ │ │ │ │Stream data from the database.
│ │ │ │ - │ │ │ │ + │ │ │ │For larger data sets, retrieving data this way is likely to be faster than executing a query and then iterating and converting the rows fields. You will also be able to start processing before all of the data has come in.
│ │ │ │There are also downsides. Not all kinds of query will work in a stream. But straightforward SELECT
and UPDATE ... RETURNING
queries should work. This function makes use of pqxx::stream_from, which in turn uses PostgreSQL's COPY
command, so see the documentation for those to get the full details.
There are other downsides. If there stream encounters an error, it may leave the entire connection in an unusable state, so you'll have to give the whole thing up. Finally, opening a stream puts the connection in a special state, so you won't be able to do many other things with the connection or the transaction while the stream is open.
│ │ │ │There are two ways of starting a stream: you stream either all rows in a table (using one of the factories, table()
or raw_table()
), or the results of a query (using the query()
factory).
Usually you'll want the stream
convenience wrapper in transaction_base, * so you don't need to deal with this class directly.
Execute query, and stream over the results.
│ │ │ │ -Create a stream, without specifying columns.
│ │ │ │ -Fields will be inserted in whatever order the columns have in the database.
│ │ │ │You'll probably want to specify the columns, so that the mapping between your data fields and the table is explicit in your code, and not hidden in an "implicit contract" between your code and your schema.
│ │ │ │ │ │ │ │Create a stream, specifying column names as a container of strings.
│ │ │ │ -Create a stream, specifying column names as a sequence of strings.
│ │ │ │ -Communication with the database mostly happens in a text format. When you include an integer value in a query, either you use to_string
to convert it to that text format, or under the bonnet, libpqxx does it for you. When you get a query result field "as a float," libpqxx converts from the text format to a floating-point type. These conversions are everywhere in libpqxx.
The conversion system supports many built-in types, but it is also extensible. You can "teach" libpqxx (in the scope of your own application) to convert additional types of values to and from PostgreSQL's string format.
│ │ │ │This is massively useful, but it's not for the faint of heart. You'll need to specialise some templates. And, the API for doing this can change with any major libpqxx release.
│ │ │ │If that happens, your code may fail to compile with the newer libpqxx version, and you'll have to go through the NEWS
file to find the API changes. Usually it'll be a small change, like an additional function you need to implement, or a constant you need to define.
In your application, a conversion is driven entirely by a C++ type you specify. The value's SQL type on the database side has nothing to do with it. Nor is there anything in the string that would identify its type. Your code says "convert to this type" and libpqxx does it.
│ │ │ │So, if you've SELECTed a 64-bit integer from the database, and you try to convert it to a C++ short,
one of two things will happen: either the number is small enough to fit in your short
and it just works, or else it throws a conversion exception. Similarly, if you try to read a 32-bit SQL int
as a C++ 32-bit unsigned int
, that'll work fine, unless the value happens to be negative. In such cases the conversion will throw a conversion_error
.
Or, your database table might have a text column, but a given field may contain a string that looks just like a number. You can convert that value to an integer type just fine. Or to a floating-point type. All that matters to the conversion is the actual value, and the type your code specifies.
│ │ │ │In some cases the templates for these conversions can tell the type from the arguments you pass them:
│ │ │ │In other cases you may need to instantiate template explicitly:
│ │ │ │Let's say you have some other SQL type which you want to be able to store in, or retrieve from, the database. What would it take to support that?
│ │ │ │Sometimes you do not need complete support. You might need a conversion to a string but not from a string, for example. You write out the conversion at compile time, so don't be too afraid to be incomplete. If you leave out one of these steps, it's not going to crash at run time or mess up your data. The worst that can happen is that your code won't build.
│ │ │ │So what do you need for a complete conversion?
│ │ │ │First off, of course, you need a C++ type. It may be your own, but it doesn't have to be. It could be a type from a third-party library, or even one from the standard library that libpqxx does not yet support.
│ │ │ │First thing to do is specialise the pqxx::type_name
variable to give the type a human-readable name. That allows libpqxx error messages and such to talk about the type. If you don't define a name, libpqxx will try to figure one out with some help from the compiler, but it may not always be easy to read.
Then, does your type have a built-in null value? For example, a char *
can be null on the C++ side. Or some types are always null, such as nullptr
. You specialise the pqxx::nullness
template to specify the details.
Finally, you specialise the pqxx::string_traits
template. This is where you define the actual conversions.
Let's go through these steps one by one.
│ │ │ │ -You'll need a type for which the conversions are not yet defined, because the C++ type is what determines the right conversion. One type, one set of conversions.
│ │ │ │The type doesn't have to be one that you create. The conversion logic was designed such that you can build it around any type. So you can just as easily build a conversion for a type that's defined somewhere else. There's no need to include any special methods or other members inside the type itself. That's also why libpqxx can convert built-in types like int
.
By the way, if the type is an enum, you don't need to do any of this. Just invoke the preprocessor macro PQXX_DECLARE_ENUM_CONVERSION
, from the global namespace near the top of your translation unit, and pass the type as an argument.
The library also provides specialisations for std::optional<T>
, std::shared_ptr<T>
, and std::unique_ptr<T>
. If you have conversions for T
, you'll also automatically have conversions for those.
When errors happen during conversion, libpqxx will compose error messages for the user. Sometimes these will include the name of the type that's being converted.
│ │ │ │To tell libpqxx the name of each type, there's a template variable called pqxx::type_name
. For any given type T
, it should have a specialisation that provides that T
's human-readable name:
(Yes, this means that you need to define something inside the pqxx namespace. Future versions of libpqxx may move this into a separate namespace.)
│ │ │ │Define this early on in your translation unit, before any code that might cause libpqxx to need the name. That way, the libpqxx code which needs to know the type's name can see your definition.
│ │ │ │ -A struct template pqxx::nullness
defines whether your type has a natural "null value" built in. If so, it also provides member functions for producing and recognising null values.
The simplest scenario is also the most common: most types don't have a null value built in. There is no "null `int`" in C++. In that kind of case, just derive your nullness traits from pqxx::no_null
as a shorthand:
You may be wondering why there's a function to produce a null value, but also a function to check whether a value is null. Why not just compare the value to the result of null()
? Because two null values may not be equal (like in SQL, where NULL <> NULL
). Or T
may have multiple different null values. Or T
may override the comparison operator to behave in some unusual way.
As a third case, your type may be one that always represents a null value. This is the case for std::nullptr_t
and std::nullopt_t
. In that case, you set nullness<TYPE>::always_null
to true
(as well as has_null
of course), and you won't need to define any actual conversions.
This part is the most work. You can skip it for types that are always null, but those will be rare.
│ │ │ │The APIs for doing this are designed so that you don't need to allocate memory on the free store, also known as "the heap": new
/delete
. Memory allocation can be hidden inside std::string
, std::vector
, etc. The conversion API allows you to use std::string
for convenience, or memory buffers for speed.
Start by specialising the pqxx::string_traits
template. You don't absolutely have to implement all parts of this API. Generally, if it compilers, you're OK for the time being. Just bear in mind that future libpqxx versions may change the API — or how it uses the API internally.
You'll also need to write those member functions, or as many of them as needed to get your code to build.
│ │ │ │ -We start off simple: from_string
parses a string as a value of T
, and returns that value.
The string may or may not be zero-terminated; it's just the string_view
from beginning to end (with end
being exclusive). In your tests, be sure to cover cases where the string does not end in a zero byte!
It's perfectly possible that the string doesn't actually represent a T
value. Mistakes happen. There can be corner cases. When you run into this, throw a pqxx::conversion_error
.
(Of course it's also possible that you run into some other error, so it's fine to throw different exceptions as well. But when it's definitely "this is not
│ │ │ │ the right format for a `T`," throw conversion_error
.)
In this function, you convert a value of T
into a string that the postgres server will understand.
The caller will provide you with a buffer where you can write the string, if you need it: from begin
to end
exclusive. It's a half-open interval, so don't access *end
.
If the buffer is insufficient for you to do the conversion, throw a pqxx::conversion_overrun
. It doesn't have to be exact: you can be a little pessimistic and demand a bit more space than you need. Just be sure to throw the exception if there's any risk of overrunning the buffer.
You don't have to use the buffer for this function though. For example, pqxx::string_traits<bool>::to_buf
returns a compile-time constant string and completely ignores the buffer.
Even if you do use the buffer, your string does not have to start at the beginning of the buffer. For example, the integer conversions may work from right to left, if that's easier: they can start by writing the least significant digit to the end of the buffer, divide the remainder by 10, and repeat for the next digit.
│ │ │ │Return a pqxx::zview
. This is basically a std::string_view
, but with one difference: when you create a zview
you guarantee that there is a valid zero byte right after the string_view
. The zero byte does not count as part of its size, but it has to be there.
The trailing zero should not go inside the zview
, but if you convert into the buffer, do make sure you that trailing stays inside the buffer, i.e. before the end
. (If there's no room for that zero inside the buffer, throw pqxx::conversion_error
).
Beware of locales when converting. If you use standard library features like sprintf
, they may obey whatever locale is currently set on the system where the code runs. That means that a simple integer like 1000000 may come out as "1000000" on your system, but as "1,000,000" on mine, or as "1.000.000" for somebody else, and on an Indian system it may be "1,00,000". Don't let that happen, or it will confuse things. Use only non-locale-sensitive library functions. Values coming from or going to the database should be in fixed, non-localised formats.
If your conversions need to deal with fields in types that libpqxx already supports, you can use the conversion functions for those: pqxx::from_string
, pqxx::to_string
, pqxx::to_buf
. They in turn will call the string_traits
specialisations for those types. Or, you can call their string_traits
directly.
This is a stricter version of to_buf
. All the same requirements apply, but in addition you must write your string into the given buffer, starting exactly at begin
.
That's why this function returns just a simple pointer: the address right behind the trailing zero. If the caller wants to use the string, they can find it at begin
. If they want to write another value into the rest of the buffer, they can continue writing at the location you returned.
Here you estimate how much buffer space you need for converting a T
to a string. Be precise if you can, but pessimistic if you must. It's usually better to waste a few bytes of space than to spend a lot of time computing the exact buffer space you need. And failing the conversion because you under-budgeted the buffer is worst of all.
Include the trailing zero in the buffer size. If your to_buf
takes more space than just what's needed to store the result, include that too.
Make size_buffer
a constexpr
function if you can. It can allow the caller to allocate the buffer on the stack, with a size known at compile time.
When converting arrays or composite values to strings, libpqxx may need to quote values and escape any special characters. This takes time.
│ │ │ │Some types though, such as integral or floating-point types, can never have any special characters such as quotes, commas, or backslashes in their string representations. In such cases, there's no need to quote or escape such values in SQL arrays or composite types.
│ │ │ │If your type is like that, you can tell libpqxx about this by defining:
│ │ │ │The code that converts this type of field to strings in an array or a composite type can then use a simpler, more efficient variant of the code. It's always safe to leave this out; it's just an optimisation for when you're completely sure that it's safe.
│ │ │ │Do not do this if a string representation of your type may contain a comma; semicolon; parenthesis; brace; quote; backslash; newline; or any other character that might need escaping.
│ │ │ │ -This one you don't generally need to worry about. Read on if you're writing a type which represents raw binary data, or if you're writing a template where some specialisations may contain raw binary data.
│ │ │ │When you call parameterised statements, or prepared statements with parameters, libpqxx needs to pass your parameters on to libpq, the underlying C-level PostgreSQL client library.
│ │ │ │There are two formats for doing that: text and binary. In the first, we represent all values as strings in the PostgreSQL text format, and the server then converts them into its own internal binary representation. That's what those string conversions above are all about, and it's what we do for almost all types of parameters.
│ │ │ │But we do it differently when the parameter is a contiguous series of raw bytes and the corresponding SQL type is BYTEA
. There is a text format for those, but we bypass it for efficiency. The server can use the binary data in the exact same form, without any conversion or extra processing. The binary data is also twice as compact during transport.
(People sometimes ask why we can't just treat all types as binary. However the general case isn't so clear-cut. The binary formats are not documented, there are no guarantees that they will be platform-independent or that they will remain stable across postgres releases, and there's no really solid way to detect when we might get the format wrong. On top of all that, the conversions aren't necessarily as straightforward and efficient as they sound. So, for the general case, libpqxx sticks with the text formats. Raw binary data alone stands out as a clear win.)
│ │ │ │Long story short, the machinery for passing parameters needs to know: is this parameter a binary string, or not? In the normal case it can assume "no," and that's what it does. The text format is always a safe choice; we just try to use the binary format where it's faster.
│ │ │ ├── ./usr/share/doc/libpqxx-doc/doxygen-html/deprecated.html │ │ │ │ @@ -93,81 +93,81 @@ │ │ │ │int
, use the field's as<...>()
member function. To read a field efficiently just as a string, use its c_str()
or its as<std::string_view>()
. bytes
and bytes_view
for binary data. In C++20 or better, any contiguous_range
of std::byte
will do. std::span<std::byte>
. SET
command. SET
command. field::as<...>()
or field::c_str()
. blob
class instead. blob
class instead. SET
command. To set a session variable, use the connection's set_session_var function.struct pqxx::from_query_t | │ │ │ │
Marker for stream_from constructors: "stream from query.".
│ │ │ │ -struct pqxx::from_table_t | │ │ │ │
Marker for stream_from constructors: "stream from table.".
│ │ │ │ -Remove any constness, volatile, and reference-ness from a type.
│ │ │ │ -Encrypt a password.
│ │ │ │ -Encrypt password.
│ │ │ │ -Pass this to a stream_from
constructor to stream query results.
Pass this to a stream_from
constructor to stream table contents.
Most of the time it's fine to retrieve data from the database using SELECT
queries, and store data using INSERT
. But for those cases where efficiency matters, there are two data streaming mechanisms to help you do this more efficiently: "streaming queries," for reading query results from the database; and the pqxx::stream_to class, for writing data from the client into a table.
These are less flexible than SQL queries. Also, depending on your needs, it may be a problem to lose your connection while you're in mid-stream, not knowing that the query may not complete. But, you get some scalability and memory efficiencies in return.
│ │ │ │Just like regular querying, these streaming mechanisms do data conversion for you. You deal with the C++ data types, and the database deals with the SQL data types.
│ │ │ │ -So how do you deal with nulls? It depends on the C++ type you're using. Some types may have a built-in null value. For instance, if you have a char const *
value and you convert it to an SQL string, then converting a nullptr
will produce a NULL SQL value.
But what do you do about C++ types which don't have a built-in null value, such as int
? The trick is to wrap it in std::optional
. The difference between int
and std::optional<int>
is that the former always has an int
value, and the latter doesn't have to.
Actually it's not just std::optional
. You can do the same thing with std::unique_ptr
or std::shared_ptr
. A smart pointer is less efficient than std::optional
in most situations because they allocate their value on the heap, but sometimes that's what you want in order to save moving or copying large values around.
This part is not generic though. It won't work with just any smart-pointer type, just the ones which are explicitly supported: shared_ptr
and unique_ptr
. If you really need to, you can build support for additional wrappers and smart pointers by copying the implementation patterns from the existing smart-pointer support.
Use transaction_base::stream to read large amounts of data directly from the database. In terms of API it works just like transaction_base::query, but it's faster than the exec
and query
functions for larger data sets. Also, you won't need to keep your full result set in memory. That can really matter with larger data sets.
Another performance advantage is that with a streaming query, you can start processing your data right after the first row of data comes in from the server. With exec()
or query()
you need to wait to receive all data, and only then can you begin processing. With streaming queries you can be processing data on the client side while the server is still sending you the rest.
Not all kinds of queries will work in a stream. Internally the streams make use of PostgreSQL's COPY
command, so see the PostgreSQL documentation for COPY
for the exact limitations. Basic SELECT
and UPDATE ... RETURNING
queries will just work, but fancier constructs may not.
As you read a row, the stream converts its fields to a tuple type containing the value types you ask for:
│ │ │ │On each iteration, the stream gives you a std::tuple
of the column types you specify. It converts the row's fields (which internally arrive at the client in text format) to your chosen types.
The auto [name, score]
in the example is a structured binding which unpacks the tuple's fields into separate variables. If you prefer, you can choose to receive the tuple instead: for (std::tuple<int, std::string_view> :
.
Here are the things you need to be aware of when deciding whether to stream a query, or just execute it normally.
│ │ │ │First, when you stream a query, there is no metadata describing how many rows it returned, what the columns are called, and so on. With a regular query you get a result object which contains this metadata as well as the data itself. If you absolutely need this metadata for a particular query, then that means you can't stream the query.
│ │ │ │Second, under the bonnet, streaming from a query uses a PostgreSQL-specific SQL command COPY (...) TO STDOUT
. There are some limitations on what kinds of queries this command can handle. These limitations may change over time, so I won't describe them here. Instead, see PostgreSQL's COPY documentation for the details. (Look for the TO
variant, with a query as the data source.)
Third: when you stream a query, you start receiving and processing data before you even know whether you will receive all of the data. If you lose your connection to the database halfway through, you will have processed half your data, unaware that the query may never execute to completion. If this is a problem for your application, don't stream that query!
│ │ │ │The fourth and final factor is performance. If you're interested in streaming, obviously you care about this one.
│ │ │ │I can't tell you a priori whether streaming will make your query faster. It depends on how many rows you're retrieving, how much data there is in those rows, the speed of your network connection to the database, your client encoding, how much processing you do per row, and the details of the client-side system: hardware speed, CPU load, and available memory.
│ │ │ │Ultimately, no amount of theory beats real-world measurement for your specific situation so... if it really matters, measure. (And as per Knuth's Law: if it doesn't really matter, don't optimise.)
│ │ │ │That said, here are a few data points from some toy benchmarks:
│ │ │ │If your query returns e.g. a hundred small rows, it's not likely to make up a significant portion of your application's run time. Streaming is likely to be slower than regular querying, but most likely the difference just won't matter.
│ │ │ │If your query returns a thousand small rows, streaming is probably still going to be a bit slower than regular querying, though "your mileage may vary."
│ │ │ │If you're querying ten thousand small rows, however, it becomes more likely that streaming will speed it up. The advantage increases as the number of rows increases.
│ │ │ │That's for small rows, based on a test where each row consisted of just one integer number. If your query returns larger rows, with more columns, I find that streaming seems to become more attractive. In a simple test with 4 columns (two integers and two strings), streaming even just a thousand rows was considerably faster than a regular query.
│ │ │ │If your network connection to the database is slow, however, that may make streaming a bit less efficient. There is a bit more communication back and forth between the client and the database to set up a stream. This overhead takes a more or less constant amount of time, so for larger data sets it will tend to become insignificant compared to the other performance costs.
│ │ │ │ -Use stream_to
to write data directly to a database table. This saves you having to perform an INSERT
for every row, and so it can be significantly faster if you want to insert more than just one or two rows at a time.
As with stream_from
, you can specify the table and the columns, and not much else. You insert tuple-like objects of your choice: