Merge remote-tracking branch 'origin/upstream-master' am: d47448d647

Original change: https://android-review.googlesource.com/c/platform/external/liburing/+/1773366

Change-Id: Icabe9401694c9594dcfd11510a09c5b26940bd2f
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..7a6f75c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,130 @@
+*.rej
+*.orig
+*~
+/*.patch
+
+*.o
+*.o[ls]
+
+/src/liburing.a
+/src/liburing.so*
+/src/include/liburing/compat.h
+
+/examples/io_uring-cp
+/examples/io_uring-test
+/examples/link-cp
+/examples/ucontext-cp
+
+/test/232c93d07b74-test
+/test/35fa71a030ca-test
+/test/500f9fbadef8-test
+/test/7ad0e4b2f83c-test
+/test/8a9973408177-test
+/test/917257daa0fe-test
+/test/a0908ae19763-test
+/test/a4c0b3decb33-test
+/test/accept
+/test/accept-link
+/test/accept-reuse
+/test/accept-test
+/test/across-fork
+/test/b19062a56726-test
+/test/b5837bd5311d-test
+/test/ce593a6c480a-test
+/test/close-opath
+/test/config.local
+/test/connect
+/test/cq-full
+/test/cq-overflow
+/test/cq-overflow-peek
+/test/cq-peek-batch
+/test/cq-ready
+/test/cq-size
+/test/d4ae271dfaae-test
+/test/d77a67ed5f27-test
+/test/defer
+/test/double-poll-crash
+/test/eeed8b54e0df-test
+/test/eventfd
+/test/eventfd-disable
+/test/eventfd-ring
+/test/fadvise
+/test/fallocate
+/test/fc2a85cb02ef-test
+/test/file-register
+/test/file-update
+/test/files-exit-hang-poll
+/test/files-exit-hang-timeout
+/test/fixed-link
+/test/fsync
+/test/hardlink
+/test/io-cancel
+/test/io_uring_enter
+/test/io_uring_register
+/test/io_uring_setup
+/test/iopoll
+/test/lfs-openat
+/test/lfs-openat-write
+/test/link
+/test/link-timeout
+/test/link_drain
+/test/madvise
+/test/mkdir
+/test/nop
+/test/nop-all-sizes
+/test/open-close
+/test/openat2
+/test/personality
+/test/pipe-eof
+/test/pipe-reuse
+/test/poll
+/test/poll-cancel
+/test/poll-cancel-ton
+/test/poll-link
+/test/poll-many
+/test/poll-ring
+/test/poll-v-poll
+/test/probe
+/test/read-write
+/test/register-restrictions
+/test/rename
+/test/ring-leak
+/test/ring-leak2
+/test/self
+/test/send_recv
+/test/send_recvmsg
+/test/sendmsg_fs_cve
+/test/shared-wq
+/test/short-read
+/test/shutdown
+/test/sigfd-deadlock
+/test/socket-rw
+/test/socket-rw-eagain
+/test/splice
+/test/sq-full
+/test/sq-full-cpp
+/test/sq-poll-dup
+/test/sq-poll-kthread
+/test/sq-poll-share
+/test/sqpoll-disable-exit
+/test/sqpoll-exit-hang
+/test/sqpoll-sleep
+/test/sq-space_left
+/test/statx
+/test/stdout
+/test/submit-reuse
+/test/symlink
+/test/teardowns
+/test/thread-exit
+/test/timeout
+/test/timeout-new
+/test/timeout-overflow
+/test/unlink
+/test/wakeup-hang
+/test/*.dmesg
+
+config-host.h
+config-host.mak
+config.log
+
+liburing.pc
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..e02fdd0
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,22 @@
+language: cpp
+os:
+  - linux
+compiler:
+  - clang
+  - gcc
+env:
+  matrix:
+    - BUILD_ARCH="x86"
+    - BUILD_ARCH="x86_64"
+  global:
+    - MAKEFLAGS="-j 2"
+matrix:
+  exclude:
+    - os: linux
+      compiler: clang
+      env: BUILD_ARCH="x86" # Only do the gcc x86 build to reduce clutter
+before_install:
+  - EXTRA_CFLAGS="-Werror"
+script:
+  - ./configure && make
+  - sudo make runtests || true
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..e5ab03e
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,502 @@
+                  GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+                  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+                            NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/COPYING.GPL b/COPYING.GPL
new file mode 100644
index 0000000..d159169
--- /dev/null
+++ b/COPYING.GPL
@@ -0,0 +1,339 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..ae941fa
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,7 @@
+Copyright 2020 Jens Axboe
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..5d9c4dc
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,84 @@
+NAME=liburing
+SPECFILE=$(NAME).spec
+VERSION=$(shell awk '/Version:/ { print $$2 }' $(SPECFILE))
+TAG = $(NAME)-$(VERSION)
+RPMBUILD=$(shell `which rpmbuild >&/dev/null` && echo "rpmbuild" || echo "rpm")
+
+INSTALL=install
+
+default: all
+
+all:
+	@$(MAKE) -C src
+	@$(MAKE) -C test
+	@$(MAKE) -C examples
+
+.PHONY: all install default clean test
+.PHONY: FORCE cscope
+
+partcheck: all
+	@echo "make partcheck => TODO add tests with out kernel support"
+
+runtests: all
+	@$(MAKE) -C test runtests
+runtests-loop:
+	@$(MAKE) -C test runtests-loop
+
+config-host.mak: configure
+	@if [ ! -e "$@" ]; then					\
+	  echo "Running configure ...";				\
+	  ./configure;						\
+	else							\
+	  echo "$@ is out-of-date, running configure";		\
+	  sed -n "/.*Configured with/s/[^:]*: //p" "$@" | sh;	\
+	fi
+
+ifneq ($(MAKECMDGOALS),clean)
+include config-host.mak
+endif
+
+%.pc: %.pc.in config-host.mak $(SPECFILE)
+	sed -e "s%@prefix@%$(prefix)%g" \
+	    -e "s%@libdir@%$(libdir)%g" \
+	    -e "s%@includedir@%$(includedir)%g" \
+	    -e "s%@NAME@%$(NAME)%g" \
+	    -e "s%@VERSION@%$(VERSION)%g" \
+	    $< >$@
+
+install: $(NAME).pc
+	@$(MAKE) -C src install prefix=$(DESTDIR)$(prefix) \
+		includedir=$(DESTDIR)$(includedir) \
+		libdir=$(DESTDIR)$(libdir) \
+		libdevdir=$(DESTDIR)$(libdevdir) \
+		relativelibdir=$(relativelibdir)
+	$(INSTALL) -D -m 644 $(NAME).pc $(DESTDIR)$(libdevdir)/pkgconfig/$(NAME).pc
+	$(INSTALL) -m 755 -d $(DESTDIR)$(mandir)/man2
+	$(INSTALL) -m 644 man/*.2 $(DESTDIR)$(mandir)/man2
+	$(INSTALL) -m 755 -d $(DESTDIR)$(mandir)/man3
+	$(INSTALL) -m 644 man/*.3 $(DESTDIR)$(mandir)/man3
+	$(INSTALL) -m 755 -d $(DESTDIR)$(mandir)/man7
+	$(INSTALL) -m 644 man/*.7 $(DESTDIR)$(mandir)/man7
+
+install-tests:
+	@$(MAKE) -C test install prefix=$(DESTDIR)$(prefix) datadir=$(DESTDIR)$(datadir)
+
+clean:
+	@rm -f config-host.mak config-host.h cscope.out $(NAME).pc test/*.dmesg
+	@$(MAKE) -C src clean
+	@$(MAKE) -C test clean
+	@$(MAKE) -C examples clean
+
+cscope:
+	@cscope -b -R
+
+tag-archive:
+	@git tag $(TAG)
+
+create-archive:
+	@git archive --prefix=$(NAME)-$(VERSION)/ -o $(NAME)-$(VERSION).tar.gz $(TAG)
+	@echo "The final archive is ./$(NAME)-$(VERSION).tar.gz."
+
+archive: clean tag-archive create-archive
+
+srpm: create-archive
+	$(RPMBUILD) --define "_sourcedir `pwd`" --define "_srcrpmdir `pwd`" --nodeps -bs $(SPECFILE)
diff --git a/Makefile.quiet b/Makefile.quiet
new file mode 100644
index 0000000..906d22b
--- /dev/null
+++ b/Makefile.quiet
@@ -0,0 +1,11 @@
+ifneq ($(findstring $(MAKEFLAGS),s),s)
+ifndef V
+	QUIET_CC	= @echo '    '   CC $@;
+	QUIET_CXX	= @echo '    '  CXX $@;
+	QUIET_LINK	= @echo '  '   LINK $@;
+	QUIET_AR	= @echo '    '   AR $@;
+	QUIET_RANLIB	= @echo '' RANLIB $@;
+endif
+endif
+
+
diff --git a/README b/README
new file mode 100644
index 0000000..a76021e
--- /dev/null
+++ b/README
@@ -0,0 +1,46 @@
+liburing
+--------
+
+This is the io_uring library, liburing. liburing provides helpers to setup and
+teardown io_uring instances, and also a simplified interface for
+applications that don't need (or want) to deal with the full kernel
+side implementation.
+
+For more info on io_uring, please see:
+
+https://kernel.dk/io_uring.pdf
+
+Subscribe to io-uring@vger.kernel.org for io_uring related discussions
+and development for both kernel and userspace. The list is archived here:
+
+https://lore.kernel.org/io-uring/
+
+
+ulimit settings
+---------------
+
+io_uring accounts memory it needs under the rlimit memlocked option, which
+can be quite low on some setups (64K). The default is usually enough for
+most use cases, but bigger rings or things like registered buffers deplete
+it quickly. root isn't under this restriction, but regular users are. Going
+into detail on how to bump the limit on various systems is beyond the scope
+of this little blurb, but check /etc/security/limits.conf for user specific
+settings, or /etc/systemd/user.conf and /etc/systemd/system.conf for systemd
+setups.
+
+Regression tests
+-----------------
+
+The bulk of liburing is actually regression/unit tests for both liburing and
+the kernel io_uring support. Please note that this suite isn't expected to
+pass on older kernels, and may even crash or hang older kernels!
+
+License
+-------
+
+All software contained within this repo is dual licensed LGPL and MIT, see
+COPYING and LICENSE, except for a header coming from the kernel which is
+dual licensed GPL with a Linux-syscall-note exception and MIT, see
+COPYING.GPL and <https://spdx.org/licenses/Linux-syscall-note.html>.
+
+Jens Axboe 2020-01-20
diff --git a/configure b/configure
new file mode 100755
index 0000000..3b96cde
--- /dev/null
+++ b/configure
@@ -0,0 +1,380 @@
+#!/bin/sh
+
+cc=${CC:-gcc}
+cxx=${CXX:-g++}
+
+for opt do
+  optarg=$(expr "x$opt" : 'x[^=]*=\(.*\)')
+  case "$opt" in
+  --help|-h) show_help=yes
+  ;;
+  --prefix=*) prefix="$optarg"
+  ;;
+  --includedir=*) includedir="$optarg"
+  ;;
+  --libdir=*) libdir="$optarg"
+  ;;
+  --libdevdir=*) libdevdir="$optarg"
+  ;;
+  --mandir=*) mandir="$optarg"
+  ;;
+  --datadir=*) datadir="$optarg"
+  ;;
+  --cc=*) cc="$optarg"
+  ;;
+  --cxx=*) cxx="$optarg"
+  ;;
+  *)
+    echo "ERROR: unknown option $opt"
+    echo "Try '$0 --help' for more information"
+    exit 1
+  ;;
+  esac
+done
+
+if test -z "$prefix"; then
+  prefix=/usr
+fi
+if test -z "$includedir"; then
+  includedir="$prefix/include"
+fi
+if test -z "$libdir"; then
+  libdir="$prefix/lib"
+fi
+if test -z "$libdevdir"; then
+  libdevdir="$prefix/lib"
+fi
+if test -z "$mandir"; then
+  mandir="$prefix/man"
+fi
+if test -z "$datadir"; then
+  datadir="$prefix/share"
+fi
+
+if test x"$libdir" = x"$libdevdir"; then
+  relativelibdir=""
+else
+  relativelibdir="$libdir/"
+fi
+
+if test "$show_help" = "yes"; then
+cat <<EOF
+
+Usage: configure [options]
+Options: [defaults in brackets after descriptions]
+  --help                   print this message
+  --prefix=PATH            install in PATH [$prefix]
+  --includedir=PATH        install headers in PATH [$includedir]
+  --libdir=PATH            install runtime libraries in PATH [$libdir]
+  --libdevdir=PATH         install development libraries in PATH [$libdevdir]
+  --mandir=PATH            install man pages in PATH [$mandir]
+  --datadir=PATH           install shared data in PATH [$datadir]
+EOF
+exit 0
+fi
+
+TMPC="$(mktemp --tmpdir fio-conf-XXXXXXXXXX.c)"
+TMPC2="$(mktemp --tmpdir fio-conf-XXXXXXXXXX-2.c)"
+TMPO="$(mktemp --tmpdir fio-conf-XXXXXXXXXX.o)"
+TMPE="$(mktemp --tmpdir fio-conf-XXXXXXXXXX.exe)"
+
+# NB: do not call "exit" in the trap handler; this is buggy with some shells;
+# see <1285349658-3122-1-git-send-email-loic.minier@linaro.org>
+trap "rm -f $TMPC $TMPC2 $TMPO $TMPE" EXIT INT QUIT TERM
+
+rm -rf config.log
+
+config_host_mak="config-host.mak"
+config_host_h="config-host.h"
+
+rm -rf $config_host_mak
+rm -rf $config_host_h
+
+fatal() {
+  echo $@
+  echo "Configure failed, check config.log and/or the above output"
+  rm -rf $config_host_mak
+  rm -rf $config_host_h
+  exit 1
+}
+
+# Print result for each configuration test
+print_config() {
+  printf "%-30s%s\n" "$1" "$2"
+}
+
+# Default CFLAGS
+CFLAGS="-D_GNU_SOURCE -include config-host.h"
+BUILD_CFLAGS=""
+
+# Print configure header at the top of $config_host_h
+echo "/*" > $config_host_h
+echo " * Automatically generated by configure - do not modify" >> $config_host_h
+printf " * Configured with:" >> $config_host_h
+printf " * '%s'" "$0" "$@" >> $config_host_h
+echo "" >> $config_host_h
+echo " */" >> $config_host_h
+
+echo "# Automatically generated by configure - do not modify" > $config_host_mak
+printf "# Configured with:" >> $config_host_mak
+printf " '%s'" "$0" "$@" >> $config_host_mak
+echo >> $config_host_mak
+
+do_cxx() {
+    # Run the compiler, capturing its output to the log.
+    echo $cxx "$@" >> config.log
+    $cxx "$@" >> config.log 2>&1 || return $?
+    return 0
+}
+
+do_cc() {
+    # Run the compiler, capturing its output to the log.
+    echo $cc "$@" >> config.log
+    $cc "$@" >> config.log 2>&1 || return $?
+    # Test passed. If this is an --enable-werror build, rerun
+    # the test with -Werror and bail out if it fails. This
+    # makes warning-generating-errors in configure test code
+    # obvious to developers.
+    if test "$werror" != "yes"; then
+        return 0
+    fi
+    # Don't bother rerunning the compile if we were already using -Werror
+    case "$*" in
+        *-Werror*)
+           return 0
+        ;;
+    esac
+    echo $cc -Werror "$@" >> config.log
+    $cc -Werror "$@" >> config.log 2>&1 && return $?
+    echo "ERROR: configure test passed without -Werror but failed with -Werror."
+    echo "This is probably a bug in the configure script. The failing command"
+    echo "will be at the bottom of config.log."
+    fatal "You can run configure with --disable-werror to bypass this check."
+}
+
+compile_prog() {
+  local_cflags="$1"
+  local_ldflags="$2 $LIBS"
+  echo "Compiling test case $3" >> config.log
+  do_cc $CFLAGS $local_cflags -o $TMPE $TMPC $LDFLAGS $local_ldflags
+}
+
+compile_prog_cxx() {
+  local_cflags="$1"
+  local_ldflags="$2 $LIBS"
+  echo "Compiling test case $3" >> config.log
+  do_cxx $CFLAGS $local_cflags -o $TMPE $TMPC $LDFLAGS $local_ldflags
+}
+
+has() {
+  type "$1" >/dev/null 2>&1
+}
+
+output_mak() {
+  echo "$1=$2" >> $config_host_mak
+}
+
+output_sym() {
+  output_mak "$1" "y"
+  echo "#define $1" >> $config_host_h
+}
+
+print_and_output_mak() {
+  print_config "$1" "$2"
+  output_mak "$1" "$2"
+}
+print_and_output_mak "prefix" "$prefix"
+print_and_output_mak "includedir" "$includedir"
+print_and_output_mak "libdir" "$libdir"
+print_and_output_mak "libdevdir" "$libdevdir"
+print_and_output_mak "relativelibdir" "$relativelibdir"
+print_and_output_mak "mandir" "$mandir"
+print_and_output_mak "datadir" "$datadir"
+
+##########################################
+# check for __kernel_rwf_t
+__kernel_rwf_t="no"
+cat > $TMPC << EOF
+#include <linux/fs.h>
+int main(int argc, char **argv)
+{
+  __kernel_rwf_t x;
+  x = 0;
+  return x;
+}
+EOF
+if compile_prog "" "" "__kernel_rwf_t"; then
+  __kernel_rwf_t="yes"
+fi
+print_config "__kernel_rwf_t" "$__kernel_rwf_t"
+
+##########################################
+# check for __kernel_timespec
+__kernel_timespec="no"
+cat > $TMPC << EOF
+#include <linux/time.h>
+#include <linux/time_types.h>
+int main(int argc, char **argv)
+{
+  struct __kernel_timespec ts;
+  ts.tv_sec = 0;
+  ts.tv_nsec = 1;
+  return 0;
+}
+EOF
+if compile_prog "" "" "__kernel_timespec"; then
+  __kernel_timespec="yes"
+fi
+print_config "__kernel_timespec" "$__kernel_timespec"
+
+##########################################
+# check for open_how
+open_how="no"
+cat > $TMPC << EOF
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+int main(int argc, char **argv)
+{
+  struct open_how how;
+  how.flags = 0;
+  how.mode = 0;
+  how.resolve = 0;
+  return 0;
+}
+EOF
+if compile_prog "" "" "open_how"; then
+  open_how="yes"
+fi
+print_config "open_how" "$open_how"
+
+##########################################
+# check for statx
+statx="no"
+cat > $TMPC << EOF
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <linux/stat.h>
+int main(int argc, char **argv)
+{
+  struct statx x;
+
+  return memset(&x, 0, sizeof(x)) != NULL;
+}
+EOF
+if compile_prog "" "" "statx"; then
+  statx="yes"
+fi
+print_config "statx" "$statx"
+
+##########################################
+# check for C++
+has_cxx="no"
+cat > $TMPC << EOF
+#include <iostream>
+int main(int argc, char **argv)
+{
+  std::cout << "Test";
+  return 0;
+}
+EOF
+if compile_prog_cxx "" "" "C++"; then
+  has_cxx="yes"
+fi
+print_config "C++" "$has_cxx"
+
+##########################################
+# check for ucontext support
+has_ucontext="no"
+cat > $TMPC << EOF
+#include <ucontext.h>
+int main(int argc, char **argv)
+{
+  ucontext_t ctx;
+  getcontext(&ctx);
+  return 0;
+}
+EOF
+if compile_prog "" "" "has_ucontext"; then
+  has_ucontext="yes"
+fi
+print_config "has_ucontext" "$has_ucontext"
+
+
+#############################################################################
+
+if test "$__kernel_rwf_t" = "yes"; then
+  output_sym "CONFIG_HAVE_KERNEL_RWF_T"
+fi
+if test "$__kernel_timespec" = "yes"; then
+  output_sym "CONFIG_HAVE_KERNEL_TIMESPEC"
+fi
+if test "$open_how" = "yes"; then
+  output_sym "CONFIG_HAVE_OPEN_HOW"
+fi
+if test "$statx" = "yes"; then
+  output_sym "CONFIG_HAVE_STATX"
+fi
+if test "$has_cxx" = "yes"; then
+  output_sym "CONFIG_HAVE_CXX"
+fi
+if test "$has_ucontext" = "yes"; then
+  output_sym "CONFIG_HAVE_UCONTEXT"
+fi
+
+echo "CC=$cc" >> $config_host_mak
+print_config "CC" "$cc"
+echo "CXX=$cxx" >> $config_host_mak
+print_config "CXX" "$cxx"
+
+# generate compat.h
+compat_h="src/include/liburing/compat.h"
+cat > $compat_h << EOF
+/* SPDX-License-Identifier: MIT */
+#ifndef LIBURING_COMPAT_H
+#define LIBURING_COMPAT_H
+
+EOF
+
+if test "$__kernel_rwf_t" != "yes"; then
+cat >> $compat_h << EOF
+typedef int __kernel_rwf_t;
+
+EOF
+fi
+if test "$__kernel_timespec" != "yes"; then
+cat >> $compat_h << EOF
+#include <stdint.h>
+
+struct __kernel_timespec {
+	int64_t		tv_sec;
+	long long	tv_nsec;
+};
+
+EOF
+else
+cat >> $compat_h << EOF
+#include <linux/time_types.h>
+
+EOF
+fi
+if test "$open_how" != "yes"; then
+cat >> $compat_h << EOF
+#include <inttypes.h>
+
+struct open_how {
+	uint64_t	flags;
+	uint64_t	mode;
+	uint64_t	resolve;
+};
+
+EOF
+fi
+
+cat >> $compat_h << EOF
+#endif
+EOF
diff --git a/debian/README.Debian b/debian/README.Debian
new file mode 100644
index 0000000..15b9fd0
--- /dev/null
+++ b/debian/README.Debian
@@ -0,0 +1,7 @@
+liburing for Debian
+
+The newest Linux IO interface, i.e. io_uring, needs a
+userspace library to support it. This package,
+liburing, is the library for io_uring.
+
+ -- Liu Changcheng <changcheng.liu@aliyun.com>  Thu, 14 Nov 2019 21:35:39 +0800
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..f0032e3
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,27 @@
+liburing (0.7-1) stable; urgency=low
+
+  * Update to 0.7
+  * Fix library symlinks
+
+ -- Stefan Metzmacher <metze@samba.org>  Thu, 23 Jul 2020 00:23:00 +0200
+
+liburing (0.4-2) stable; urgency=low
+
+  * Fix /usr/lib/*/liburing.so symlink to /lib/*/liburing.so.1.0.4
+
+ -- Stefan Metzmacher <metze@samba.org>  Fri, 07 Feb 2020 15:30:00 +0100
+
+liburing (0.4-1) stable; urgency=low
+
+  * Package liburing-0.4 using a packaging layout similar to libaio1
+
+ -- Stefan Metzmacher <metze@samba.org>  Thu, 06 Feb 2020 11:30:00 +0100
+
+liburing (0.2-1ubuntu1) stable; urgency=low
+
+  * Initial release.
+  * commit 4bce856d43ab1f9a64477aa5a8f9f02f53e64b74
+  *	 Author: Jens Axboe <axboe@kernel.dk>
+  *	 Date:   Mon Nov 11 16:00:58 2019 -0700
+
+ -- Liu Changcheng <changcheng.liu@aliyun.com>  Fri, 15 Nov 2019 00:06:46 +0800
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..831a314
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,48 @@
+Source: liburing
+Section: libs
+Priority: optional
+Maintainer: Liu Changcheng <changcheng.liu@intel.com>
+Build-Depends: debhelper (>=9)
+Standards-Version: 4.1.4
+Homepage: https://git.kernel.dk/cgit/liburing/tree/README
+Vcs-Git: https://git.kernel.dk/liburing
+Vcs-Browser: https://git.kernel.dk/cgit/liburing/
+
+Package: liburing1
+Architecture: linux-any
+Multi-Arch: same
+Pre-Depends: ${misc:Pre-Depends}
+Depends: ${misc:Depends}, ${shlibs:Depends}
+Description: userspace library for using io_uring
+ io_uring is a kernel feature for fast asynchronous I/O.
+ The newest Linux IO interface, io_uring, can improve
+ system performance a lot. liburing is the userspace
+ library for using the io_uring feature.
+ .
+ This package contains the shared library.
+
+Package: liburing1-udeb
+Package-Type: udeb
+Section: debian-installer
+Architecture: linux-any
+Depends: ${misc:Depends}, ${shlibs:Depends},
+Description: userspace library for using io_uring
+ io_uring is a kernel feature for fast asynchronous I/O.
+ The newest Linux IO interface, io_uring, can improve
+ system performance a lot. liburing is the userspace
+ library for using the io_uring feature.
+ .
+ This package contains the udeb shared library.
+
+Package: liburing-dev
+Section: libdevel
+Architecture: linux-any
+Multi-Arch: same
+Depends: ${misc:Depends}, liburing1 (= ${binary:Version}),
+Description: userspace library for using io_uring
+ io_uring is a kernel feature for fast asynchronous I/O.
+ The newest Linux IO interface, io_uring, can improve
+ system performance a lot. liburing is the userspace
+ library for using the io_uring feature.
+ .
+ This package contains the static library and the header files.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..0b3f3eb
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,49 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: liburing
+Source: https://git.kernel.dk/cgit/liburing/
+
+Files: *
+Copyright: 2019 Jens Axboe <axboe@kernel.dk>
+License: GPL-2+ / MIT
+
+Files: debian/*
+Copyright: 2019 Changcheng Liu <changcheng.liu@aliyun.com>
+License: GPL-2+
+
+License: GPL-2+
+ This package is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+ .
+ This package is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ GNU General Public License for more details.
+ .
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>
+ .
+ On Debian systems, the complete text of the GNU General
+ Public License version 2 can be found in "/usr/share/common-licenses/GPL-2".
+
+License: MIT
+ Copyright 2020 Jens Axboe
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/debian/liburing-dev.install b/debian/liburing-dev.install
new file mode 100644
index 0000000..a00d956
--- /dev/null
+++ b/debian/liburing-dev.install
@@ -0,0 +1,4 @@
+usr/include
+usr/lib/*/lib*.so
+usr/lib/*/lib*.a
+usr/lib/*/pkgconfig
diff --git a/debian/liburing-dev.manpages b/debian/liburing-dev.manpages
new file mode 100644
index 0000000..fbbee23
--- /dev/null
+++ b/debian/liburing-dev.manpages
@@ -0,0 +1,6 @@
+man/io_uring_setup.2
+man/io_uring_enter.2
+man/io_uring_register.2
+man/io_uring_queue_exit.3
+man/io_uring_queue_init.3
+man/io_uring_get_sqe.3
diff --git a/debian/liburing1-udeb.install b/debian/liburing1-udeb.install
new file mode 100644
index 0000000..622f9ef
--- /dev/null
+++ b/debian/liburing1-udeb.install
@@ -0,0 +1 @@
+lib/*/lib*.so.*
diff --git a/debian/liburing1.install b/debian/liburing1.install
new file mode 100644
index 0000000..622f9ef
--- /dev/null
+++ b/debian/liburing1.install
@@ -0,0 +1 @@
+lib/*/lib*.so.*
diff --git a/debian/liburing1.symbols b/debian/liburing1.symbols
new file mode 100644
index 0000000..29109f2
--- /dev/null
+++ b/debian/liburing1.symbols
@@ -0,0 +1,32 @@
+liburing.so.1 liburing1 #MINVER#
+ (symver)LIBURING_0.1 0.1-1
+ io_uring_get_sqe@LIBURING_0.1 0.1-1
+ io_uring_queue_exit@LIBURING_0.1 0.1-1
+ io_uring_queue_init@LIBURING_0.1 0.1-1
+ io_uring_queue_mmap@LIBURING_0.1 0.1-1
+ io_uring_register_buffers@LIBURING_0.1 0.1-1
+ io_uring_register_eventfd@LIBURING_0.1 0.1-1
+ io_uring_register_eventfd_async@LIBURING_0.6 0.6-1
+ io_uring_register_files@LIBURING_0.1 0.1-1
+ io_uring_submit@LIBURING_0.1 0.1-1
+ io_uring_submit_and_wait@LIBURING_0.1 0.1-1
+ io_uring_unregister_buffers@LIBURING_0.1 0.1-1
+ io_uring_unregister_files@LIBURING_0.1 0.1-1
+ (symver)LIBURING_0.2 0.2-1
+ __io_uring_get_cqe@LIBURING_0.2 0.2-1
+ io_uring_queue_init_params@LIBURING_0.2 0.2-1
+ io_uring_register_files_update@LIBURING_0.2 0.2-1
+ io_uring_peek_batch_cqe@LIBURING_0.2 0.2-1
+ io_uring_wait_cqe_timeout@LIBURING_0.2 0.2-1
+ io_uring_wait_cqes@LIBURING_0.2 0.2-1
+ (symver)LIBURING_0.3 0.3-1
+ (symver)LIBURING_0.4 0.4-1
+ (symver)LIBURING_0.5 0.5-1
+ (symver)LIBURING_0.6 0.6-1
+ (symver)LIBURING_0.7 0.7-1
+ io_uring_get_probe@LIBURING_0.4 0.4-1
+ io_uring_get_probe_ring@LIBURING_0.4 0.4-1
+ io_uring_register_personality@LIBURING_0.4 0.4-1
+ io_uring_register_probe@LIBURING_0.4 0.4-1
+ io_uring_ring_dontfork@LIBURING_0.4 0.4-1
+ io_uring_unregister_personality@LIBURING_0.4 0.4-1
diff --git a/debian/patches/series b/debian/patches/series
new file mode 100644
index 0000000..4a97dfa
--- /dev/null
+++ b/debian/patches/series
@@ -0,0 +1 @@
+# You must remove unused comment lines for the released package.
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..1a334b3
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,81 @@
+#!/usr/bin/make -f
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+DEB_BUILD_MAINT_OPTIONS = hardening=+bindnow
+DEB_CFLAGS_MAINT_PREPEND = -Wall
+
+include /usr/share/dpkg/default.mk
+include /usr/share/dpkg/buildtools.mk
+
+export CC
+
+lib := liburing1
+libdbg := $(lib)-dbg
+libudeb := $(lib)-udeb
+libdev := liburing-dev
+
+build-indep:
+
+build-arch:
+	dh_testdir
+
+	$(MAKE) CPPFLAGS="$(CPPFLAGS)" CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)"
+
+build: build-indep build-arch
+
+clean:
+	dh_testdir
+	dh_testroot
+
+	$(MAKE) clean
+
+	dh_clean
+
+check-arch: build-arch
+	dh_testdir
+
+ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
+	$(MAKE) CPPFLAGS="$(CPPFLAGS)" CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" \
+	        partcheck
+endif
+
+install-arch: check-arch
+	dh_testdir
+	dh_testroot
+	dh_clean
+	dh_installdirs
+
+	$(MAKE) install \
+	  DESTDIR=$(CURDIR)/debian/tmp \
+	  libdir=/lib/$(DEB_HOST_MULTIARCH) \
+	  libdevdir=/usr/lib/$(DEB_HOST_MULTIARCH) \
+	  relativelibdir=/lib/$(DEB_HOST_MULTIARCH)/
+
+binary: binary-indep binary-arch
+
+binary-indep:
+	# Nothing to do.
+
+binary-arch: install-arch
+	dh_testdir
+	dh_testroot
+	dh_install -a
+	dh_installdocs -a
+	dh_installexamples -a
+	dh_installman -a
+	dh_lintian -a
+	dh_link -a
+	dh_strip -a --ddeb-migration='$(libdbg) (<< 0.3)'
+	dh_compress -a
+	dh_fixperms -a
+	dh_makeshlibs -a --add-udeb '$(libudeb)'
+	dh_shlibdeps -a
+	dh_installdeb -a
+	dh_gencontrol -a
+	dh_md5sums -a
+	dh_builddeb -a
+
+.PHONY: clean build-indep build-arch build
+.PHONY: install-arch binary-indep binary-arch binary
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000..163aaf8
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/debian/source/local-options b/debian/source/local-options
new file mode 100644
index 0000000..00131ee
--- /dev/null
+++ b/debian/source/local-options
@@ -0,0 +1,2 @@
+#abort-on-upstream-changes
+#unapply-patches
diff --git a/debian/source/options b/debian/source/options
new file mode 100644
index 0000000..51da836
--- /dev/null
+++ b/debian/source/options
@@ -0,0 +1 @@
+extend-diff-ignore = "(^|/)(config\.log|config-host\.h|config-host\.mak|liburing\.pc)$"
diff --git a/debian/watch b/debian/watch
new file mode 100644
index 0000000..f0e30c4
--- /dev/null
+++ b/debian/watch
@@ -0,0 +1,3 @@
+# Site          Directory               Pattern                 Version Script
+version=4
+https://git.kernel.dk/cgit/liburing/ snapshot\/liburing-([\d\.]+)\.tar\.(?:gz|xz) debian uupdate
diff --git a/examples/Makefile b/examples/Makefile
new file mode 100644
index 0000000..60c1b71
--- /dev/null
+++ b/examples/Makefile
@@ -0,0 +1,29 @@
+CPPFLAGS ?=
+override CPPFLAGS += -D_GNU_SOURCE -I../src/include/
+CFLAGS ?= -g -O2
+XCFLAGS =
+override CFLAGS += -Wall -L../src/
+
+include ../Makefile.quiet
+
+ifneq ($(MAKECMDGOALS),clean)
+include ../config-host.mak
+endif
+
+all_targets += io_uring-test io_uring-cp link-cp
+
+ifdef CONFIG_HAVE_UCONTEXT
+all_targets += ucontext-cp
+endif
+
+all: $(all_targets)
+
+test_srcs := io_uring-test.c io_uring-cp.c link-cp.c
+
+test_objs := $(patsubst %.c,%.ol,$(test_srcs))
+
+%: %.c
+	$(QUIET_CC)$(CC) $(CPPFLAGS) $(CFLAGS) -o $@ $< -luring $(XCFLAGS)
+
+clean:
+	@rm -f $(all_targets) $(test_objs)
diff --git a/examples/io_uring-cp.c b/examples/io_uring-cp.c
new file mode 100644
index 0000000..2a44c30
--- /dev/null
+++ b/examples/io_uring-cp.c
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * gcc -Wall -O2 -D_GNU_SOURCE -o io_uring-cp io_uring-cp.c -luring
+ */
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include "liburing.h"
+
+#define QD	64
+#define BS	(32*1024)
+
+static int infd, outfd;
+
+struct io_data {
+	int read;
+	off_t first_offset, offset;
+	size_t first_len;
+	struct iovec iov;
+};
+
+static int setup_context(unsigned entries, struct io_uring *ring)
+{
+	int ret;
+
+	ret = io_uring_queue_init(entries, ring, 0);
+	if (ret < 0) {
+		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int get_file_size(int fd, off_t *size)
+{
+	struct stat st;
+
+	if (fstat(fd, &st) < 0)
+		return -1;
+	if (S_ISREG(st.st_mode)) {
+		*size = st.st_size;
+		return 0;
+	} else if (S_ISBLK(st.st_mode)) {
+		unsigned long long bytes;
+
+		if (ioctl(fd, BLKGETSIZE64, &bytes) != 0)
+			return -1;
+
+		*size = bytes;
+		return 0;
+	}
+
+	return -1;
+}
+
+static void queue_prepped(struct io_uring *ring, struct io_data *data)
+{
+	struct io_uring_sqe *sqe;
+
+	sqe = io_uring_get_sqe(ring);
+	assert(sqe);
+
+	if (data->read)
+		io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset);
+	else
+		io_uring_prep_writev(sqe, outfd, &data->iov, 1, data->offset);
+
+	io_uring_sqe_set_data(sqe, data);
+}
+
+static int queue_read(struct io_uring *ring, off_t size, off_t offset)
+{
+	struct io_uring_sqe *sqe;
+	struct io_data *data;
+
+	data = malloc(size + sizeof(*data));
+	if (!data)
+		return 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		free(data);
+		return 1;
+	}
+
+	data->read = 1;
+	data->offset = data->first_offset = offset;
+
+	data->iov.iov_base = data + 1;
+	data->iov.iov_len = size;
+	data->first_len = size;
+
+	io_uring_prep_readv(sqe, infd, &data->iov, 1, offset);
+	io_uring_sqe_set_data(sqe, data);
+	return 0;
+}
+
+static void queue_write(struct io_uring *ring, struct io_data *data)
+{
+	data->read = 0;
+	data->offset = data->first_offset;
+
+	data->iov.iov_base = data + 1;
+	data->iov.iov_len = data->first_len;
+
+	queue_prepped(ring, data);
+	io_uring_submit(ring);
+}
+
+static int copy_file(struct io_uring *ring, off_t insize)
+{
+	unsigned long reads, writes;
+	struct io_uring_cqe *cqe;
+	off_t write_left, offset;
+	int ret;
+
+	write_left = insize;
+	writes = reads = offset = 0;
+
+	while (insize || write_left) {
+		int had_reads, got_comp;
+	
+		/*
+		 * Queue up as many reads as we can
+		 */
+		had_reads = reads;
+		while (insize) {
+			off_t this_size = insize;
+
+			if (reads + writes >= QD)
+				break;
+			if (this_size > BS)
+				this_size = BS;
+			else if (!this_size)
+				break;
+
+			if (queue_read(ring, this_size, offset))
+				break;
+
+			insize -= this_size;
+			offset += this_size;
+			reads++;
+		}
+
+		if (had_reads != reads) {
+			ret = io_uring_submit(ring);
+			if (ret < 0) {
+				fprintf(stderr, "io_uring_submit: %s\n", strerror(-ret));
+				break;
+			}
+		}
+
+		/*
+		 * Queue is full at this point. Find at least one completion.
+		 */
+		got_comp = 0;
+		while (write_left) {
+			struct io_data *data;
+
+			if (!got_comp) {
+				ret = io_uring_wait_cqe(ring, &cqe);
+				got_comp = 1;
+			} else {
+				ret = io_uring_peek_cqe(ring, &cqe);
+				if (ret == -EAGAIN) {
+					cqe = NULL;
+					ret = 0;
+				}
+			}
+			if (ret < 0) {
+				fprintf(stderr, "io_uring_peek_cqe: %s\n",
+							strerror(-ret));
+				return 1;
+			}
+			if (!cqe)
+				break;
+
+			data = io_uring_cqe_get_data(cqe);
+			if (cqe->res < 0) {
+				if (cqe->res == -EAGAIN) {
+					queue_prepped(ring, data);
+					io_uring_cqe_seen(ring, cqe);
+					continue;
+				}
+				fprintf(stderr, "cqe failed: %s\n",
+						strerror(-cqe->res));
+				return 1;
+			} else if (cqe->res != data->iov.iov_len) {
+				/* Short read/write, adjust and requeue */
+				data->iov.iov_base += cqe->res;
+				data->iov.iov_len -= cqe->res;
+				data->offset += cqe->res;
+				queue_prepped(ring, data);
+				io_uring_cqe_seen(ring, cqe);
+				continue;
+			}
+
+			/*
+			 * All done. if write, nothing else to do. if read,
+			 * queue up corresponding write.
+			 */
+			if (data->read) {
+				queue_write(ring, data);
+				write_left -= data->first_len;
+				reads--;
+				writes++;
+			} else {
+				free(data);
+				writes--;
+			}
+			io_uring_cqe_seen(ring, cqe);
+		}
+	}
+
+	/* wait out pending writes */
+	while (writes) {
+		struct io_data *data;
+
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			return 1;
+		}
+		if (cqe->res < 0) {
+			fprintf(stderr, "write res=%d\n", cqe->res);
+			return 1;
+		}
+		data = io_uring_cqe_get_data(cqe);
+		free(data);
+		writes--;
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	off_t insize;
+	int ret;
+
+	if (argc < 3) {
+		printf("%s: infile outfile\n", argv[0]);
+		return 1;
+	}
+
+	infd = open(argv[1], O_RDONLY);
+	if (infd < 0) {
+		perror("open infile");
+		return 1;
+	}
+	outfd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
+	if (outfd < 0) {
+		perror("open outfile");
+		return 1;
+	}
+
+	if (setup_context(QD, &ring))
+		return 1;
+	if (get_file_size(infd, &insize))
+		return 1;
+
+	ret = copy_file(&ring, insize);
+
+	close(infd);
+	close(outfd);
+	io_uring_queue_exit(&ring);
+	return ret;
+}
diff --git a/examples/io_uring-test.c b/examples/io_uring-test.c
new file mode 100644
index 0000000..1a68536
--- /dev/null
+++ b/examples/io_uring-test.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Simple app that demonstrates how to setup an io_uring interface,
+ * submit and complete IO against it, and then tear it down.
+ *
+ * gcc -Wall -O2 -D_GNU_SOURCE -o io_uring-test io_uring-test.c -luring
+ */
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include "liburing.h"
+
+#define QD	4
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int i, fd, ret, pending, done;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct iovec *iovecs;
+	struct stat sb;
+	ssize_t fsize;
+	off_t offset;
+	void *buf;
+
+	if (argc < 2) {
+		printf("%s: file\n", argv[0]);
+		return 1;
+	}
+
+	ret = io_uring_queue_init(QD, &ring, 0);
+	if (ret < 0) {
+		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	fd = open(argv[1], O_RDONLY | O_DIRECT);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	if (fstat(fd, &sb) < 0) {
+		perror("fstat");
+		return 1;
+	}
+
+	fsize = 0;
+	iovecs = calloc(QD, sizeof(struct iovec));
+	for (i = 0; i < QD; i++) {
+		if (posix_memalign(&buf, 4096, 4096))
+			return 1;
+		iovecs[i].iov_base = buf;
+		iovecs[i].iov_len = 4096;
+		fsize += 4096;
+	}
+
+	offset = 0;
+	i = 0;
+	do {
+		sqe = io_uring_get_sqe(&ring);
+		if (!sqe)
+			break;
+		io_uring_prep_readv(sqe, fd, &iovecs[i], 1, offset);
+		offset += iovecs[i].iov_len;
+		i++;
+		if (offset > sb.st_size)
+			break;
+	} while (1);
+
+	ret = io_uring_submit(&ring);
+	if (ret < 0) {
+		fprintf(stderr, "io_uring_submit: %s\n", strerror(-ret));
+		return 1;
+	} else if (ret != i) {
+		fprintf(stderr, "io_uring_submit submitted less %d\n", ret);
+		return 1;
+	}
+
+	done = 0;
+	pending = ret;
+	fsize = 0;
+	for (i = 0; i < pending; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "io_uring_wait_cqe: %s\n", strerror(-ret));
+			return 1;
+		}
+
+		done++;
+		ret = 0;
+		if (cqe->res != 4096 && cqe->res + fsize != sb.st_size) {
+			fprintf(stderr, "ret=%d, wanted 4096\n", cqe->res);
+			ret = 1;
+		}
+		fsize += cqe->res;
+		io_uring_cqe_seen(&ring, cqe);
+		if (ret)
+			break;
+	}
+
+	printf("Submitted=%d, completed=%d, bytes=%lu\n", pending, done,
+						(unsigned long) fsize);
+	close(fd);
+	io_uring_queue_exit(&ring);
+	return 0;
+}
diff --git a/examples/link-cp.c b/examples/link-cp.c
new file mode 100644
index 0000000..e15dfc3
--- /dev/null
+++ b/examples/link-cp.c
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Very basic proof-of-concept for doing a copy with linked SQEs. Needs a
+ * bit of error handling and short read love.
+ */
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include "liburing.h"
+
+#define QD	64
+#define BS	(32*1024)
+
+struct io_data {
+	size_t offset;
+	int index;
+	struct iovec iov;
+};
+
+static int infd, outfd;
+static unsigned inflight;
+
+static int setup_context(unsigned entries, struct io_uring *ring)
+{
+	int ret;
+
+	ret = io_uring_queue_init(entries, ring, 0);
+	if (ret < 0) {
+		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int get_file_size(int fd, off_t *size)
+{
+	struct stat st;
+
+	if (fstat(fd, &st) < 0)
+		return -1;
+	if (S_ISREG(st.st_mode)) {
+		*size = st.st_size;
+		return 0;
+	} else if (S_ISBLK(st.st_mode)) {
+		unsigned long long bytes;
+
+		if (ioctl(fd, BLKGETSIZE64, &bytes) != 0)
+			return -1;
+
+		*size = bytes;
+		return 0;
+	}
+
+	return -1;
+}
+
+static void queue_rw_pair(struct io_uring *ring, off_t size, off_t offset)
+{
+	struct io_uring_sqe *sqe;
+	struct io_data *data;
+	void *ptr;
+
+	ptr = malloc(size + sizeof(*data));
+	data = ptr + size;
+	data->index = 0;
+	data->offset = offset;
+	data->iov.iov_base = ptr;
+	data->iov.iov_len = size;
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_readv(sqe, infd, &data->iov, 1, offset);
+	sqe->flags |= IOSQE_IO_LINK;
+	io_uring_sqe_set_data(sqe, data);
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_writev(sqe, outfd, &data->iov, 1, offset);
+	io_uring_sqe_set_data(sqe, data);
+}
+
+static int handle_cqe(struct io_uring *ring, struct io_uring_cqe *cqe)
+{
+	struct io_data *data = io_uring_cqe_get_data(cqe);
+	int ret = 0;
+
+	data->index++;
+
+	if (cqe->res < 0) {
+		if (cqe->res == -ECANCELED) {
+			queue_rw_pair(ring, BS, data->offset);
+			inflight += 2;
+		} else {
+			printf("cqe error: %s\n", strerror(-cqe->res));
+			ret = 1;
+		}
+	}
+
+	if (data->index == 2) {
+		void *ptr = (void *) data - data->iov.iov_len;
+
+		free(ptr);
+	}
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+}
+
+static int copy_file(struct io_uring *ring, off_t insize)
+{
+	struct io_uring_cqe *cqe;
+	size_t this_size;
+	off_t offset;
+
+	offset = 0;
+	while (insize) {
+		int has_inflight = inflight;
+		int depth;
+
+		while (insize && inflight < QD) {
+			this_size = BS;
+			if (this_size > insize)
+				this_size = insize;
+			queue_rw_pair(ring, this_size, offset);
+			offset += this_size;
+			insize -= this_size;
+			inflight += 2;
+		}
+
+		if (has_inflight != inflight)
+			io_uring_submit(ring);
+
+		if (insize)
+			depth = QD;
+		else
+			depth = 1;
+		while (inflight >= depth) {
+			int ret;
+
+			ret = io_uring_wait_cqe(ring, &cqe);
+			if (ret < 0) {
+				printf("wait cqe: %s\n", strerror(-ret));
+				return 1;
+			}
+			if (handle_cqe(ring, cqe))
+				return 1;
+			inflight--;
+		}
+	}
+
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	off_t insize;
+	int ret;
+
+	if (argc < 3) {
+		printf("%s: infile outfile\n", argv[0]);
+		return 1;
+	}
+
+	infd = open(argv[1], O_RDONLY);
+	if (infd < 0) {
+		perror("open infile");
+		return 1;
+	}
+	outfd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
+	if (outfd < 0) {
+		perror("open outfile");
+		return 1;
+	}
+
+	if (setup_context(QD, &ring))
+		return 1;
+	if (get_file_size(infd, &insize))
+		return 1;
+
+	ret = copy_file(&ring, insize);
+
+	close(infd);
+	close(outfd);
+	io_uring_queue_exit(&ring);
+	return ret;
+}
diff --git a/examples/ucontext-cp.c b/examples/ucontext-cp.c
new file mode 100644
index 0000000..ea0c934
--- /dev/null
+++ b/examples/ucontext-cp.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * gcc -Wall -O2 -D_GNU_SOURCE -o ucontext-cp ucontext-cp.c -luring
+ */
+#define _POSIX_C_SOURCE 199309L
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <ucontext.h>
+#include <signal.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/timerfd.h>
+#include <sys/poll.h>
+#include "liburing.h"
+
+#define QD	64
+#define BS	1024
+
+#ifndef SIGSTKSZ
+#define SIGSTKSZ 8192
+#endif
+
+typedef struct {
+	struct io_uring *ring;
+	unsigned char *stack_buf;
+	ucontext_t ctx_main, ctx_fnew;
+} async_context;
+
+typedef struct {
+	async_context *pctx;
+	int *psuccess;
+	int *pfailure;
+	int infd;
+	int outfd;
+} arguments_bundle;
+
+#define DEFINE_AWAIT_OP(operation) 					\
+static ssize_t await_##operation(					\
+	async_context *pctx,						\
+	int fd,								\
+	const struct iovec *ioves,					\
+	unsigned int nr_vecs,						\
+	off_t offset)							\
+{									\
+	struct io_uring_sqe *sqe = io_uring_get_sqe(pctx->ring);	\
+	struct io_uring_cqe *cqe;					\
+									\
+	if (!sqe)							\
+		return -1;						\
+									\
+	io_uring_prep_##operation(sqe, fd, ioves, nr_vecs, offset);	\
+	io_uring_sqe_set_data(sqe, pctx);				\
+	swapcontext(&pctx->ctx_fnew, &pctx->ctx_main);			\
+	io_uring_peek_cqe(pctx->ring, &cqe);				\
+	assert(cqe);							\
+	io_uring_cqe_seen(pctx->ring, cqe);				\
+									\
+	return cqe->res;						\
+}
+
+DEFINE_AWAIT_OP(readv)
+DEFINE_AWAIT_OP(writev)
+#undef DEFINE_AWAIT_OP
+
+int await_poll(async_context *pctx, int fd, short poll_mask) {
+	struct io_uring_sqe *sqe = io_uring_get_sqe(pctx->ring);
+	struct io_uring_cqe *cqe;
+	if (!sqe)
+		return -1;
+
+	io_uring_prep_poll_add(sqe, fd, poll_mask);
+	io_uring_sqe_set_data(sqe, pctx);
+	swapcontext(&pctx->ctx_fnew, &pctx->ctx_main);
+	io_uring_peek_cqe(pctx->ring, &cqe);
+	assert(cqe);
+	io_uring_cqe_seen(pctx->ring, cqe);
+
+	return cqe->res;
+}
+
+int await_delay(async_context *pctx, time_t seconds) {
+	struct io_uring_sqe *sqe = io_uring_get_sqe(pctx->ring);
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts = {
+		.tv_sec = seconds,
+		.tv_nsec = 0
+	};
+
+	if (!sqe)
+		return -1;
+
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	io_uring_sqe_set_data(sqe, pctx);
+	swapcontext(&pctx->ctx_fnew, &pctx->ctx_main);
+	io_uring_peek_cqe(pctx->ring, &cqe);
+	assert(cqe);
+	io_uring_cqe_seen(pctx->ring, cqe);
+
+	return 0;
+}
+
+static int setup_context(async_context *pctx, struct io_uring *ring)
+{
+	int ret;
+
+	pctx->ring = ring;
+	ret = getcontext(&pctx->ctx_fnew);
+	if (ret < 0) {
+		perror("getcontext");
+		return -1;
+	}
+	pctx->stack_buf = malloc(SIGSTKSZ);
+	if (!pctx->stack_buf) {
+		perror("malloc");
+		return -1;
+	}
+	pctx->ctx_fnew.uc_stack.ss_sp = pctx->stack_buf;
+	pctx->ctx_fnew.uc_stack.ss_size = SIGSTKSZ;
+	pctx->ctx_fnew.uc_link = &pctx->ctx_main;
+
+	return 0;
+}
+
+static int copy_file(async_context *pctx, int infd, int outfd, struct iovec* piov)
+{
+	off_t offset = 0;
+
+	for (;;) {
+		ssize_t bytes_read;
+
+		printf("%d->%d: readv %ld bytes from %ld\n", infd, outfd, (long) piov->iov_len, (long) offset);
+		if ((bytes_read = await_readv(pctx, infd, piov, 1, offset)) < 0) {
+			perror("await_readv");
+			return 1;
+		}
+		if (bytes_read == 0)
+			return 0;
+
+		piov->iov_len = bytes_read;
+
+		printf("%d->%d: writev %ld bytes from %ld\n", infd, outfd, (long) piov->iov_len, (long) offset);
+		if (await_writev(pctx, outfd, piov, 1, offset) != bytes_read) {
+			perror("await_writev");
+			return 1;
+		}
+		if (bytes_read < BS)
+			return 0;
+		offset += bytes_read;
+
+		printf("%d->%d: wait %ds\n", infd, outfd, 1);
+		await_delay(pctx, 1);
+	}
+}
+
+static void copy_file_wrapper(arguments_bundle *pbundle)
+{
+	struct iovec iov = {
+		.iov_base = malloc(BS),
+		.iov_len = BS,
+	};
+	async_context *pctx = pbundle->pctx;
+
+	int ret = copy_file(pctx, pbundle->infd, pbundle->outfd, &iov);
+
+	printf("%d->%d: done with ret code %d\n", pbundle->infd, pbundle->outfd, ret);
+
+	if (ret == 0) {
+		++*pbundle->psuccess;
+	} else {
+		++*pbundle->pfailure;
+	}
+
+	free(iov.iov_base);
+	close(pbundle->infd);
+	close(pbundle->outfd);
+	free(pbundle->pctx->stack_buf);
+	free(pbundle->pctx);
+	free(pbundle);
+
+	swapcontext(&pctx->ctx_fnew, &pctx->ctx_main);
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int i, req_count, ret;
+	int success = 0, failure = 0;
+
+	if (argc < 3) {
+		fprintf(stderr, "%s: infile1 outfile1 [infile2 outfile2 [...]]\n", argv[0]);
+		return 1;
+	}
+
+	ret = io_uring_queue_init(QD, &ring, 0);
+	if (ret < 0) {
+		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
+		return -1;
+	}
+
+	req_count = (argc - 1) / 2;
+	printf("copying %d files...\n", req_count);
+
+	for (i = 1; i < argc; i += 2) {
+		int infd, outfd;
+
+		async_context *pctx = malloc(sizeof(*pctx));
+
+		if (!pctx || setup_context(pctx, &ring))
+			return 1;
+
+		infd = open(argv[i], O_RDONLY);
+		if (infd < 0) {
+			perror("open infile");
+			return 1;
+		}
+		outfd = open(argv[i + 1], O_WRONLY | O_CREAT | O_TRUNC, 0644);
+		if (outfd < 0) {
+			perror("open outfile");
+			return 1;
+		}
+
+		arguments_bundle *pbundle = malloc(sizeof(*pbundle));
+		pbundle->pctx = pctx;
+		pbundle->psuccess = &success;
+		pbundle->pfailure = &failure;
+		pbundle->infd = infd;
+		pbundle->outfd = outfd;
+
+		makecontext(&pctx->ctx_fnew, (void (*)(void)) copy_file_wrapper, 1, pbundle);
+
+		if (swapcontext(&pctx->ctx_main, &pctx->ctx_fnew)) {
+			perror("swapcontext");
+			return 1;
+		}
+	}
+
+	/* event loop */
+	while (success + failure < req_count) {
+		struct io_uring_cqe *cqe;
+
+		/* usually be timed waiting */
+		ret = io_uring_submit_and_wait(&ring, 1);
+		if (ret < 0) {
+			fprintf(stderr, "submit_and_wait: %s\n", strerror(-ret));
+			return 1;
+		}
+
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "wait_cqe: %s\n", strerror(-ret));
+			return 1;
+		}
+
+		async_context *pctx = io_uring_cqe_get_data(cqe);
+
+		if (swapcontext(&pctx->ctx_main, &pctx->ctx_fnew)) {
+			perror("swapcontext");
+			return 1;
+		}
+	}
+
+	io_uring_queue_exit(&ring);
+
+	printf("finished with %d success(es) and %d failure(s)\n", success, failure);
+
+	return failure > 0;
+}
diff --git a/liburing.pc.in b/liburing.pc.in
new file mode 100644
index 0000000..e621939
--- /dev/null
+++ b/liburing.pc.in
@@ -0,0 +1,12 @@
+prefix=@prefix@
+exec_prefix=${prefix}
+libdir=@libdir@
+includedir=@includedir@
+
+Name: @NAME@
+Version: @VERSION@
+Description: io_uring library
+URL: http://git.kernel.dk/cgit/liburing/
+
+Libs: -L${libdir} -luring
+Cflags: -I${includedir}
diff --git a/liburing.spec b/liburing.spec
new file mode 100644
index 0000000..0268d23
--- /dev/null
+++ b/liburing.spec
@@ -0,0 +1,66 @@
+Name: liburing
+Version: 2.0
+Release: 1%{?dist}
+Summary: Linux-native io_uring I/O access library
+License: (GPLv2 with exceptions and LGPLv2+) or MIT
+Source0: https://brick.kernel.dk/snaps/%{name}-%{version}.tar.gz
+Source1: https://brick.kernel.dk/snaps/%{name}-%{version}.tar.gz.asc
+URL: https://git.kernel.dk/cgit/liburing/
+BuildRequires: gcc
+BuildRequires: make
+
+%description
+Provides native async IO for the Linux kernel, in a fast and efficient
+manner, for both buffered and O_DIRECT.
+
+%package devel
+Summary: Development files for Linux-native io_uring I/O access library
+Requires: %{name}%{_isa} = %{version}-%{release}
+Requires: pkgconfig
+
+%description devel
+This package provides header files to include and libraries to link with
+for the Linux-native io_uring.
+
+%prep
+%autosetup
+
+%build
+%set_build_flags
+./configure --prefix=%{_prefix} --libdir=/%{_libdir} --libdevdir=/%{_libdir} --mandir=%{_mandir} --includedir=%{_includedir}
+
+%make_build
+
+%install
+%make_install
+
+%files
+%attr(0755,root,root) %{_libdir}/liburing.so.*
+%license COPYING
+
+%files devel
+%{_includedir}/liburing/
+%{_includedir}/liburing.h
+%{_libdir}/liburing.so
+%exclude %{_libdir}/liburing.a
+%{_libdir}/pkgconfig/*
+%{_mandir}/man2/*
+%{_mandir}/man3/*
+%{_mandir}/man7/*
+
+%changelog
+* Thu Oct 31 2019 Jeff Moyer <jmoyer@redhat.com> - 0.2-1
+- Add io_uring_cq_ready()
+- Add io_uring_peek_batch_cqe()
+- Add io_uring_prep_accept()
+- Add io_uring_prep_{recv,send}msg()
+- Add io_uring_prep_timeout_remove()
+- Add io_uring_queue_init_params()
+- Add io_uring_register_files_update()
+- Add io_uring_sq_space_left()
+- Add io_uring_wait_cqe_timeout()
+- Add io_uring_wait_cqes()
+- Add io_uring_wait_cqes_timeout()
+
+* Tue Jan 8 2019 Jens Axboe <axboe@kernel.dk> - 0.1
+- Initial version
diff --git a/make-debs.sh b/make-debs.sh
new file mode 100755
index 0000000..01d563c
--- /dev/null
+++ b/make-debs.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+# Copyright (C) 2019  Liu Changcheng <changcheng.liu@aliyun.com>
+# Author: Liu Changcheng <changcheng.liu@aliyun.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+set -xe
+
+# Create dir for build
+base=${1:-/tmp/release}
+codename=$(lsb_release -sc)
+releasedir=$base/$(lsb_release -si)/liburing
+rm -rf $releasedir
+mkdir -p $releasedir
+
+src_dir=$(readlink -e `basename $0`)
+liburing_dir=$(dirname $src_dir)
+basename=$(basename $liburing_dir)
+dirname=$(dirname $liburing_dir)
+version=$(git describe --match "lib*" | cut -d '-' -f 2)
+outfile="liburing-$version"
+orgfile=$(echo $outfile | tr '-' '_')
+
+# Prepare source code
+cp -arf ${dirname}/${basename} ${releasedir}/${outfile}
+cd ${releasedir}/${outfile}
+git clean -dxf
+
+# Change changelog if it's needed
+cur_ver=`head -1 debian/changelog | sed -n -e 's/.* (\(.*\)) .*/\1/p'`
+if [ "$cur_ver" != "$version-1" ]; then
+	dch -D $codename --force-distribution -b -v "$version-1" "new version"
+fi
+
+# Create tar archive
+cd ../
+tar cvzf ${outfile}.tar.gz ${outfile}
+ln -s ${outfile}.tar.gz ${orgfile}.orig.tar.gz
+
+# Build debian package
+cd -
+debuild
diff --git a/man/io_uring.7 b/man/io_uring.7
new file mode 100644
index 0000000..a63b3e9
--- /dev/null
+++ b/man/io_uring.7
@@ -0,0 +1,736 @@
+.\" Copyright (C) 2020 Shuveb Hussain <shuveb@gmail.com>
+.\" SPDX-License-Identifier: LGPL-2.0-or-later
+.\"
+
+.TH IO_URING 7 2020-07-26 "Linux" "Linux Programmer's Manual"
+.SH NAME
+io_uring \- Asynchronous I/O facility
+.SH SYNOPSIS
+.nf
+.B "#include <linux/io_uring.h>"
+.fi
+.PP
+.SH DESCRIPTION
+.PP
+.B io_uring
+is a Linux-specific API for asynchronous I/O.
+It allows the user to submit one or more I/O requests,
+which are processed asynchronously without blocking the calling process.
+.B io_uring
+gets its name from ring buffers which are shared between user space and
+kernel space. This arrangement allows for efficient I/O,
+while avoiding the overhead of copying buffers between them,
+where possible.
+This interface makes
+.B io_uring
+different from other UNIX I/O APIs,
+wherein,
+rather than just communicate between kernel and user space with system calls, 
+ring buffers are used as the main mode of communication.
+This arrangement has various performance benefits which are discussed in a
+separate section below.
+This man page uses the terms shared buffers, shared ring buffers and
+queues interchangeably.
+.PP
+The general programming model you need to follow for
+.B io_uring
+is outlined below
+.IP \(bu
+Set up shared buffers with
+.BR io_uring_setup (2)
+and
+.BR mmap (2),
+mapping into user space shared buffers for the submission queue (SQ) and the 
+completion queue (CQ).
+You place I/O requests you want to make on the SQ,
+while the kernel places the results of those operations on the CQ.
+.IP \(bu
+For every I/O request you need to make (like to read a file, write a file, 
+accept a socket connection, etc), you create a submission queue entry,
+or SQE,
+describe the I/O operation you need to get done and add it to the tail of
+the submission queue (SQ).
+Each I/O operation is,
+in essence,
+the equivalent of a system call you would have made otherwise,
+if you were not using
+.BR io_uring .
+You can add more than one SQE to the queue depending on the number of
+operations you want to request.
+.IP \(bu
+After you add one or more SQEs,
+you need to call
+.BR io_uring_enter (2)
+to tell the kernel to dequeue your I/O requests off the SQ and begin
+processing them.
+.IP \(bu
+For each SQE you submit,
+once it is done processing the request,
+the kernel places a completion queue event or CQE at the tail of the
+completion queue or CQ.
+The kernel places exactly one matching CQE in the CQ for every SQE you
+submit on the SQ.
+After you retrieve a CQE,
+minimally,
+you might be interested in checking the
+.I res
+field of the CQE structure,
+which corresponds to the return value of the system
+call's equivalent,
+had you used it directly without using 
+.BR io_uring .
+For instance,
+a read operation under 
+.BR io_uring ,
+started with the
+.BR IORING_OP_READ
+operation,
+which issues the equivalent of the
+.BR read (2) 
+system call,
+would return as part of 
+.I res
+what
+.BR read (2)
+would have returned if called directly,
+without using 
+.BR io_uring .
+.IP \(bu
+Optionally, 
+.BR io_uring_enter (2)
+can also wait for a specified number of requests to be processed by the kernel
+before it returns.
+If you specified a certain number of completions to wait for,
+the kernel would have placed at least that many CQEs on the CQ,
+which you can then readily read,
+right after the return from
+.BR io_uring_enter (2).
+.IP \(bu
+It is important to remember that I/O requests submitted to the kernel can
+complete in any order.
+It is not necessary for the kernel to process one request after another,
+in the order you placed them.
+Given that the interface is a ring,
+the requests are attempted in order,
+however that doesn't imply any sort of ordering on their completion.
+When more than one request is in flight,
+it is not possible to determine which one will complete first.
+When you dequeue CQEs off the CQ,
+you should always check which submitted request it corresponds to.
+The most common method for doing so is utilizing the
+.I user_data
+field in the request, which is passed back on the completion side.
+.PP
+Adding to and reading from the queues:
+.IP \(bu
+You add SQEs to the tail of the SQ.
+The kernel reads SQEs off the head of the queue.
+.IP \(bu
+The kernel adds CQEs to the tail of the CQ.
+You read CQEs off the head of the queue.
+.SS Submission queue polling
+One of the goals of 
+.B io_uring
+is to provide a means for efficient I/O.
+To this end,
+.B io_uring
+supports a polling mode that lets you avoid the call to
+.BR io_uring_enter (2),
+which you use to inform the kernel that you have queued SQEs on to the SQ.
+With SQ Polling,
+.B io_uring
+starts a kernel thread that polls the submission queue for any I/O
+requests you submit by adding SQEs.
+With SQ Polling enabled,
+there is no need for you to call 
+.BR io_uring_enter (2),
+letting you avoid the overhead of system calls.
+A designated kernel thread dequeues SQEs off the SQ as you add them and
+dispatches them for asynchronous processing.
+.SS Setting up io_uring
+.PP
+The main steps in setting up
+.B io_uring
+consist of mapping in the shared buffers with
+.BR mmap (2)
+calls.
+In the example program included in this man page, 
+the function
+.BR app_setup_uring ()
+sets up 
+.B io_uring
+with a QUEUE_DEPTH deep submission queue.
+Pay attention to the 2 
+.BR mmap (2)
+calls that set up the shared submission and completion queues.
+If your kernel is older than version 5.4,
+three
+.BR mmap (2)
+calls are required.
+.PP
+.SS Submitting I/O requests
+The process of submitting a request consists of describing the I/O
+operation you need to get done using an 
+.B io_uring_sqe
+structure instance.
+These details describe the equivalent system call and its parameters.
+Because the range of I/O operations Linux supports are very varied and the
+.B io_uring_sqe
+structure needs to be able to describe them, 
+it has several fields,
+some packed into unions for space efficiency.
+Here is a simplified version of struct 
+.B io_uring_sqe 
+with some of the most often used fields:
+.PP
+.in +4n
+.EX
+struct io_uring_sqe {
+        __u8    opcode;         /* type of operation for this sqe */
+        __s32   fd;             /* file descriptor to do IO on */
+        __u64   off;            /* offset into file */
+        __u64   addr;           /* pointer to buffer or iovecs */
+        __u32   len;            /* buffer size or number of iovecs */
+        __u64   user_data;      /* data to be passed back at completion time */
+        __u8    flags;          /* IOSQE_ flags */
+        ...
+};
+.EE
+.in
+
+Here is struct 
+.B io_uring_sqe
+in full:
+
+.in +4n
+.EX
+struct io_uring_sqe {
+        __u8    opcode;         /* type of operation for this sqe */
+        __u8    flags;          /* IOSQE_ flags */
+        __u16   ioprio;         /* ioprio for the request */
+        __s32   fd;             /* file descriptor to do IO on */
+        union {
+                __u64   off;    /* offset into file */
+                __u64   addr2;
+        };
+        union {
+                __u64   addr;   /* pointer to buffer or iovecs */
+                __u64   splice_off_in;
+        };
+        __u32   len;            /* buffer size or number of iovecs */
+        union {
+                __kernel_rwf_t  rw_flags;
+                __u32           fsync_flags;
+                __u16           poll_events;    /* compatibility */
+                __u32           poll32_events;  /* word-reversed for BE */
+                __u32           sync_range_flags;
+                __u32           msg_flags;
+                __u32           timeout_flags;
+                __u32           accept_flags;
+                __u32           cancel_flags;
+                __u32           open_flags;
+                __u32           statx_flags;
+                __u32           fadvise_advice;
+                __u32           splice_flags;
+        };
+        __u64   user_data;      /* data to be passed back at completion time */
+        union {
+                struct {
+                        /* pack this to avoid bogus arm OABI complaints */
+                        union {
+                                /* index into fixed buffers, if used */
+                                __u16   buf_index;
+                                /* for grouped buffer selection */
+                                __u16   buf_group;
+                        } __attribute__((packed));
+                        /* personality to use, if used */
+                        __u16   personality;
+                        __s32   splice_fd_in;
+                };
+                __u64   __pad2[3];
+        };
+};
+.EE
+.in
+.PP
+To submit an I/O request to 
+.BR io_uring ,
+you need to acquire a submission queue entry (SQE) from the submission
+queue (SQ),
+fill it up with details of the operation you want to submit and call 
+.BR io_uring_enter (2). 
+If you want to avoid calling 
+.BR io_uring_enter (2),
+you have the option of setting up Submission Queue Polling.
+.PP
+SQEs are added to the tail of the submission queue.
+The kernel picks up SQEs off the head of the SQ.
+The general algorithm to get the next available SQE and update the tail is
+as follows.
+.PP
+.in +4n
+.EX
+struct io_uring_sqe *sqe;
+unsigned tail, index;
+tail = *sqring->tail;
+index = tail & (*sqring->ring_mask);
+sqe = &sqring->sqes[index];
+/* fill up details about this I/O request */
+describe_io(sqe);
+/* fill the sqe index into the SQ ring array */
+sqring->array[index] = index;
+tail++;
+atomic_store_release(sqring->tail, tail);
+.EE
+.in
+.PP
+To get the index of an entry,
+the application must mask the current tail index with the size mask of the
+ring.
+This holds true for both SQs and CQs.
+Once the SQE is acquired,
+the necessary fields are filled in,
+describing the request.
+While the CQ ring directly indexes the shared array of CQEs,
+the submission side has an indirection array between them.
+The submission side ring buffer is an index into this array,
+which in turn contains the index into the SQEs.
+.PP
+The following code snippet demonstrates how a read operation,
+an equivalent of a
+.BR preadv2 (2)
+system call is described by filling up an SQE with the necessary
+parameters.
+.PP
+.in +4n
+.EX
+struct iovec iovecs[16];
+ ...
+sqe->opcode = IORING_OP_READV;
+sqe->fd = fd;
+sqe->addr = (unsigned long) iovecs;
+sqe->len = 16;
+sqe->off = offset;
+sqe->flags = 0;
+.EE
+.in
+.TP 
+.B Memory ordering
+Modern compilers and CPUs freely reorder reads and writes without 
+affecting the program's outcome to optimize performance. 
+Some aspects of this need to be kept in mind on SMP systems since 
+.B io_uring
+involves buffers shared between kernel and user space.
+These buffers are both visible and modifiable from kernel and user space.
+As heads and tails belonging to these shared buffers are updated by kernel
+and user space,
+changes need to be coherently visible on either side,
+irrespective of whether a CPU switch took place after the kernel-user mode
+switch happened.
+We use memory barriers to enforce this coherency.
+Being significantly large subjects on their own,
+memory barriers are out of scope for further discussion on this man page.
+.TP
+.B Letting the kernel know about I/O submissions
+Once you place one or more SQEs on to the SQ,
+you need to let the kernel know that you've done so.
+You can do this by calling the
+.BR io_uring_enter (2) 
+system call.
+This system call is also capable of waiting for a specified count of
+events to complete.
+This way,
+you can be sure to find completion events in the completion queue without
+having to poll it for events later.
+.SS Reading completion events
+Similar to the submission queue (SQ),
+the completion queue (CQ) is a shared buffer between the kernel and user
+space.
+Whereas you placed submission queue entries on the tail of the SQ and the
+kernel read off the head,
+when it comes to the CQ,
+the kernel places completion queue events or CQEs on the tail of the CQ and
+you read off its head.
+.PP
+Submission is flexible (and thus a bit more complicated) since it needs to
+be able to encode different types of system calls that take various
+parameters.
+Completion,
+on the other hand is simpler since we're looking only for a return value
+back from the kernel.
+This is easily understood by looking at the completion queue event
+structure,
+struct 
+.BR io_uring_cqe :
+.PP
+.in +4n
+.EX
+struct io_uring_cqe {
+	__u64	user_data;  /* sqe->data submission passed back */
+	__s32	res;        /* result code for this event */
+	__u32	flags;
+};
+.EE
+.in
+.PP
+Here,
+.I user_data
+is custom data that is passed unchanged from submission to completion.
+That is,
+from SQEs to CQEs.
+This field can be used to set context,
+uniquely identifying submissions that got completed.
+Given that I/O requests can complete in any order,
+this field can be used to correlate a submission with a completion.
+.I res
+is the result from the system call that was performed as part of the
+submission;
+its return value.
+The
+.I flags
+field could carry request-specific metadata in the future,
+but is currently unused.
+.PP
+The general sequence to read completion events off the completion queue is
+as follows:
+.PP
+.in +4n
+.EX
+unsigned head;
+head = *cqring->head;
+if (head != atomic_load_acquire(cqring->tail)) {
+    struct io_uring_cqe *cqe;
+    unsigned index;
+    index = head & (cqring->mask);
+    cqe = &cqring->cqes[index];
+    /* process completed CQE */
+    process_cqe(cqe);
+    /* CQE consumption complete */
+    head++;
+}
+atomic_store_release(cqring->head, head);
+.EE
+.in
+.PP
+It helps to be reminded that the kernel adds CQEs to the tail of the CQ,
+while you need to dequeue them off the head.
+To get the index of an entry at the head,
+the application must mask the current head index with the size mask of the
+ring.
+Once the CQE has been consumed or processed,
+the head needs to be updated to reflect the consumption of the CQE.
+Attention should be paid to the read and write barriers to ensure
+successful read and update of the head.
+.SS io_uring performance
+Because of the shared ring buffers between kernel and user space,
+.B io_uring
+can be a zero-copy system.
+Copying buffers to and fro becomes necessary when system calls that
+transfer data between kernel and user space are involved.
+But since the bulk of the communication in 
+.B io_uring
+is via buffers shared between the kernel and user space,
+this huge performance overhead is completely avoided.
+.PP
+While system calls may not seem like a significant overhead,
+in high performance applications,
+making a lot of them will begin to matter.
+While workarounds the operating system has in place to deal with Spectre
+and Meltdown are ideally best done away with,
+unfortunately,
+some of these workarounds are around the system call interface,
+making system calls not as cheap as before on affected hardware.
+While newer hardware should not need these workarounds,
+hardware with these vulnerabilities can be expected to be in the wild for a
+long time.
+While using synchronous programming interfaces or even when using
+asynchronous programming interfaces under Linux,
+there is at least one system call involved in the submission of each
+request.
+In
+.BR io_uring ,
+on the other hand,
+you can batch several requests in one go,
+simply by queueing up multiple SQEs,
+each describing an I/O operation you want and make a single call to 
+.BR io_uring_enter (2). 
+This is possible due to
+.BR io_uring 's
+shared buffers based design.
+.PP
+While this batching in itself can avoid the overhead associated with
+potentially multiple and frequent system calls,
+you can reduce even this overhead further with Submission Queue Polling,
+by having the kernel poll and pick up your SQEs for processing as you add
+them to the submission queue. This avoids the
+.BR io_uring_enter (2)
+call you need to make to tell the kernel to pick SQEs up.
+For high-performance applications,
+this means even lesser system call overheads.
+.SH CONFORMING TO
+.B io_uring
+is Linux-specific.
+.SH EXAMPLES
+The following example uses
+.B io_uring
+to copy stdin to stdout.
+Using shell redirection,
+you should be able to copy files with this example.
+Because it uses a queue depth of only one,
+this example processes I/O requests one after the other.
+It is purposefully kept this way to aid understanding.
+In real-world scenarios however,
+you'll want to have a larger queue depth to parallelize I/O request
+processing so as to gain the kind of performance benefits
+.B io_uring
+provides with its asynchronous processing of requests.
+.PP
+.EX
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include <sys/mman.h>
+#include <sys/uio.h>
+#include <linux/fs.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdatomic.h>
+
+#include <linux/io_uring.h>
+
+#define QUEUE_DEPTH 1
+#define BLOCK_SZ    1024
+
+/* Macros for barriers needed by io_uring */
+#define io_uring_smp_store_release(p, v)            \\
+    atomic_store_explicit((_Atomic typeof(*(p)) *)(p), (v), \\
+                  memory_order_release)
+#define io_uring_smp_load_acquire(p)                \\
+    atomic_load_explicit((_Atomic typeof(*(p)) *)(p),   \\
+                 memory_order_acquire)
+
+int ring_fd;
+unsigned *sring_tail, *sring_mask, *sring_array, 
+            *cring_head, *cring_tail, *cring_mask;
+struct io_uring_sqe *sqes;
+struct io_uring_cqe *cqes;
+char buff[BLOCK_SZ];
+off_t offset;
+
+/*
+ * System call wrappers provided since glibc does not yet
+ * provide wrappers for io_uring system calls.
+* */
+
+int io_uring_setup(unsigned entries, struct io_uring_params *p)
+{
+    return (int) syscall(__NR_io_uring_setup, entries, p);
+}
+
+int io_uring_enter(int ring_fd, unsigned int to_submit,
+                   unsigned int min_complete, unsigned int flags)
+{
+    return (int) syscall(__NR_io_uring_enter, ring_fd, to_submit, min_complete,
+                         flags, NULL, 0);
+}
+
+int app_setup_uring(void) {
+    struct io_uring_params p;
+    void *sq_ptr, *cq_ptr;
+
+    /* See io_uring_setup(2) for io_uring_params.flags you can set */
+    memset(&p, 0, sizeof(p));
+    ring_fd = io_uring_setup(QUEUE_DEPTH, &p);
+    if (ring_fd < 0) {
+        perror("io_uring_setup");
+        return 1;
+    }
+
+    /*
+     * io_uring communication happens via 2 shared kernel-user space ring
+     * buffers, which can be jointly mapped with a single mmap() call in
+     * kernels >= 5.4.
+     */
+
+    int sring_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned);
+    int cring_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
+
+    /* Rather than check for kernel version, the recommended way is to
+     * check the features field of the io_uring_params structure, which is a 
+     * bitmask. If IORING_FEAT_SINGLE_MMAP is set, we can do away with the
+     * second mmap() call to map in the completion ring separately.
+     */
+    if (p.features & IORING_FEAT_SINGLE_MMAP) {
+        if (cring_sz > sring_sz)
+            sring_sz = cring_sz;
+        cring_sz = sring_sz;
+    }
+
+    /* Map in the submission and completion queue ring buffers.
+     *  Kernels < 5.4 only map in the submission queue, though.
+     */
+    sq_ptr = mmap(0, sring_sz, PROT_READ | PROT_WRITE,
+                  MAP_SHARED | MAP_POPULATE,
+                  ring_fd, IORING_OFF_SQ_RING);
+    if (sq_ptr == MAP_FAILED) {
+        perror("mmap");
+        return 1;
+    }
+
+    if (p.features & IORING_FEAT_SINGLE_MMAP) {
+        cq_ptr = sq_ptr;
+    } else {
+        /* Map in the completion queue ring buffer in older kernels separately */
+        cq_ptr = mmap(0, cring_sz, PROT_READ | PROT_WRITE,
+                      MAP_SHARED | MAP_POPULATE,
+                      ring_fd, IORING_OFF_CQ_RING);
+        if (cq_ptr == MAP_FAILED) {
+            perror("mmap");
+            return 1;
+        }
+    }
+    /* Save useful fields for later easy reference */
+    sring_tail = sq_ptr + p.sq_off.tail;
+    sring_mask = sq_ptr + p.sq_off.ring_mask;
+    sring_array = sq_ptr + p.sq_off.array;
+
+    /* Map in the submission queue entries array */
+    sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
+                   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
+                   ring_fd, IORING_OFF_SQES);
+    if (sqes == MAP_FAILED) {
+        perror("mmap");
+        return 1;
+    }
+
+    /* Save useful fields for later easy reference */
+    cring_head = cq_ptr + p.cq_off.head;
+    cring_tail = cq_ptr + p.cq_off.tail;
+    cring_mask = cq_ptr + p.cq_off.ring_mask;
+    cqes = cq_ptr + p.cq_off.cqes;
+
+    return 0;
+}
+
+/*
+* Read from completion queue.
+* In this function, we read completion events from the completion queue.
+* We dequeue the CQE, update the head and return the result of the operation.
+* */
+
+int read_from_cq() {
+    struct io_uring_cqe *cqe;
+    unsigned head, reaped = 0;
+
+    /* Read barrier */
+    head = io_uring_smp_load_acquire(cring_head);
+    /*
+    * Remember, this is a ring buffer. If head == tail, it means that the
+    * buffer is empty.
+    * */
+    if (head == *cring_tail)
+        return -1;
+
+    /* Get the entry */
+    cqe = &cqes[head & (*cring_mask)];
+    if (cqe->res < 0)
+        fprintf(stderr, "Error: %s\\n", strerror(abs(cqe->res)));
+
+    head++;
+
+    /* Write barrier so that update to the head are made visible */
+    io_uring_smp_store_release(cring_head, head);
+
+    return cqe->res;
+}
+
+/*
+* Submit a read or a write request to the submission queue.
+* */
+
+int submit_to_sq(int fd, int op) {
+    unsigned index, tail;
+
+    /* Add our submission queue entry to the tail of the SQE ring buffer */
+    tail = *sring_tail;
+    index = tail & *sring_mask;
+    struct io_uring_sqe *sqe = &sqes[index];
+    /* Fill in the parameters required for the read or write operation */
+    sqe->opcode = op;
+    sqe->fd = fd;
+    sqe->addr = (unsigned long) buff;
+    if (op == IORING_OP_READ) {
+        memset(buff, 0, sizeof(buff));
+        sqe->len = BLOCK_SZ;
+    }
+    else {
+        sqe->len = strlen(buff);
+    }
+    sqe->off = offset;
+
+    sring_array[index] = index;
+    tail++;
+
+    /* Update the tail */
+    io_uring_smp_store_release(sring_tail, tail);
+
+    /*
+    * Tell the kernel we have submitted events with the io_uring_enter() system
+    * call. We also pass in the IORING_ENTER_GETEVENTS flag which causes the
+    * io_uring_enter() call to wait until min_complete (the 3rd param) events
+    * complete.
+    * */
+    int ret =  io_uring_enter(ring_fd, 1,1,
+                              IORING_ENTER_GETEVENTS);
+    if(ret < 0) {
+        perror("io_uring_enter");
+        return -1;
+    }
+
+    return ret;
+}
+
+int main(int argc, char *argv[]) {
+    int res;
+
+    /* Setup io_uring for use */
+    if(app_setup_uring()) {
+        fprintf(stderr, "Unable to setup uring!\\n");
+        return 1;
+    }
+
+    /* 
+    * A while loop that reads from stdin and writes to stdout.
+    * Breaks on EOF.
+    */
+    while (1) {
+        /* Initiate read from stdin and wait for it to complete */
+        submit_to_sq(STDIN_FILENO, IORING_OP_READ);
+        /* Read completion queue entry */
+        res = read_from_cq();
+        if (res > 0) {
+            /* Read successful. Write to stdout. */
+            submit_to_sq(STDOUT_FILENO, IORING_OP_WRITE);
+            read_from_cq();
+        } else if (res == 0) {
+            /* reached EOF */
+            break;
+        }
+        else if (res < 0) {
+            /* Error reading file */
+            fprintf(stderr, "Error: %s\\n", strerror(abs(res)));
+            break;
+        }
+        offset += res;
+    }
+
+    return 0;
+}
+.EE
+.SH SEE ALSO
+.BR io_uring_enter (2)
+.BR io_uring_register (2)
+.BR io_uring_setup (2)
diff --git a/man/io_uring_enter.2 b/man/io_uring_enter.2
new file mode 100644
index 0000000..909cc9b
--- /dev/null
+++ b/man/io_uring_enter.2
@@ -0,0 +1,1158 @@
+.\" Copyright (C) 2019 Jens Axboe <axboe@kernel.dk>
+.\" Copyright (C) 2019 Red Hat, Inc.
+.\"
+.\" SPDX-License-Identifier: LGPL-2.0-or-later
+.\"
+.TH IO_URING_ENTER 2 2019-01-22 "Linux" "Linux Programmer's Manual"
+.SH NAME
+io_uring_enter \- initiate and/or complete asynchronous I/O
+.SH SYNOPSIS
+.nf
+.BR "#include <linux/io_uring.h>"
+.PP
+.BI "int io_uring_enter(unsigned int " fd ", unsigned int " to_submit ,
+.BI "                   unsigned int " min_complete ", unsigned int " flags ,
+.BI "                   sigset_t *" sig );
+.fi
+.PP
+.SH DESCRIPTION
+.PP
+.BR io_uring_enter ()
+is used to initiate and complete I/O using the shared submission and
+completion queues setup by a call to
+.BR io_uring_setup (2).
+A single call can both submit new I/O and wait for completions of I/O
+initiated by this call or previous calls to
+.BR io_uring_enter ().
+
+.I fd
+is the file descriptor returned by
+.BR io_uring_setup (2).
+.I to_submit
+specifies the number of I/Os to submit from the submission queue.
+.I flags
+is a bitmask of the following values:
+.TP
+.B IORING_ENTER_GETEVENTS
+If this flag is set, then the system call will wait for the specified
+number of events in
+.I min_complete
+before returning. This flag can be set along with
+.I to_submit
+to both submit and complete events in a single system call.
+.TP
+.B IORING_ENTER_SQ_WAKEUP
+If the ring has been created with
+.B IORING_SETUP_SQPOLL,
+then this flag asks the kernel to wakeup the SQ kernel thread to submit IO.
+.TP
+.B IORING_ENTER_SQ_WAIT
+If the ring has been created with
+.B IORING_SETUP_SQPOLL,
+then the application has no real insight into when the SQ kernel thread has
+consumed entries from the SQ ring. This can lead to a situation where the
+application can no longer get a free SQE entry to submit, without knowing
+when one becomes available as the SQ kernel thread consumes them. If
+the system call is used with this flag set, then it will wait until at least
+one entry is free in the SQ ring.
+.PP
+.PP
+If the io_uring instance was configured for polling, by specifying
+.B IORING_SETUP_IOPOLL
+in the call to
+.BR io_uring_setup (2),
+then min_complete has a slightly different meaning.  Passing a value
+of 0 instructs the kernel to return any events which are already complete,
+without blocking.  If
+.I min_complete
+is a non-zero value, the kernel will still return immediately if any
+completion events are available.  If no event completions are
+available, then the call will poll either until one or more
+completions become available, or until the process has exceeded its
+scheduler time slice.
+
+Note that, for interrupt driven I/O (where
+.B IORING_SETUP_IOPOLL
+was not specified in the call to
+.BR io_uring_setup (2)),
+an application may check the completion queue for event completions
+without entering the kernel at all.
+.PP
+When the system call returns that a certain number of SQEs have been
+consumed and submitted, it's safe to reuse SQE entries in the ring. This is
+true even if the actual IO submission had to be punted to async context,
+which means that the SQE may in fact not have been submitted yet. If the
+kernel requires later use of a particular SQE entry, it will have made a
+private copy of it.
+
+.I sig
+is a pointer to a signal mask (see
+.BR sigprocmask (2));
+if
+.I sig
+is not NULL,
+.BR io_uring_enter ()
+first replaces the current signal mask by the one pointed to by
+.IR sig ,
+then waits for events to become available in the completion queue, and
+then restores the original signal mask.  The following
+.BR io_uring_enter ()
+call:
+.PP
+.in +4n
+.EX
+ret = io_uring_enter(fd, 0, 1, IORING_ENTER_GETEVENTS, &sig);
+.EE
+.in
+.PP
+is equivalent to
+.I atomically
+executing the following calls:
+.PP
+.in +4n
+.EX
+pthread_sigmask(SIG_SETMASK, &sig, &orig);
+ret = io_uring_enter(fd, 0, 1, IORING_ENTER_GETEVENTS, NULL);
+pthread_sigmask(SIG_SETMASK, &orig, NULL);
+.EE
+.in
+.PP
+See the description of
+.BR pselect (2)
+for an explanation of why the
+.I sig
+parameter is necessary.
+
+Submission queue entries are represented using the following data
+structure:
+.PP
+.in +4n
+.EX
+/*
+ * IO submission data structure (Submission Queue Entry)
+ */
+struct io_uring_sqe {
+    __u8    opcode;         /* type of operation for this sqe */
+    __u8    flags;          /* IOSQE_ flags */
+    __u16   ioprio;         /* ioprio for the request */
+    __s32   fd;             /* file descriptor to do IO on */
+    union {
+        __u64   off;            /* offset into file */
+        __u64   addr2;
+    };
+    union {
+        __u64   addr;       /* pointer to buffer or iovecs */
+        __u64   splice_off_in;
+    };
+    __u32   len;            /* buffer size or number of iovecs */
+    union {
+        __kernel_rwf_t  rw_flags;
+        __u32    fsync_flags;
+        __u16    poll_events;   /* compatibility */
+        __u32    poll32_events; /* word-reversed for BE */
+        __u32    sync_range_flags;
+        __u32    msg_flags;
+        __u32    timeout_flags;
+        __u32    accept_flags;
+        __u32    cancel_flags;
+        __u32    open_flags;
+        __u32    statx_flags;
+        __u32    fadvise_advice;
+        __u32    splice_flags;
+    };
+    __u64    user_data;     /* data to be passed back at completion time */
+    union {
+	struct {
+	    /* index into fixed buffers, if used */
+            union {
+                /* index into fixed buffers, if used */
+                __u16    buf_index;
+                /* for grouped buffer selection */
+                __u16    buf_group;
+            };
+	    /* personality to use, if used */
+	    __u16    personality;
+            __s32    splice_fd_in;
+	};
+        __u64    __pad2[3];
+    };
+};
+.EE
+.in
+.PP
+The
+.I opcode
+describes the operation to be performed.  It can be one of:
+.TP
+.B IORING_OP_NOP
+Do not perform any I/O.  This is useful for testing the performance of
+the io_uring implementation itself.
+.TP
+.B IORING_OP_READV
+.TP
+.B IORING_OP_WRITEV
+Vectored read and write operations, similar to
+.BR preadv2 (2)
+and
+.BR pwritev2 (2).
+If the file is not seekable,
+.I off
+must be set to zero.
+
+.TP
+.B IORING_OP_READ_FIXED
+.TP
+.B IORING_OP_WRITE_FIXED
+Read from or write to pre-mapped buffers.  See
+.BR io_uring_register (2)
+for details on how to setup a context for fixed reads and writes.
+
+.TP
+.B IORING_OP_FSYNC
+File sync.  See also
+.BR fsync (2).
+Note that, while I/O is initiated in the order in which it appears in
+the submission queue, completions are unordered.  For example, an
+application which places a write I/O followed by an fsync in the
+submission queue cannot expect the fsync to apply to the write.  The
+two operations execute in parallel, so the fsync may complete before
+the write is issued to the storage.  The same is also true for
+previously issued writes that have not completed prior to the fsync.
+
+.TP
+.B IORING_OP_POLL_ADD
+Poll the
+.I fd
+specified in the submission queue entry for the events
+specified in the
+.I poll_events
+field.  Unlike poll or epoll without
+.BR EPOLLONESHOT ,
+this interface always works in one shot mode.  That is, once the poll
+operation is completed, it will have to be resubmitted. This command works like
+an async
+.BR poll(2)
+and the completion event result is the returned mask of events.
+
+.TP
+.B IORING_OP_POLL_REMOVE
+Remove an existing poll request.  If found, the
+.I res
+field of the
+.I "struct io_uring_cqe"
+will contain 0.  If not found,
+.I res
+will contain
+.B -ENOENT.
+
+.TP
+.B IORING_OP_EPOLL_CTL
+Add, remove or modify entries in the interest list of
+.BR epoll (7).
+See
+.BR epoll_ctl (2)
+for details of the system call.
+.I fd
+holds the file descriptor that represents the epoll instance,
+.I addr
+holds the file descriptor to add, remove or modify,
+.I len
+holds the operation (EPOLL_CTL_ADD, EPOLL_CTL_DEL, EPOLL_CTL_MOD) to perform and,
+.I off
+holds a pointer to the
+.I epoll_events
+structure. Available since 5.6.
+
+.TP
+.B IORING_OP_SYNC_FILE_RANGE
+Issue the equivalent of a \fBsync_file_range\fR (2) on the file descriptor. The
+.I fd
+field is the file descriptor to sync, the
+.I off
+field holds the offset in bytes, the
+.I len
+field holds the length in bytes, and the
+.I sync_range_flags
+field holds the flags for the command. See also
+.BR sync_file_range (2)
+for the general description of the related system call. Available since 5.2.
+
+.TP
+.B IORING_OP_SENDMSG
+Issue the equivalent of a
+.BR sendmsg(2)
+system call.
+.I fd
+must be set to the socket file descriptor,
+.I addr
+must contain a pointer to the msghdr structure, and
+.I msg_flags
+holds the flags associated with the system call. See also
+.BR sendmsg (2)
+for the general description of the related system call. Available since 5.3.
+
+.TP
+.B IORING_OP_RECVMSG
+Works just like IORING_OP_SENDMSG, except for
+.BR recvmsg(2)
+instead. See the description of IORING_OP_SENDMSG. Available since 5.3.
+
+.TP
+.B IORING_OP_SEND
+Issue the equivalent of a
+.BR send(2)
+system call.
+.I fd
+must be set to the socket file descriptor,
+.I addr
+must contain a pointer to the buffer,
+.I len
+denotes the length of the buffer to send, and
+.I msg_flags
+holds the flags associated with the system call. See also
+.BR send(2)
+for the general description of the related system call. Available since 5.6.
+
+.TP
+.B IORING_OP_RECV
+Works just like IORING_OP_SEND, except for
+.BR recv(2)
+instead. See the description of IORING_OP_SEND. Available since 5.6.
+
+.TP
+.B IORING_OP_TIMEOUT
+This command will register a timeout operation. The
+.I addr
+field must contain a pointer to a struct timespec64 structure,
+.I len
+must contain 1 to signify one timespec64 structure,
+.I timeout_flags
+may contain IORING_TIMEOUT_ABS
+for an absolute timeout value, or 0 for a relative timeout.
+.I off
+may contain a completion event count. A timeout
+will trigger a wakeup event on the completion ring for anyone waiting for
+events. A timeout condition is met when either the specified timeout expires,
+or the specified number of events have completed. Either condition will
+trigger the event. If set to 0, completed events are not counted, which
+effectively acts like a timer. io_uring timeouts use the
+.B CLOCK_MONOTONIC
+clock source. The request will complete with
+.I -ETIME
+if the timeout got completed through expiration of the timer, or
+.I 0
+if the timeout got completed through requests completing on their own. If
+the timeout was cancelled before it expired, the request will complete with
+.I -ECANCELED.
+Available since 5.4.
+
+.TP
+.B IORING_OP_TIMEOUT_REMOVE
+If
+.I timeout_flags
+are zero, then it attempts to remove an existing timeout
+operation.
+.I addr
+must contain the
+.I user_data
+field of the previously issued timeout operation. If the specified timeout
+request is found and cancelled successfully, this request will terminate
+with a result value of
+.I 0
+If the timeout request was found but expiration was already in progress,
+this request will terminate with a result value of
+.I -EBUSY
+If the timeout request wasn't found, the request will terminate with a result
+value of
+.I -ENOENT
+Available since 5.5.
+
+If
+.I timeout_flags
+contain
+.I IORING_TIMEOUT_UPDATE,
+instead of removing an existing operation it updates it.
+.I addr
+and return values are same as before.
+.I addr2
+field must contain a pointer to a struct timespec64 structure.
+.I timeout_flags
+may also contain IORING_TIMEOUT_ABS.
+Available since 5.11.
+
+.TP
+.B IORING_OP_ACCEPT
+Issue the equivalent of an
+.BR accept4(2)
+system call.
+.I fd
+must be set to the socket file descriptor,
+.I addr
+must contain the pointer to the sockaddr structure, and
+.I addr2
+must contain a pointer to the socklen_t addrlen field. See also
+.BR accept4(2)
+for the general description of the related system call. Available since 5.5.
+
+.TP
+.B IORING_OP_ASYNC_CANCEL
+Attempt to cancel an already issued request.
+.I addr
+must contain the
+.I user_data
+field of the request that should be cancelled. The cancellation request will
+complete with one of the following results codes. If found, the
+.I res
+field of the cqe will contain 0. If not found,
+.I res
+will contain -ENOENT. If found and attempted cancelled, the
+.I res
+field will contain -EALREADY. In this case, the request may or may not
+terminate. In general, requests that are interruptible (like socket IO) will
+get cancelled, while disk IO requests cannot be cancelled if already started.
+Available since 5.5.
+
+.TP
+.B IORING_OP_LINK_TIMEOUT
+This request must be linked with another request through
+.I IOSQE_IO_LINK
+which is described below. Unlike
+.I IORING_OP_TIMEOUT,
+.I IORING_OP_LINK_TIMEOUT
+acts on the linked request, not the completion queue. The format of the command
+is otherwise like
+.I IORING_OP_TIMEOUT,
+except there's no completion event count as it's tied to a specific request.
+If used, the timeout specified in the command will cancel the linked command,
+unless the linked command completes before the timeout. The timeout will
+complete with
+.I -ETIME
+if the timer expired and the linked request was attempted cancelled, or
+.I -ECANCELED
+if the timer got cancelled because of completion of the linked request. Like
+.B IORING_OP_TIMEOUT
+the clock source used is
+.B CLOCK_MONOTONIC
+Available since 5.5.
+
+
+.TP
+.B IORING_OP_CONNECT
+Issue the equivalent of a
+.BR connect(2)
+system call.
+.I fd
+must be set to the socket file descriptor,
+.I addr
+must contain the const pointer to the sockaddr structure, and
+.I off
+must contain the socklen_t addrlen field. See also
+.BR connect(2)
+for the general description of the related system call. Available since 5.5.
+
+.TP
+.B IORING_OP_FALLOCATE
+Issue the equivalent of a
+.BR fallocate(2)
+system call.
+.I fd
+must be set to the file descriptor,
+.I len
+must contain the mode associated with the operation,
+.I off
+must contain the offset on which to operate, and
+.I addr
+must contain the length. See also
+.BR fallocate(2)
+for the general description of the related system call. Available since 5.6.
+
+.TP
+.B IORING_OP_FADVISE
+Issue the equivalent of a
+.BR posix_fadvise(2)
+system call.
+.I fd
+must be set to the file descriptor,
+.I off
+must contain the offset on which to operate,
+.I len
+must contain the length, and
+.I fadvise_advice
+must contain the advice associated with the operation. See also
+.BR posix_fadvise(2)
+for the general description of the related system call. Available since 5.6.
+
+.TP
+.B IORING_OP_MADVISE
+Issue the equivalent of a
+.BR madvise(2)
+system call.
+.I addr
+must contain the address to operate on,
+.I len
+must contain the length on which to operate,
+and
+.I fadvise_advice
+must contain the advice associated with the operation. See also
+.BR madvise(2)
+for the general description of the related system call. Available since 5.6.
+
+.TP
+.B IORING_OP_OPENAT
+Issue the equivalent of a
+.BR openat(2)
+system call.
+.I fd
+is the
+.I dirfd
+argument,
+.I addr
+must contain a pointer to the
+.I *pathname
+argument,
+.I open_flags
+should contain any flags passed in, and
+.I len
+is access mode of the file. See also
+.BR openat(2)
+for the general description of the related system call. Available since 5.6.
+
+.TP
+.B IORING_OP_OPENAT2
+Issue the equivalent of a
+.BR openat2(2)
+system call.
+.I fd
+is the
+.I dirfd
+argument,
+.I addr
+must contain a pointer to the
+.I *pathname
+argument,
+.I len
+should contain the size of the open_how structure, and
+.I off
+should be set to the address of the open_how structure. See also
+.BR openat2(2)
+for the general description of the related system call. Available since 5.6.
+
+.TP
+.B IORING_OP_CLOSE
+Issue the equivalent of a
+.BR close(2)
+system call.
+.I fd
+is the file descriptor to be closed. See also
+.BR close(2)
+for the general description of the related system call. Available since 5.6.
+
+.TP
+.B IORING_OP_STATX
+Issue the equivalent of a
+.BR statx(2)
+system call.
+.I fd
+is the
+.I dirfd
+argument,
+.I addr
+must contain a pointer to the
+.I *pathname
+string,
+.I statx_flags
+is the
+.I flags
+argument,
+.I len
+should be the
+.I mask
+argument, and
+.I off
+must contain a pointer to the
+.I statxbuf
+to be filled in. See also
+.BR statx(2)
+for the general description of the related system call. Available since 5.6.
+
+.TP
+.B IORING_OP_READ
+.TP
+.B IORING_OP_WRITE
+Issue the equivalent of a
+.BR pread(2)
+or
+.BR pwrite(2)
+system call.
+.I fd
+is the file descriptor to be operated on,
+.I addr
+contains the buffer in question,
+.I len
+contains the length of the IO operation, and
+.I off
+contains the read or write offset. If
+.I fd
+does not refer to a seekable file,
+.I off
+must be set to zero. If
+.I off
+is set to -1, the offset will use (and advance) the file position, like the
+.BR read(2)
+and
+.BR write(2)
+system calls. These are non-vectored versions of the
+.B IORING_OP_READV
+and
+.B IORING_OP_WRITEV
+opcodes. See also
+.BR read(2)
+and
+.BR write(2)
+for the general description of the related system call. Available since 5.6.
+
+.TP
+.B IORING_OP_SPLICE
+Issue the equivalent of a
+.BR splice(2)
+system call.
+.I splice_fd_in
+is the file descriptor to read from,
+.I splice_off_in
+is an offset to read from,
+.I fd
+is the file descriptor to write to,
+.I off
+is an offset from which to start writing to. A sentinel value of -1 is used
+to pass the equivalent of a NULL for the offsets to
+.BR splice(2).
+.I len
+contains the number of bytes to copy.
+.I splice_flags
+contains a bit mask for the flag field associated with the system call.
+Please note that one of the file descriptors must refer to a pipe.
+See also
+.BR splice(2)
+for the general description of the related system call. Available since 5.7.
+
+.TP
+.B IORING_OP_TEE
+Issue the equivalent of a
+.BR tee(2)
+system call.
+.I splice_fd_in
+is the file descriptor to read from,
+.I fd
+is the file descriptor to write to,
+.I len
+contains the number of bytes to copy, and
+.I splice_flags
+contains a bit mask for the flag field associated with the system call.
+Please note that both of the file descriptors must refer to a pipe.
+See also
+.BR tee(2)
+for the general description of the related system call. Available since 5.8.
+
+.TP
+.B IORING_OP_FILES_UPDATE
+This command is an alternative to using
+.B IORING_REGISTER_FILES_UPDATE
+which then works in an async fashion, like the rest of the io_uring commands.
+The arguments passed in are the same.
+.I addr
+must contain a pointer to the array of file descriptors,
+.I len
+must contain the length of the array, and
+.I off
+must contain the offset at which to operate. Note that the array of file
+descriptors pointed to in
+.I addr
+must remain valid until this operation has completed. Available since 5.6.
+
+.TP
+.B IORING_OP_PROVIDE_BUFFERS
+This command allows an application to register a group of buffers to be used
+by commands that read/receive data. Using buffers in this manner can eliminate
+the need to separate the poll + read, which provides a convenient point in
+time to allocate a buffer for a given request. It's often infeasible to have
+as many buffers available as pending reads or receive. With this feature, the
+application can have its pool of buffers ready in the kernel, and when the
+file or socket is ready to read/receive data, a buffer can be selected for the
+operation.
+.I fd
+must contain the number of buffers to provide,
+.I addr
+must contain the starting address to add buffers from,
+.I len
+must contain the length of each buffer to add from the range,
+.I buf_group
+must contain the group ID of this range of buffers, and
+.I off
+must contain the starting buffer ID of this range of buffers. With that set,
+the kernel adds buffers starting with the memory address in
+.I addr,
+each with a length of
+.I len.
+Hence the application should provide
+.I len * fd
+worth of memory in
+.I addr.
+Buffers are grouped by the group ID, and each buffer within this group will be
+identical in size according to the above arguments. This allows the application
+to provide different groups of buffers, and this is often used to have
+differently sized buffers available depending on what the expectations are of
+the individual request. When submitting a request that should use a provided
+buffer, the
+.B IOSQE_BUFFER_SELECT
+flag must be set, and
+.I buf_group
+must be set to the desired buffer group ID where the buffer should be selected
+from. Available since 5.7.
+
+.TP
+.B IORING_OP_REMOVE_BUFFERS
+Remove buffers previously registered with
+.B IORING_OP_PROVIDE_BUFFERS.
+.I fd
+must contain the number of buffers to remove, and
+.I buf_group
+must contain the buffer group ID from which to remove the buffers. Available
+since 5.7.
+
+.TP
+.B IORING_OP_SHUTDOWN
+Issue the equivalent of a
+.BR shutdown(2)
+system call.
+.I fd
+is the file descriptor to the socket being shutdown, no other fields should
+be set. Available since 5.11.
+
+.TP
+.B IORING_OP_RENAMEAT
+Issue the equivalent of a
+.BR renameat2(2)
+system call.
+.I fd
+should be set to the
+.I olddirfd,
+.I addr
+should be set to the
+.I oldpath,
+.I len
+should be set to the
+.I newdirfd,
+.I addr2
+should be set to the
+.I newpath,
+and finally
+.I rename_flags
+should be set to the
+.I flags
+passed in to
+.BR renameat2(2).
+Available since 5.11.
+
+.TP
+.B IORING_OP_UNLINKAT
+Issue the equivalent of a
+.BR unlinkat(2)
+system call.
+.I fd
+should be set to the
+.I dirfd,
+.I addr
+should be set to the
+.I pathname,
+and
+.I unlink_flags
+should be set to the
+.I flags
+being passed in to
+.BR unlinkat(2).
+Available since 5.11.
+
+.PP
+The
+.I flags
+field is a bit mask. The supported flags are:
+.TP
+.B IOSQE_FIXED_FILE
+When this flag is specified,
+.I fd
+is an index into the files array registered with the io_uring instance (see the
+.B IORING_REGISTER_FILES
+section of the
+.BR io_uring_register (2)
+man page). Available since 5.1.
+.TP
+.B IOSQE_IO_DRAIN
+When this flag is specified, the SQE will not be started before previously
+submitted SQEs have completed, and new SQEs will not be started before this
+one completes. Available since 5.2.
+.TP
+.B IOSQE_IO_LINK
+When this flag is specified, it forms a link with the next SQE in the
+submission ring. That next SQE will not be started before this one completes.
+This, in effect, forms a chain of SQEs, which can be arbitrarily long. The tail
+of the chain is denoted by the first SQE that does not have this flag set.
+This flag has no effect on previous SQE submissions, nor does it impact SQEs
+that are outside of the chain tail. This means that multiple chains can be
+executing in parallel, or chains and individual SQEs. Only members inside the
+chain are serialized. A chain of SQEs will be broken, if any request in that
+chain ends in error. io_uring considers any unexpected result an error. This
+means that, eg, a short read will also terminate the remainder of the chain.
+If a chain of SQE links is broken, the remaining unstarted part of the chain
+will be terminated and completed with
+.B -ECANCELED
+as the error code. Available since 5.3.
+.TP
+.B IOSQE_IO_HARDLINK
+Like IOSQE_IO_LINK, but it doesn't sever regardless of the completion result.
+Note that the link will still sever if we fail submitting the parent request,
+hard links are only resilient in the presence of completion results for
+requests that did submit correctly. IOSQE_IO_HARDLINK implies IOSQE_IO_LINK.
+Available since 5.5.
+.TP
+.B IOSQE_ASYNC
+Normal operation for io_uring is to try and issue an sqe as non-blocking first,
+and if that fails, execute it in an async manner. To support more efficient
+overlapped operation of requests that the application knows/assumes will
+always (or most of the time) block, the application can ask for an sqe to be
+issued async from the start. Available since 5.6.
+.TP
+.B IOSQE_BUFFER_SELECT
+Used in conjunction with the
+.B IORING_OP_PROVIDE_BUFFERS
+command, which registers a pool of buffers to be used by commands that read
+or receive data. When buffers are registered for this use case, and this
+flag is set in the command, io_uring will grab a buffer from this pool when
+the request is ready to receive or read data. If successful, the resulting CQE
+will have
+.B IORING_CQE_F_BUFFER
+set in the flags part of the struct, and the upper
+.B IORING_CQE_BUFFER_SHIFT
+bits will contain the ID of the selected buffers. This allows the application
+to know exactly which buffer was selected for the operation. If no buffers
+are available and this flag is set, then the request will fail with
+.B -ENOBUFS
+as the error code. Once a buffer has been used, it is no longer available in
+the kernel pool. The application must re-register the given buffer again when
+it is ready to recycle it (eg has completed using it). Available since 5.7.
+
+.PP
+.I ioprio
+specifies the I/O priority.  See
+.BR ioprio_get (2)
+for a description of Linux I/O priorities.
+
+.I fd
+specifies the file descriptor against which the operation will be
+performed, with the exception noted above.
+
+If the operation is one of
+.B IORING_OP_READ_FIXED
+or
+.BR IORING_OP_WRITE_FIXED ,
+.I addr
+and
+.I len
+must fall within the buffer located at
+.I buf_index
+in the fixed buffer array.  If the operation is either
+.B IORING_OP_READV
+or
+.BR IORING_OP_WRITEV ,
+then
+.I addr
+points to an iovec array of
+.I len
+entries.
+
+.IR rw_flags ,
+specified for read and write operations, contains a bitwise OR of
+per-I/O flags, as described in the
+.BR preadv2 (2)
+man page.
+
+The
+.I fsync_flags
+bit mask may contain either 0, for a normal file integrity sync, or
+.B IORING_FSYNC_DATASYNC
+to provide data sync only semantics.  See the descriptions of
+.B O_SYNC
+and
+.B O_DSYNC
+in the
+.BR open (2)
+manual page for more information.
+
+The bits that may be set in
+.I poll_events
+are defined in \fI<poll.h>\fP, and documented in
+.BR poll (2).
+
+.I user_data
+is an application-supplied value that will be copied into
+the completion queue entry (see below).
+.I buf_index
+is an index into an array of fixed buffers, and is only valid if fixed
+buffers were registered.
+.I personality
+is the credentials id to use for this operation. See
+.BR io_uring_register(2)
+for how to register personalities with io_uring. If set to 0, the current
+personality of the submitting task is used.
+.PP
+Once the submission queue entry is initialized, I/O is submitted by
+placing the index of the submission queue entry into the tail of the
+submission queue.  After one or more indexes are added to the queue,
+and the queue tail is advanced, the
+.BR io_uring_enter (2)
+system call can be invoked to initiate the I/O.
+
+Completions use the following data structure:
+.PP
+.in +4n
+.EX
+/*
+ * IO completion data structure (Completion Queue Entry)
+ */
+struct io_uring_cqe {
+    __u64    user_data; /* sqe->data submission passed back */
+    __s32    res;       /* result code for this event */
+    __u32    flags;
+};
+.EE
+.in
+.PP
+.I user_data
+is copied from the field of the same name in the submission queue
+entry.  The primary use case is to store data that the application
+will need to access upon completion of this particular I/O.  The
+.I flags
+is reserved for future use.
+.I res
+is the operation-specific result, but io_uring-specific errors
+(e.g. flags or opcode invalid) are returned through this field.
+They are described in section
+.B CQE ERRORS.
+.PP
+For read and write opcodes, the
+return values match those documented in the
+.BR preadv2 (2)
+and
+.BR pwritev2 (2)
+man pages.
+Return codes for the io_uring-specific opcodes are documented in the
+description of the opcodes above.
+.PP
+.SH RETURN VALUE
+.BR io_uring_enter ()
+returns the number of I/Os successfully consumed.  This can be zero
+if
+.I to_submit
+was zero or if the submission queue was empty. Note that if the ring was
+created with
+.B IORING_SETUP_SQPOLL
+specified, then the return value will generally be the same as
+.I to_submit
+as submission happens outside the context of the system call.
+
+The errors related to a submission queue entry will be returned through a
+completion queue entry (see section
+.B CQE ERRORS),
+rather than through the system call itself.
+
+Errors that occur not on behalf of a submission queue entry are returned via the
+system call directly. On such an error, -1 is returned and
+.I errno
+is set appropriately.
+.PP
+.SH ERRORS
+These are the errors returned by
+.BR io_uring_enter ()
+system call.
+.TP
+.B EAGAIN
+The kernel was unable to allocate memory for the request, or otherwise ran out
+of resources to handle it. The application should wait for some completions and
+try again.
+.TP
+.B EBADF
+.I fd
+is not a valid file descriptor.
+.TP
+.B EBADFD
+.I fd
+is a valid file descriptor, but the io_uring ring is not in the right state
+(enabled). See
+.BR io_uring_register (2)
+for details on how to enable the ring.
+.TP
+.B EBUSY
+The application is attempting to overcommit the number of requests it can have
+pending. The application should wait for some completions and try again. May
+occur if the application tries to queue more requests than we have room for in
+the CQ ring, or if the application attempts to wait for more events without
+having reaped the ones already present in the CQ ring.
+.TP
+.B EINVAL
+Some bits in the
+.I flags
+argument are invalid.
+.TP
+.B EFAULT
+An invalid user space address was specified for the
+.I sig
+argument.
+.TP
+.B ENXIO
+The io_uring instance is in the process of being torn down.
+.TP
+.B EOPNOTSUPP
+.I fd
+does not refer to an io_uring instance.
+.TP
+.B EINTR
+The operation was interrupted by a delivery of a signal before it could
+complete; see
+.BR signal(7).
+Can happen while waiting for events with
+.B IORING_ENTER_GETEVENTS.
+
+.SH CQE ERRORS
+These io_uring-specific errors are returned as a negative value in the
+.I res
+field of the completion queue entry.
+.TP
+.B EACCES
+The
+.I flags
+field or
+.I opcode
+in a submission queue entry is not allowed due to registered restrictions.
+See
+.BR io_uring_register (2)
+for details on how restrictions work.
+.TP
+.B EBADF
+The
+.I fd
+field in the submission queue entry is invalid, or the
+.B IOSQE_FIXED_FILE
+flag was set in the submission queue entry, but no files were registered
+with the io_uring instance.
+.TP
+.B EFAULT
+buffer is outside of the process' accessible address space
+.TP
+.B EFAULT
+.B IORING_OP_READ_FIXED
+or
+.B IORING_OP_WRITE_FIXED
+was specified in the
+.I opcode
+field of the submission queue entry, but either buffers were not
+registered for this io_uring instance, or the address range described
+by
+.I addr
+and
+.I len
+does not fit within the buffer registered at
+.IR buf_index .
+.TP
+.B EINVAL
+The
+.I flags
+field or
+.I opcode
+in a submission queue entry is invalid.
+.TP
+.B EINVAL
+The
+.I buf_index
+member of the submission queue entry is invalid.
+.TP
+.B EINVAL
+The
+.I personality
+field in a submission queue entry is invalid.
+.TP
+.B EINVAL
+.B IORING_OP_NOP
+was specified in the submission queue entry, but the io_uring context
+was setup for polling
+.RB ( IORING_SETUP_IOPOLL
+was specified in the call to io_uring_setup).
+.TP
+.B EINVAL
+.B IORING_OP_READV
+or
+.B IORING_OP_WRITEV
+was specified in the submission queue entry, but the io_uring instance
+has fixed buffers registered.
+.TP
+.B EINVAL
+.B IORING_OP_READ_FIXED
+or
+.B IORING_OP_WRITE_FIXED
+was specified in the submission queue entry, and the
+.I buf_index
+is invalid.
+.TP
+.B EINVAL
+.BR IORING_OP_READV ,
+.BR IORING_OP_WRITEV ,
+.BR IORING_OP_READ_FIXED ,
+.B IORING_OP_WRITE_FIXED
+or
+.B IORING_OP_FSYNC
+was specified in the submission queue entry, but the io_uring instance
+was configured for IOPOLLing, or any of
+.IR addr ,
+.IR ioprio ,
+.IR off ,
+.IR len ,
+or
+.I buf_index
+was set in the submission queue entry.
+.TP
+.B EINVAL
+.B IORING_OP_POLL_ADD
+or
+.B IORING_OP_POLL_REMOVE
+was specified in the
+.I opcode
+field of the submission queue entry, but the io_uring instance was
+configured for busy-wait polling
+.RB ( IORING_SETUP_IOPOLL ),
+or any of
+.IR ioprio ,
+.IR off ,
+.IR len ,
+or
+.I buf_index
+was non-zero in the submission queue entry.
+.TP
+.B EINVAL
+.B IORING_OP_POLL_ADD
+was specified in the
+.I opcode
+field of the submission queue entry, and the
+.I addr
+field was non-zero.
+.TP
+.B EOPNOTSUPP
+.I opcode
+is valid, but not supported by this kernel.
+.TP
+.B EOPNOTSUPP
+.B IOSQE_BUFFER_SELECT
+was set in the
+.I flags
+field of the submission queue entry, but the
+.I opcode
+doesn't support buffer selection.
diff --git a/man/io_uring_get_sqe.3 b/man/io_uring_get_sqe.3
new file mode 100644
index 0000000..24834f3
--- /dev/null
+++ b/man/io_uring_get_sqe.3
@@ -0,0 +1,37 @@
+.\" Copyright (C) 2020 Jens Axboe <axboe@kernel.dk>
+.\" Copyright (C) 2020 Red Hat, Inc.
+.\"
+.\" SPDX-License-Identifier: LGPL-2.0-or-later
+.\"
+.TH io_uring_get_sqe 3 "July 10, 2020" "liburing-0.7" "liburing Manual"
+.SH NAME
+io_uring_get_sqe - get the next vacant event from the submission queue
+.SH SYNOPSIS
+.nf
+.BR "#include <liburing.h>"
+.PP
+.BI "struct io_uring_sqe *io_uring_get_sqe(struct io_uring " *ring );
+.fi
+.PP
+.SH DESCRIPTION
+.PP
+The io_uring_get_sqe() function gets the next vacant event from the submission
+queue belonging to the
+.I ring
+param.
+
+On success io_uring_get_sqe() returns a pointer to the submission queue event.
+On failure NULL is returned.
+
+If a submission queue event is returned, it should be filled out via one of the
+prep functions such as
+.BR io_uring_prep_read (3)
+and submitted via
+.BR io_uring_submit (3).
+
+.SH RETURN VALUE
+.BR io_uring_get_sqe (3)
+returns a pointer to the next submission queue event on success and NULL on
+failure.
+.SH SEE ALSO
+.BR io_uring_submit (3)
diff --git a/man/io_uring_queue_exit.3 b/man/io_uring_queue_exit.3
new file mode 100644
index 0000000..294b5f3
--- /dev/null
+++ b/man/io_uring_queue_exit.3
@@ -0,0 +1,27 @@
+.\" Copyright (C) 2020 Jens Axboe <axboe@kernel.dk>
+.\" Copyright (C) 2020 Red Hat, Inc.
+.\"
+.\" SPDX-License-Identifier: LGPL-2.0-or-later
+.\"
+.TH io_uring_queue_exit 3 "July 10, 2020" "liburing-0.7" "liburing Manual"
+.SH NAME
+io_uring_queue_exit - tear down io_uring submission and completion queues
+.SH SYNOPSIS
+.nf
+.BR "#include <liburing.h>"
+.PP
+.BI "void io_uring_queue_exit(struct io_uring * ring );"
+.fi
+.PP
+.SH DESCRIPTION
+.PP
+.BR io_uring_queue_exit (3)
+will release all resources acquired and initialized by
+.BR io_uring_queue_init (3).
+It first unmaps the memory shared between the application and the kernel and then closes the io_uring file descriptor.
+.SH RETURN VALUE
+None
+.SH SEE ALSO
+.BR io_uring_setup (2),
+.BR mmap (2),
+.BR io_uring_queue_init (3)
diff --git a/man/io_uring_queue_init.3 b/man/io_uring_queue_init.3
new file mode 100644
index 0000000..1980fa4
--- /dev/null
+++ b/man/io_uring_queue_init.3
@@ -0,0 +1,44 @@
+.\" Copyright (C) 2020 Jens Axboe <axboe@kernel.dk>
+.\" Copyright (C) 2020 Red Hat, Inc.
+.\"
+.\" SPDX-License-Identifier: LGPL-2.0-or-later
+.\"
+.TH io_uring_queue_init 3 "July 10, 2020" "liburing-0.7" "liburing Manual"
+.SH NAME
+io_uring_queue_init - setup io_uring submission and completion queues
+.SH SYNOPSIS
+.nf
+.BR "#include <liburing.h>"
+.PP
+.BI "int io_uring_queue_init(unsigned " entries ", struct io_uring *" ring ,
+.BI "                        unsigned " flags );
+.fi
+.PP
+.SH DESCRIPTION
+.PP
+The io_uring_queue_init() function executes the io_uring_setup syscall to
+initialize the submission and completion queues in the kernel with at least
+.I entries
+entries and then maps the resulting file descriptor to memory shared between the
+application and the kernel.
+
+On success io_uring_queue_init() returns 0 and
+.I ring
+will point to the shared memory containing the io_uring queues. On failure
+-errno is returned.
+
+.I flags
+will be passed through to the io_uring_setup syscall (see 
+.BR io_uring_setup (2)).
+
+On success, the resources held by
+.I ring
+should be released via a corresponding call to
+.BR io_uring_queue_exit (3).
+.SH RETURN VALUE
+.BR io_uring_queue_init (3)
+returns 0 on success and -errno on failure.
+.SH SEE ALSO
+.BR io_uring_setup (2),
+.BR mmap (2),
+.BR io_uring_queue_exit (3)
diff --git a/man/io_uring_register.2 b/man/io_uring_register.2
new file mode 100644
index 0000000..5326a87
--- /dev/null
+++ b/man/io_uring_register.2
@@ -0,0 +1,405 @@
+.\" Copyright (C) 2019 Jens Axboe <axboe@kernel.dk>
+.\" Copyright (C) 2019 Red Hat, Inc.
+.\"
+.\" SPDX-License-Identifier: LGPL-2.0-or-later
+.\"
+.TH IO_URING_REGISTER 2 2019-01-17 "Linux" "Linux Programmer's Manual"
+.SH NAME
+io_uring_register \- register files or user buffers for asynchronous I/O 
+.SH SYNOPSIS
+.nf
+.BR "#include <linux/io_uring.h>"
+.PP
+.BI "int io_uring_register(unsigned int " fd ", unsigned int " opcode ,
+.BI "                      void *" arg ", unsigned int " nr_args );
+.fi
+.PP
+.SH DESCRIPTION
+.PP
+
+The
+.BR io_uring_register ()
+system call registers resources (e.g. user buffers, files, eventfd,
+personality, restrictions) for use in an
+.BR io_uring (7)
+instance referenced by
+.IR fd .
+Registering files or user buffers allows the kernel to take long term
+references to internal data structures or create long term mappings of
+application memory, greatly reducing per-I/O overhead.
+
+.I fd
+is the file descriptor returned by a call to
+.BR io_uring_setup (2).
+.I opcode
+can be one of:
+
+.TP
+.B IORING_REGISTER_BUFFERS
+.I arg
+points to a
+.I struct iovec
+array of
+.I nr_args
+entries.  The buffers associated with the iovecs will be locked in
+memory and charged against the user's
+.B RLIMIT_MEMLOCK
+resource limit.  See
+.BR getrlimit (2)
+for more information.  Additionally, there is a size limit of 1GiB per
+buffer.  Currently, the buffers must be anonymous, non-file-backed
+memory, such as that returned by
+.BR malloc (3)
+or
+.BR mmap (2)
+with the
+.B MAP_ANONYMOUS
+flag set.  It is expected that this limitation will be lifted in the
+future. Huge pages are supported as well. Note that the entire huge
+page will be pinned in the kernel, even if only a portion of it is
+used.
+
+After a successful call, the supplied buffers are mapped into the
+kernel and eligible for I/O.  To make use of them, the application
+must specify the
+.B IORING_OP_READ_FIXED
+or
+.B IORING_OP_WRITE_FIXED
+opcodes in the submission queue entry (see the
+.I struct io_uring_sqe
+definition in
+.BR io_uring_enter (2)),
+and set the
+.I buf_index
+field to the desired buffer index.  The memory range described by the
+submission queue entry's
+.I addr
+and
+.I len
+fields must fall within the indexed buffer.
+
+It is perfectly valid to set up a large buffer and then only use part
+of it for an I/O, as long as the range is within the originally mapped
+region.
+
+An application can increase or decrease the size or number of
+registered buffers by first unregistering the existing buffers, and
+then issuing a new call to
+.BR io_uring_register ()
+with the new buffers.
+
+Note that registering buffers will wait for the ring to idle. If the application
+currently has requests in-flight, the registration will wait for those to
+finish before proceeding.
+
+An application need not unregister buffers explicitly before shutting
+down the io_uring instance. Available since 5.1.
+
+.TP
+.B IORING_UNREGISTER_BUFFERS
+This operation takes no argument, and
+.I arg
+must be passed as NULL.  All previously registered buffers associated
+with the io_uring instance will be released. Available since 5.1.
+
+.TP
+.B IORING_REGISTER_FILES
+Register files for I/O.
+.I arg
+contains a pointer to an array of
+.I nr_args
+file descriptors (signed 32 bit integers).
+
+To make use of the registered files, the
+.B IOSQE_FIXED_FILE
+flag must be set in the
+.I flags
+member of the
+.IR "struct io_uring_sqe" ,
+and the
+.I fd
+member is set to the index of the file in the file descriptor array.
+
+The file set may be sparse, meaning that the
+.B fd
+field in the array may be set to
+.BR -1 .
+See
+.B IORING_REGISTER_FILES_UPDATE
+for how to update files in place.
+
+Note that registering files will wait for the ring to idle. If the application
+currently has requests in-flight, the registration will wait for those to
+finish before proceeding. See
+.B IORING_REGISTER_FILES_UPDATE
+for how to update an existing set without that limitation.
+
+Files are automatically unregistered when the io_uring instance is
+torn down. An application need only unregister if it wishes to
+register a new set of fds. Available since 5.1.
+
+.TP
+.B IORING_REGISTER_FILES_UPDATE
+This operation replaces existing files in the registered file set with new
+ones, either turning a sparse entry (one where fd is equal to -1) into a
+real one, removing an existing entry (new one is set to -1), or replacing
+an existing entry with a new existing entry.
+
+.I arg
+must contain a pointer to a struct io_uring_files_update, which contains
+an offset on which to start the update, and an array of file descriptors to
+use for the update.
+.I nr_args
+must contain the number of descriptors in the passed in array. Available
+since 5.5.
+
+File descriptors can be skipped if they are set to
+.BR IORING_REGISTER_FILES_SKIP .
+Skipping an fd will not touch the file associated with the previous
+fd at that index. Available since 5.12.
+
+
+.TP
+.B IORING_UNREGISTER_FILES
+This operation requires no argument, and
+.I arg
+must be passed as NULL.  All previously registered files associated
+with the io_uring instance will be unregistered. Available since 5.1.
+
+.TP
+.B IORING_REGISTER_EVENTFD
+It's possible to use eventfd(2) to get notified of completion events on an
+io_uring instance. If this is desired, an eventfd file descriptor can be
+registered through this operation.
+.I arg
+must contain a pointer to the eventfd file descriptor, and
+.I nr_args
+must be 1. Available since 5.2.
+
+An application can temporarily disable notifications, coming through the
+registered eventfd, by setting the
+.B IORING_CQ_EVENTFD_DISABLED
+bit in the
+.I flags
+field of the CQ ring.
+Available since 5.8.
+
+.TP
+.B IORING_REGISTER_EVENTFD_ASYNC
+This works just like
+.BR IORING_REGISTER_EVENTFD ,
+except notifications are only posted for events that complete in an async
+manner. This means that events that complete inline while being submitted
+do not trigger a notification event. The arguments supplied are the same as
+for
+.BR IORING_REGISTER_EVENTFD .
+Available since 5.6.
+
+.TP
+.B IORING_UNREGISTER_EVENTFD
+Unregister an eventfd file descriptor to stop notifications. Since only one
+eventfd descriptor is currently supported, this operation takes no argument,
+and
+.I arg
+must be passed as NULL and
+.I nr_args
+must be zero. Available since 5.2.
+
+.TP
+.B IORING_REGISTER_PROBE
+This operation returns a structure, io_uring_probe, which contains information
+about the opcodes supported by io_uring on the running kernel.
+.I arg
+must contain a pointer to a struct io_uring_probe, and
+.I nr_args
+must contain the size of the ops array in that probe struct. The ops array
+is of the type io_uring_probe_op, which holds the value of the opcode and
+a flags field. If the flags field has
+.B IO_URING_OP_SUPPORTED
+set, then this opcode is supported on the running kernel. Available since 5.6.
+
+.TP
+.B IORING_REGISTER_PERSONALITY
+This operation registers credentials of the running application with io_uring,
+and returns an id associated with these credentials. Applications wishing to
+share a ring between separate users/processes can pass in this credential id
+in the sqe
+.B personality
+field. If set, that particular sqe will be issued with these credentials. Must
+be invoked with
+.I arg
+set to NULL and
+.I nr_args
+set to zero. Available since 5.6.
+
+.TP
+.B IORING_UNREGISTER_PERSONALITY
+This operation unregisters a previously registered personality with io_uring.
+.I nr_args
+must be set to the id in question, and
+.I arg
+must be set to NULL. Available since 5.6.
+
+.TP
+.B IORING_REGISTER_ENABLE_RINGS
+This operation enables an io_uring ring started in a disabled state
+.RB (IORING_SETUP_R_DISABLED
+was specified in the call to
+.BR io_uring_setup (2)).
+While the io_uring ring is disabled, submissions are not allowed and
+registrations are not restricted.
+
+After the execution of this operation, the io_uring ring is enabled:
+submissions and registration are allowed, but they will
+be validated following the registered restrictions (if any).
+This operation takes no argument, must be invoked with
+.I arg
+set to NULL and
+.I nr_args
+set to zero. Available since 5.10.
+
+.TP
+.B IORING_REGISTER_RESTRICTIONS
+.I arg
+points to a
+.I struct io_uring_restriction
+array of
+.I nr_args
+entries.
+
+With an entry it is possible to allow an
+.BR io_uring_register ()
+.IR opcode ,
+or specify which
+.I opcode
+and
+.I flags
+of the submission queue entry are allowed,
+or require certain
+.I flags
+to be specified (these flags must be set on each submission queue entry).
+
+All the restrictions must be submitted with a single
+.BR io_uring_register ()
+call and they are handled as an allowlist (opcodes and flags not registered,
+are not allowed).
+
+Restrictions can be registered only if the io_uring ring started in a disabled
+state
+.RB (IORING_SETUP_R_DISABLED
+must be specified in the call to
+.BR io_uring_setup (2)).
+
+Available since 5.10.
+
+.SH RETURN VALUE
+
+On success,
+.BR io_uring_register ()
+returns 0.  On error, -1 is returned, and
+.I errno
+is set accordingly.
+
+.SH ERRORS
+.TP
+.B EACCES
+The
+.I opcode
+field is not allowed due to registered restrictions.
+.TP
+.B EBADF
+One or more fds in the
+.I fd
+array are invalid.
+.TP
+.B EBADFD
+.B IORING_REGISTER_ENABLE_RINGS
+or
+.B IORING_REGISTER_RESTRICTIONS
+was specified, but the io_uring ring is not disabled.
+.TP
+.B EBUSY
+.B IORING_REGISTER_BUFFERS
+or
+.B IORING_REGISTER_FILES
+or
+.B IORING_REGISTER_RESTRICTIONS
+was specified, but there were already buffers, files, or restrictions
+registered.
+.TP
+.B EFAULT
+buffer is outside of the process' accessible address space, or
+.I iov_len
+is greater than 1GiB.
+.TP
+.B EINVAL
+.B IORING_REGISTER_BUFFERS
+or
+.B IORING_REGISTER_FILES
+was specified, but
+.I nr_args
+is 0.
+.TP
+.B EINVAL
+.B IORING_REGISTER_BUFFERS
+was specified, but
+.I nr_args
+exceeds
+.BR UIO_MAXIOV .
+.TP
+.B EINVAL
+.B IORING_UNREGISTER_BUFFERS
+or
+.B IORING_UNREGISTER_FILES
+was specified, and
+.I nr_args
+is non-zero or
+.I arg
+is non-NULL.
+.TP
+.B EINVAL
+.B IORING_REGISTER_RESTRICTIONS
+was specified, but
+.I nr_args
+exceeds the maximum allowed number of restrictions or restriction
+.I opcode
+is invalid.
+.TP
+.B EMFILE
+.B IORING_REGISTER_FILES
+was specified and
+.I nr_args
+exceeds the maximum allowed number of files in a fixed file set.
+.TP
+.B EMFILE
+.B IORING_REGISTER_FILES
+was specified and adding
+.I nr_args
+file references would exceed the maximum allowed number of files the user
+is allowed to have according to the
+.B
+RLIMIT_NOFILE
+resource limit and the caller does not have
+.B CAP_SYS_RESOURCE
+capability. Note that this is a per user limit, not per process.
+.TP
+.B ENOMEM
+Insufficient kernel resources are available, or the caller had a
+non-zero
+.B RLIMIT_MEMLOCK
+soft resource limit, but tried to lock more memory than the limit
+permitted.  This limit is not enforced if the process is privileged
+.RB ( CAP_IPC_LOCK ).
+.TP
+.B ENXIO
+.B IORING_UNREGISTER_BUFFERS
+or
+.B IORING_UNREGISTER_FILES
+was specified, but there were no buffers or files registered.
+.TP
+.B ENXIO
+Attempt to register files or buffers on an io_uring instance that is already
+undergoing file or buffer registration, or is being torn down.
+.TP
+.B EOPNOTSUPP
+User buffers point to file-backed memory.
diff --git a/man/io_uring_setup.2 b/man/io_uring_setup.2
new file mode 100644
index 0000000..cb8eba9
--- /dev/null
+++ b/man/io_uring_setup.2
@@ -0,0 +1,471 @@
+.\" Copyright (C) 2019 Jens Axboe <axboe@kernel.dk>
+.\" Copyright (C) 2019 Jon Corbet <corbet@lwn.net>
+.\" Copyright (C) 2019 Red Hat, Inc.
+.\"
+.\" SPDX-License-Identifier: LGPL-2.0-or-later
+.\"
+.TH IO_URING_SETUP 2 2019-01-29 "Linux" "Linux Programmer's Manual"
+.SH NAME
+io_uring_setup \- setup a context for performing asynchronous I/O
+.SH SYNOPSIS
+.nf
+.BR "#include <linux/io_uring.h>"
+.PP
+.BI "int io_uring_setup(u32 " entries ", struct io_uring_params *" p );
+.fi
+.PP
+.SH DESCRIPTION
+.PP
+The io_uring_setup() system call sets up a submission queue (SQ) and
+completion queue (CQ) with at least
+.I entries
+entries, and returns a file descriptor which can be used to perform
+subsequent operations on the io_uring instance.  The submission and
+completion queues are shared between userspace and the kernel, which
+eliminates the need to copy data when initiating and completing I/O.
+
+.I params
+is used by the application to pass options to the kernel, and by the
+kernel to convey information about the ring buffers.
+.PP
+.in +4n
+.EX
+struct io_uring_params {
+    __u32 sq_entries;
+    __u32 cq_entries;
+    __u32 flags;
+    __u32 sq_thread_cpu;
+    __u32 sq_thread_idle;
+    __u32 features;
+    __u32 resv[4];
+    struct io_sqring_offsets sq_off;
+    struct io_cqring_offsets cq_off;
+};
+.EE
+.in
+.PP
+The
+.IR flags ,
+.IR sq_thread_cpu ,
+and
+.I sq_thread_idle
+fields are used to configure the io_uring instance.
+.I flags
+is a bit mask of 0 or more of the following values ORed
+together:
+.TP
+.B IORING_SETUP_IOPOLL
+Perform busy-waiting for an I/O completion, as opposed to getting
+notifications via an asynchronous IRQ (Interrupt Request).  The file
+system (if any) and block device must support polling in order for
+this to work.  Busy-waiting provides lower latency, but may consume
+more CPU resources than interrupt driven I/O.  Currently, this feature
+is usable only on a file descriptor opened using the
+.B O_DIRECT
+flag.  When a read or write is submitted to a polled context, the
+application must poll for completions on the CQ ring by calling
+.BR io_uring_enter (2).
+It is illegal to mix and match polled and non-polled I/O on an io_uring
+instance.
+
+.TP
+.B IORING_SETUP_SQPOLL
+When this flag is specified, a kernel thread is created to perform
+submission queue polling.  An io_uring instance configured in this way
+enables an application to issue I/O without ever context switching
+into the kernel.  By using the submission queue to fill in new
+submission queue entries and watching for completions on the
+completion queue, the application can submit and reap I/Os without
+doing a single system call.
+
+If the kernel thread is idle for more than
+.I sq_thread_idle
+milliseconds, it will set the
+.B IORING_SQ_NEED_WAKEUP
+bit in the
+.I flags
+field of the
+.IR "struct io_sq_ring" .
+When this happens, the application must call
+.BR io_uring_enter (2)
+to wake the kernel thread.  If I/O is kept busy, the kernel thread
+will never sleep.  An application making use of this feature will need
+to guard the
+.BR io_uring_enter (2)
+call with the following code sequence:
+
+.in +4n
+.EX
+/*
+ * Ensure that the wakeup flag is read after the tail pointer
+ * has been written. It's important to use memory load acquire
+ * semantics for the flags read, as otherwise the application
+ * and the kernel might not agree on the consistency of the
+ * wakeup flag.
+ */
+unsigned flags = atomic_load_relaxed(sq_ring->flags);
+if (flags & IORING_SQ_NEED_WAKEUP)
+    io_uring_enter(fd, 0, 0, IORING_ENTER_SQ_WAKEUP);
+.EE
+.in
+
+where
+.I sq_ring
+is a submission queue ring setup using the
+.I struct io_sqring_offsets
+described below.
+.PP
+.\" NOTE(review): was a malformed ".TP"/".BR" pair; this text continues IORING_SETUP_SQPOLL
+Before version 5.11 of the Linux kernel, to successfully use this feature, the
+application must register a set of files to be used for IO through
+.BR io_uring_register (2)
+using the
+.B IORING_REGISTER_FILES
+opcode. Failure to do so will result in submitted IO being errored with
+.BR EBADF .
+The presence of this feature can be detected by the
+.B IORING_FEAT_SQPOLL_NONFIXED
+feature flag.
+In version 5.11 and later, it is no longer necessary to register files to use
+this feature. 5.11 also allows using this as non-root, if the user has the
+.B CAP_SYS_NICE
+capability.
+.TP
+.B IORING_SETUP_SQ_AFF
+If this flag is specified, then the poll thread will be bound to the
+cpu set in the
+.I sq_thread_cpu
+field of the
+.IR "struct io_uring_params" .
+This flag is only meaningful when
+.B IORING_SETUP_SQPOLL
+is specified. When cgroup setting
+.I cpuset.cpus
+changes (typically in container environment), the bounded cpu set may be
+changed as well.
+.TP
+.B IORING_SETUP_CQSIZE
+Create the completion queue with
+.IR "struct io_uring_params.cq_entries"
+entries.  The value must be greater than
+.IR entries ,
+and may be rounded up to the next power-of-two.
+.TP
+.B IORING_SETUP_CLAMP
+If this flag is specified, and if
+.IR entries
+exceeds
+.B IORING_MAX_ENTRIES ,
+then
+.IR entries
+will be clamped at
+.B IORING_MAX_ENTRIES .
+If the flag
+.BR IORING_SETUP_SQPOLL
+is set, and if the value of
+.IR "struct io_uring_params.cq_entries"
+exceeds
+.B IORING_MAX_CQ_ENTRIES ,
+then it will be clamped at
+.B IORING_MAX_CQ_ENTRIES .
+.TP
+.B IORING_SETUP_ATTACH_WQ
+This flag should be set in conjunction with 
+.IR "struct io_uring_params.wq_fd"
+being set to an existing io_uring ring file descriptor. When set, the
+io_uring instance being created will share the asynchronous worker
+thread backend of the specified io_uring ring, rather than create a new
+separate thread pool.
+.TP
+.B IORING_SETUP_R_DISABLED
+If this flag is specified, the io_uring ring starts in a disabled state.
+In this state, restrictions can be registered, but submissions are not allowed.
+See
+.BR io_uring_register (2)
+for details on how to enable the ring. Available since 5.10.
+.PP
+If no flags are specified, the io_uring instance is set up for
+interrupt driven I/O.  I/O may be submitted using
+.BR io_uring_enter (2)
+and can be reaped by polling the completion queue.
+
+The
+.I resv
+array must be initialized to zero.
+
+.I features
+is filled in by the kernel, which specifies various features supported
+by current kernel version.
+.TP
+.B IORING_FEAT_SINGLE_MMAP
+If this flag is set, the two SQ and CQ rings can be mapped with a single
+.I mmap(2)
+call. The SQEs must still be allocated separately. This brings the necessary
+.I mmap(2)
+calls down from three to two.
+.TP
+.B IORING_FEAT_NODROP
+If this flag is set, io_uring supports never dropping completion events.
+If a completion event occurs and the CQ ring is full, the kernel stores
+the event internally until such a time that the CQ ring has room for more
+entries. If this overflow condition is entered, attempting to submit more
+IO will fail with the
+.B -EBUSY
+error value, if it can't flush the overflown events to the CQ ring. If this
+happens, the application must reap events from the CQ ring and attempt the
+submit again.
+.TP
+.B IORING_FEAT_SUBMIT_STABLE
+If this flag is set, applications can be certain that any data for
+async offload has been consumed when the kernel has consumed the SQE.
+.TP
+.B IORING_FEAT_RW_CUR_POS
+If this flag is set, applications can specify
+.I offset
+== -1 with
+.BR IORING_OP_{READV,WRITEV} ,
+.BR IORING_OP_{READ,WRITE}_FIXED ,
+and
+.B IORING_OP_{READ,WRITE}
+to mean current file position, which behaves
+like
+.I preadv2(2)
+and
+.I pwritev2(2)
+with
+.I offset
+== -1. It'll use (and update) the current file position. This obviously comes
+with the caveat that if the application has multiple reads or writes in flight,
+then the end result will not be as expected. This is similar to threads sharing
+a file descriptor and doing IO using the current file position.
+.TP
+.B IORING_FEAT_CUR_PERSONALITY
+If this flag is set, then io_uring guarantees that both sync and async
+execution of a request assumes the credentials of the task that called
+.I
+io_uring_enter(2)
+to queue the requests. If this flag isn't set, then requests are issued with
+the credentials of the task that originally registered the io_uring. If only
+one task is using a ring, then this flag doesn't matter as the credentials
+will always be the same. Note that this is the default behavior, tasks can
+still register different personalities through
+.I
+io_uring_register(2)
+with
+.B IORING_REGISTER_PERSONALITY
+and specify the personality to use in the sqe.
+.TP
+.B IORING_FEAT_FAST_POLL
+If this flag is set, then io_uring supports using an internal poll mechanism
+to drive data/space readiness. This means that requests that cannot read or
+write data to a file no longer need to be punted to an async thread for
+handling, instead they will begin operation when the file is ready. This is
+similar to doing poll + read/write in userspace, but eliminates the need to do
+so. If this flag is set, requests waiting on space/data consume a lot less
+resources doing so as they are not blocking a thread.
+.TP
+.B IORING_FEAT_POLL_32BITS
+If this flag is set, the
+.B IORING_OP_POLL_ADD
+command accepts the full 32-bit range of epoll based flags. Most notably
+.B EPOLLEXCLUSIVE
+which allows exclusive (waking single waiters) behavior.
+.TP
+.B IORING_FEAT_SQPOLL_NONFIXED
+If this flag is set, the
+.B IORING_SETUP_SQPOLL
+feature no longer requires the use of fixed files. Any normal file descriptor
+can be used for IO commands without needing registration.
+
+.PP
+The rest of the fields in the
+.I struct io_uring_params
+are filled in by the kernel, and provide the information necessary to
+memory map the submission queue, completion queue, and the array of
+submission queue entries.
+.I sq_entries
+specifies the number of submission queue entries allocated.
+.I sq_off
+describes the offsets of various ring buffer fields:
+.PP
+.in +4n
+.EX
+struct io_sqring_offsets {
+    __u32 head;
+    __u32 tail;
+    __u32 ring_mask;
+    __u32 ring_entries;
+    __u32 flags;
+    __u32 dropped;
+    __u32 array;
+    __u32 resv[3];
+};
+.EE
+.in
+.PP
+Taken together,
+.I sq_entries
+and
+.I sq_off
+provide all of the information necessary for accessing the submission
+queue ring buffer and the submission queue entry array.  The
+submission queue can be mapped with a call like:
+.PP
+.in +4n
+.EX
+ptr = mmap(0, sq_off.array + sq_entries * sizeof(__u32),
+           PROT_READ|PROT_WRITE, MAP_SHARED|MAP_POPULATE,
+           ring_fd, IORING_OFF_SQ_RING);
+.EE
+.in
+.PP
+where
+.I sq_off
+is the
+.I io_sqring_offsets
+structure, and
+.I ring_fd
+is the file descriptor returned from
+.BR io_uring_setup (2).
+The addition of
+.I sq_off.array
+to the length of the region accounts for the fact that the ring
+is located at the end of the data structure.  As an example, the ring
+buffer head pointer can be accessed by adding
+.I sq_off.head
+to the address returned from
+.BR mmap (2):
+.PP
+.in +4n
+.EX
+head = ptr + sq_off.head;
+.EE
+.in
+
+The
+.I flags
+field is used by the kernel to communicate state information to the
+application.  Currently, it is used to inform the application when a
+call to
+.BR io_uring_enter (2)
+is necessary.  See the documentation for the
+.B IORING_SETUP_SQPOLL
+flag above.
+The
+.I dropped
+member is incremented for each invalid submission queue entry
+encountered in the ring buffer.
+
+The head and tail track the ring buffer state.  The tail is
+incremented by the application when submitting new I/O, and the head
+is incremented by the kernel when the I/O has been successfully
+submitted.  Determining the index of the head or tail into the ring is
+accomplished by applying a mask:
+.PP
+.in +4n
+.EX
+index = tail & ring_mask;
+.EE
+.in
+.PP
+The array of submission queue entries is mapped with:
+.PP
+.in +4n
+.EX
+sqentries = mmap(0, sq_entries * sizeof(struct io_uring_sqe),
+                 PROT_READ|PROT_WRITE, MAP_SHARED|MAP_POPULATE,
+                 ring_fd, IORING_OFF_SQES);
+.EE
+.in
+.PP
+The completion queue is described by
+.I cq_entries
+and
+.I cq_off
+shown here:
+.PP
+.in +4n
+.EX
+struct io_cqring_offsets {
+    __u32 head;
+    __u32 tail;
+    __u32 ring_mask;
+    __u32 ring_entries;
+    __u32 overflow;
+    __u32 cqes;
+    __u32 flags;
+    __u32 resv[3];
+};
+.EE
+.in
+.PP
+The completion queue is simpler, since the entries are not separated
+from the queue itself, and can be mapped with:
+.PP
+.in +4n
+.EX
+ptr = mmap(0, cq_off.cqes + cq_entries * sizeof(struct io_uring_cqe),
+           PROT_READ|PROT_WRITE, MAP_SHARED|MAP_POPULATE, ring_fd,
+           IORING_OFF_CQ_RING);
+.EE
+.in
+.PP
+Closing the file descriptor returned by
+.BR io_uring_setup (2)
+will free all resources associated with the io_uring context.
+.PP
+.SH RETURN VALUE
+.BR io_uring_setup (2)
+returns a new file descriptor on success.  The application may then
+provide the file descriptor in a subsequent
+.BR mmap (2)
+call to map the submission and completion queues, or to the
+.BR io_uring_register (2)
+or
+.BR io_uring_enter (2)
+system calls.
+
+On error, -1 is returned and
+.I errno
+is set appropriately.
+.PP
+.SH ERRORS
+.TP
+.B EFAULT
+params is outside your accessible address space.
+.TP
+.B EINVAL
+The resv array contains non-zero data, p.flags contains an unsupported
+flag,
+.I entries
+is out of bounds,
+.B IORING_SETUP_SQ_AFF
+was specified, but
+.B IORING_SETUP_SQPOLL
+was not, or
+.B IORING_SETUP_CQSIZE
+was specified, but
+.I io_uring_params.cq_entries
+was invalid.
+.TP
+.B EMFILE
+The per-process limit on the number of open file descriptors has been
+reached (see the description of
+.B RLIMIT_NOFILE
+in
+.BR getrlimit (2)).
+.TP
+.B ENFILE
+The system-wide limit on the total number of open files has been
+reached.
+.TP
+.B ENOMEM
+Insufficient kernel resources are available.
+.TP
+.B EPERM
+.B IORING_SETUP_SQPOLL
+was specified, but the effective user ID of the caller did not have sufficient
+privileges.
+.SH SEE ALSO
+.BR io_uring_register (2),
+.BR io_uring_enter (2)
diff --git a/src/Makefile b/src/Makefile
new file mode 100644
index 0000000..dfca826
--- /dev/null
+++ b/src/Makefile
@@ -0,0 +1,74 @@
+prefix ?= /usr
+includedir ?= $(prefix)/include
+libdir ?= $(prefix)/lib
+libdevdir ?= $(prefix)/lib
+
+CPPFLAGS ?=
+override CPPFLAGS += -Iinclude/ -include ../config-host.h
+CFLAGS ?= -g -fomit-frame-pointer -O2
+override CFLAGS += -Wall -Wextra -Wno-unused-parameter -Wno-sign-compare
+SO_CFLAGS=-fPIC $(CFLAGS)
+L_CFLAGS=$(CFLAGS)
+LINK_FLAGS=
+LINK_FLAGS+=$(LDFLAGS)
+ENABLE_SHARED ?= 1
+
+soname=liburing.so.2
+minor=0
+micro=0
+libname=$(soname).$(minor).$(micro)
+all_targets += liburing.a
+
+ifeq ($(ENABLE_SHARED),1)
+all_targets += $(libname)
+endif
+
+include ../Makefile.quiet
+
+ifneq ($(MAKECMDGOALS),clean)
+include ../config-host.mak
+endif
+
+all: $(all_targets)
+
+liburing_srcs := setup.c queue.c syscall.c register.c
+
+liburing_objs := $(patsubst %.c,%.ol,$(liburing_srcs))
+liburing_sobjs := $(patsubst %.c,%.os,$(liburing_srcs))
+
+$(liburing_objs) $(liburing_sobjs): include/liburing/io_uring.h
+
+%.os: %.c
+	$(QUIET_CC)$(CC) $(CPPFLAGS) $(SO_CFLAGS) -c -o $@ $<
+
+%.ol: %.c
+	$(QUIET_CC)$(CC) $(CPPFLAGS) $(L_CFLAGS) -c -o $@ $<
+
+AR ?= ar
+RANLIB ?= ranlib
+liburing.a: $(liburing_objs)
+	@rm -f liburing.a
+	$(QUIET_AR)$(AR) r liburing.a $^
+	$(QUIET_RANLIB)$(RANLIB) liburing.a
+
+$(libname): $(liburing_sobjs) liburing.map
+	$(QUIET_CC)$(CC) $(SO_CFLAGS) -shared -Wl,--version-script=liburing.map -Wl,-soname=$(soname) -o $@ $(liburing_sobjs) $(LINK_FLAGS)
+
+install: $(all_targets)
+	install -D -m 644 include/liburing/io_uring.h $(includedir)/liburing/io_uring.h
+	install -D -m 644 include/liburing.h $(includedir)/liburing.h
+	install -D -m 644 include/liburing/compat.h $(includedir)/liburing/compat.h
+	install -D -m 644 include/liburing/barrier.h $(includedir)/liburing/barrier.h
+	install -D -m 644 liburing.a $(libdevdir)/liburing.a
+ifeq ($(ENABLE_SHARED),1)
+	install -D -m 755 $(libname) $(libdir)/$(libname)
+	ln -sf $(libname) $(libdir)/$(soname)
+	ln -sf $(relativelibdir)$(libname) $(libdevdir)/liburing.so
+endif
+
+$(liburing_objs): include/liburing.h
+
+clean:
+	@rm -f $(all_targets) $(liburing_objs) $(liburing_sobjs) $(soname).new
+	@rm -f *.so* *.a *.o
+	@rm -f include/liburing/compat.h
diff --git a/src/include/liburing.h b/src/include/liburing.h
new file mode 100644
index 0000000..51dc602
--- /dev/null
+++ b/src/include/liburing.h
@@ -0,0 +1,700 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef LIB_URING_H
+#define LIB_URING_H
+
+#ifndef _XOPEN_SOURCE
+#define _XOPEN_SOURCE 500 /* Required for glibc to expose sigset_t */
+#endif
+
+#include <sys/socket.h>
+#include <sys/uio.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <inttypes.h>
+#include <time.h>
+#include <linux/swab.h>
+#include "liburing/compat.h"
+#include "liburing/io_uring.h"
+#include "liburing/barrier.h"
+
+#ifndef uring_unlikely
+#  define uring_unlikely(cond)      __builtin_expect(!!(cond), 0)
+#endif
+
+#ifndef uring_likely
+#  define uring_likely(cond)        __builtin_expect(!!(cond), 1)
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Library interface to io_uring
+ */
+struct io_uring_sq {
+	unsigned *khead;
+	unsigned *ktail;
+	unsigned *kring_mask;
+	unsigned *kring_entries;
+	unsigned *kflags;
+	unsigned *kdropped;
+	unsigned *array;
+	struct io_uring_sqe *sqes;
+
+	unsigned sqe_head;
+	unsigned sqe_tail;
+
+	size_t ring_sz;
+	void *ring_ptr;
+
+	unsigned pad[4];
+};
+
+struct io_uring_cq {
+	unsigned *khead;
+	unsigned *ktail;
+	unsigned *kring_mask;
+	unsigned *kring_entries;
+	unsigned *kflags;
+	unsigned *koverflow;
+	struct io_uring_cqe *cqes;
+
+	size_t ring_sz;
+	void *ring_ptr;
+
+	unsigned pad[4];
+};
+
+struct io_uring {
+	struct io_uring_sq sq;
+	struct io_uring_cq cq;
+	unsigned flags;
+	int ring_fd;
+
+	unsigned features;
+	unsigned pad[3];
+};
+
+/*
+ * Library interface
+ */
+
+/*
+ * return an allocated io_uring_probe structure, or NULL if probe fails (for
+ * example, if it is not available). The caller is responsible for freeing it
+ */
+extern struct io_uring_probe *io_uring_get_probe_ring(struct io_uring *ring);
+/* same as io_uring_get_probe_ring, but takes care of ring init and teardown */
+extern struct io_uring_probe *io_uring_get_probe(void);
+
+/*
+ * frees a probe allocated through io_uring_get_probe() or
+ * io_uring_get_probe_ring()
+ */
+extern void io_uring_free_probe(struct io_uring_probe *probe);
+
+static inline int io_uring_opcode_supported(const struct io_uring_probe *p, int op)
+{
+	if (op > p->last_op)
+		return 0;
+	return (p->ops[op].flags & IO_URING_OP_SUPPORTED) != 0;
+}
+
+extern int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
+	struct io_uring_params *p);
+extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
+	unsigned flags);
+extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
+	struct io_uring *ring);
+extern int io_uring_ring_dontfork(struct io_uring *ring);
+extern void io_uring_queue_exit(struct io_uring *ring);
+unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
+	struct io_uring_cqe **cqes, unsigned count);
+extern int io_uring_wait_cqes(struct io_uring *ring,
+	struct io_uring_cqe **cqe_ptr, unsigned wait_nr,
+	struct __kernel_timespec *ts, sigset_t *sigmask);
+extern int io_uring_wait_cqe_timeout(struct io_uring *ring,
+	struct io_uring_cqe **cqe_ptr, struct __kernel_timespec *ts);
+extern int io_uring_submit(struct io_uring *ring);
+extern int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr);
+extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
+
+extern int io_uring_register_buffers(struct io_uring *ring,
+					const struct iovec *iovecs,
+					unsigned nr_iovecs);
+extern int io_uring_unregister_buffers(struct io_uring *ring);
+extern int io_uring_register_files(struct io_uring *ring, const int *files,
+					unsigned nr_files);
+extern int io_uring_unregister_files(struct io_uring *ring);
+extern int io_uring_register_files_update(struct io_uring *ring, unsigned off,
+					int *files, unsigned nr_files);
+extern int io_uring_register_eventfd(struct io_uring *ring, int fd);
+extern int io_uring_register_eventfd_async(struct io_uring *ring, int fd);
+extern int io_uring_unregister_eventfd(struct io_uring *ring);
+extern int io_uring_register_probe(struct io_uring *ring,
+					struct io_uring_probe *p, unsigned nr);
+extern int io_uring_register_personality(struct io_uring *ring);
+extern int io_uring_unregister_personality(struct io_uring *ring, int id);
+extern int io_uring_register_restrictions(struct io_uring *ring,
+					  struct io_uring_restriction *res,
+					  unsigned int nr_res);
+extern int io_uring_enable_rings(struct io_uring *ring);
+extern int __io_uring_sqring_wait(struct io_uring *ring);
+
+/*
+ * Helper for the peek/wait single cqe functions. Exported because of that,
+ * but probably shouldn't be used directly in an application.
+ */
+extern int __io_uring_get_cqe(struct io_uring *ring,
+			      struct io_uring_cqe **cqe_ptr, unsigned submit,
+			      unsigned wait_nr, sigset_t *sigmask);
+
+#define LIBURING_UDATA_TIMEOUT	((__u64) -1)
+
+#define io_uring_for_each_cqe(ring, head, cqe)				\
+	/*								\
+	 * io_uring_smp_load_acquire() enforces the order of tail	\
+	 * and CQE reads.						\
+	 */								\
+	for (head = *(ring)->cq.khead;					\
+	     (cqe = (head != io_uring_smp_load_acquire((ring)->cq.ktail) ? \
+		&(ring)->cq.cqes[head & (*(ring)->cq.kring_mask)] : NULL)); \
+	     head++)							\
+
+/*
+ * Must be called after io_uring_for_each_cqe()
+ */
+static inline void io_uring_cq_advance(struct io_uring *ring,
+				       unsigned nr)
+{
+	if (nr) {
+		struct io_uring_cq *cq = &ring->cq;
+
+		/*
+		 * Ensure that the kernel only sees the new value of the head
+		 * index after the CQEs have been read.
+		 */
+		io_uring_smp_store_release(cq->khead, *cq->khead + nr);
+	}
+}
+
+/*
+ * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
+ * been processed by the application.
+ */
+static inline void io_uring_cqe_seen(struct io_uring *ring,
+				     struct io_uring_cqe *cqe)
+{
+	if (cqe)
+		io_uring_cq_advance(ring, 1);
+}
+
+/*
+ * Command prep helpers
+ */
+static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
+{
+	sqe->user_data = (unsigned long) data;
+}
+
+static inline void *io_uring_cqe_get_data(const struct io_uring_cqe *cqe)
+{
+	return (void *) (uintptr_t) cqe->user_data;
+}
+
+static inline void io_uring_sqe_set_flags(struct io_uring_sqe *sqe,
+					  unsigned flags)
+{
+	sqe->flags = flags;
+}
+
+static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
+				    const void *addr, unsigned len,
+				    __u64 offset)
+{
+	sqe->opcode = op;
+	sqe->flags = 0;
+	sqe->ioprio = 0;
+	sqe->fd = fd;
+	sqe->off = offset;
+	sqe->addr = (unsigned long) addr;
+	sqe->len = len;
+	sqe->rw_flags = 0;
+	sqe->user_data = 0;
+	sqe->__pad2[0] = sqe->__pad2[1] = sqe->__pad2[2] = 0;
+}
+
+/**
+ * @pre Either fd_in or fd_out must be a pipe.
+ * @param off_in If fd_in refers to a pipe, off_in must be (int64_t) -1;
+ *               If fd_in does not refer to a pipe and off_in is (int64_t) -1, then bytes are read
+ *               from fd_in starting from the file offset and it is adjusted appropriately;
+ *               If fd_in does not refer to a pipe and off_in is not (int64_t) -1, then the
+ *               starting offset of fd_in will be off_in.
+ * @param off_out The description of off_in also applies to off_out.
+ * @param splice_flags see man splice(2) for description of flags.
+ *
+ * This splice operation can be used to implement sendfile by splicing to an intermediate pipe
+ * first, then splice to the final destination.
+ * In fact, the implementation of sendfile in kernel uses splice internally.
+ *
+ * NOTE that even if fd_in or fd_out refers to a pipe, the splice operation can still fail with
+ * EINVAL if one of the fd doesn't explicitly support splice operation, e.g. reading from terminal
+ * is unsupported from kernel 5.7 to 5.11.
+ * Check issue #291 for more information.
+ */
+static inline void io_uring_prep_splice(struct io_uring_sqe *sqe,
+					int fd_in, int64_t off_in,
+					int fd_out, int64_t off_out,
+					unsigned int nbytes,
+					unsigned int splice_flags)
+{
+	io_uring_prep_rw(IORING_OP_SPLICE, sqe, fd_out, NULL, nbytes, off_out);
+	sqe->splice_off_in = off_in;
+	sqe->splice_fd_in = fd_in;
+	sqe->splice_flags = splice_flags;
+}
+
+static inline void io_uring_prep_tee(struct io_uring_sqe *sqe,
+				     int fd_in, int fd_out,
+				     unsigned int nbytes,
+				     unsigned int splice_flags)
+{
+	io_uring_prep_rw(IORING_OP_TEE, sqe, fd_out, NULL, nbytes, 0);
+	sqe->splice_off_in = 0;
+	sqe->splice_fd_in = fd_in;
+	sqe->splice_flags = splice_flags;
+}
+
+static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
+				       const struct iovec *iovecs,
+				       unsigned nr_vecs, off_t offset)
+{
+	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
+}
+
+static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
+					    void *buf, unsigned nbytes,
+					    off_t offset, int buf_index)
+{
+	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
+	sqe->buf_index = buf_index;
+}
+
+static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
+					const struct iovec *iovecs,
+					unsigned nr_vecs, off_t offset)
+{
+	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
+}
+
+static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
+					     const void *buf, unsigned nbytes,
+					     off_t offset, int buf_index)
+{
+	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
+	sqe->buf_index = buf_index;
+}
+
+static inline void io_uring_prep_recvmsg(struct io_uring_sqe *sqe, int fd,
+					 struct msghdr *msg, unsigned flags)
+{
+	io_uring_prep_rw(IORING_OP_RECVMSG, sqe, fd, msg, 1, 0);
+	sqe->msg_flags = flags;
+}
+
+static inline void io_uring_prep_sendmsg(struct io_uring_sqe *sqe, int fd,
+					 const struct msghdr *msg, unsigned flags)
+{
+	io_uring_prep_rw(IORING_OP_SENDMSG, sqe, fd, msg, 1, 0);
+	sqe->msg_flags = flags;
+}
+
+static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
+					  unsigned poll_mask)
+{
+	io_uring_prep_rw(IORING_OP_POLL_ADD, sqe, fd, NULL, 0, 0);
+#if __BYTE_ORDER == __BIG_ENDIAN
+	poll_mask = __swahw32(poll_mask);
+#endif
+	sqe->poll32_events = poll_mask;
+}
+
+static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
+					     void *user_data)
+{
+	io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, user_data, 0, 0);
+}
+
+static inline void io_uring_prep_poll_update(struct io_uring_sqe *sqe,
+					     void *old_user_data,
+					     void *new_user_data,
+					     unsigned poll_mask, unsigned flags)
+{
+	io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, old_user_data, flags,
+			 (__u64)new_user_data);
+#if __BYTE_ORDER == __BIG_ENDIAN
+	poll_mask = __swahw32(poll_mask);
+#endif
+	sqe->poll32_events = poll_mask;
+}
+
+static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
+				       unsigned fsync_flags)
+{
+	io_uring_prep_rw(IORING_OP_FSYNC, sqe, fd, NULL, 0, 0);
+	sqe->fsync_flags = fsync_flags;
+}
+
+static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
+{
+	io_uring_prep_rw(IORING_OP_NOP, sqe, -1, NULL, 0, 0);
+}
+
+static inline void io_uring_prep_timeout(struct io_uring_sqe *sqe,
+					 struct __kernel_timespec *ts,
+					 unsigned count, unsigned flags)
+{
+	io_uring_prep_rw(IORING_OP_TIMEOUT, sqe, -1, ts, 1, count);
+	sqe->timeout_flags = flags;
+}
+
+static inline void io_uring_prep_timeout_remove(struct io_uring_sqe *sqe,
+						__u64 user_data, unsigned flags)
+{
+	io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1,
+				(void *)(unsigned long)user_data, 0, 0);
+	sqe->timeout_flags = flags;
+}
+
+static inline void io_uring_prep_timeout_update(struct io_uring_sqe *sqe,
+						struct __kernel_timespec *ts,
+						__u64 user_data, unsigned flags)
+{
+	io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1,
+				(void *)(unsigned long)user_data, 0,
+				(uintptr_t)ts);
+	sqe->timeout_flags = flags | IORING_TIMEOUT_UPDATE;
+}
+
+static inline void io_uring_prep_accept(struct io_uring_sqe *sqe, int fd,
+					struct sockaddr *addr,
+					socklen_t *addrlen, int flags)
+{
+	io_uring_prep_rw(IORING_OP_ACCEPT, sqe, fd, addr, 0,
+				(__u64) (unsigned long) addrlen);
+	sqe->accept_flags = flags;
+}
+
+static inline void io_uring_prep_cancel(struct io_uring_sqe *sqe, void *user_data,
+					int flags)
+{
+	io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, -1, user_data, 0, 0);
+	sqe->cancel_flags = flags;
+}
+
+static inline void io_uring_prep_link_timeout(struct io_uring_sqe *sqe,
+					      struct __kernel_timespec *ts,
+					      unsigned flags)
+{
+	io_uring_prep_rw(IORING_OP_LINK_TIMEOUT, sqe, -1, ts, 1, 0);
+	sqe->timeout_flags = flags;
+}
+
+static inline void io_uring_prep_connect(struct io_uring_sqe *sqe, int fd,
+					 const struct sockaddr *addr,
+					 socklen_t addrlen)
+{
+	io_uring_prep_rw(IORING_OP_CONNECT, sqe, fd, addr, 0, addrlen);
+}
+
+static inline void io_uring_prep_files_update(struct io_uring_sqe *sqe,
+					      int *fds, unsigned nr_fds,
+					      int offset)
+{
+	io_uring_prep_rw(IORING_OP_FILES_UPDATE, sqe, -1, fds, nr_fds, offset);
+}
+
+static inline void io_uring_prep_fallocate(struct io_uring_sqe *sqe, int fd,
+					   int mode, off_t offset, off_t len)
+{
+
+	io_uring_prep_rw(IORING_OP_FALLOCATE, sqe, fd,
+			(const uintptr_t *) (unsigned long) len, mode, offset);
+}
+
+static inline void io_uring_prep_openat(struct io_uring_sqe *sqe, int dfd,
+					const char *path, int flags, mode_t mode)
+{
+	io_uring_prep_rw(IORING_OP_OPENAT, sqe, dfd, path, mode, 0);
+	sqe->open_flags = flags;
+}
+
+static inline void io_uring_prep_close(struct io_uring_sqe *sqe, int fd)
+{
+	io_uring_prep_rw(IORING_OP_CLOSE, sqe, fd, NULL, 0, 0);
+}
+
+static inline void io_uring_prep_read(struct io_uring_sqe *sqe, int fd,
+				      void *buf, unsigned nbytes, off_t offset)
+{
+	io_uring_prep_rw(IORING_OP_READ, sqe, fd, buf, nbytes, offset);
+}
+
+static inline void io_uring_prep_write(struct io_uring_sqe *sqe, int fd,
+				       const void *buf, unsigned nbytes, off_t offset)
+{
+	io_uring_prep_rw(IORING_OP_WRITE, sqe, fd, buf, nbytes, offset);
+}
+
+struct statx;
+static inline void io_uring_prep_statx(struct io_uring_sqe *sqe, int dfd,
+				const char *path, int flags, unsigned mask,
+				struct statx *statxbuf)
+{
+	io_uring_prep_rw(IORING_OP_STATX, sqe, dfd, path, mask,
+				(__u64) (unsigned long) statxbuf);
+	sqe->statx_flags = flags;
+}
+
+static inline void io_uring_prep_fadvise(struct io_uring_sqe *sqe, int fd,
+					 off_t offset, off_t len, int advice)
+{
+	io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, NULL, len, offset);
+	sqe->fadvise_advice = advice;
+}
+
+static inline void io_uring_prep_madvise(struct io_uring_sqe *sqe, void *addr,
+					 off_t length, int advice)
+{
+	io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, length, 0);
+	sqe->fadvise_advice = advice;
+}
+
+static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
+				      const void *buf, size_t len, int flags)
+{
+	io_uring_prep_rw(IORING_OP_SEND, sqe, sockfd, buf, len, 0);
+	sqe->msg_flags = flags;
+}
+
+static inline void io_uring_prep_recv(struct io_uring_sqe *sqe, int sockfd,
+				      void *buf, size_t len, int flags)
+{
+	io_uring_prep_rw(IORING_OP_RECV, sqe, sockfd, buf, len, 0);
+	sqe->msg_flags = flags;
+}
+
+static inline void io_uring_prep_openat2(struct io_uring_sqe *sqe, int dfd,
+					const char *path, struct open_how *how)
+{
+	io_uring_prep_rw(IORING_OP_OPENAT2, sqe, dfd, path, sizeof(*how),
+				(uint64_t) (uintptr_t) how);
+}
+
+struct epoll_event;
+static inline void io_uring_prep_epoll_ctl(struct io_uring_sqe *sqe, int epfd,
+					   int fd, int op,
+					   struct epoll_event *ev)
+{
+	io_uring_prep_rw(IORING_OP_EPOLL_CTL, sqe, epfd, ev, op, fd);
+}
+
+static inline void io_uring_prep_provide_buffers(struct io_uring_sqe *sqe,
+						 void *addr, int len, int nr,
+						 int bgid, int bid)
+{
+	io_uring_prep_rw(IORING_OP_PROVIDE_BUFFERS, sqe, nr, addr, len, bid);
+	sqe->buf_group = bgid;
+}
+
+static inline void io_uring_prep_remove_buffers(struct io_uring_sqe *sqe,
+						int nr, int bgid)
+{
+	io_uring_prep_rw(IORING_OP_REMOVE_BUFFERS, sqe, nr, NULL, 0, 0);
+	sqe->buf_group = bgid;
+}
+
+static inline void io_uring_prep_shutdown(struct io_uring_sqe *sqe, int fd,
+					  int how)
+{
+	io_uring_prep_rw(IORING_OP_SHUTDOWN, sqe, fd, NULL, how, 0);
+}
+
+static inline void io_uring_prep_unlinkat(struct io_uring_sqe *sqe, int dfd,
+					  const char *path, int flags)
+{
+	io_uring_prep_rw(IORING_OP_UNLINKAT, sqe, dfd, path, 0, 0);
+	sqe->unlink_flags = flags;
+}
+
+static inline void io_uring_prep_renameat(struct io_uring_sqe *sqe, int olddfd,
+					  const char *oldpath, int newdfd,
+					  const char *newpath, int flags)
+{
+	io_uring_prep_rw(IORING_OP_RENAMEAT, sqe, olddfd, oldpath, newdfd,
+				(uint64_t) (uintptr_t) newpath);
+	sqe->rename_flags = flags;
+}
+
+static inline void io_uring_prep_sync_file_range(struct io_uring_sqe *sqe,
+						 int fd, unsigned len,
+						 off_t offset, int flags)
+{
+	io_uring_prep_rw(IORING_OP_SYNC_FILE_RANGE, sqe, fd, NULL, len, offset);
+	sqe->sync_range_flags = flags;
+}
+
+static inline void io_uring_prep_mkdirat(struct io_uring_sqe *sqe, int dfd,
+					const char *path, mode_t mode)
+{
+	io_uring_prep_rw(IORING_OP_MKDIRAT, sqe, dfd, path, mode, 0);
+}
+
+static inline void io_uring_prep_symlinkat(struct io_uring_sqe *sqe,
+					const char *target, int newdirfd, const char *linkpath)
+{
+	io_uring_prep_rw(IORING_OP_SYMLINKAT, sqe, newdirfd, target, 0,
+				(uint64_t) (uintptr_t) linkpath);
+}
+
+static inline void io_uring_prep_linkat(struct io_uring_sqe *sqe, int olddfd,
+					const char *oldpath, int newdfd,
+					const char *newpath, int flags)
+{
+	io_uring_prep_rw(IORING_OP_LINKAT, sqe, olddfd, oldpath, newdfd,
+				(uint64_t) (uintptr_t) newpath);
+	sqe->hardlink_flags = flags;
+}
+
+/*
+ * Returns the number of unconsumed (if SQPOLL) or unsubmitted entries in
+ * the SQ ring
+ */
+static inline unsigned io_uring_sq_ready(const struct io_uring *ring)
+{
+	/*
+	 * Without a barrier, we could miss an update and think the SQ wasn't ready.
+	 * We don't need the load acquire for non-SQPOLL since then we drive updates.
+	 */
+	if (ring->flags & IORING_SETUP_SQPOLL)
+		return ring->sq.sqe_tail - io_uring_smp_load_acquire(ring->sq.khead);
+
+	/* always use real head, to avoid losing sync for short submit */
+	return ring->sq.sqe_tail - *ring->sq.khead;
+}
+
+/*
+ * Returns how much space is left in the SQ ring.
+ */
+static inline unsigned io_uring_sq_space_left(const struct io_uring *ring)
+{
+	return *ring->sq.kring_entries - io_uring_sq_ready(ring);
+}
+
+/*
+ * Only applicable when using SQPOLL - allows the caller to wait for space
+ * to free up in the SQ ring, which happens when the kernel side thread has
+ * consumed one or more entries. If the SQ ring is currently non-full, no
+ * action is taken. Note: may return -EINVAL if the kernel doesn't support
+ * this feature.
+ */
+static inline int io_uring_sqring_wait(struct io_uring *ring)
+{
+	if (!(ring->flags & IORING_SETUP_SQPOLL))
+		return 0;
+	if (io_uring_sq_space_left(ring))
+		return 0;
+
+	return __io_uring_sqring_wait(ring);
+}
+
+/*
+ * Returns how many unconsumed entries are ready in the CQ ring
+ */
+static inline unsigned io_uring_cq_ready(const struct io_uring *ring)
+{
+	return io_uring_smp_load_acquire(ring->cq.ktail) - *ring->cq.khead;
+}
+
+/*
+ * Returns true if the eventfd notification is currently enabled
+ */
+static inline bool io_uring_cq_eventfd_enabled(const struct io_uring *ring)
+{
+	if (!ring->cq.kflags)
+		return true;
+
+	return !(*ring->cq.kflags & IORING_CQ_EVENTFD_DISABLED);
+}
+
+/*
+ * Toggle eventfd notification on or off, if an eventfd is registered with
+ * the ring.
+ */
+static inline int io_uring_cq_eventfd_toggle(struct io_uring *ring,
+					     bool enabled)
+{
+	uint32_t flags;
+
+	if (!!enabled == io_uring_cq_eventfd_enabled(ring))
+		return 0;
+
+	if (!ring->cq.kflags)
+		return -EOPNOTSUPP;
+
+	flags = *ring->cq.kflags;
+
+	if (enabled)
+		flags &= ~IORING_CQ_EVENTFD_DISABLED;
+	else
+		flags |= IORING_CQ_EVENTFD_DISABLED;
+
+	IO_URING_WRITE_ONCE(*ring->cq.kflags, flags);
+
+	return 0;
+}
+
+/*
+ * Return an IO completion, waiting for 'wait_nr' completions if one isn't
+ * readily available. Returns 0 with cqe_ptr filled in on success, -errno on
+ * failure.
+ */
+static inline int io_uring_wait_cqe_nr(struct io_uring *ring,
+				      struct io_uring_cqe **cqe_ptr,
+				      unsigned wait_nr)
+{
+	return __io_uring_get_cqe(ring, cqe_ptr, 0, wait_nr, NULL);
+}
+
+/*
+ * Return an IO completion, if one is readily available. Returns 0 with
+ * cqe_ptr filled in on success, -errno on failure.
+ */
+static inline int io_uring_peek_cqe(struct io_uring *ring,
+				    struct io_uring_cqe **cqe_ptr)
+{
+	return io_uring_wait_cqe_nr(ring, cqe_ptr, 0);
+}
+
+/*
+ * Return an IO completion, waiting for it if necessary. Returns 0 with
+ * cqe_ptr filled in on success, -errno on failure.
+ */
+static inline int io_uring_wait_cqe(struct io_uring *ring,
+				    struct io_uring_cqe **cqe_ptr)
+{
+	return io_uring_wait_cqe_nr(ring, cqe_ptr, 1);
+}
+
+ssize_t io_uring_mlock_size(unsigned entries, unsigned flags);
+ssize_t io_uring_mlock_size_params(unsigned entries, struct io_uring_params *p);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/include/liburing/barrier.h b/src/include/liburing/barrier.h
new file mode 100644
index 0000000..89ac682
--- /dev/null
+++ b/src/include/liburing/barrier.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef LIBURING_BARRIER_H
+#define LIBURING_BARRIER_H
+
+/*
+From the kernel documentation file refcount-vs-atomic.rst:
+
+A RELEASE memory ordering guarantees that all prior loads and
+stores (all po-earlier instructions) on the same CPU are completed
+before the operation. It also guarantees that all po-earlier
+stores on the same CPU and all propagated stores from other CPUs
+must propagate to all other CPUs before the release operation
+(A-cumulative property). This is implemented using
+:c:func:`smp_store_release`.
+
+An ACQUIRE memory ordering guarantees that all post loads and
+stores (all po-later instructions) on the same CPU are
+completed after the acquire operation. It also guarantees that all
+po-later stores on the same CPU must propagate to all other CPUs
+after the acquire operation executes. This is implemented using
+:c:func:`smp_acquire__after_ctrl_dep`.
+*/
+
+#ifdef __cplusplus
+#include <atomic>
+
+template <typename T>
+static inline void IO_URING_WRITE_ONCE(T &var, T val)
+{
+	std::atomic_store_explicit(reinterpret_cast<std::atomic<T> *>(&var),
+				   val, std::memory_order_relaxed);
+}
+template <typename T>
+static inline T IO_URING_READ_ONCE(const T &var)
+{
+	return std::atomic_load_explicit(
+		reinterpret_cast<const std::atomic<T> *>(&var),
+		std::memory_order_relaxed);
+}
+
+template <typename T>
+static inline void io_uring_smp_store_release(T *p, T v)
+{
+	std::atomic_store_explicit(reinterpret_cast<std::atomic<T> *>(p), v,
+				   std::memory_order_release);
+}
+
+template <typename T>
+static inline T io_uring_smp_load_acquire(const T *p)
+{
+	return std::atomic_load_explicit(
+		reinterpret_cast<const std::atomic<T> *>(p),
+		std::memory_order_acquire);
+}
+#else
+#include <stdatomic.h>
+
+#define IO_URING_WRITE_ONCE(var, val)				\
+	atomic_store_explicit((_Atomic __typeof__(var) *)&(var),	\
+			      (val), memory_order_relaxed)
+#define IO_URING_READ_ONCE(var)					\
+	atomic_load_explicit((_Atomic __typeof__(var) *)&(var),	\
+			     memory_order_relaxed)
+
+#define io_uring_smp_store_release(p, v)			\
+	atomic_store_explicit((_Atomic __typeof__(*(p)) *)(p), (v), \
+			      memory_order_release)
+#define io_uring_smp_load_acquire(p)				\
+	atomic_load_explicit((_Atomic __typeof__(*(p)) *)(p),	\
+			     memory_order_acquire)
+#endif
+
+#endif /* defined(LIBURING_BARRIER_H) */
diff --git a/src/include/liburing/io_uring.h b/src/include/liburing/io_uring.h
new file mode 100644
index 0000000..e4a4fc4
--- /dev/null
+++ b/src/include/liburing/io_uring.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
+/*
+ * Header file for the io_uring interface.
+ *
+ * Copyright (C) 2019 Jens Axboe
+ * Copyright (C) 2019 Christoph Hellwig
+ */
+#ifndef LINUX_IO_URING_H
+#define LINUX_IO_URING_H
+
+#include <linux/fs.h>
+#include <linux/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * IO submission data structure (Submission Queue Entry)
+ */
+struct io_uring_sqe {
+	__u8	opcode;		/* type of operation for this sqe */
+	__u8	flags;		/* IOSQE_ flags */
+	__u16	ioprio;		/* ioprio for the request */
+	__s32	fd;		/* file descriptor to do IO on */
+	union {
+		__u64	off;	/* offset into file */
+		__u64	addr2;
+	};
+	union {
+		__u64	addr;	/* pointer to buffer or iovecs */
+		__u64	splice_off_in;
+	};
+	__u32	len;		/* buffer size or number of iovecs */
+	union {
+		__kernel_rwf_t	rw_flags;
+		__u32		fsync_flags;
+		__u16		poll_events;	/* compatibility */
+		__u32		poll32_events;	/* word-reversed for BE */
+		__u32		sync_range_flags;
+		__u32		msg_flags;
+		__u32		timeout_flags;
+		__u32		accept_flags;
+		__u32		cancel_flags;
+		__u32		open_flags;
+		__u32		statx_flags;
+		__u32		fadvise_advice;
+		__u32		splice_flags;
+		__u32		rename_flags;
+		__u32		unlink_flags;
+		__u32		hardlink_flags;
+	};
+	__u64	user_data;	/* data to be passed back at completion time */
+	union {
+		struct {
+			/* pack this to avoid bogus arm OABI complaints */
+			union {
+				/* index into fixed buffers, if used */
+				__u16	buf_index;
+				/* for grouped buffer selection */
+				__u16	buf_group;
+			} __attribute__((packed));
+			/* personality to use, if used */
+			__u16	personality;
+			__s32	splice_fd_in;
+		};
+		__u64	__pad2[3];
+	};
+};
+
+enum {
+	IOSQE_FIXED_FILE_BIT,
+	IOSQE_IO_DRAIN_BIT,
+	IOSQE_IO_LINK_BIT,
+	IOSQE_IO_HARDLINK_BIT,
+	IOSQE_ASYNC_BIT,
+	IOSQE_BUFFER_SELECT_BIT,
+};
+
+/*
+ * sqe->flags
+ */
+/* use fixed fileset */
+#define IOSQE_FIXED_FILE	(1U << IOSQE_FIXED_FILE_BIT)
+/* issue after inflight IO */
+#define IOSQE_IO_DRAIN		(1U << IOSQE_IO_DRAIN_BIT)
+/* links next sqe */
+#define IOSQE_IO_LINK		(1U << IOSQE_IO_LINK_BIT)
+/* like LINK, but stronger */
+#define IOSQE_IO_HARDLINK	(1U << IOSQE_IO_HARDLINK_BIT)
+/* always go async */
+#define IOSQE_ASYNC		(1U << IOSQE_ASYNC_BIT)
+/* select buffer from sqe->buf_group */
+#define IOSQE_BUFFER_SELECT	(1U << IOSQE_BUFFER_SELECT_BIT)
+
+/*
+ * io_uring_setup() flags
+ */
+#define IORING_SETUP_IOPOLL	(1U << 0)	/* io_context is polled */
+#define IORING_SETUP_SQPOLL	(1U << 1)	/* SQ poll thread */
+#define IORING_SETUP_SQ_AFF	(1U << 2)	/* sq_thread_cpu is valid */
+#define IORING_SETUP_CQSIZE	(1U << 3)	/* app defines CQ size */
+#define IORING_SETUP_CLAMP	(1U << 4)	/* clamp SQ/CQ ring sizes */
+#define IORING_SETUP_ATTACH_WQ	(1U << 5)	/* attach to existing wq */
+#define IORING_SETUP_R_DISABLED	(1U << 6)	/* start with ring disabled */
+
+enum {
+	IORING_OP_NOP,
+	IORING_OP_READV,
+	IORING_OP_WRITEV,
+	IORING_OP_FSYNC,
+	IORING_OP_READ_FIXED,
+	IORING_OP_WRITE_FIXED,
+	IORING_OP_POLL_ADD,
+	IORING_OP_POLL_REMOVE,
+	IORING_OP_SYNC_FILE_RANGE,
+	IORING_OP_SENDMSG,
+	IORING_OP_RECVMSG,
+	IORING_OP_TIMEOUT,
+	IORING_OP_TIMEOUT_REMOVE,
+	IORING_OP_ACCEPT,
+	IORING_OP_ASYNC_CANCEL,
+	IORING_OP_LINK_TIMEOUT,
+	IORING_OP_CONNECT,
+	IORING_OP_FALLOCATE,
+	IORING_OP_OPENAT,
+	IORING_OP_CLOSE,
+	IORING_OP_FILES_UPDATE,
+	IORING_OP_STATX,
+	IORING_OP_READ,
+	IORING_OP_WRITE,
+	IORING_OP_FADVISE,
+	IORING_OP_MADVISE,
+	IORING_OP_SEND,
+	IORING_OP_RECV,
+	IORING_OP_OPENAT2,
+	IORING_OP_EPOLL_CTL,
+	IORING_OP_SPLICE,
+	IORING_OP_PROVIDE_BUFFERS,
+	IORING_OP_REMOVE_BUFFERS,
+	IORING_OP_TEE,
+	IORING_OP_SHUTDOWN,
+	IORING_OP_RENAMEAT,
+	IORING_OP_UNLINKAT,
+	IORING_OP_MKDIRAT,
+	IORING_OP_SYMLINKAT,
+	IORING_OP_LINKAT,
+
+	/* this goes last, obviously */
+	IORING_OP_LAST,
+};
+
+/*
+ * sqe->fsync_flags
+ */
+#define IORING_FSYNC_DATASYNC	(1U << 0)
+
+/*
+ * sqe->timeout_flags
+ */
+#define IORING_TIMEOUT_ABS	(1U << 0)
+#define IORING_TIMEOUT_UPDATE	(1U << 1)
+
+/*
+ * sqe->splice_flags
+ * extends splice(2) flags
+ */
+#define SPLICE_F_FD_IN_FIXED	(1U << 31) /* the last bit of __u32 */
+
+/*
+ * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
+ * command flags for POLL_ADD are stored in sqe->len.
+ *
+ * IORING_POLL_ADD_MULTI	Multishot poll. Sets IORING_CQE_F_MORE if
+ *				the poll handler will continue to report
+ *				CQEs on behalf of the same SQE.
+ *
+ * IORING_POLL_UPDATE		Update existing poll request, matching
+ *				sqe->addr as the old user_data field.
+ */
+#define IORING_POLL_ADD_MULTI	(1U << 0)
+#define IORING_POLL_UPDATE_EVENTS	(1U << 1)
+#define IORING_POLL_UPDATE_USER_DATA	(1U << 2)
+
+/*
+ * IO completion data structure (Completion Queue Entry)
+ */
+struct io_uring_cqe {
+	__u64	user_data;	/* sqe->data submission passed back */
+	__s32	res;		/* result code for this event */
+	__u32	flags;
+};
+
+/*
+ * cqe->flags
+ *
+ * IORING_CQE_F_BUFFER	If set, the upper 16 bits are the buffer ID
+ * IORING_CQE_F_MORE	If set, parent SQE will generate more CQE entries
+ */
+#define IORING_CQE_F_BUFFER		(1U << 0)
+#define IORING_CQE_F_MORE		(1U << 1)
+
+enum {
+	IORING_CQE_BUFFER_SHIFT		= 16,
+};
+
+/*
+ * Magic offsets for the application to mmap the data it needs
+ */
+#define IORING_OFF_SQ_RING		0ULL
+#define IORING_OFF_CQ_RING		0x8000000ULL
+#define IORING_OFF_SQES			0x10000000ULL
+
+/*
+ * Filled with the offset for mmap(2)
+ */
+struct io_sqring_offsets {
+	__u32 head;
+	__u32 tail;
+	__u32 ring_mask;
+	__u32 ring_entries;
+	__u32 flags;
+	__u32 dropped;
+	__u32 array;
+	__u32 resv1;
+	__u64 resv2;
+};
+
+/*
+ * sq_ring->flags
+ */
+#define IORING_SQ_NEED_WAKEUP	(1U << 0) /* needs io_uring_enter wakeup */
+#define IORING_SQ_CQ_OVERFLOW	(1U << 1) /* CQ ring is overflown */
+
+struct io_cqring_offsets {
+	__u32 head;
+	__u32 tail;
+	__u32 ring_mask;
+	__u32 ring_entries;
+	__u32 overflow;
+	__u32 cqes;
+	__u32 flags;
+	__u32 resv1;
+	__u64 resv2;
+};
+
+/*
+ * cq_ring->flags
+ */
+
+/* disable eventfd notifications */
+#define IORING_CQ_EVENTFD_DISABLED	(1U << 0)
+
+/*
+ * io_uring_enter(2) flags
+ */
+#define IORING_ENTER_GETEVENTS	(1U << 0)
+#define IORING_ENTER_SQ_WAKEUP	(1U << 1)
+#define IORING_ENTER_SQ_WAIT	(1U << 2)
+#define IORING_ENTER_EXT_ARG	(1U << 3)
+
+/*
+ * Passed in for io_uring_setup(2). Copied back with updated info on success
+ */
+struct io_uring_params {
+	__u32 sq_entries;
+	__u32 cq_entries;
+	__u32 flags;
+	__u32 sq_thread_cpu;
+	__u32 sq_thread_idle;
+	__u32 features;
+	__u32 wq_fd;
+	__u32 resv[3];
+	struct io_sqring_offsets sq_off;
+	struct io_cqring_offsets cq_off;
+};
+
+/*
+ * io_uring_params->features flags
+ */
+#define IORING_FEAT_SINGLE_MMAP		(1U << 0)
+#define IORING_FEAT_NODROP		(1U << 1)
+#define IORING_FEAT_SUBMIT_STABLE	(1U << 2)
+#define IORING_FEAT_RW_CUR_POS		(1U << 3)
+#define IORING_FEAT_CUR_PERSONALITY	(1U << 4)
+#define IORING_FEAT_FAST_POLL		(1U << 5)
+#define IORING_FEAT_POLL_32BITS 	(1U << 6)
+#define IORING_FEAT_SQPOLL_NONFIXED	(1U << 7)
+#define IORING_FEAT_EXT_ARG		(1U << 8)
+#define IORING_FEAT_NATIVE_WORKERS	(1U << 9)
+#define IORING_FEAT_RSRC_TAGS		(1U << 10)
+
+/*
+ * io_uring_register(2) opcodes and arguments
+ */
+enum {
+	IORING_REGISTER_BUFFERS			= 0,
+	IORING_UNREGISTER_BUFFERS		= 1,
+	IORING_REGISTER_FILES			= 2,
+	IORING_UNREGISTER_FILES			= 3,
+	IORING_REGISTER_EVENTFD			= 4,
+	IORING_UNREGISTER_EVENTFD		= 5,
+	IORING_REGISTER_FILES_UPDATE		= 6,
+	IORING_REGISTER_EVENTFD_ASYNC		= 7,
+	IORING_REGISTER_PROBE			= 8,
+	IORING_REGISTER_PERSONALITY		= 9,
+	IORING_UNREGISTER_PERSONALITY		= 10,
+	IORING_REGISTER_RESTRICTIONS		= 11,
+	IORING_REGISTER_ENABLE_RINGS		= 12,
+
+	/* extended with tagging */
+	IORING_REGISTER_FILES2			= 13,
+	IORING_REGISTER_FILES_UPDATE2		= 14,
+	IORING_REGISTER_BUFFERS2		= 15,
+	IORING_REGISTER_BUFFERS_UPDATE		= 16,
+
+	/* this goes last */
+	IORING_REGISTER_LAST
+};
+
+/* deprecated, see struct io_uring_rsrc_update */
+struct io_uring_files_update {
+	__u32 offset;
+	__u32 resv;
+	__aligned_u64 /* __s32 * */ fds;
+};
+
+struct io_uring_rsrc_register {
+	__u32 nr;
+	__u32 resv;
+	__u64 resv2;
+	__aligned_u64 data;
+	__aligned_u64 tags;
+};
+
+struct io_uring_rsrc_update {
+	__u32 offset;
+	__u32 resv;
+	__aligned_u64 data;
+};
+
+struct io_uring_rsrc_update2 {
+	__u32 offset;
+	__u32 resv;
+	__aligned_u64 data;
+	__aligned_u64 tags;
+	__u32 nr;
+	__u32 resv2;
+};
+
+/* Skip updating fd indexes set to this value in the fd table */
+#define IORING_REGISTER_FILES_SKIP	(-2)
+
+#define IO_URING_OP_SUPPORTED	(1U << 0)
+
+struct io_uring_probe_op {
+	__u8 op;
+	__u8 resv;
+	__u16 flags;	/* IO_URING_OP_* flags */
+	__u32 resv2;
+};
+
+struct io_uring_probe {
+	__u8 last_op;	/* last opcode supported */
+	__u8 ops_len;	/* length of ops[] array below */
+	__u16 resv;
+	__u32 resv2[3];
+	struct io_uring_probe_op ops[];
+};
+
+struct io_uring_restriction {
+	__u16 opcode;
+	union {
+		__u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
+		__u8 sqe_op;      /* IORING_RESTRICTION_SQE_OP */
+		__u8 sqe_flags;   /* IORING_RESTRICTION_SQE_FLAGS_* */
+	};
+	__u8 resv;
+	__u32 resv2[3];
+};
+
+/*
+ * io_uring_restriction->opcode values
+ */
+enum {
+	/* Allow an io_uring_register(2) opcode */
+	IORING_RESTRICTION_REGISTER_OP		= 0,
+
+	/* Allow an sqe opcode */
+	IORING_RESTRICTION_SQE_OP		= 1,
+
+	/* Allow sqe flags */
+	IORING_RESTRICTION_SQE_FLAGS_ALLOWED	= 2,
+
+	/* Require sqe flags (these flags must be set on each submission) */
+	IORING_RESTRICTION_SQE_FLAGS_REQUIRED	= 3,
+
+	IORING_RESTRICTION_LAST
+};
+
+struct io_uring_getevents_arg {
+	__u64	sigmask;
+	__u32	sigmask_sz;
+	__u32	pad;
+	__u64	ts;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/liburing.map b/src/liburing.map
new file mode 100644
index 0000000..012ac4e
--- /dev/null
+++ b/src/liburing.map
@@ -0,0 +1,39 @@
+LIBURING_2.0 {
+	global:
+		io_uring_get_probe;
+		io_uring_get_probe_ring;
+		io_uring_free_probe;
+		io_uring_get_sqe;
+		io_uring_peek_batch_cqe;
+		io_uring_queue_exit;
+		io_uring_queue_init;
+		io_uring_queue_init_params;
+		io_uring_queue_mmap;
+		io_uring_register_buffers;
+		io_uring_register_eventfd;
+		io_uring_register_eventfd_async;
+		io_uring_register_files;
+		io_uring_register_files_update;
+		io_uring_register_personality;
+		io_uring_register_probe;
+		io_uring_ring_dontfork;
+		io_uring_submit;
+		io_uring_submit_and_wait;
+		io_uring_unregister_buffers;
+		io_uring_unregister_eventfd;
+		io_uring_unregister_files;
+		io_uring_unregister_personality;
+		io_uring_wait_cqe_timeout;
+		io_uring_wait_cqes;
+
+		__io_uring_get_cqe;
+		__io_uring_sqring_wait;
+	local:
+		*;
+};
+
+LIBURING_2.1 {
+	global:
+		io_uring_mlock_size_params;
+		io_uring_mlock_size;
+} LIBURING_2.0;
diff --git a/src/queue.c b/src/queue.c
new file mode 100644
index 0000000..2f0f19b
--- /dev/null
+++ b/src/queue.c
@@ -0,0 +1,404 @@
+/* SPDX-License-Identifier: MIT */
+#define _POSIX_C_SOURCE 200112L
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "liburing/compat.h"
+#include "liburing/io_uring.h"
+#include "liburing.h"
+#include "liburing/barrier.h"
+
+#include "syscall.h"
+
+/*
+ * Returns true if we're not using SQ thread (thus nobody submits but us)
+ * or if IORING_SQ_NEED_WAKEUP is set, so submit thread must be explicitly
+ * awakened. For the latter case, we set the thread wakeup flag.
+ */
+static inline bool sq_ring_needs_enter(struct io_uring *ring, unsigned *flags)
+{
+	/* Without SQPOLL, the kernel only sees new SQEs via io_uring_enter() */
+	if (!(ring->flags & IORING_SETUP_SQPOLL))
+		return true;
+
+	/*
+	 * SQPOLL thread has gone idle; ask the caller to pass
+	 * IORING_ENTER_SQ_WAKEUP so the kernel wakes it back up.
+	 */
+	if (uring_unlikely(IO_URING_READ_ONCE(*ring->sq.kflags) &
+			   IORING_SQ_NEED_WAKEUP)) {
+		*flags |= IORING_ENTER_SQ_WAKEUP;
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * True if the kernel has dropped CQEs due to CQ ring overflow; entering the
+ * kernel with IORING_ENTER_GETEVENTS flushes them back into the CQ ring.
+ * Note the overflow bit is published in the SQ ring's flags word.
+ */
+static inline bool cq_ring_needs_flush(struct io_uring *ring)
+{
+	return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
+}
+
+/*
+ * Peek at the CQ ring without entering the kernel. On return, *cqe_ptr is
+ * the oldest unconsumed CQE (NULL if none) and *nr_available the number of
+ * ready CQEs. On kernels without IORING_FEAT_EXT_ARG, completions of the
+ * internal timeout SQE (user_data == LIBURING_UDATA_TIMEOUT) are consumed
+ * here; a failed timeout is reported as a negative error return.
+ */
+static int __io_uring_peek_cqe(struct io_uring *ring,
+			       struct io_uring_cqe **cqe_ptr,
+			       unsigned *nr_available)
+{
+	struct io_uring_cqe *cqe;
+	int err = 0;
+	unsigned available;
+	unsigned mask = *ring->cq.kring_mask;
+
+	do {
+		/* Acquire pairs with the kernel's tail store */
+		unsigned tail = io_uring_smp_load_acquire(ring->cq.ktail);
+		unsigned head = *ring->cq.khead;
+
+		cqe = NULL;
+		available = tail - head;
+		if (!available)
+			break;
+
+		cqe = &ring->cq.cqes[head & mask];
+		if (!(ring->features & IORING_FEAT_EXT_ARG) &&
+				cqe->user_data == LIBURING_UDATA_TIMEOUT) {
+			if (cqe->res < 0)
+				err = cqe->res;
+			/* Swallow the internal timeout CQE and re-peek */
+			io_uring_cq_advance(ring, 1);
+			if (!err)
+				continue;
+			cqe = NULL;
+		}
+
+		break;
+	} while (1);
+
+	*cqe_ptr = cqe;
+	*nr_available = available;
+	return err;
+}
+
+/* Bundled arguments for _io_uring_get_cqe() */
+struct get_data {
+	unsigned submit;	/* number of queued SQEs to submit */
+	unsigned wait_nr;	/* minimum completions to wait for */
+	unsigned get_flags;	/* extra IORING_ENTER_* flags for the wait */
+	int sz;			/* size of *arg passed to io_uring_enter2() */
+	void *arg;		/* sigset_t, or io_uring_getevents_arg with EXT_ARG */
+};
+
+/*
+ * Common implementation behind submit-and/or-wait CQE retrieval: loop
+ * peeking the CQ ring, entering the kernel only when we must — to submit
+ * queued SQEs, wait for completions, or flush an overflowed CQ ring.
+ */
+static int _io_uring_get_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
+			     struct get_data *data)
+{
+	struct io_uring_cqe *cqe = NULL;
+	int err;
+
+	do {
+		bool need_enter = false;
+		bool cq_overflow_flush = false;
+		unsigned flags = 0;
+		unsigned nr_available;
+		int ret;
+
+		err = __io_uring_peek_cqe(ring, &cqe, &nr_available);
+		if (err)
+			break;
+		/* Nothing to submit or wait for: done, unless dropped CQEs need flushing */
+		if (!cqe && !data->wait_nr && !data->submit) {
+			if (!cq_ring_needs_flush(ring)) {
+				err = -EAGAIN;
+				break;
+			}
+			cq_overflow_flush = true;
+		}
+		if (data->wait_nr > nr_available || cq_overflow_flush) {
+			flags = IORING_ENTER_GETEVENTS | data->get_flags;
+			need_enter = true;
+		}
+		if (data->submit) {
+			/* May set IORING_ENTER_SQ_WAKEUP in flags for SQPOLL */
+			sq_ring_needs_enter(ring, &flags);
+			need_enter = true;
+		}
+		if (!need_enter)
+			break;
+
+		ret = __sys_io_uring_enter2(ring->ring_fd, data->submit,
+				data->wait_nr, flags, data->arg,
+				data->sz);
+		if (ret < 0) {
+			err = -errno;
+			break;
+		}
+
+		/* Kernel consumed 'ret' SQEs; loop again if we still lack a CQE */
+		data->submit -= ret;
+		if (cqe)
+			break;
+	} while (1);
+
+	*cqe_ptr = cqe;
+	return err;
+}
+
+/*
+ * Submit 'submit' SQEs and/or wait for 'wait_nr' completions, with
+ * 'sigmask' (may be NULL) applied atomically around the wait. On success
+ * returns 0 with *cqe_ptr set; on failure returns a negative error.
+ */
+int __io_uring_get_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
+		       unsigned submit, unsigned wait_nr, sigset_t *sigmask)
+{
+	struct get_data data = {
+		.submit		= submit,
+		.wait_nr 	= wait_nr,
+		.get_flags	= 0,
+		.sz		= _NSIG / 8,
+		.arg		= sigmask,
+	};
+
+	return _io_uring_get_cqe(ring, cqe_ptr, &data);
+}
+
+/*
+ * Fill in an array of IO completions up to count, if any are available.
+ * Returns the amount of IO completions filled.
+ */
+unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
+				 struct io_uring_cqe **cqes, unsigned count)
+{
+	unsigned ready;
+	bool overflow_checked = false;
+
+again:
+	ready = io_uring_cq_ready(ring);
+	if (ready) {
+		unsigned head = *ring->cq.khead;
+		unsigned mask = *ring->cq.kring_mask;
+		unsigned last;
+		int i = 0;
+
+		/* CQEs are only peeked here; the caller must advance the CQ ring */
+		count = count > ready ? ready : count;
+		last = head + count;
+		for (;head != last; head++, i++)
+			cqes[i] = &ring->cq.cqes[head & mask];
+
+		return count;
+	}
+
+	if (overflow_checked)
+		goto done;
+
+	/*
+	 * CQ ring looked empty, but the kernel may be holding overflowed
+	 * completions. Flush them with a GETEVENTS enter and retry once.
+	 */
+	if (cq_ring_needs_flush(ring)) {
+		__sys_io_uring_enter(ring->ring_fd, 0, 0,
+				     IORING_ENTER_GETEVENTS, NULL);
+		overflow_checked = true;
+		goto again;
+	}
+
+done:
+	return 0;
+}
+
+/*
+ * Sync internal state with kernel ring state on the SQ side. Returns the
+ * number of pending items in the SQ ring, for the shared ring.
+ */
+int __io_uring_flush_sq(struct io_uring *ring)
+{
+	struct io_uring_sq *sq = &ring->sq;
+	const unsigned mask = *sq->kring_mask;
+	unsigned ktail = *sq->ktail;
+	unsigned to_submit = sq->sqe_tail - sq->sqe_head;
+
+	if (!to_submit)
+		goto out;
+
+	/*
+	 * Fill in sqes that we have queued up, adding them to the kernel ring
+	 */
+	do {
+		/* Publish each cached SQE index into the kernel-visible array */
+		sq->array[ktail & mask] = sq->sqe_head & mask;
+		ktail++;
+		sq->sqe_head++;
+	} while (--to_submit);
+
+	/*
+	 * Ensure that the kernel sees the SQE updates before it sees the tail
+	 * update.
+	 */
+	io_uring_smp_store_release(sq->ktail, ktail);
+out:
+	/*
+	 * This _may_ look problematic, as we're not supposed to be reading
+	 * SQ->head without acquire semantics. When we're in SQPOLL mode, the
+	 * kernel submitter could be updating this right now. For non-SQPOLL,
+	 * task itself does it, and there's no potential race. But even for
+	 * SQPOLL, the load is going to be potentially out-of-date the very
+	 * instant it's done, regardless or whether or not it's done
+	 * atomically. Worst case, we're going to be over-estimating what
+	 * we can submit. The point is, we need to be able to deal with this
+	 * situation regardless of any perceived atomicity.
+	 */
+	return ktail - *sq->khead;
+}
+
+/*
+ * If we have kernel support for IORING_ENTER_EXT_ARG, then we can use that
+ * more efficiently than queueing an internal timeout command.
+ */
+static int io_uring_wait_cqes_new(struct io_uring *ring,
+				  struct io_uring_cqe **cqe_ptr,
+				  unsigned wait_nr, struct __kernel_timespec *ts,
+				  sigset_t *sigmask)
+{
+	/* Pointers travel to the kernel as u64 fields of the EXT_ARG struct */
+	struct io_uring_getevents_arg arg = {
+		.sigmask	= (unsigned long) sigmask,
+		.sigmask_sz	= _NSIG / 8,
+		.ts		= (unsigned long) ts
+	};
+	struct get_data data = {
+		.submit		= __io_uring_flush_sq(ring),
+		.wait_nr	= wait_nr,
+		.get_flags	= IORING_ENTER_EXT_ARG,
+		.sz		= sizeof(arg),
+		.arg		= &arg
+	};
+
+	return _io_uring_get_cqe(ring, cqe_ptr, &data);
+}
+
+/*
+ * Like io_uring_wait_cqe(), except it accepts a timeout value as well. Note
+ * that an sqe is used internally to handle the timeout. For kernel doesn't
+ * support IORING_FEAT_EXT_ARG, applications using this function must never
+ * set sqe->user_data to LIBURING_UDATA_TIMEOUT!
+ *
+ * For kernels without IORING_FEAT_EXT_ARG (5.10 and older), if 'ts' is
+ * specified, the application need not call io_uring_submit() before
+ * calling this function, as we will do that on its behalf. From this it also
+ * follows that this function isn't safe to use for applications that split SQ
+ * and CQ handling between two threads and expect that to work without
+ * synchronization, as this function manipulates both the SQ and CQ side.
+ *
+ * For kernels with IORING_FEAT_EXT_ARG, no implicit submission is done and
+ * hence this function is safe to use for applications that split SQ and CQ
+ * handling between two threads.
+ */
+int io_uring_wait_cqes(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
+		       unsigned wait_nr, struct __kernel_timespec *ts,
+		       sigset_t *sigmask)
+{
+	unsigned to_submit = 0;
+
+	if (ts) {
+		struct io_uring_sqe *sqe;
+		int ret;
+
+		/* Newer kernels take the timeout directly via EXT_ARG */
+		if (ring->features & IORING_FEAT_EXT_ARG)
+			return io_uring_wait_cqes_new(ring, cqe_ptr, wait_nr,
+							ts, sigmask);
+
+		/*
+		 * If the SQ ring is full, we may need to submit IO first
+		 */
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			ret = io_uring_submit(ring);
+			if (ret < 0)
+				return ret;
+			sqe = io_uring_get_sqe(ring);
+			if (!sqe)
+				return -EAGAIN;
+		}
+		/* Queue an internal timeout; consumed by __io_uring_peek_cqe() */
+		io_uring_prep_timeout(sqe, ts, wait_nr, 0);
+		sqe->user_data = LIBURING_UDATA_TIMEOUT;
+		to_submit = __io_uring_flush_sq(ring);
+	}
+
+	return __io_uring_get_cqe(ring, cqe_ptr, to_submit, wait_nr, sigmask);
+}
+
+/*
+ * See io_uring_wait_cqes() - this function is the same, it just always uses
+ * '1' as the wait_nr.
+ */
+int io_uring_wait_cqe_timeout(struct io_uring *ring,
+			      struct io_uring_cqe **cqe_ptr,
+			      struct __kernel_timespec *ts)
+{
+	return io_uring_wait_cqes(ring, cqe_ptr, 1, ts, NULL);
+}
+
+/*
+ * Submit sqes acquired from io_uring_get_sqe() to the kernel.
+ *
+ * Returns number of sqes submitted
+ */
+static int __io_uring_submit(struct io_uring *ring, unsigned submitted,
+			     unsigned wait_nr)
+{
+	unsigned flags;
+	int ret;
+
+	flags = 0;
+	if (sq_ring_needs_enter(ring, &flags) || wait_nr) {
+		/* IOPOLL completions are only reaped inside the enter call */
+		if (wait_nr || (ring->flags & IORING_SETUP_IOPOLL))
+			flags |= IORING_ENTER_GETEVENTS;
+
+		ret = __sys_io_uring_enter(ring->ring_fd, submitted, wait_nr,
+						flags, NULL);
+		if (ret < 0)
+			return -errno;
+	} else
+		/* SQPOLL thread is awake: no syscall needed, all are picked up */
+		ret = submitted;
+
+	return ret;
+}
+
+/* Sync the cached SQE tail into the kernel ring, then submit */
+static int __io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr)
+{
+	return __io_uring_submit(ring, __io_uring_flush_sq(ring), wait_nr);
+}
+
+/*
+ * Submit sqes acquired from io_uring_get_sqe() to the kernel.
+ *
+ * Returns number of sqes submitted
+ */
+int io_uring_submit(struct io_uring *ring)
+{
+	return __io_uring_submit_and_wait(ring, 0);
+}
+
+/*
+ * Like io_uring_submit(), but allows waiting for events as well.
+ *
+ * Returns number of sqes submitted
+ */
+int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr)
+{
+	return __io_uring_submit_and_wait(ring, wait_nr);
+}
+
+/*
+ * Return an sqe to fill. Application must later call io_uring_submit()
+ * when it's ready to tell the kernel about it. The caller may call this
+ * function multiple times before calling io_uring_submit().
+ *
+ * Returns a vacant sqe, or NULL if we're full.
+ */
+struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
+{
+	struct io_uring_sq *sq = &ring->sq;
+	/* Acquire pairs with the kernel's head store when it consumes SQEs */
+	unsigned int head = io_uring_smp_load_acquire(sq->khead);
+	unsigned int next = sq->sqe_tail + 1;
+	struct io_uring_sqe *sqe = NULL;
+
+	/* Ring has room if the unsubmitted span still fits in kring_entries */
+	if (next - head <= *sq->kring_entries) {
+		sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];
+		sq->sqe_tail = next;
+	}
+	return sqe;
+}
+
+/* Block until the kernel SQ ring has space again (IORING_SETUP_SQPOLL).
+ * Returns 0 on success, -errno on failure. */
+int __io_uring_sqring_wait(struct io_uring *ring)
+{
+	int ret;
+
+	ret = __sys_io_uring_enter(ring->ring_fd, 0, 0, IORING_ENTER_SQ_WAIT,
+					NULL);
+	if (ret < 0)
+		ret = -errno;
+	return ret;
+}
diff --git a/src/register.c b/src/register.c
new file mode 100644
index 0000000..994aaff
--- /dev/null
+++ b/src/register.c
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: MIT */
+#define _POSIX_C_SOURCE 200112L
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+
+#include "liburing/compat.h"
+#include "liburing/io_uring.h"
+#include "liburing.h"
+
+#include "syscall.h"
+
+/* Register fixed buffers for READ_FIXED/WRITE_FIXED. 0 on success, -errno. */
+int io_uring_register_buffers(struct io_uring *ring, const struct iovec *iovecs,
+			      unsigned nr_iovecs)
+{
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_BUFFERS,
+					iovecs, nr_iovecs);
+	if (ret < 0)
+		return -errno;
+
+	return 0;
+}
+
+/* Drop all previously registered buffers. 0 on success, -errno. */
+int io_uring_unregister_buffers(struct io_uring *ring)
+{
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd, IORING_UNREGISTER_BUFFERS,
+					NULL, 0);
+	if (ret < 0)
+		return -errno;
+
+	return 0;
+}
+
+/*
+ * Register an update for an existing file set. The updates will start at
+ * 'off' in the original array, and 'nr_files' is the number of files we'll
+ * update.
+ *
+ * Returns number of files updated on success, -ERROR on failure.
+ */
+int io_uring_register_files_update(struct io_uring *ring, unsigned off,
+				   int *files, unsigned nr_files)
+{
+	struct io_uring_files_update up = {
+		.offset	= off,
+		.fds	= (unsigned long) files,
+	};
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd,
+					IORING_REGISTER_FILES_UPDATE, &up,
+					nr_files);
+	if (ret < 0)
+		return -errno;
+
+	return ret;
+}
+
+/* Register a fixed file set for use with IOSQE_FIXED_FILE. 0 or -errno. */
+int io_uring_register_files(struct io_uring *ring, const int *files,
+			      unsigned nr_files)
+{
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_FILES,
+					files, nr_files);
+	if (ret < 0)
+		return -errno;
+
+	return 0;
+}
+
+/* Drop the registered file set. 0 on success, -errno. */
+int io_uring_unregister_files(struct io_uring *ring)
+{
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd, IORING_UNREGISTER_FILES,
+					NULL, 0);
+	if (ret < 0)
+		return -errno;
+
+	return 0;
+}
+
+/* Register an eventfd that is signalled when completions are posted. */
+int io_uring_register_eventfd(struct io_uring *ring, int event_fd)
+{
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_EVENTFD,
+					&event_fd, 1);
+	if (ret < 0)
+		return -errno;
+
+	return 0;
+}
+
+/* Remove the completion-notification eventfd. 0 on success, -errno. */
+int io_uring_unregister_eventfd(struct io_uring *ring)
+{
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd, IORING_UNREGISTER_EVENTFD,
+					NULL, 0);
+	if (ret < 0)
+		return -errno;
+
+	return 0;
+}
+
+/* Like io_uring_register_eventfd(), but only notify for events that
+ * completed out-of-line (async). */
+int io_uring_register_eventfd_async(struct io_uring *ring, int event_fd)
+{
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_EVENTFD_ASYNC,
+			&event_fd, 1);
+	if (ret < 0)
+		return -errno;
+
+	return 0;
+}
+
+/* Fill 'p' (sized for 'nr_ops' entries) with the opcodes this kernel
+ * supports. 0 on success, -errno. */
+int io_uring_register_probe(struct io_uring *ring, struct io_uring_probe *p,
+			    unsigned int nr_ops)
+{
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_PROBE,
+					p, nr_ops);
+	if (ret < 0)
+		return -errno;
+
+	return 0;
+}
+
+/* Register the current task credentials; returns the personality id for
+ * use in sqe->personality, or -errno. */
+int io_uring_register_personality(struct io_uring *ring)
+{
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_PERSONALITY,
+					NULL, 0);
+	if (ret < 0)
+		return -errno;
+
+	return ret;
+}
+
+/* Drop a personality previously returned by io_uring_register_personality(). */
+int io_uring_unregister_personality(struct io_uring *ring, int id)
+{
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd, IORING_UNREGISTER_PERSONALITY,
+					NULL, id);
+	if (ret < 0)
+		return -errno;
+
+	return ret;
+}
+
+/* Restrict which register opcodes, sqe opcodes and sqe flags the ring may
+ * use; only valid while the ring is disabled (IORING_SETUP_R_DISABLED). */
+int io_uring_register_restrictions(struct io_uring *ring,
+				   struct io_uring_restriction *res,
+				   unsigned int nr_res)
+{
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_RESTRICTIONS,
+				      res, nr_res);
+	if (ret < 0)
+		return -errno;
+
+	return 0;
+}
+
+/* Start a ring that was created with IORING_SETUP_R_DISABLED. */
+int io_uring_enable_rings(struct io_uring *ring)
+{
+	int ret;
+
+	ret = __sys_io_uring_register(ring->ring_fd,
+				      IORING_REGISTER_ENABLE_RINGS, NULL, 0);
+	if (ret < 0)
+		return -errno;
+
+	return ret;
+}
diff --git a/src/setup.c b/src/setup.c
new file mode 100644
index 0000000..54225e8
--- /dev/null
+++ b/src/setup.c
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: MIT */
+#define _DEFAULT_SOURCE
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <signal.h>
+
+#include "liburing/compat.h"
+#include "liburing/io_uring.h"
+#include "liburing.h"
+
+#include "syscall.h"
+
+/* Unmap the SQ ring, and the CQ ring when it was mapped separately */
+static void io_uring_unmap_rings(struct io_uring_sq *sq, struct io_uring_cq *cq)
+{
+	munmap(sq->ring_ptr, sq->ring_sz);
+	if (cq->ring_ptr && cq->ring_ptr != sq->ring_ptr)
+		munmap(cq->ring_ptr, cq->ring_sz);
+}
+
+/*
+ * mmap the SQ/CQ rings and the SQE array for 'fd', wiring the kernel-shared
+ * pointers in 'sq' and 'cq' from the offsets in 'p'. Returns 0 on success,
+ * -errno on failure (any partial mappings are torn down).
+ */
+static int io_uring_mmap(int fd, struct io_uring_params *p,
+			 struct io_uring_sq *sq, struct io_uring_cq *cq)
+{
+	size_t size;
+	int ret;
+
+	sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
+	cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
+
+	/* Newer kernels expose both rings through a single mapping */
+	if (p->features & IORING_FEAT_SINGLE_MMAP) {
+		if (cq->ring_sz > sq->ring_sz)
+			sq->ring_sz = cq->ring_sz;
+		cq->ring_sz = sq->ring_sz;
+	}
+	sq->ring_ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
+			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
+	if (sq->ring_ptr == MAP_FAILED)
+		return -errno;
+
+	if (p->features & IORING_FEAT_SINGLE_MMAP) {
+		cq->ring_ptr = sq->ring_ptr;
+	} else {
+		cq->ring_ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
+				MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
+		if (cq->ring_ptr == MAP_FAILED) {
+			cq->ring_ptr = NULL;
+			ret = -errno;
+			goto err;
+		}
+	}
+
+	sq->khead = sq->ring_ptr + p->sq_off.head;
+	sq->ktail = sq->ring_ptr + p->sq_off.tail;
+	sq->kring_mask = sq->ring_ptr + p->sq_off.ring_mask;
+	sq->kring_entries = sq->ring_ptr + p->sq_off.ring_entries;
+	sq->kflags = sq->ring_ptr + p->sq_off.flags;
+	sq->kdropped = sq->ring_ptr + p->sq_off.dropped;
+	sq->array = sq->ring_ptr + p->sq_off.array;
+
+	/* SQEs live in their own mapping, separate from the SQ ring */
+	size = p->sq_entries * sizeof(struct io_uring_sqe);
+	sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
+				MAP_SHARED | MAP_POPULATE, fd,
+				IORING_OFF_SQES);
+	if (sq->sqes == MAP_FAILED) {
+		ret = -errno;
+err:
+		io_uring_unmap_rings(sq, cq);
+		return ret;
+	}
+
+	cq->khead = cq->ring_ptr + p->cq_off.head;
+	cq->ktail = cq->ring_ptr + p->cq_off.tail;
+	cq->kring_mask = cq->ring_ptr + p->cq_off.ring_mask;
+	cq->kring_entries = cq->ring_ptr + p->cq_off.ring_entries;
+	cq->koverflow = cq->ring_ptr + p->cq_off.overflow;
+	cq->cqes = cq->ring_ptr + p->cq_off.cqes;
+	/* cq_off.flags is 0 on kernels that predate the CQ flags word */
+	if (p->cq_off.flags)
+		cq->kflags = cq->ring_ptr + p->cq_off.flags;
+	return 0;
+}
+
+/*
+ * For users that want to specify sq_thread_cpu or sq_thread_idle, this
+ * interface is a convenient helper for mmap()ing the rings.
+ * Returns -errno on error, or zero on success.  On success, 'ring'
+ * contains the necessary information to read/write to the rings.
+ */
+int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
+{
+	int ret;
+
+	memset(ring, 0, sizeof(*ring));
+	ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
+	if (!ret) {
+		ring->flags = p->flags;
+		ring->ring_fd = fd;
+	}
+	return ret;
+}
+
+/*
+ * Ensure that the mmap'ed rings aren't available to a child after a fork(2).
+ * This uses madvise(..., MADV_DONTFORK) on the mmap'ed ranges.
+ */
+int io_uring_ring_dontfork(struct io_uring *ring)
+{
+	size_t len;
+	int ret;
+
+	/* Must be fully set up (mmap'ed) before we can madvise the ranges */
+	if (!ring->sq.ring_ptr || !ring->sq.sqes || !ring->cq.ring_ptr)
+		return -EINVAL;
+
+	len = *ring->sq.kring_entries * sizeof(struct io_uring_sqe);
+	ret = madvise(ring->sq.sqes, len, MADV_DONTFORK);
+	if (ret == -1)
+		return -errno;
+
+	len = ring->sq.ring_sz;
+	ret = madvise(ring->sq.ring_ptr, len, MADV_DONTFORK);
+	if (ret == -1)
+		return -errno;
+
+	/* CQ ring shares the SQ mapping on IORING_FEAT_SINGLE_MMAP kernels */
+	if (ring->cq.ring_ptr != ring->sq.ring_ptr) {
+		len = ring->cq.ring_sz;
+		ret = madvise(ring->cq.ring_ptr, len, MADV_DONTFORK);
+		if (ret == -1)
+			return -errno;
+	}
+
+	return 0;
+}
+
+/*
+ * Create and mmap a ring with caller-provided params. On success, 'p' and
+ * ring->features are filled with the kernel's feature flags. Returns 0 on
+ * success, -errno on failure.
+ */
+int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
+			       struct io_uring_params *p)
+{
+	int fd, ret;
+
+	fd = __sys_io_uring_setup(entries, p);
+	if (fd < 0)
+		return -errno;
+
+	ret = io_uring_queue_mmap(fd, p, ring);
+	if (ret) {
+		close(fd);
+		return ret;
+	}
+
+	ring->features = p->features;
+	return 0;
+}
+
+/*
+ * Returns -errno on error, or zero on success. On success, 'ring'
+ * contains the necessary information to read/write to the rings.
+ */
+int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
+{
+	struct io_uring_params p;
+
+	memset(&p, 0, sizeof(p));
+	p.flags = flags;
+
+	return io_uring_queue_init_params(entries, ring, &p);
+}
+
+/* Tear down a ring: unmap the SQE array and rings, close the ring fd */
+void io_uring_queue_exit(struct io_uring *ring)
+{
+	struct io_uring_sq *sq = &ring->sq;
+	struct io_uring_cq *cq = &ring->cq;
+
+	munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
+	io_uring_unmap_rings(sq, cq);
+	close(ring->ring_fd);
+}
+
+/*
+ * Probe the kernel for supported opcodes using an existing ring. Returns a
+ * malloc'ed probe sized for 256 ops — release it with io_uring_free_probe()
+ * — or NULL on failure.
+ */
+struct io_uring_probe *io_uring_get_probe_ring(struct io_uring *ring)
+{
+	struct io_uring_probe *probe;
+	size_t len;
+	int r;
+
+	len = sizeof(*probe) + 256 * sizeof(struct io_uring_probe_op);
+	probe = malloc(len);
+	if (!probe)
+		return NULL;
+	memset(probe, 0, len);
+
+	r = io_uring_register_probe(ring, probe, 256);
+	if (r >= 0)
+		return probe;
+
+	free(probe);
+	return NULL;
+}
+
+/* Like io_uring_get_probe_ring(), but sets up (and tears down) a tiny
+ * throwaway ring internally. */
+struct io_uring_probe *io_uring_get_probe(void)
+{
+	struct io_uring ring;
+	struct io_uring_probe *probe;
+	int r;
+
+	r = io_uring_queue_init(2, &ring, 0);
+	if (r < 0)
+		return NULL;
+
+	probe = io_uring_get_probe_ring(&ring);
+	io_uring_queue_exit(&ring);
+	return probe;
+}
+
+/* Release a probe returned by io_uring_get_probe{,_ring}() */
+void io_uring_free_probe(struct io_uring_probe *probe)
+{
+	free(probe);
+}
+
+/*
+ * Find last (most significant) set bit, 1-based; returns 0 for x == 0.
+ * NOTE(review): x is signed, so the left shifts can move a bit into the
+ * sign position, which is technically UB — consider unsigned x. Works as
+ * intended on the usual two's-complement targets.
+ */
+static int __fls(int x)
+{
+	int r = 32;
+
+	if (!x)
+		return 0;
+	if (!(x & 0xffff0000u)) {
+		x <<= 16;
+		r -= 16;
+	}
+	if (!(x & 0xff000000u)) {
+		x <<= 8;
+		r -= 8;
+	}
+	if (!(x & 0xf0000000u)) {
+		x <<= 4;
+		r -= 4;
+	}
+	if (!(x & 0xc0000000u)) {
+		x <<= 2;
+		r -= 2;
+	}
+	if (!(x & 0x80000000u)) {
+		x <<= 1;
+		r -= 1;
+	}
+	return r;
+}
+
+/* Round depth up to the next power of two (mirrors the kernel's behavior) */
+static unsigned roundup_pow2(unsigned depth)
+{
+	return 1UL << __fls(depth - 1);
+}
+
+/* log2 of the power-of-two page count needed to hold 'size' bytes;
+ * callers use (1 << npages(...)) pages */
+static size_t npages(size_t size, unsigned page_size)
+{
+	size--;
+	size /= page_size;
+	return __fls(size);
+}
+
+/* Approximate size of the kernel's ring bookkeeping header, in bytes */
+#define KRING_SIZE	320
+
+/* Worst-case page footprint (in bytes) of the CQ ring plus the SQE array
+ * for the given entry counts — the memlock charge on pre-5.12 kernels */
+static size_t rings_size(unsigned entries, unsigned cq_entries, unsigned page_size)
+{
+	size_t pages, sq_size, cq_size;
+
+	cq_size = KRING_SIZE;
+	cq_size += cq_entries * sizeof(struct io_uring_cqe);
+	/* Round to a cacheline-multiple, as the kernel does */
+	cq_size = (cq_size + 63) & ~63UL;
+	pages = (size_t) 1 << npages(cq_size, page_size);
+
+	sq_size = sizeof(struct io_uring_sqe) * entries;
+	pages += (size_t) 1 << npages(sq_size, page_size);
+	return pages * page_size;
+}
+
+#define KERN_MAX_ENTRIES	32768
+#define KERN_MAX_CQ_ENTRIES	(2 * KERN_MAX_ENTRIES)
+
+/*
+ * Return the required ulimit -l memlock memory required for a given ring
+ * setup, in bytes. May return -errno on error. On newer (5.12+) kernels,
+ * io_uring no longer requires any memlock memory, and hence this function
+ * will return 0 for that case. On older (5.11 and prior) kernels, this will
+ * return the required memory so that the caller can ensure that enough space
+ * is available before setting up a ring with the specified parameters.
+ */
+ssize_t io_uring_mlock_size_params(unsigned entries, struct io_uring_params *p)
+{
+	struct io_uring_params lp = { };
+	struct io_uring ring;
+	unsigned cq_entries;
+	long page_size;
+	ssize_t ret;
+
+	/*
+	 * We only really use this inited ring to see if the kernel is newer
+	 * or not. Newer kernels don't require memlocked memory. If we fail,
+	 * it's most likely because it's an older kernel and we have no
+	 * available memlock space. Just continue on, lp.features will still
+	 * be zeroed at this point and we'll do the right thing.
+	 */
+	ret = io_uring_queue_init_params(entries, &ring, &lp);
+	if (!ret)
+		io_uring_queue_exit(&ring);
+
+	/*
+	 * Native workers imply using cgroup memory accounting, and hence no
+	 * memlock memory is needed for the ring allocations.
+	 */
+	if (lp.features & IORING_FEAT_NATIVE_WORKERS)
+		return 0;
+
+	if (!entries)
+		return -EINVAL;
+	/* Mirror the kernel's clamping rules so our estimate matches */
+	if (entries > KERN_MAX_ENTRIES) {
+		if (!(p->flags & IORING_SETUP_CLAMP))
+			return -EINVAL;
+		entries = KERN_MAX_ENTRIES;
+	}
+
+	entries = roundup_pow2(entries);
+	if (p->flags & IORING_SETUP_CQSIZE) {
+		if (!p->cq_entries)
+			return -EINVAL;
+		cq_entries = p->cq_entries;
+		if (cq_entries > KERN_MAX_CQ_ENTRIES) {
+			if (!(p->flags & IORING_SETUP_CLAMP))
+				return -EINVAL;
+			cq_entries = KERN_MAX_CQ_ENTRIES;
+		}
+		cq_entries = roundup_pow2(cq_entries);
+		if (cq_entries < entries)
+			return -EINVAL;
+	} else {
+		cq_entries = 2 * entries;
+	}
+
+	page_size = sysconf(_SC_PAGESIZE);
+	if (page_size < 0)
+		page_size = 4096;
+
+	return rings_size(entries, cq_entries, page_size);
+}
+
+/*
+ * Return required ulimit -l memory space for a given ring setup. See
+ * @io_uring_mlock_size_params().
+ */
+ssize_t io_uring_mlock_size(unsigned entries, unsigned flags)
+{
+	struct io_uring_params p = { .flags = flags, };
+
+	return io_uring_mlock_size_params(entries, &p);
+}
diff --git a/src/syscall.c b/src/syscall.c
new file mode 100644
index 0000000..2fd3dd4
--- /dev/null
+++ b/src/syscall.c
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: MIT */
+#define _DEFAULT_SOURCE
+
+/*
+ * Will go away once libc support is there
+ */
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/uio.h>
+#include "liburing/compat.h"
+#include "liburing/io_uring.h"
+#include "syscall.h"
+
+#ifdef __alpha__
+/*
+ * alpha is the only exception, all other architectures
+ * have common numbers for new system calls.
+ */
+# ifndef __NR_io_uring_setup
+#  define __NR_io_uring_setup		535
+# endif
+# ifndef __NR_io_uring_enter
+#  define __NR_io_uring_enter		536
+# endif
+# ifndef __NR_io_uring_register
+#  define __NR_io_uring_register	537
+# endif
+#else /* !__alpha__ */
+# ifndef __NR_io_uring_setup
+#  define __NR_io_uring_setup		425
+# endif
+# ifndef __NR_io_uring_enter
+#  define __NR_io_uring_enter		426
+# endif
+# ifndef __NR_io_uring_register
+#  define __NR_io_uring_register	427
+# endif
+#endif
+
+/* Raw syscall wrappers: like libc, each returns -1 on failure with the
+ * error code in errno. */
+int __sys_io_uring_register(int fd, unsigned opcode, const void *arg,
+			    unsigned nr_args)
+{
+	return syscall(__NR_io_uring_register, fd, opcode, arg, nr_args);
+}
+
+int __sys_io_uring_setup(unsigned entries, struct io_uring_params *p)
+{
+	return syscall(__NR_io_uring_setup, entries, p);
+}
+
+/* 'sig' is really void *: a sigset_t, or a struct io_uring_getevents_arg
+ * when IORING_ENTER_EXT_ARG is set in 'flags'; 'sz' is its size */
+int __sys_io_uring_enter2(int fd, unsigned to_submit, unsigned min_complete,
+			 unsigned flags, sigset_t *sig, int sz)
+{
+	return syscall(__NR_io_uring_enter, fd, to_submit, min_complete,
+			flags, sig, sz);
+}
+
+int __sys_io_uring_enter(int fd, unsigned to_submit, unsigned min_complete,
+			 unsigned flags, sigset_t *sig)
+{
+	return __sys_io_uring_enter2(fd, to_submit, min_complete, flags, sig,
+					_NSIG / 8);
+}
diff --git a/src/syscall.h b/src/syscall.h
new file mode 100644
index 0000000..3b94efc
--- /dev/null
+++ b/src/syscall.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef LIBURING_SYSCALL_H
+#define LIBURING_SYSCALL_H
+
+#include <signal.h>
+
+struct io_uring_params;
+
+/*
+ * System calls
+ */
+extern int __sys_io_uring_setup(unsigned entries, struct io_uring_params *p);
+extern int __sys_io_uring_enter(int fd, unsigned to_submit,
+	unsigned min_complete, unsigned flags, sigset_t *sig);
+extern int __sys_io_uring_enter2(int fd, unsigned to_submit,
+	unsigned min_complete, unsigned flags, sigset_t *sig, int sz);
+extern int __sys_io_uring_register(int fd, unsigned int opcode, const void *arg,
+	unsigned int nr_args);
+
+#endif
diff --git a/test/232c93d07b74-test.c b/test/232c93d07b74-test.c
new file mode 100644
index 0000000..cd194cb
--- /dev/null
+++ b/test/232c93d07b74-test.c
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Test case for socket read/write through IORING_OP_READV and
+ * IORING_OP_WRITEV, using both TCP and sockets and blocking and
+ * non-blocking IO.
+ *
+ * Heavily based on a test case from Hrvoje Zeba <zeba.hrvoje@gmail.com>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <assert.h>
+
+#include <pthread.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+#include "liburing.h"
+
+#define RECV_BUFF_SIZE 2
+#define SEND_BUFF_SIZE 3
+
+#define PORT	0x1235
+
+struct params {
+	int tcp;
+	int non_blocking;
+};
+
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+int rcv_ready = 0;
+
+/* Signal the sender that the receiver is listening and safe to connect to */
+static void set_rcv_ready(void)
+{
+	pthread_mutex_lock(&mutex);
+
+	rcv_ready = 1;
+	pthread_cond_signal(&cond);
+
+	pthread_mutex_unlock(&mutex);
+}
+
+/* Block the sender until set_rcv_ready() has been called */
+static void wait_for_rcv_ready(void)
+{
+	pthread_mutex_lock(&mutex);
+
+	while (!rcv_ready)
+		pthread_cond_wait(&cond, &mutex);
+
+	pthread_mutex_unlock(&mutex);
+}
+
+/*
+ * Receiver thread: bind/listen/accept per 'p' (TCP or unix, optionally
+ * non-blocking), then read 33 bytes via IORING_OP_READV, verifying the
+ * byte pattern the sender produces. Returns NULL on success, non-NULL on
+ * mismatch (see NOTE on 'ret' below).
+ */
+static void *rcv(void *arg)
+{
+	struct params *p = arg;
+	int s0;
+	int res;
+
+	if (p->tcp) {
+		int val = 1;
+                
+
+		s0 = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);
+		/* NOTE(review): s0 is unchecked here; the unix branch below
+		 * asserts socket() success — this branch should too */
+		res = setsockopt(s0, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
+		assert(res != -1);
+		res = setsockopt(s0, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+		assert(res != -1);
+
+		struct sockaddr_in addr;
+
+		/* NOTE(review): sin_port set without htons(); snd() uses the
+		 * same raw value, so the pair still matches up */
+		addr.sin_family = AF_INET;
+		addr.sin_port = PORT;
+		addr.sin_addr.s_addr = 0x0100007fU;
+		res = bind(s0, (struct sockaddr *) &addr, sizeof(addr));
+		assert(res != -1);
+	} else {
+		s0 = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
+		assert(s0 != -1);
+
+		/* Abstract-namespace unix socket ("\0sock") */
+		struct sockaddr_un addr;
+		memset(&addr, 0, sizeof(addr));
+
+		addr.sun_family = AF_UNIX;
+		memcpy(addr.sun_path, "\0sock", 6);
+		res = bind(s0, (struct sockaddr *) &addr, sizeof(addr));
+		assert(res != -1);
+	}
+	res = listen(s0, 128);
+	assert(res != -1);
+
+	set_rcv_ready();
+
+	int s1 = accept(s0, NULL, NULL);
+	assert(s1 != -1);
+
+	if (p->non_blocking) {
+		int flags = fcntl(s1, F_GETFL, 0);
+		assert(flags != -1);
+
+		flags |= O_NONBLOCK;
+		res = fcntl(s1, F_SETFL, flags);
+		assert(res != -1);
+	}
+
+	struct io_uring m_io_uring;
+	/* NOTE(review): 'ret' doubles as an error counter smuggled through
+	 * the thread return value; 'ret++' on void* is a GCC extension */
+	void *ret = NULL;
+
+	res = io_uring_queue_init(32, &m_io_uring, 0);
+	assert(res >= 0);
+
+	int bytes_read = 0;
+	int expected_byte = 0;
+	int done = 0;
+
+	/* Sender transmits exactly 33 bytes total (11 x 3-byte buffers) */
+	while (!done && bytes_read != 33) {
+		char buff[RECV_BUFF_SIZE];
+		struct iovec iov;
+
+		iov.iov_base = buff;
+		iov.iov_len = sizeof(buff);
+
+		struct io_uring_sqe *sqe = io_uring_get_sqe(&m_io_uring);
+		assert(sqe != NULL);
+
+		io_uring_prep_readv(sqe, s1, &iov, 1, 0);
+
+		res = io_uring_submit(&m_io_uring);
+		assert(res != -1);
+
+		struct io_uring_cqe *cqe;
+		unsigned head;
+		unsigned count = 0;
+
+		while (!done && count != 1) {
+			io_uring_for_each_cqe(&m_io_uring, head, cqe) {
+				/* Non-blocking mode legitimately yields -EAGAIN */
+				if (cqe->res < 0)
+					assert(cqe->res == -EAGAIN);
+				else {
+					int i;
+
+					for (i = 0; i < cqe->res; i++) {
+						if (buff[i] != expected_byte) {
+							fprintf(stderr,
+								"Received %d, wanted %d\n",
+								buff[i], expected_byte);
+							ret++;
+							done = 1;
+						 }
+						 expected_byte++;
+					}
+					bytes_read += cqe->res;
+				}
+
+				count++;
+			}
+
+			assert(count <= 1);
+			io_uring_cq_advance(&m_io_uring, count);
+		}
+	}
+
+	shutdown(s1, SHUT_RDWR);
+	close(s1);
+	close(s0);
+	io_uring_queue_exit(&m_io_uring);
+	return ret;
+}
+
+/*
+ * Sender thread: connect per 'p' (TCP or unix, optionally non-blocking)
+ * and write 33 bytes via IORING_OP_WRITEV in 3-byte chunks, each byte
+ * carrying its stream offset so the receiver can verify ordering.
+ * Always returns NULL.
+ */
+static void *snd(void *arg)
+{
+	struct params *p = arg;
+	int s0;
+	int ret;
+
+	wait_for_rcv_ready();
+
+	if (p->tcp) {
+		int val = 1;
+
+		s0 = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);
+		ret = setsockopt(s0, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val));
+		assert(ret != -1);
+
+		struct sockaddr_in addr;
+
+		/* Raw PORT matches rcv(); both skip htons() consistently */
+		addr.sin_family = AF_INET;
+		addr.sin_port = PORT;
+		addr.sin_addr.s_addr = 0x0100007fU;
+		ret = connect(s0, (struct sockaddr*) &addr, sizeof(addr));
+		assert(ret != -1);
+	} else {
+		s0 = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
+		assert(s0 != -1);
+
+		struct sockaddr_un addr;
+		memset(&addr, 0, sizeof(addr));
+
+		addr.sun_family = AF_UNIX;
+		memcpy(addr.sun_path, "\0sock", 6);
+		ret = connect(s0, (struct sockaddr*) &addr, sizeof(addr));
+		assert(ret != -1);
+	}
+
+	if (p->non_blocking) {
+		int flags = fcntl(s0, F_GETFL, 0);
+		assert(flags != -1);
+
+		flags |= O_NONBLOCK;
+		ret = fcntl(s0, F_SETFL, flags);
+		assert(ret != -1);
+	}
+
+	struct io_uring m_io_uring;
+
+	ret = io_uring_queue_init(32, &m_io_uring, 0);
+	assert(ret >= 0);
+
+	int bytes_written = 0;
+	int done = 0;
+
+	while (!done && bytes_written != 33) {
+		char buff[SEND_BUFF_SIZE];
+		int i;
+
+		/* Each byte encodes its absolute offset in the stream */
+		for (i = 0; i < SEND_BUFF_SIZE; i++)
+			buff[i] = i + bytes_written;
+
+		struct iovec iov;
+
+		iov.iov_base = buff;
+		iov.iov_len = sizeof(buff);
+
+		struct io_uring_sqe *sqe = io_uring_get_sqe(&m_io_uring);
+		assert(sqe != NULL);
+
+		io_uring_prep_writev(sqe, s0, &iov, 1, 0);
+
+		ret = io_uring_submit(&m_io_uring);
+		assert(ret != -1);
+
+		struct io_uring_cqe *cqe;
+		unsigned head;
+		unsigned count = 0;
+
+		while (!done && count != 1) {
+			io_uring_for_each_cqe(&m_io_uring, head, cqe) {
+				if (cqe->res < 0) {
+					/* Peer bailed out early: stop quietly */
+					if (cqe->res == -EPIPE) {
+						done = 1;
+						break;
+					}
+					assert(cqe->res == -EAGAIN);
+				} else {
+					bytes_written += cqe->res;
+				}
+
+				count++;
+			}
+
+			assert(count <= 1);
+			io_uring_cq_advance(&m_io_uring, count);
+		}
+		/* Throttle so the 2-byte reader keeps pace with 3-byte writes */
+		usleep(100000);
+	}
+
+	shutdown(s0, SHUT_RDWR);
+	close(s0);
+	io_uring_queue_exit(&m_io_uring);
+	return NULL;
+}
+
+/*
+ * Run the send/recv pair through all four combinations of
+ * {unix,tcp} x {blocking,non-blocking}. Any argument skips the test
+ * (reports success), matching the harness convention for opt-out runs.
+ */
+int main(int argc, char *argv[])
+{
+	struct params p;
+	pthread_t t1, t2;
+	void *res1, *res2;
+	int i, exit_val = 0;
+
+	if (argc > 1)
+		return 0;
+
+	for (i = 0; i < 4; i++) {
+		/* bit 0: tcp vs unix, bit 1: non-blocking vs blocking */
+		p.tcp = i & 1;
+		p.non_blocking = (i & 2) >> 1;
+
+		rcv_ready = 0;
+
+		pthread_create(&t1, NULL, rcv, &p);
+		pthread_create(&t2, NULL, snd, &p);
+		pthread_join(t1, &res1);
+		pthread_join(t2, &res2);
+		/* Threads return non-NULL on data-verification failure */
+		if (res1 || res2) {
+			fprintf(stderr, "Failed tcp=%d, non_blocking=%d\n", p.tcp, p.non_blocking);
+			exit_val = 1;
+		}
+	}
+
+	return exit_val;
+}
diff --git a/test/35fa71a030ca-test.c b/test/35fa71a030ca-test.c
new file mode 100644
index 0000000..f5fcc4d
--- /dev/null
+++ b/test/35fa71a030ca-test.c
@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: MIT */
+// autogenerated by syzkaller (https://github.com/google/syzkaller)
+
+#include <dirent.h>
+#include <endian.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#include <linux/futex.h>
+
+#include "liburing.h"
+#include "../src/syscall.h"
+
+#if !defined(SYS_futex) && defined(SYS_futex_time64)
+# define SYS_futex SYS_futex_time64
+#endif
+
+static void sleep_ms(uint64_t ms)
+{
+  usleep(ms * 1000);
+}
+
+static uint64_t current_time_ms(void)
+{
+  struct timespec ts;
+  if (clock_gettime(CLOCK_MONOTONIC, &ts))
+    exit(1);
+  return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
+}
+
+static void thread_start(void* (*fn)(void*), void* arg)
+{
+  pthread_t th;
+  pthread_attr_t attr;
+  pthread_attr_init(&attr);
+  pthread_attr_setstacksize(&attr, 128 << 10);
+  int i;
+  for (i = 0; i < 100; i++) {
+    if (pthread_create(&th, &attr, fn, arg) == 0) {
+      pthread_attr_destroy(&attr);
+      return;
+    }
+    if (errno == EAGAIN) {
+      usleep(50);
+      continue;
+    }
+    break;
+  }
+  exit(1);
+}
+
+typedef struct {
+  int state;
+} event_t;
+
+static void event_init(event_t* ev)
+{
+  ev->state = 0;
+}
+
+static void event_reset(event_t* ev)
+{
+  ev->state = 0;
+}
+
+static void event_set(event_t* ev)
+{
+  if (ev->state)
+    exit(1);
+  __atomic_store_n(&ev->state, 1, __ATOMIC_RELEASE);
+  syscall(SYS_futex, &ev->state, FUTEX_WAKE | FUTEX_PRIVATE_FLAG);
+}
+
+static void event_wait(event_t* ev)
+{
+  while (!__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
+    syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, 0);
+}
+
+static int event_isset(event_t* ev)
+{
+  return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE);
+}
+
+static int event_timedwait(event_t* ev, uint64_t timeout)
+{
+  uint64_t start = current_time_ms();
+  uint64_t now = start;
+  for (;;) {
+    uint64_t remain = timeout - (now - start);
+    struct timespec ts;
+    ts.tv_sec = remain / 1000;
+    ts.tv_nsec = (remain % 1000) * 1000 * 1000;
+    syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, &ts);
+    if (__atomic_load_n(&ev->state, __ATOMIC_RELAXED))
+      return 1;
+    now = current_time_ms();
+    if (now - start > timeout)
+      return 0;
+  }
+}
+
+static bool write_file(const char* file, const char* what, ...)
+{
+  char buf[1024];
+  va_list args;
+  va_start(args, what);
+  vsnprintf(buf, sizeof(buf), what, args);
+  va_end(args);
+  buf[sizeof(buf) - 1] = 0;
+  int len = strlen(buf);
+  int fd = open(file, O_WRONLY | O_CLOEXEC);
+  if (fd == -1)
+    return false;
+  if (write(fd, buf, len) != len) {
+    int err = errno;
+    close(fd);
+    errno = err;
+    return false;
+  }
+  close(fd);
+  return true;
+}
+
+static void kill_and_wait(int pid, int* status)
+{
+  kill(-pid, SIGKILL);
+  kill(pid, SIGKILL);
+  int i;
+  for (i = 0; i < 100; i++) {
+    if (waitpid(-1, status, WNOHANG | __WALL) == pid)
+      return;
+    usleep(1000);
+  }
+  DIR* dir = opendir("/sys/fs/fuse/connections");
+  if (dir) {
+    for (;;) {
+      struct dirent* ent = readdir(dir);
+      if (!ent)
+        break;
+      if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
+        continue;
+      char abort[300];
+      snprintf(abort, sizeof(abort), "/sys/fs/fuse/connections/%s/abort",
+               ent->d_name);
+      int fd = open(abort, O_WRONLY);
+      if (fd == -1) {
+        continue;
+      }
+      if (write(fd, abort, 1) < 0) {
+      }
+      close(fd);
+    }
+    closedir(dir);
+  } else {
+  }
+  while (waitpid(-1, status, __WALL) != pid) {
+  }
+}
+
+#define SYZ_HAVE_SETUP_TEST 1
+static void setup_test()
+{
+  prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
+  setpgrp();
+  write_file("/proc/self/oom_score_adj", "1000");
+}
+
+struct thread_t {
+  int created, call;
+  event_t ready, done;
+};
+
+static struct thread_t threads[16];
+static void execute_call(int call);
+static int running;
+
+static void* thr(void* arg)
+{
+  struct thread_t* th = (struct thread_t*)arg;
+  for (;;) {
+    event_wait(&th->ready);
+    event_reset(&th->ready);
+    execute_call(th->call);
+    __atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
+    event_set(&th->done);
+  }
+  return 0;
+}
+
+static void execute_one(void)
+{
+  int i, call, thread;
+  for (call = 0; call < 3; call++) {
+    for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0]));
+         thread++) {
+      struct thread_t* th = &threads[thread];
+      if (!th->created) {
+        th->created = 1;
+        event_init(&th->ready);
+        event_init(&th->done);
+        event_set(&th->done);
+        thread_start(thr, th);
+      }
+      if (!event_isset(&th->done))
+        continue;
+      event_reset(&th->done);
+      th->call = call;
+      __atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
+      event_set(&th->ready);
+      event_timedwait(&th->done, 45);
+      break;
+    }
+  }
+  for (i = 0; i < 100 && __atomic_load_n(&running, __ATOMIC_RELAXED); i++)
+    sleep_ms(1);
+}
+
+static void execute_one(void);
+
+#define WAIT_FLAGS __WALL
+
+static void loop(void)
+{
+  int iter;
+  for (iter = 0;; iter++) {
+    int pid = fork();
+    if (pid < 0)
+      exit(1);
+    if (pid == 0) {
+      setup_test();
+      execute_one();
+      exit(0);
+    }
+    int status = 0;
+    uint64_t start = current_time_ms();
+    for (;;) {
+      if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid)
+        break;
+      sleep_ms(1);
+      if (current_time_ms() - start < 5 * 1000)
+        continue;
+      kill_and_wait(pid, &status);
+      break;
+    }
+  }
+}
+
+uint64_t r[1] = {0xffffffffffffffff};
+
+void execute_call(int call)
+{
+  long res;
+  switch (call) {
+  case 0:
+    *(uint32_t*)0x20000040 = 0;
+    *(uint32_t*)0x20000044 = 0;
+    *(uint32_t*)0x20000048 = 0;
+    *(uint32_t*)0x2000004c = 0;
+    *(uint32_t*)0x20000050 = 0;
+    *(uint32_t*)0x20000054 = 0;
+    *(uint32_t*)0x20000058 = 0;
+    *(uint32_t*)0x2000005c = 0;
+    *(uint32_t*)0x20000060 = 0;
+    *(uint32_t*)0x20000064 = 0;
+    *(uint32_t*)0x20000068 = 0;
+    *(uint32_t*)0x2000006c = 0;
+    *(uint32_t*)0x20000070 = 0;
+    *(uint32_t*)0x20000074 = 0;
+    *(uint32_t*)0x20000078 = 0;
+    *(uint32_t*)0x2000007c = 0;
+    *(uint32_t*)0x20000080 = 0;
+    *(uint32_t*)0x20000084 = 0;
+    *(uint64_t*)0x20000088 = 0;
+    *(uint32_t*)0x20000090 = 0;
+    *(uint32_t*)0x20000094 = 0;
+    *(uint32_t*)0x20000098 = 0;
+    *(uint32_t*)0x2000009c = 0;
+    *(uint32_t*)0x200000a0 = 0;
+    *(uint32_t*)0x200000a4 = 0;
+    *(uint32_t*)0x200000a8 = 0;
+    *(uint32_t*)0x200000ac = 0;
+    *(uint64_t*)0x200000b0 = 0;
+    res = __sys_io_uring_setup(0x64, (struct io_uring_params *) 0x20000040UL);
+    if (res != -1)
+      r[0] = res;
+    break;
+  case 1:
+    __sys_io_uring_register((long)r[0], 0, 0, 0);
+    break;
+  case 2:
+    __sys_io_uring_register((long)r[0], 0, 0, 0);
+    break;
+  }
+}
+
+static void sig_int(int sig)
+{
+	exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+	if (argc > 1)
+		return 0;
+	signal(SIGINT, sig_int);
+	mmap((void *) 0x20000000, 0x1000000, 3, 0x32, -1, 0);
+	loop();
+	return 0;
+}
diff --git a/test/500f9fbadef8-test.c b/test/500f9fbadef8-test.c
new file mode 100644
index 0000000..dbd5751
--- /dev/null
+++ b/test/500f9fbadef8-test.c
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: Single depth submit+wait poll hang test
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define BLOCKS	4096
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct iovec iov;
+	char buf[32];
+	off_t offset;
+	unsigned blocks;
+	int ret, fd;
+
+	if (argc > 1)
+		return 0;
+
+	t_posix_memalign(&iov.iov_base, 4096, 4096);
+	iov.iov_len = 4096;
+
+	ret = io_uring_queue_init(1, &ring, IORING_SETUP_IOPOLL);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+
+	}
+
+	sprintf(buf, "./XXXXXX");
+	fd = mkostemp(buf, O_WRONLY | O_DIRECT | O_CREAT);
+	if (fd < 0) {
+		perror("mkostemp");
+		return 1;
+	}
+
+	offset = 0;
+	blocks = BLOCKS;
+	do {
+		sqe = io_uring_get_sqe(&ring);
+		if (!sqe) {
+			fprintf(stderr, "get sqe failed\n");
+			goto err;
+		}
+		io_uring_prep_writev(sqe, fd, &iov, 1, offset);
+		ret = io_uring_submit_and_wait(&ring, 1);
+		if (ret < 0) {
+			fprintf(stderr, "submit_and_wait: %d\n", ret);
+			goto err;
+		}
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "wait completion: %d\n", ret);
+			goto err;
+		}
+		if (cqe->res != 4096) {
+			if (cqe->res == -EOPNOTSUPP)
+				goto skipped;
+			goto err;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+		offset += 4096;
+	} while (--blocks);
+
+	close(fd);
+	unlink(buf);
+	return 0;
+err:
+	close(fd);
+	unlink(buf);
+	return 1;
+skipped:
+	fprintf(stderr, "Polling not supported in current dir, test skipped\n");
+	close(fd);
+	unlink(buf);
+	return 0;
+}
diff --git a/test/7ad0e4b2f83c-test.c b/test/7ad0e4b2f83c-test.c
new file mode 100644
index 0000000..4d760e1
--- /dev/null
+++ b/test/7ad0e4b2f83c-test.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT */
+#include <stdio.h>
+#include <time.h>
+#include <sys/time.h>
+#include "liburing.h"
+
+static unsigned long long mtime_since(const struct timeval *s,
+				      const struct timeval *e)
+{
+	long long sec, usec;
+
+	sec = e->tv_sec - s->tv_sec;
+	usec = (e->tv_usec - s->tv_usec);
+	if (sec > 0 && usec < 0) {
+		sec--;
+		usec += 1000000;
+	}
+
+	sec *= 1000;
+	usec /= 1000;
+	return sec + usec;
+}
+
+static unsigned long long mtime_since_now(struct timeval *tv)
+{
+	struct timeval end;
+
+	gettimeofday(&end, NULL);
+	return mtime_since(tv, &end);
+}
+
+int main(int argc, char *argv[])
+{
+	struct __kernel_timespec ts1, ts2;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct io_uring ring;
+	unsigned long msec;
+	struct timeval tv;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(32, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "io_uring_queue_init=%d\n", ret);
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_nop(sqe);
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "io_uring_submit1=%d\n", ret);
+		return 1;
+	}
+
+
+	ts1.tv_sec = 5;
+	ts1.tv_nsec = 0;
+	ret = io_uring_wait_cqe_timeout(&ring, &cqe, &ts1);
+	if (ret) {
+		fprintf(stderr, "io_uring_wait_cqe_timeout=%d\n", ret);
+		return 1;
+	}
+	io_uring_cqe_seen(&ring, cqe);
+	gettimeofday(&tv, NULL);
+
+	ts2.tv_sec = 1;
+	ts2.tv_nsec = 0;
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_timeout(sqe, &ts2, 0, 0);
+	sqe->user_data = 89;
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "io_uring_submit2=%d\n", ret);
+		return 1;
+	}
+
+	io_uring_wait_cqe(&ring, &cqe);
+	io_uring_cqe_seen(&ring, cqe);
+	msec = mtime_since_now(&tv);
+	if (msec >= 900 && msec <= 1100) {
+		io_uring_queue_exit(&ring);
+		return 0;
+	}
+
+	fprintf(stderr, "%s: Timeout seems wonky (got %lu)\n", __FUNCTION__,
+								msec);
+	io_uring_queue_exit(&ring);
+	return 1;
+}
diff --git a/test/8a9973408177-test.c b/test/8a9973408177-test.c
new file mode 100644
index 0000000..94bf781
--- /dev/null
+++ b/test/8a9973408177-test.c
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: MIT */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+static int register_file(struct io_uring *ring)
+{
+	char buf[32];
+	int ret, fd;
+
+	sprintf(buf, "./XXXXXX");
+	fd = mkstemp(buf);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	ret = io_uring_register_files(ring, &fd, 1);
+	if (ret) {
+		fprintf(stderr, "file register %d\n", ret);
+		return 1;
+	}
+
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "file register %d\n", ret);
+		return 1;
+	}
+
+	unlink(buf);
+	close(fd);
+	return 0;
+}
+
+static int test_single_fsync(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	char buf[32];
+	int fd, ret;
+
+	sprintf(buf, "./XXXXXX");
+	fd = mkstemp(buf);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_fsync(sqe, fd, 0);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		printf("wait completion %d\n", ret);
+		goto err;
+	}
+
+	io_uring_cqe_seen(ring, cqe);
+	unlink(buf);
+	return 0;
+err:
+	unlink(buf);
+	return 1;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+	}
+
+	ret = register_file(&ring);
+	if (ret)
+		return ret;
+	ret = test_single_fsync(&ring);
+	if (ret) {
+		printf("test_single_fsync failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/917257daa0fe-test.c b/test/917257daa0fe-test.c
new file mode 100644
index 0000000..1d00ef1
--- /dev/null
+++ b/test/917257daa0fe-test.c
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+// autogenerated by syzkaller (https://github.com/google/syzkaller)
+
+#include <endian.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "liburing.h"
+#include "../src/syscall.h"
+
+int main(int argc, char *argv[])
+{
+  if (argc > 1)
+    return 0;
+
+  mmap((void *) 0x20000000, 0x1000000, 3, 0x32, -1, 0);
+
+  *(uint32_t*)0x20000000 = 0;
+  *(uint32_t*)0x20000004 = 0;
+  *(uint32_t*)0x20000008 = 6;
+  *(uint32_t*)0x2000000c = 0;
+  *(uint32_t*)0x20000010 = 0x3af;
+  *(uint32_t*)0x20000014 = 0;
+  *(uint32_t*)0x20000018 = 0;
+  *(uint32_t*)0x2000001c = 0;
+  *(uint32_t*)0x20000020 = 0;
+  *(uint32_t*)0x20000024 = 0;
+  *(uint32_t*)0x20000028 = 0;
+  *(uint32_t*)0x2000002c = 0;
+  *(uint32_t*)0x20000030 = 0;
+  *(uint32_t*)0x20000034 = 0;
+  *(uint32_t*)0x20000038 = 0;
+  *(uint32_t*)0x2000003c = 0;
+  *(uint32_t*)0x20000040 = 0;
+  *(uint32_t*)0x20000044 = 0;
+  *(uint64_t*)0x20000048 = 0;
+  *(uint32_t*)0x20000050 = 0;
+  *(uint32_t*)0x20000054 = 0;
+  *(uint32_t*)0x20000058 = 0;
+  *(uint32_t*)0x2000005c = 0;
+  *(uint32_t*)0x20000060 = 0;
+  *(uint32_t*)0x20000064 = 0;
+  *(uint32_t*)0x20000068 = 0;
+  *(uint32_t*)0x2000006c = 0;
+  *(uint64_t*)0x20000070 = 0;
+  __sys_io_uring_setup(0x7a6, (struct io_uring_params *) 0x20000000UL);
+  return 0;
+}
diff --git a/test/Makefile b/test/Makefile
new file mode 100644
index 0000000..2f0a694
--- /dev/null
+++ b/test/Makefile
@@ -0,0 +1,296 @@
+prefix ?= /usr
+datadir ?= $(prefix)/share
+
+INSTALL=install
+
+CPPFLAGS ?=
+override CPPFLAGS += -D_GNU_SOURCE -D__SANE_USERSPACE_TYPES__ \
+	-I../src/include/ -include ../config-host.h
+CFLAGS ?= -g -O2
+XCFLAGS =
+override CFLAGS += -Wall -Wextra -Wno-unused-parameter -Wno-sign-compare \
+	-L../src/
+CXXFLAGS ?=
+override CXXFLAGS += $(CFLAGS) -std=c++11
+
+test_targets += \
+	232c93d07b74-test \
+	35fa71a030ca-test \
+	500f9fbadef8-test \
+	7ad0e4b2f83c-test \
+	8a9973408177-test \
+	917257daa0fe-test \
+	a0908ae19763-test \
+	a4c0b3decb33-test \
+	accept \
+	accept-link \
+	accept-reuse \
+	accept-test \
+	across-fork splice \
+	b19062a56726-test \
+	b5837bd5311d-test \
+	ce593a6c480a-test \
+	close-opath \
+	connect \
+	cq-full \
+	cq-overflow \
+	cq-peek-batch \
+	cq-ready \
+	cq-size \
+	d4ae271dfaae-test \
+	d77a67ed5f27-test \
+	defer \
+	double-poll-crash \
+	eeed8b54e0df-test \
+	eventfd \
+	eventfd-disable \
+	eventfd-ring \
+	fadvise \
+	fallocate \
+	fc2a85cb02ef-test \
+	file-register \
+	file-update \
+	files-exit-hang-poll \
+	files-exit-hang-timeout \
+	fixed-link \
+	fsync \
+	hardlink \
+	io-cancel \
+	io_uring_enter \
+	io_uring_register \
+	io_uring_setup \
+	iopoll \
+	lfs-openat \
+	lfs-openat-write \
+	link \
+	link-timeout \
+	link_drain \
+	madvise \
+	mkdir \
+	multicqes_drain \
+	nop \
+	nop-all-sizes \
+	open-close \
+	openat2 \
+	personality \
+	pipe-eof \
+	pipe-reuse \
+	poll \
+	poll-cancel \
+	poll-cancel-ton \
+	poll-link \
+	poll-many \
+	poll-mshot-update \
+	poll-ring \
+	poll-v-poll \
+	probe \
+	read-write \
+	register-restrictions \
+	rename \
+	ring-leak \
+	ring-leak2 \
+	rw_merge_test \
+	self \
+	send_recv \
+	send_recvmsg \
+	shared-wq \
+	short-read \
+	shutdown \
+	sigfd-deadlock \
+	socket-rw \
+	socket-rw-eagain \
+	sq-full \
+	sq-poll-dup \
+	sq-poll-kthread \
+	sq-poll-share \
+	sqpoll-disable-exit \
+	sqpoll-exit-hang \
+	sqpoll-sleep \
+	sq-space_left \
+	stdout \
+	submit-reuse \
+	symlink \
+	teardowns \
+	thread-exit \
+	timeout \
+	timeout-new \
+	timeout-overflow \
+	unlink \
+	wakeup-hang \
+	sendmsg_fs_cve \
+	rsrc_tags \
+	# EOL
+
+all_targets += $(test_targets)
+
+include ../Makefile.quiet
+
+ifneq ($(MAKECMDGOALS),clean)
+include ../config-host.mak
+endif
+
+ifdef CONFIG_HAVE_STATX
+test_targets += statx
+endif
+all_targets += statx
+
+ifdef CONFIG_HAVE_CXX
+test_targets += sq-full-cpp
+endif
+all_targets += sq-full-cpp
+
+helpers = helpers.o
+
+all: ${helpers} $(test_targets)
+
+helpers.o: helpers.c helpers.h
+	$(QUIET_CC)$(CC) $(CPPFLAGS) $(CFLAGS) -o $@ -c $< -luring
+
+%: %.c ${helpers} helpers.h
+	$(QUIET_CC)$(CC) $(CPPFLAGS) $(CFLAGS) -o $@ $< ${helpers} -luring $(XCFLAGS)
+
+%: %.cc ${helpers} helpers.h
+	$(QUIET_CXX)$(CXX) $(CPPFLAGS) $(CXXFLAGS) -o $@ $< ${helpers} -luring $(XCFLAGS)
+
+test_srcs := \
+	helpers.c \
+	232c93d07b74-test.c \
+	35fa71a030ca-test.c \
+	500f9fbadef8-test.c \
+	7ad0e4b2f83c-test.c \
+	8a9973408177-test.c \
+	917257daa0fe-test.c \
+	a0908ae19763-test.c \
+	a4c0b3decb33-test.c \
+	accept-link.c \
+	accept-reuse.c \
+	accept-test.c \
+	accept.c \
+	across-fork.c \
+	b19062a56726-test.c \
+	b5837bd5311d-test.c \
+	ce593a6c480a-test.c \
+	close-opath.c \
+	connect.c \
+	cq-full.c \
+	cq-overflow.c \
+	cq-peek-batch.c \
+	cq-ready.c\
+	cq-size.c \
+	d4ae271dfaae-test.c \
+	d77a67ed5f27-test.c \
+	defer.c \
+	double-poll-crash.c \
+	eeed8b54e0df-test.c \
+	eventfd-disable.c \
+	eventfd-ring.c \
+	eventfd.c \
+	fadvise.c \
+	fallocate.c \
+	fc2a85cb02ef-test.c \
+	file-register.c \
+	file-update.c \
+	files-exit-hang-poll.c \
+	files-exit-hang-timeout.c \
+	fixed-link.c \
+	fsync.c \
+	hardlink.c \
+	io-cancel.c \
+	io_uring_enter.c \
+	io_uring_register.c \
+	io_uring_setup.c \
+	iopoll.c \
+	lfs-openat-write.c \
+	lfs-openat.c \
+	link-timeout.c \
+	link.c \
+	link_drain.c \
+	madvise.c \
+	mkdir.c \
+	multicqes_drain.c \
+	nop-all-sizes.c \
+	nop.c \
+	open-close.c \
+	openat2.c \
+	personality.c \
+	pipe-eof.c \
+	pipe-reuse.c \
+	poll-cancel-ton.c \
+	poll-cancel.c \
+	poll-link.c \
+	poll-many.c \
+	poll-mshot-update.c \
+	poll-ring.c \
+	poll-v-poll.c \
+	poll.c \
+	probe.c \
+	read-write.c \
+	register-restrictions.c \
+	rename.c \
+	ring-leak.c \
+	ring-leak2.c \
+	rw_merge_test.c \
+	self.c \
+	send_recvmsg.c \
+	shared-wq.c \
+	short-read.c \
+	shutdown.c \
+	sigfd-deadlock.c \
+	socket-rw.c \
+	socket-rw-eagain.c \
+	splice.c \
+	sq-full-cpp.cc \
+	sq-full.c \
+	sq-poll-dup.c \
+	sq-poll-kthread.c \
+	sq-poll-share.c \
+	sqpoll-disable-exit.c \
+	sqpoll-exit-hang.c \
+	sqpoll-sleep.c \
+	sq-space_left.c \
+	statx.c \
+	stdout.c \
+	submit-reuse.c \
+	symlink.c \
+	teardowns.c \
+	thread-exit.c \
+	timeout-new.c \
+	timeout-overflow.c \
+	timeout.c \
+	unlink.c \
+	wakeup-hang.c \
+	sendmsg_fs_cve.c \
+	rsrc_tags.c \
+	# EOL
+
+test_objs := $(patsubst %.c,%.ol,$(patsubst %.cc,%.ol,$(test_srcs)))
+
+35fa71a030ca-test: XCFLAGS = -lpthread
+232c93d07b74-test: XCFLAGS = -lpthread
+send_recv: XCFLAGS = -lpthread
+send_recvmsg: XCFLAGS = -lpthread
+poll-link: XCFLAGS = -lpthread
+accept-link: XCFLAGS = -lpthread
+submit-reuse: XCFLAGS = -lpthread
+poll-v-poll: XCFLAGS = -lpthread
+across-fork: XCFLAGS = -lpthread
+ce593a6c480a-test: XCFLAGS = -lpthread
+wakeup-hang: XCFLAGS = -lpthread
+pipe-eof: XCFLAGS = -lpthread
+timeout-new: XCFLAGS = -lpthread
+thread-exit: XCFLAGS = -lpthread
+ring-leak2: XCFLAGS = -lpthread
+poll-mshot-update: XCFLAGS = -lpthread
+
+install: $(test_targets) runtests.sh runtests-loop.sh
+	$(INSTALL) -D -d -m 755 $(datadir)/liburing-test/
+	$(INSTALL) -D -m 755 $(test_targets) $(datadir)/liburing-test/
+	$(INSTALL) -D -m 755 runtests.sh  $(datadir)/liburing-test/
+	$(INSTALL) -D -m 755 runtests-loop.sh  $(datadir)/liburing-test/
+clean:
+	@rm -f $(all_targets) $(test_objs) helpers.o
+
+runtests: all
+	@./runtests.sh $(test_targets)
+runtests-loop: all
+	@./runtests-loop.sh $(test_targets)
diff --git a/test/a0908ae19763-test.c b/test/a0908ae19763-test.c
new file mode 100644
index 0000000..00cb559
--- /dev/null
+++ b/test/a0908ae19763-test.c
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+// autogenerated by syzkaller (https://github.com/google/syzkaller)
+
+#include <endian.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "liburing.h"
+#include "../src/syscall.h"
+
+uint64_t r[1] = {0xffffffffffffffff};
+
+int main(int argc, char *argv[])
+{
+  if (argc > 1)
+    return 0;
+  mmap((void *) 0x20000000, 0x1000000, 3, 0x32, -1, 0);
+  intptr_t res = 0;
+  *(uint32_t*)0x20000080 = 0;
+  *(uint32_t*)0x20000084 = 0;
+  *(uint32_t*)0x20000088 = 0;
+  *(uint32_t*)0x2000008c = 0;
+  *(uint32_t*)0x20000090 = 0;
+  *(uint32_t*)0x20000094 = 0;
+  *(uint32_t*)0x20000098 = 0;
+  *(uint32_t*)0x2000009c = 0;
+  *(uint32_t*)0x200000a0 = 0;
+  *(uint32_t*)0x200000a4 = 0;
+  *(uint32_t*)0x200000a8 = 0;
+  *(uint32_t*)0x200000ac = 0;
+  *(uint32_t*)0x200000b0 = 0;
+  *(uint32_t*)0x200000b4 = 0;
+  *(uint32_t*)0x200000b8 = 0;
+  *(uint32_t*)0x200000bc = 0;
+  *(uint32_t*)0x200000c0 = 0;
+  *(uint32_t*)0x200000c4 = 0;
+  *(uint64_t*)0x200000c8 = 0;
+  *(uint32_t*)0x200000d0 = 0;
+  *(uint32_t*)0x200000d4 = 0;
+  *(uint32_t*)0x200000d8 = 0;
+  *(uint32_t*)0x200000dc = 0;
+  *(uint32_t*)0x200000e0 = 0;
+  *(uint32_t*)0x200000e4 = 0;
+  *(uint32_t*)0x200000e8 = 0;
+  *(uint32_t*)0x200000ec = 0;
+  *(uint64_t*)0x200000f0 = 0;
+  res = __sys_io_uring_setup(0xa4, (struct io_uring_params *) 0x20000080);
+  if (res != -1)
+    r[0] = res;
+  *(uint32_t*)0x20000280 = -1;
+  __sys_io_uring_register(r[0], 2, (const void *) 0x20000280, 1);
+  return 0;
+}
diff --git a/test/a4c0b3decb33-test.c b/test/a4c0b3decb33-test.c
new file mode 100644
index 0000000..34b0af2
--- /dev/null
+++ b/test/a4c0b3decb33-test.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: MIT */
+// autogenerated by syzkaller (https://github.com/google/syzkaller)
+
+#include <dirent.h>
+#include <endian.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/mman.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "liburing.h"
+#include "../src/syscall.h"
+
+static void sleep_ms(uint64_t ms)
+{
+	usleep(ms * 1000);
+}
+
+static uint64_t current_time_ms(void)
+{
+	struct timespec ts;
+	if (clock_gettime(CLOCK_MONOTONIC, &ts))
+		exit(1);
+	return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
+}
+
+static bool write_file(const char* file, const char* what, ...)
+{
+	char buf[1024];
+	va_list args;
+	va_start(args, what);
+	vsnprintf(buf, sizeof(buf), what, args);
+	va_end(args);
+	buf[sizeof(buf) - 1] = 0;
+	int len = strlen(buf);
+	int fd = open(file, O_WRONLY | O_CLOEXEC);
+	if (fd == -1)
+		return false;
+	if (write(fd, buf, len) != len) {
+		int err = errno;
+		close(fd);
+		errno = err;
+		return false;
+	}
+	close(fd);
+	return true;
+}
+
+static void kill_and_wait(int pid, int* status)
+{
+	kill(-pid, SIGKILL);
+	kill(pid, SIGKILL);
+	int i;
+	for (i = 0; i < 100; i++) {
+		if (waitpid(-1, status, WNOHANG | __WALL) == pid)
+			return;
+		usleep(1000);
+	}
+	DIR* dir = opendir("/sys/fs/fuse/connections");
+	if (dir) {
+		for (;;) {
+			struct dirent* ent = readdir(dir);
+			if (!ent)
+				break;
+			if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
+				continue;
+			char abort[300];
+			snprintf(abort, sizeof(abort), "/sys/fs/fuse/connections/%s/abort",
+					ent->d_name);
+			int fd = open(abort, O_WRONLY);
+			if (fd == -1) {
+				continue;
+			}
+			if (write(fd, abort, 1) < 0) {
+			}
+			close(fd);
+		}
+		closedir(dir);
+	} else {
+	}
+	while (waitpid(-1, status, __WALL) != pid) {
+	}
+}
+
+static void setup_test()
+{
+	prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
+	setpgrp();
+	write_file("/proc/self/oom_score_adj", "1000");
+}
+
+static void execute_one(void);
+
+#define WAIT_FLAGS __WALL
+
+static void loop(void)
+{
+	int iter;
+	for (iter = 0; iter < 5000; iter++) {
+		int pid = fork();
+		if (pid < 0)
+			exit(1);
+		if (pid == 0) {
+			setup_test();
+			execute_one();
+			exit(0);
+		}
+		int status = 0;
+		uint64_t start = current_time_ms();
+		for (;;) {
+			if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid)
+				break;
+			sleep_ms(1);
+			if (current_time_ms() - start < 5 * 1000)
+				continue;
+			kill_and_wait(pid, &status);
+			break;
+		}
+	}
+}
+
+void execute_one(void)
+{
+	*(uint32_t*)0x20000080 = 0;
+	*(uint32_t*)0x20000084 = 0;
+	*(uint32_t*)0x20000088 = 3;
+	*(uint32_t*)0x2000008c = 3;
+	*(uint32_t*)0x20000090 = 0x175;
+	*(uint32_t*)0x20000094 = 0;
+	*(uint32_t*)0x20000098 = 0;
+	*(uint32_t*)0x2000009c = 0;
+	*(uint32_t*)0x200000a0 = 0;
+	*(uint32_t*)0x200000a4 = 0;
+	*(uint32_t*)0x200000a8 = 0;
+	*(uint32_t*)0x200000ac = 0;
+	*(uint32_t*)0x200000b0 = 0;
+	*(uint32_t*)0x200000b4 = 0;
+	*(uint32_t*)0x200000b8 = 0;
+	*(uint32_t*)0x200000bc = 0;
+	*(uint32_t*)0x200000c0 = 0;
+	*(uint32_t*)0x200000c4 = 0;
+	*(uint64_t*)0x200000c8 = 0;
+	*(uint32_t*)0x200000d0 = 0;
+	*(uint32_t*)0x200000d4 = 0;
+	*(uint32_t*)0x200000d8 = 0;
+	*(uint32_t*)0x200000dc = 0;
+	*(uint32_t*)0x200000e0 = 0;
+	*(uint32_t*)0x200000e4 = 0;
+	*(uint32_t*)0x200000e8 = 0;
+	*(uint32_t*)0x200000ec = 0;
+	*(uint64_t*)0x200000f0 = 0;
+	__sys_io_uring_setup(0x983, (struct io_uring_params *) 0x20000080);
+}
+
+static void sig_int(int sig)
+{
+	exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+	if (argc > 1)
+		return 0;
+	signal(SIGINT, sig_int);
+	mmap((void *) 0x20000000, 0x1000000, 3, 0x32, -1, 0);
+	loop();
+	return 0;
+}
diff --git a/test/accept-link.c b/test/accept-link.c
new file mode 100644
index 0000000..605e0ec
--- /dev/null
+++ b/test/accept-link.c
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: MIT */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <pthread.h>
+#include <sys/socket.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+#include <poll.h>
+
+#include "liburing.h"
+
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+
+static int recv_thread_ready = 0;
+static int recv_thread_done = 0;
+
+static void signal_var(int *var)
+{
+        pthread_mutex_lock(&mutex);
+        *var = 1;
+        pthread_cond_signal(&cond);
+        pthread_mutex_unlock(&mutex);
+}
+
+static void wait_for_var(int *var)
+{
+        pthread_mutex_lock(&mutex);
+
+        while (!*var)
+                pthread_cond_wait(&cond, &mutex);
+
+        pthread_mutex_unlock(&mutex);
+}
+
+struct data {
+	unsigned expected[2];
+	unsigned just_positive[2];
+	unsigned long timeout;
+	int port;
+	int stop;
+};
+
+static void *send_thread(void *arg)
+{
+	struct data *data = arg;
+	int ret;
+
+	wait_for_var(&recv_thread_ready);
+
+	if (data->stop)
+		return NULL;
+
+	int s0 = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+	assert(s0 != -1);
+
+	struct sockaddr_in addr;
+
+	addr.sin_family = AF_INET;
+	addr.sin_port = data->port;
+	addr.sin_addr.s_addr = 0x0100007fU;
+
+	ret = connect(s0, (struct sockaddr*)&addr, sizeof(addr));
+	assert(ret != -1);
+
+	wait_for_var(&recv_thread_done);
+
+	close(s0);
+	return NULL;
+}
+
+void *recv_thread(void *arg)
+{
+	struct data *data = arg;
+	struct io_uring ring;
+	int i, ret;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	assert(ret == 0);
+
+	int s0 = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+	assert(s0 != -1);
+
+	int32_t val = 1;
+	ret = setsockopt(s0, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
+	assert(ret != -1);
+	ret = setsockopt(s0, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+	assert(ret != -1);
+
+	struct sockaddr_in addr;
+
+	addr.sin_family = AF_INET;
+	addr.sin_addr.s_addr = 0x0100007fU;
+
+	i = 0;
+	do {
+		data->port = 1025 + (rand() % 64510);
+		addr.sin_port = data->port;
+
+		if (bind(s0, (struct sockaddr*)&addr, sizeof(addr)) != -1)
+			break;
+	} while (++i < 100);
+
+	if (i >= 100) {
+		printf("Can't find good port, skipped\n");
+		data->stop = 1;
+		signal_var(&recv_thread_ready);
+		goto out;
+	}
+
+	ret = listen(s0, 128);
+	assert(ret != -1);
+
+	signal_var(&recv_thread_ready);
+
+	struct io_uring_sqe *sqe;
+
+	sqe = io_uring_get_sqe(&ring);
+	assert(sqe != NULL);
+
+	io_uring_prep_accept(sqe, s0, NULL, NULL, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(&ring);
+	assert(sqe != NULL);
+
+	struct __kernel_timespec ts;
+	ts.tv_sec = data->timeout / 1000000000;
+	ts.tv_nsec = data->timeout % 1000000000;
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(&ring);
+	assert(ret == 2);
+
+	for (i = 0; i < 2; i++) {
+		struct io_uring_cqe *cqe;
+		int idx;
+
+		if (io_uring_wait_cqe(&ring, &cqe)) {
+			fprintf(stderr, "wait cqe failed\n");
+			goto err;
+		}
+		idx = cqe->user_data - 1;
+		if (cqe->res != data->expected[idx]) {
+			if (cqe->res > 0 && data->just_positive[idx])
+				goto ok;
+			if (cqe->res == -EBADF) {
+				fprintf(stdout, "Accept not supported, skipping\n");
+				data->stop = 1;
+				goto out;
+			}
+			fprintf(stderr, "cqe %" PRIu64 " got %d, wanted %d\n",
+					(uint64_t) cqe->user_data, cqe->res,
+					data->expected[idx]);
+			goto err;
+		}
+ok:
+		if (cqe->user_data == 1 && cqe->res > 0)
+			close(cqe->res);
+
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	signal_var(&recv_thread_done);
+
+out:
+	close(s0);
+	return NULL;
+err:
+	close(s0);
+	return (void *) 1;
+}
+
+static int test_accept_timeout(int do_connect, unsigned long timeout)
+{
+	struct io_uring ring;
+	struct io_uring_params p = {};
+	pthread_t t1, t2;
+	struct data d;
+	void *tret;
+	int ret, fast_poll;
+
+	ret = io_uring_queue_init_params(1, &ring, &p);
+	if (ret) {
+		fprintf(stderr, "queue_init: %d\n", ret);
+		return 1;
+	};
+
+	fast_poll = (p.features & IORING_FEAT_FAST_POLL) != 0;
+	io_uring_queue_exit(&ring);
+
+	recv_thread_ready = 0;
+	recv_thread_done = 0;
+
+	memset(&d, 0, sizeof(d));
+	d.timeout = timeout;
+	if (!do_connect) {
+		if (fast_poll) {
+			d.expected[0] = -ECANCELED;
+			d.expected[1] = -ETIME;
+		} else {
+			d.expected[0] = -EINTR;
+			d.expected[1] = -EALREADY;
+		}
+	} else {
+		d.expected[0] = -1U;
+		d.just_positive[0] = 1;
+		d.expected[1] = -ECANCELED;
+	}
+
+	pthread_create(&t1, NULL, recv_thread, &d);
+
+	if (do_connect)
+		pthread_create(&t2, NULL, send_thread, &d);
+
+	pthread_join(t1, &tret);
+	if (tret)
+		ret++;
+
+	if (do_connect) {
+		pthread_join(t2, &tret);
+		if (tret)
+			ret++;
+	}
+
+	return ret;
+}
+
+int main(int argc, char *argv[])
+{
+	if (argc > 1)
+		return 0;
+	if (test_accept_timeout(0, 200000000)) {
+		fprintf(stderr, "accept timeout 0 failed\n");
+		return 1;
+	}
+
+	if (test_accept_timeout(1, 1000000000)) {
+		fprintf(stderr, "accept and connect timeout 0 failed\n");
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/test/accept-reuse.c b/test/accept-reuse.c
new file mode 100644
index 0000000..c95ac70
--- /dev/null
+++ b/test/accept-reuse.c
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: MIT */
+#include <liburing.h>
+#include <netdb.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include "liburing.h"
+#include "../src/syscall.h"
+
+struct io_uring io_uring;
+
+/* Thin wrapper over the raw io_uring_enter(2) syscall helper. */
+int sys_io_uring_enter(const int fd, const unsigned to_submit,
+		       const unsigned min_complete, const unsigned flags,
+		       sigset_t * const sig)
+{
+	return __sys_io_uring_enter(fd, to_submit, min_complete, flags, sig);
+}
+
+/*
+ * Hand-roll submission of the SQE at slot 0: point the next SQ ring entry
+ * at sqes[0], publish the new tail with a release store, then enter the
+ * kernel.  Bypasses io_uring_submit() so this test can reuse the same SQE
+ * slot for consecutive operations (relies on IORING_FEAT_SUBMIT_STABLE).
+ * Returns whatever io_uring_enter(2) returns (number submitted or -1).
+ */
+int submit_sqe(void)
+{
+	struct io_uring_sq *sq = &io_uring.sq;
+	const unsigned tail = *sq->ktail;
+
+	/* slot index 0, masked into the ring; store-release publishes it */
+	sq->array[tail & *sq->kring_mask] = 0;
+	io_uring_smp_store_release(sq->ktail, tail + 1);
+
+	return sys_io_uring_enter(io_uring.ring_fd, 1, 0, 0, NULL);
+}
+
+/*
+ * Bind/listen on localhost:12345, queue an accept, then a connect, reusing
+ * the same SQE slot for both submissions (see submit_sqe()).  Requires
+ * IORING_FEAT_SUBMIT_STABLE so the kernel has copied the SQE data before
+ * we overwrite the slot.  Returns 0 on success/skip, 1 on failure.
+ */
+int main(int argc, char **argv)
+{
+	struct addrinfo *addr_info_list = NULL;
+	struct addrinfo *ai, *addr_info = NULL;
+	struct io_uring_params params;
+	struct io_uring_sqe *sqe;
+	struct addrinfo hints;
+	struct sockaddr sa;
+	socklen_t sa_size = sizeof(sa);
+	int ret, listen_fd, connect_fd, val, i;
+
+	if (argc > 1)
+		return 0;
+
+	memset(&params, 0, sizeof(params));
+	ret = io_uring_queue_init_params(4, &io_uring, &params);
+	if (ret) {
+		fprintf(stderr, "io_uring_init_failed: %d\n", ret);
+		return 1;
+	}
+	if (!(params.features & IORING_FEAT_SUBMIT_STABLE)) {
+		fprintf(stdout, "FEAT_SUBMIT_STABLE not there, skipping\n");
+		return 0;
+	}
+
+	memset(&hints, 0, sizeof(hints));
+	hints.ai_family = AF_UNSPEC;
+	hints.ai_socktype = SOCK_STREAM;
+	hints.ai_flags = AI_PASSIVE | AI_NUMERICSERV;
+
+	ret = getaddrinfo(NULL, "12345", &hints, &addr_info_list);
+	if (ret) {
+		/*
+		 * getaddrinfo() returns an EAI_* code, not -1/errno, so
+		 * report it with gai_strerror() rather than perror().
+		 */
+		fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(ret));
+		return 1;
+	}
+
+	/* pick the first IPv4/IPv6 result */
+	for (ai = addr_info_list; ai; ai = ai->ai_next) {
+		if (ai->ai_family == AF_INET || ai->ai_family == AF_INET6) {
+			addr_info = ai;
+			break;
+		}
+	}
+	if (!addr_info) {
+		fprintf(stderr, "addrinfo not found\n");
+		return 1;
+	}
+
+	/* both operations are prepped into the very same SQE slot */
+	sqe = &io_uring.sq.sqes[0];
+	listen_fd = -1;
+
+	ret = socket(addr_info->ai_family, SOCK_STREAM,
+			   addr_info->ai_protocol);
+	if (ret < 0) {
+		perror("socket");
+		return 1;
+	}
+	listen_fd = ret;
+
+	val = 1;
+	setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(int));
+	setsockopt(listen_fd, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(int));
+
+	ret = bind(listen_fd, addr_info->ai_addr, addr_info->ai_addrlen);
+	if (ret < 0) {
+		perror("bind");
+		return 1;
+	}
+
+	ret = listen(listen_fd, SOMAXCONN);
+	if (ret < 0) {
+		perror("listen");
+		return 1;
+	}
+
+	memset(&sa, 0, sizeof(sa));
+
+	io_uring_prep_accept(sqe, listen_fd, &sa, &sa_size, 0);
+	sqe->user_data = 1;
+	ret = submit_sqe();
+	if (ret != 1) {
+		fprintf(stderr, "submit failed: %d\n", ret);
+		return 1;
+	}
+
+	connect_fd = -1;
+	ret = socket(addr_info->ai_family, SOCK_STREAM, addr_info->ai_protocol);
+	if (ret < 0) {
+		perror("socket");
+		return 1;
+	}
+	connect_fd = ret;
+
+	/* overwrites the accept SQE slot; kernel must have its own copy */
+	io_uring_prep_connect(sqe, connect_fd, addr_info->ai_addr,
+				addr_info->ai_addrlen);
+	sqe->user_data = 2;
+	ret = submit_sqe();
+	if (ret != 1) {
+		fprintf(stderr, "submit failed: %d\n", ret);
+		return 1;
+	}
+
+	/* reap both completions: user_data 1 = accept, 2 = connect */
+	for (i = 0; i < 2; i++) {
+		struct io_uring_cqe *cqe = NULL;
+
+		ret = io_uring_wait_cqe(&io_uring, &cqe);
+		if (ret) {
+			fprintf(stderr, "io_uring_wait_cqe: %d\n", ret);
+			return 1;
+		}
+
+		switch (cqe->user_data) {
+		case 1:
+			if (cqe->res < 0) {
+				fprintf(stderr, "accept failed: %d\n", cqe->res);
+				return 1;
+			}
+			break;
+		case 2:
+			if (cqe->res) {
+				fprintf(stderr, "connect failed: %d\n", cqe->res);
+				return 1;
+			}
+			break;
+		}
+		io_uring_cq_advance(&io_uring, 1);
+	}
+
+	freeaddrinfo(addr_info_list);
+	io_uring_queue_exit(&io_uring);
+	return 0;
+}
diff --git a/test/accept-test.c b/test/accept-test.c
new file mode 100644
index 0000000..71d9d80
--- /dev/null
+++ b/test/accept-test.c
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: Check to see if accept handles addr and addrlen
+ */
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <assert.h>
+#include "liburing.h"
+
+/*
+ * Queue one accept with non-NULL addr/addrlen on a listening abstract
+ * UNIX socket and wait with a short timeout.  Since nobody connects, the
+ * wait should time out (-ETIME); an immediate completion would mean the
+ * kernel mishandled the addr/addrlen arguments.  Returns 0 on pass/skip.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	struct sockaddr_un addr;
+	socklen_t addrlen = sizeof(addr);
+	int ret, fd;
+	/* 1 ms wait; the accept is expected to still be pending */
+	struct __kernel_timespec ts = {
+		.tv_sec = 0,
+		.tv_nsec = 1000000
+	};
+
+	if (argc > 1)
+		return 0;
+
+	if (io_uring_queue_init(4, &ring, 0) != 0) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	fd = socket(AF_UNIX, SOCK_STREAM, 0);
+	assert(fd != -1);
+
+	/* abstract-namespace socket: name starts with a NUL byte */
+	memset(&addr, 0, sizeof(addr));
+	addr.sun_family = AF_UNIX;
+	memcpy(addr.sun_path, "\0sock", 6);
+
+	ret = bind(fd, (struct sockaddr *)&addr, addrlen);
+	assert(ret != -1);
+	ret = listen(fd, 128);
+	assert(ret != -1);
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		return 1;
+	}
+	io_uring_prep_accept(sqe, fd, (struct sockaddr*)&addr, &addrlen, 0);
+	sqe->user_data = 1;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "Got submit %d, expected 1\n", ret);
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe_timeout(&ring, &cqe, &ts);
+	if (!ret) {
+		/* got a completion: only acceptable if accept is unsupported */
+		if (cqe->res == -EBADF || cqe->res == -EINVAL) {
+			fprintf(stdout, "Accept not supported, skipping\n");
+			goto out;
+		} else if (cqe->res < 0) {
+			fprintf(stderr, "cqe error %d\n", cqe->res);
+			goto err;
+		}
+	} else if (ret != -ETIME) {
+		fprintf(stderr, "accept() failed to use addr & addrlen parameters!\n");
+		return 1;
+	}
+
+out:
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
diff --git a/test/accept.c b/test/accept.c
new file mode 100644
index 0000000..f096f8a
--- /dev/null
+++ b/test/accept.c
@@ -0,0 +1,437 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Check that IORING_OP_ACCEPT works, and send some data across to verify we
+ * didn't get a junk fd.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <assert.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/un.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+static int no_accept;
+
+struct data {
+	char buf[128];
+	struct iovec iov;
+};
+
+/* Queue (without submitting) a 128-byte writev to 'fd'; the buffer is
+ * heap-allocated and intentionally lives until process exit. */
+static void queue_send(struct io_uring *ring, int fd)
+{
+	struct data *data = t_malloc(sizeof(*data));
+	struct io_uring_sqe *sqe;
+
+	data->iov.iov_base = data->buf;
+	data->iov.iov_len = sizeof(data->buf);
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_writev(sqe, fd, &data->iov, 1, 0);
+}
+
+/* Queue (without submitting) a 128-byte readv from 'fd'; mirror of
+ * queue_send(). */
+static void queue_recv(struct io_uring *ring, int fd)
+{
+	struct data *data = t_malloc(sizeof(*data));
+	struct io_uring_sqe *sqe;
+
+	data->iov.iov_base = data->buf;
+	data->iov.iov_len = sizeof(data->buf);
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_readv(sqe, fd, &data->iov, 1, 0);
+}
+
+/* Submit a single accept on 'fd' and block for its completion.
+ * Returns the CQE result: the accepted fd, or a negative errno. */
+static int accept_conn(struct io_uring *ring, int fd)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int res;
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
+
+	res = io_uring_submit(ring);
+	assert(res != -1);
+
+	res = io_uring_wait_cqe(ring, &cqe);
+	assert(!res);
+
+	res = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return res;
+}
+
+/*
+ * Create a TCP socket bound to 127.0.0.1 on a fixed test port (offset by
+ * 'port_off') and start listening.  If 'addr' is non-NULL, the bound
+ * address is written there so the caller can connect to it.
+ * Returns the listening fd (setup failures abort via assert).
+ */
+static int start_accept_listen(struct sockaddr_in *addr, int port_off)
+{
+	int fd, ret;
+
+	fd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);
+
+	int32_t val = 1;
+	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
+	assert(ret != -1);
+	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+	assert(ret != -1);
+
+	struct sockaddr_in laddr;
+
+	if (!addr)
+		addr = &laddr;
+
+	/*
+	 * NOTE(review): sin_port/s_addr are raw host-order constants, not
+	 * htons()/htonl().  s_addr 0x0100007f is 127.0.0.1 only on
+	 * little-endian; the port works because connect() reuses the same
+	 * struct.  Looks intentional for this test — confirm before reuse.
+	 */
+	addr->sin_family = AF_INET;
+	addr->sin_port = 0x1235 + port_off;
+	addr->sin_addr.s_addr = 0x0100007fU;
+
+	ret = bind(fd, (struct sockaddr*)addr, sizeof(*addr));
+	assert(ret != -1);
+	ret = listen(fd, 128);
+	assert(ret != -1);
+
+	return fd;
+}
+
+/*
+ * Core accept test: listen, start a non-blocking connect (expected to
+ * return -1/EINPROGRESS), accept via io_uring, then push 128 bytes each
+ * way through the pair to prove the accepted fd is sane.  If
+ * 'accept_should_error' is set (SQPOLL without non-fixed file support),
+ * -EBADF/-EINVAL from accept is treated as success.  Returns 0/1.
+ */
+static int test(struct io_uring *ring, int accept_should_error)
+{
+	struct io_uring_cqe *cqe;
+	struct sockaddr_in addr;
+	uint32_t head;
+	uint32_t count = 0;
+	int done = 0;
+	int p_fd[2];
+        int ret;
+
+	int32_t val, recv_s0 = start_accept_listen(&addr, 0);
+
+	p_fd[1] = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);
+
+	val = 1;
+	ret = setsockopt(p_fd[1], IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val));
+	assert(ret != -1);
+
+	/* connect non-blocking so the call returns before accept runs */
+	int32_t flags = fcntl(p_fd[1], F_GETFL, 0);
+	assert(flags != -1);
+
+	flags |= O_NONBLOCK;
+	ret = fcntl(p_fd[1], F_SETFL, flags);
+	assert(ret != -1);
+
+	/* expected to "fail" with EINPROGRESS */
+	ret = connect(p_fd[1], (struct sockaddr*)&addr, sizeof(addr));
+	assert(ret == -1);
+
+	/* back to blocking for the data exchange below */
+	flags = fcntl(p_fd[1], F_GETFL, 0);
+	assert(flags != -1);
+
+	flags &= ~O_NONBLOCK;
+	ret = fcntl(p_fd[1], F_SETFL, flags);
+	assert(ret != -1);
+
+	p_fd[0] = accept_conn(ring, recv_s0);
+	if (p_fd[0] == -EINVAL) {
+		if (accept_should_error)
+			goto out;
+		fprintf(stdout, "Accept not supported, skipping\n");
+		no_accept = 1;
+		goto out;
+	} else if (p_fd[0] < 0) {
+		if (accept_should_error &&
+		    (p_fd[0] == -EBADF || p_fd[0] == -EINVAL))
+			goto out;
+		fprintf(stderr, "Accept got %d\n", p_fd[0]);
+		goto err;
+	}
+
+	/* one write on the connecting side, one read on the accepted side */
+	queue_send(ring, p_fd[1]);
+	queue_recv(ring, p_fd[0]);
+
+	ret = io_uring_submit_and_wait(ring, 2);
+	assert(ret != -1);
+
+	while (count < 2) {
+		io_uring_for_each_cqe(ring, head, cqe) {
+			if (cqe->res < 0) {
+				fprintf(stderr, "Got cqe res %d\n", cqe->res);
+				done = 1;
+				break;
+			}
+			/* both transfers must move the full 128 bytes */
+			assert(cqe->res == 128);
+			count++;
+		}
+
+		assert(count <= 2);
+		io_uring_cq_advance(ring, count);
+		if (done)
+			goto err;
+	}
+
+out:
+	close(p_fd[0]);
+	close(p_fd[1]);
+	close(recv_s0);
+	return 0;
+err:
+	close(p_fd[0]);
+	close(p_fd[1]);
+	close(recv_s0);
+	return 1;
+}
+
+/* SIGALRM handler: the pending accept never completes, so a clean exit
+ * here is the "pass" path for test_accept_pending_on_exit(). */
+static void sig_alrm(int sig)
+{
+	/* exit(3) is not async-signal-safe; _exit(2) is (signal-safety(7)) */
+	_exit(0);
+}
+
+/*
+ * Leave an accept request pending and exit while it is in flight (via the
+ * SIGALRM handler), verifying the kernel cleanly cancels pending requests
+ * on ring/process teardown.  The wait_cqe() below is expected to never
+ * complete normally; sig_alrm() exits the process after one second.
+ */
+static int test_accept_pending_on_exit(void)
+{
+	struct io_uring m_io_uring;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int fd, ret;
+
+	ret = io_uring_queue_init(32, &m_io_uring, 0);
+	assert(ret >= 0);
+
+	fd = start_accept_listen(NULL, 0);
+
+	sqe = io_uring_get_sqe(&m_io_uring);
+	io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
+	ret = io_uring_submit(&m_io_uring);
+	assert(ret != -1);
+
+	/* escape hatch: nobody connects, so exit from the handler */
+	signal(SIGALRM, sig_alrm);
+	alarm(1);
+	ret = io_uring_wait_cqe(&m_io_uring, &cqe);
+	assert(!ret);
+	io_uring_cqe_seen(&m_io_uring, cqe);
+
+	io_uring_queue_exit(&m_io_uring);
+	return 0;
+}
+
+/*
+ * Test issue many accepts and see if we handle cancellation on exit
+ */
+/*
+ * Issue 'nr' accepts (optionally letting them run for 'usecs' first) and
+ * verify cancellation on ring exit.  RLIMIT_NPROC is lowered while the
+ * requests are created to exercise the out-of-workers path.
+ * Returns 0 on success, 1 on failure.
+ */
+static int test_accept_many(unsigned nr, unsigned usecs)
+{
+	struct io_uring m_io_uring;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	unsigned long cur_lim;
+	struct rlimit rlim;
+	int *fds, i, ret;
+
+	if (getrlimit(RLIMIT_NPROC, &rlim) < 0) {
+		perror("getrlimit");
+		return 1;
+	}
+
+	cur_lim = rlim.rlim_cur;
+	rlim.rlim_cur = nr / 4;
+
+	if (setrlimit(RLIMIT_NPROC, &rlim) < 0) {
+		perror("setrlimit");
+		return 1;
+	}
+
+	ret = io_uring_queue_init(2 * nr, &m_io_uring, 0);
+	assert(ret >= 0);
+
+	/* from here on, ret is the function's return code */
+	ret = 0;
+
+	fds = t_calloc(nr, sizeof(int));
+
+	for (i = 0; i < nr; i++)
+		fds[i] = start_accept_listen(NULL, i);
+
+	for (i = 0; i < nr; i++) {
+		int res;
+
+		sqe = io_uring_get_sqe(&m_io_uring);
+		io_uring_prep_accept(sqe, fds[i], NULL, NULL, 0);
+		sqe->user_data = 1 + i;
+		res = io_uring_submit(&m_io_uring);
+		assert(res == 1);
+	}
+
+	if (usecs)
+		usleep(usecs);
+
+	/* any already-posted completions must be cancellations */
+	for (i = 0; i < nr; i++) {
+		if (io_uring_peek_cqe(&m_io_uring, &cqe))
+			break;
+		if (cqe->res != -ECANCELED) {
+			fprintf(stderr, "Expected cqe to be cancelled\n");
+			goto err;
+		}
+		io_uring_cqe_seen(&m_io_uring, cqe);
+	}
+out:
+	rlim.rlim_cur = cur_lim;
+	if (setrlimit(RLIMIT_NPROC, &rlim) < 0) {
+		perror("setrlimit");
+		return 1;
+	}
+
+	free(fds);
+	io_uring_queue_exit(&m_io_uring);
+	/* BUGFIX: used to 'return 0' here, masking the err path's ret = 1 */
+	return ret;
+err:
+	ret = 1;
+	goto out;
+}
+
+/*
+ * Submit one accept, optionally sleep 'usecs' so it gets underway, then
+ * cancel it and validate the two completions (see the case comment in the
+ * loop body).  Returns 0 on success, 1 on failure.
+ */
+static int test_accept_cancel(unsigned usecs)
+{
+	struct io_uring m_io_uring;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int fd, i, ret;
+
+	ret = io_uring_queue_init(32, &m_io_uring, 0);
+	assert(ret >= 0);
+
+	fd = start_accept_listen(NULL, 0);
+
+	sqe = io_uring_get_sqe(&m_io_uring);
+	io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
+	sqe->user_data = 1;
+        ret = io_uring_submit(&m_io_uring);
+	assert(ret == 1);
+
+	if (usecs)
+		usleep(usecs);
+
+	/* cancel targets the accept via its user_data (1) */
+	sqe = io_uring_get_sqe(&m_io_uring);
+	io_uring_prep_cancel(sqe, (void *) 1, 0);
+	sqe->user_data = 2;
+	ret = io_uring_submit(&m_io_uring);
+	assert(ret == 1);
+
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(&m_io_uring, &cqe);
+		assert(!ret);
+		/*
+		 * Two cases here:
+		 *
+		 * 1) We cancel the accept4() before it got started, we should
+		 *    get '0' for the cancel request and '-ECANCELED' for the
+		 *    accept request.
+		 * 2) We cancel the accept4() after it's already running, we
+		 *    should get '-EALREADY' for the cancel request and
+		 *    '-EINTR' for the accept request.
+		 */
+		if (cqe->user_data == 1) {
+			if (cqe->res != -EINTR && cqe->res != -ECANCELED) {
+				fprintf(stderr, "Cancelled accept got %d\n", cqe->res);
+				goto err;
+			}
+		} else if (cqe->user_data == 2) {
+			if (cqe->res != -EALREADY && cqe->res != 0) {
+				fprintf(stderr, "Cancel got %d\n", cqe->res);
+				goto err;
+			}
+		}
+		io_uring_cqe_seen(&m_io_uring, cqe);
+	}
+
+	io_uring_queue_exit(&m_io_uring);
+	return 0;
+err:
+	io_uring_queue_exit(&m_io_uring);
+	return 1;
+}
+
+/* Basic accept test on a default (non-SQPOLL) ring. */
+static int test_accept(void)
+{
+	struct io_uring ring;
+	int rc;
+
+	rc = io_uring_queue_init(32, &ring, 0);
+	assert(rc >= 0);
+
+	rc = test(&ring, 0);
+	io_uring_queue_exit(&ring);
+	return rc;
+}
+
+/* Accept test on an SQPOLL ring.  Without IORING_FEAT_SQPOLL_NONFIXED
+ * the kernel cannot use normal (non-registered) files from the SQ poll
+ * thread, so accept is then expected to fail. */
+static int test_accept_sqpoll(void)
+{
+	struct io_uring_params p = { };
+	struct io_uring ring;
+	int rc;
+
+	p.flags = IORING_SETUP_SQPOLL;
+	rc = t_create_ring_params(32, &ring, &p);
+	if (rc == T_SETUP_SKIP)
+		return 0;
+	if (rc < 0)
+		return rc;
+
+	rc = test(&ring, !(p.features & IORING_FEAT_SQPOLL_NONFIXED));
+	io_uring_queue_exit(&ring);
+	return rc;
+}
+
+int main(int argc, char *argv[])
+{
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = test_accept();
+	if (ret) {
+		fprintf(stderr, "test_accept failed\n");
+		return ret;
+	}
+	if (no_accept)
+		return 0;
+
+	ret = test_accept_sqpoll();
+	if (ret) {
+		fprintf(stderr, "test_accept_sqpoll failed\n");
+		return ret;
+	}
+
+	ret = test_accept_cancel(0);
+	if (ret) {
+		fprintf(stderr, "test_accept_cancel nodelay failed\n");
+		return ret;
+	}
+
+	ret = test_accept_cancel(10000);
+	if (ret) {
+		fprintf(stderr, "test_accept_cancel delay failed\n");
+		return ret;
+	}
+
+	/* distinguish the two variants in the failure output (they used to
+	 * print the same message) */
+	ret = test_accept_many(128, 0);
+	if (ret) {
+		fprintf(stderr, "test_accept_many nodelay failed\n");
+		return ret;
+	}
+
+	ret = test_accept_many(128, 100000);
+	if (ret) {
+		fprintf(stderr, "test_accept_many delay failed\n");
+		return ret;
+	}
+
+	ret = test_accept_pending_on_exit();
+	if (ret) {
+		fprintf(stderr, "test_accept_pending_on_exit failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/across-fork.c b/test/across-fork.c
new file mode 100644
index 0000000..009fe52
--- /dev/null
+++ b/test/across-fork.c
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test sharing a ring across a fork
+ */
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "liburing.h"
+
+
+/* Region shared (via MAP_SHARED mmap) between parent and child: the ring
+ * itself plus a process-shared barrier used to sequence the two sides. */
+struct forktestmem
+{
+	struct io_uring ring;
+	pthread_barrier_t barrier;
+	pthread_barrierattr_t barrierattr;
+};
+
+/* Open (create/append) "<dir>/<fname>" for read/write; exits on failure. */
+static int open_tempfile(const char *dir, const char *fname)
+{
+	char path[32];
+	int fd;
+
+	snprintf(path, sizeof(path), "%s/%s", dir, fname);
+	fd = open(path, O_RDWR | O_CREAT | O_APPEND, S_IRUSR | S_IWUSR);
+	if (fd < 0) {
+		perror("open");
+		exit(1);
+	}
+	return fd;
+}
+
+/* Queue a writev of the NUL-terminated string 'str' to 'fd' and submit,
+ * waiting for 'wait' completions.  Returns 0 on success, 1 on error. */
+static int submit_write(struct io_uring *ring, int fd, const char *str,
+			int wait)
+{
+	struct io_uring_sqe *sqe;
+	struct iovec iov;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "could not get sqe\n");
+		return 1;
+	}
+
+	iov.iov_base = (char *) str;
+	iov.iov_len = strlen(str);
+	io_uring_prep_writev(sqe, fd, &iov, 1, 0);
+
+	ret = io_uring_submit_and_wait(ring, wait);
+	if (ret < 0) {
+		fprintf(stderr, "submit failed: %s\n", strerror(-ret));
+		return 1;
+	}
+	return 0;
+}
+
+/* Wait for one CQE and consume it; failures are reported prefixed with
+ * 'stage' for diagnosis.  Returns 0 on success, 1 on error. */
+static int wait_cqe(struct io_uring *ring, const char *stage)
+{
+	struct io_uring_cqe *cqe;
+	int err;
+
+	err = io_uring_wait_cqe(ring, &cqe);
+	if (err) {
+		fprintf(stderr, "%s wait_cqe failed %d\n", stage, err);
+		return 1;
+	}
+	if (cqe->res < 0) {
+		fprintf(stderr, "%s cqe failed %d\n", stage, cqe->res);
+		return 1;
+	}
+
+	io_uring_cqe_seen(ring, cqe);
+	return 0;
+}
+
+/*
+ * Read back "<tmpdir>/<fname>" and compare its contents to 'expect'.
+ * Returns 0 on match, 1 on mismatch or I/O error.
+ */
+static int verify_file(const char *tmpdir, const char *fname, const char* expect)
+{
+	int fd;
+	char buf[512];
+	int err = 0;
+
+	memset(buf, 0, sizeof(buf));
+
+	fd = open_tempfile(tmpdir, fname);
+	if (fd < 0)
+		return 1;
+
+	if (read(fd, buf, sizeof(buf) - 1) < 0) {
+		/* BUGFIX: fd used to leak on this path */
+		perror("read");
+		close(fd);
+		return 1;
+	}
+
+	if (strcmp(buf, expect) != 0) {
+		fprintf(stderr, "content mismatch for %s\n"
+			"got:\n%s\n"
+			"expected:\n%s\n",
+			fname, buf, expect);
+		err = 1;
+	}
+
+	close(fd);
+	return err;
+}
+
+/* Best-effort removal of the temp files and directory.  Errors are
+ * deliberately ignored: this also runs after partial test runs where some
+ * files never got created. */
+static void cleanup(const char *tmpdir)
+{
+	static const char *names[] = { "shared", "parent1", "parent2", "child" };
+	char path[32];
+	unsigned int i;
+
+	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
+		snprintf(path, sizeof(path), "%s/%s", tmpdir, names[i]);
+		unlink(path);
+	}
+
+	rmdir(tmpdir);
+}
+
+/*
+ * Share one io_uring across fork(): the ring lives in MAP_SHARED memory,
+ * a process-shared barrier sequences parent and child, and each side both
+ * submits SQEs and reaps the other side's completions.  File contents are
+ * verified at the end.  Returns 0 on success.
+ */
+int main(int argc, char *argv[])
+{
+	struct forktestmem *shmem;
+	char tmpdir[] = "forktmpXXXXXX";
+	int shared_fd;
+	int ret;
+	pid_t p;
+
+	if (argc > 1)
+		return 0;
+
+	/* fd must be -1 for portable MAP_ANONYMOUS usage (mmap(2)) */
+	shmem = mmap(0, sizeof(struct forktestmem), PROT_READ|PROT_WRITE,
+		   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+	if (shmem == MAP_FAILED) {
+		/* BUGFIX: mmap reports failure as MAP_FAILED, not NULL */
+		fprintf(stderr, "mmap failed\n");
+		exit(1);
+	}
+
+	pthread_barrierattr_init(&shmem->barrierattr);
+	pthread_barrierattr_setpshared(&shmem->barrierattr, 1);
+	pthread_barrier_init(&shmem->barrier, &shmem->barrierattr, 2);
+
+	ret = io_uring_queue_init(10, &shmem->ring, 0);
+	if (ret < 0) {
+		fprintf(stderr, "queue init failed\n");
+		exit(1);
+	}
+
+	if (mkdtemp(tmpdir) == NULL) {
+		fprintf(stderr, "temp directory creation failed\n");
+		exit(1);
+	}
+
+	shared_fd = open_tempfile(tmpdir, "shared");
+
+	/*
+	 * First do a write before the fork, to test whether child can
+	 * reap that
+	 */
+	if (submit_write(&shmem->ring, shared_fd, "before fork: write shared fd\n", 0))
+		goto errcleanup;
+
+	p = fork();
+	switch (p) {
+	case -1:
+		fprintf(stderr, "fork failed\n");
+		goto errcleanup;
+
+	default: {
+		/* parent */
+		int parent_fd1;
+		int parent_fd2;
+		int wstatus;
+
+		/* wait till fork is started up */
+		pthread_barrier_wait(&shmem->barrier);
+
+		parent_fd1 = open_tempfile(tmpdir, "parent1");
+		parent_fd2 = open_tempfile(tmpdir, "parent2");
+
+		/* do a parent write to the shared fd */
+		if (submit_write(&shmem->ring, shared_fd, "parent: write shared fd\n", 0))
+			goto errcleanup;
+
+		/* do a parent write to an fd where same numbered fd exists in child */
+		if (submit_write(&shmem->ring, parent_fd1, "parent: write parent fd 1\n", 0))
+			goto errcleanup;
+
+		/* do a parent write to an fd where no same numbered fd exists in child */
+		if (submit_write(&shmem->ring, parent_fd2, "parent: write parent fd 2\n", 0))
+			goto errcleanup;
+
+		/* wait to switch read/write roles with child */
+		pthread_barrier_wait(&shmem->barrier);
+
+		/* now wait for child to exit, to ensure we still can read completion */
+		waitpid(p, &wstatus, 0);
+		if (WEXITSTATUS(wstatus) != 0) {
+			fprintf(stderr, "child failed\n");
+			goto errcleanup;
+		}
+
+		if (wait_cqe(&shmem->ring, "p cqe 1"))
+			goto errcleanup;
+
+		if (wait_cqe(&shmem->ring, "p cqe 2"))
+			goto errcleanup;
+
+		/* check that IO can still be submitted after child exited */
+		if (submit_write(&shmem->ring, shared_fd, "parent: write shared fd after child exit\n", 0))
+			goto errcleanup;
+
+		if (wait_cqe(&shmem->ring, "p cqe 3"))
+			goto errcleanup;
+
+		break;
+	}
+	case 0: {
+		/* child */
+		int child_fd;
+
+		/* wait till fork is started up */
+		pthread_barrier_wait(&shmem->barrier);
+
+		child_fd = open_tempfile(tmpdir, "child");
+
+		/* reap the four completions produced so far (incl. pre-fork) */
+		if (wait_cqe(&shmem->ring, "c cqe shared"))
+			exit(1);
+
+		if (wait_cqe(&shmem->ring, "c cqe parent 1"))
+			exit(1);
+
+		if (wait_cqe(&shmem->ring, "c cqe parent 2"))
+			exit(1);
+
+		if (wait_cqe(&shmem->ring, "c cqe parent 3"))
+			exit(1);
+
+		/* wait to switch read/write roles with parent */
+		pthread_barrier_wait(&shmem->barrier);
+
+		if (submit_write(&shmem->ring, child_fd, "child: write child fd\n", 0))
+			exit(1);
+
+		/* ensure both writes have finished before child exits */
+		if (submit_write(&shmem->ring, shared_fd, "child: write shared fd\n", 2))
+			exit(1);
+
+		exit(0);
+	}
+	}
+
+	if (verify_file(tmpdir, "shared",
+			 "before fork: write shared fd\n"
+			 "parent: write shared fd\n"
+			 "child: write shared fd\n"
+			 "parent: write shared fd after child exit\n") ||
+	    verify_file(tmpdir, "parent1", "parent: write parent fd 1\n") ||
+	    verify_file(tmpdir, "parent2", "parent: write parent fd 2\n") ||
+	    verify_file(tmpdir, "child", "child: write child fd\n"))
+		goto errcleanup;
+
+	cleanup(tmpdir);
+	exit(0);
+
+errcleanup:
+	cleanup(tmpdir);
+	exit(1);
+}
diff --git a/test/b19062a56726-test.c b/test/b19062a56726-test.c
new file mode 100644
index 0000000..6a0f686
--- /dev/null
+++ b/test/b19062a56726-test.c
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+// autogenerated by syzkaller (https://github.com/google/syzkaller)
+
+#include <endian.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "liburing.h"
+#include "../src/syscall.h"
+
+/*
+ * Syzkaller reproducer: call io_uring_setup(2) with a params struct built
+ * at a fixed mapped address.  Keep the stores byte-for-byte as generated;
+ * the offsets appear to line up with struct io_uring_params fields
+ * (0x208 ~ flags = 5, 0x20c ~ sq_thread_cpu = 0x400) — unverified, and
+ * the setup call is expected to fail; the test passes if it doesn't crash
+ * the kernel.
+ */
+int main(int argc, char *argv[])
+{
+  if (argc > 1)
+    return 0;
+
+  mmap((void *) 0x20000000, 0x1000000, 3, 0x32, -1, 0);
+
+  *(uint32_t*)0x20000200 = 0;
+  *(uint32_t*)0x20000204 = 0;
+  *(uint32_t*)0x20000208 = 5;
+  *(uint32_t*)0x2000020c = 0x400;
+  *(uint32_t*)0x20000210 = 0;
+  *(uint32_t*)0x20000214 = 0;
+  *(uint32_t*)0x20000218 = 0;
+  *(uint32_t*)0x2000021c = 0;
+  *(uint32_t*)0x20000220 = 0;
+  *(uint32_t*)0x20000224 = 0;
+  *(uint32_t*)0x20000228 = 0;
+  *(uint32_t*)0x2000022c = 0;
+  *(uint32_t*)0x20000230 = 0;
+  *(uint32_t*)0x20000234 = 0;
+  *(uint32_t*)0x20000238 = 0;
+  *(uint32_t*)0x2000023c = 0;
+  *(uint32_t*)0x20000240 = 0;
+  *(uint32_t*)0x20000244 = 0;
+  *(uint64_t*)0x20000248 = 0;
+  *(uint32_t*)0x20000250 = 0;
+  *(uint32_t*)0x20000254 = 0;
+  *(uint32_t*)0x20000258 = 0;
+  *(uint32_t*)0x2000025c = 0;
+  *(uint32_t*)0x20000260 = 0;
+  *(uint32_t*)0x20000264 = 0;
+  *(uint32_t*)0x20000268 = 0;
+  *(uint32_t*)0x2000026c = 0;
+  *(uint64_t*)0x20000270 = 0;
+  __sys_io_uring_setup(0xc9f, (struct io_uring_params *) 0x20000200);
+  return 0;
+}
diff --git a/test/b5837bd5311d-test.c b/test/b5837bd5311d-test.c
new file mode 100644
index 0000000..57a2b58
--- /dev/null
+++ b/test/b5837bd5311d-test.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: Check to see if wait_nr is being honored.
+ */
+#include <stdio.h>
+#include "liburing.h"
+
+/*
+ * Regression test: verify io_uring_submit_and_wait() honors wait_nr by
+ * arming a ~10 ms timeout first and then asking to wait for two events
+ * after submitting a nop.  If wait_nr works, both CQEs (nop + timeout)
+ * are available afterwards; if not, only the nop would be.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int ret;
+	struct __kernel_timespec ts = {
+		.tv_sec = 0,
+		.tv_nsec = 10000000
+	};
+
+	if (argc > 1)
+		return 0;
+
+	if (io_uring_queue_init(4, &ring, 0) != 0) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	/*
+	 * First, submit the timeout sqe so we can actually finish the test
+	 * if everything is in working order.
+	 */
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		return 1;
+	}
+	io_uring_prep_timeout(sqe, &ts, (unsigned)-1, 0);
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "Got submit %d, expected 1\n", ret);
+		return 1;
+	}
+
+	/*
+	 * Next, submit a nop and wait for two events. If everything is
+	 * working as it should, we should be waiting for about 10 ms and we
+	 * should see two cqes. Otherwise, execution continues immediately
+	 * and we see only one cqe.
+	 */
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		return 1;
+	}
+	io_uring_prep_nop(sqe);
+
+	ret = io_uring_submit_and_wait(&ring, 2);
+	if (ret != 1) {
+		fprintf(stderr, "Got submit %d, expected 1\n", ret);
+		return 1;
+	}
+
+	/* both completions must already be reapable without waiting */
+	if (io_uring_peek_cqe(&ring, &cqe) != 0) {
+		fprintf(stderr, "Unable to peek cqe!\n");
+		return 1;
+	}
+
+	io_uring_cqe_seen(&ring, cqe);
+
+	if (io_uring_peek_cqe(&ring, &cqe) != 0) {
+		fprintf(stderr, "Unable to peek cqe!\n");
+		return 1;
+	}
+
+	io_uring_queue_exit(&ring);
+	return 0;
+}
diff --git a/test/ce593a6c480a-test.c b/test/ce593a6c480a-test.c
new file mode 100644
index 0000000..c6949f0
--- /dev/null
+++ b/test/ce593a6c480a-test.c
@@ -0,0 +1,135 @@
+/*
+ * Test 5.7 regression with task_work not being run while a task is
+ * waiting on another event in the kernel.
+ */
+#include <errno.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/eventfd.h>
+#include <unistd.h>
+#include <pthread.h>
+#include "liburing.h"
+#include "helpers.h"
+
+static int use_sqpoll = 0;
+
+/*
+ * Bump the eventfd counter to wake any reader.  eventfd(2) expects an
+ * 8-byte host-order integer; the previous char-array initializer
+ * {0,0,0,0,0,0,1,0} wrote 1 << 48 on little-endian machines (still
+ * non-zero, so it "worked", but was not the intended value of 1).
+ */
+void notify_fd(int fd)
+{
+	eventfd_t val = 1;
+	int ret;
+
+	ret = write(fd, &val, sizeof(val));
+	if (ret < 0)
+		perror("write");
+}
+
+/* Thread body: sleep one second, then signal the eventfd passed in 'arg'. */
+void *delay_set_fd_from_thread(void *arg)
+{
+	int fd = (intptr_t) arg;
+
+	sleep(1);
+	notify_fd(fd);
+	return NULL;
+}
+
+/*
+ * Reproducer for the 5.7 task_work hang: poll an eventfd via io_uring,
+ * then block in a plain read(2) on the ring's registered eventfd while a
+ * helper thread triggers the polled fd one second later.  On broken
+ * kernels the read() never returns.  Returns 0 on success.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring_params p = {};
+	struct io_uring ring;
+	int loop_fd, other_fd;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe = NULL;
+	int ret, use_fd;
+	char buf[8] = {0, 0, 0, 0, 0, 0, 1};
+	pthread_t tid;
+
+	if (argc > 1)
+		return 0;
+
+	/* Create an eventfd to be registered with the loop to be
+	 * notified of events being ready
+	 */
+	loop_fd = eventfd(0, EFD_CLOEXEC);
+	if (loop_fd == -1) {
+		fprintf(stderr, "eventfd errno=%d\n", errno);
+		return 1;
+	}
+
+	/* Create an eventfd that can create events */
+	use_fd = other_fd = eventfd(0, EFD_CLOEXEC);
+	if (other_fd == -1) {
+		fprintf(stderr, "eventfd errno=%d\n", errno);
+		return 1;
+	}
+
+	if (use_sqpoll)
+		p.flags = IORING_SETUP_SQPOLL;
+
+	/* Setup the ring with a registered event fd to be notified on events */
+	ret = t_create_ring_params(8, &ring, &p);
+	if (ret == T_SETUP_SKIP)
+		return 0;
+	else if (ret < 0)
+		return ret;
+
+	ret = io_uring_register_eventfd(&ring, loop_fd);
+	if (ret < 0) {
+		fprintf(stderr, "register_eventfd=%d\n", ret);
+		return 1;
+	}
+
+	/* SQPOLL requires registered files; index 0 replaces the raw fd */
+	if (use_sqpoll) {
+		ret = io_uring_register_files(&ring, &other_fd, 1);
+		if (ret < 0) {
+			fprintf(stderr, "register_files=%d\n", ret);
+			return 1;
+		}
+		use_fd = 0;
+	}
+
+	/* Submit a poll operation to wait on an event in other_fd */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_poll_add(sqe, use_fd, POLLIN);
+	sqe->user_data = 1;
+	if (use_sqpoll)
+		sqe->flags |= IOSQE_FIXED_FILE;
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit=%d\n", ret);
+		return 1;
+	}
+
+	/*
+	 * CASE 3: Hangs forever in Linux 5.7.5; Works in Linux 5.6.0 When this
+	 * code is uncommented, we don't see a notification on other_fd until
+	 * _after_ we have started the read on loop_fd. In that case, the read() on
+	 * loop_fd seems to hang forever.
+	*/
+    	pthread_create(&tid, NULL, delay_set_fd_from_thread,
+			(void*) (intptr_t) other_fd);
+
+	/* Wait on the event fd for an event to be ready */
+	ret = read(loop_fd, buf, 8);
+	if (ret < 0) {
+		perror("read");
+		return 1;
+	} else if (ret != 8) {
+		fprintf(stderr, "Odd-sized eventfd read: %d\n", ret);
+		return 1;
+	}
+
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait_cqe=%d\n", ret);
+		return ret;
+	}
+	if (cqe->res < 0) {
+		fprintf(stderr, "cqe->res=%d\n", cqe->res);
+		return 1;
+	}
+
+	io_uring_cqe_seen(&ring, cqe);
+	return 0;
+}
diff --git a/test/close-opath.c b/test/close-opath.c
new file mode 100644
index 0000000..f267dad
--- /dev/null
+++ b/test/close-opath.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: MIT
+
+#define _GNU_SOURCE 1
+#define _FILE_OFFSET_BITS 64
+
+// Test program for io_uring IORING_OP_CLOSE with O_PATH file.
+// Author: Clayton Harris <bugs@claycon.org>, 2020-06-07
+
+// linux                5.6.14-300.fc32.x86_64
+// gcc                  10.1.1-1.fc32
+// liburing.x86_64      0.5-1.fc32
+
+// gcc -O2 -Wall -Wextra -std=c11 -o close_opath close_opath.c -luring
+// ./close_opath testfilepath
+
+#include <errno.h>
+#include <fcntl.h>
+#include <liburing.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+/* One test case: the human-readable flag name and the open(2) flags it
+ * stands for. */
+typedef struct
+{
+	const char *const flnames;
+	const int oflags;
+} oflgs_t;
+
+/*
+ * Close 'fd' through IORING_OP_CLOSE and wait for the result.
+ * -EOPNOTSUPP/-EINVAL/-EBADF from the CQE are tolerated (kernels without
+ * CLOSE support, or O_PATH restrictions); any other negative result is a
+ * failure.  Returns 0 on success/tolerated error, negative on failure.
+ */
+static int test_io_uring_close(struct io_uring *ring, int fd)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "io_uring_get_sqe() failed\n");
+		return -ENOENT;
+	}
+
+	io_uring_prep_close(sqe, fd);
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		fprintf(stderr, "io_uring_submit() failed, errno %d: %s\n",
+			-ret, strerror(-ret));
+		return ret;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "io_uring_wait_cqe() failed, errno %d: %s\n",
+			-ret, strerror(-ret));
+		return ret;
+	}
+
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+
+	/* tolerated: kernel may lack IORING_OP_CLOSE or reject O_PATH fds */
+	if (ret < 0 && ret != -EOPNOTSUPP && ret != -EINVAL && ret != -EBADF) {
+		fprintf(stderr, "io_uring close() failed, errno %d: %s\n",
+			-ret, strerror(-ret));
+		return ret;
+	}
+
+	return 0;
+}
+
+/* openat(2) 'path' with the flags described by 'oflgs'.
+ * Returns the fd, or -errno on failure (after logging). */
+static int open_file(const char *path, const oflgs_t *oflgs)
+{
+	int fd = openat(AT_FDCWD, path, oflgs->oflags, 0);
+
+	if (fd < 0) {
+		/* save errno before fprintf can clobber it */
+		int err = errno;
+
+		fprintf(stderr, "openat(%s, %s) failed, errno %d: %s\n",
+			path, oflgs->flnames, err, strerror(err));
+		return -err;
+	}
+
+	return fd;
+}
+
+/*
+ * Open "." once as O_RDONLY and once as O_PATH, and close each fd via
+ * IORING_OP_CLOSE.  The return value is a bitmask: 0x02 for setup/open
+ * failure, 0x04/0x08 for a close failure of the respective case.
+ */
+int main(int argc, char *argv[])
+{
+	const char *fname = ".";
+	struct io_uring ring;
+	int ret, i;
+	static const oflgs_t oflgs[] = {
+		{ "O_RDONLY", O_RDONLY },
+		{ "O_PATH", O_PATH }
+	};
+
+	ret = io_uring_queue_init(2, &ring, 0);
+	if (ret < 0) {
+		fprintf(stderr, "io_uring_queue_init() failed, errno %d: %s\n",
+			-ret, strerror(-ret));
+		return 0x02;
+	}
+
+#define OFLGS_SIZE (sizeof(oflgs) / sizeof(oflgs[0]))
+
+	ret = 0;
+	for (i = 0; i < OFLGS_SIZE; i++) {
+		int fd;
+
+		fd = open_file(fname, &oflgs[i]);
+		if (fd < 0) {
+			ret |= 0x02;
+			break;
+		}
+
+		/* Should always succeed */
+		if (test_io_uring_close(&ring, fd) < 0)
+			ret |= 0x04 << i;
+	}
+#undef OFLGS_SIZE
+
+	io_uring_queue_exit(&ring);
+	return ret;
+}
diff --git a/test/config b/test/config
new file mode 100644
index 0000000..6c0925a
--- /dev/null
+++ b/test/config
@@ -0,0 +1,10 @@
+# Copy this to config.local, uncomment and define values
+#
+# Define tests to exclude from running
+# TEST_EXCLUDE=""
+#
+# Define raw test devices (or files) for test cases, if any
+# declare -A TEST_MAP=()
+#
+# If no TEST_MAP entry exists for a test, use the ones given in TEST_FILES
+# TEST_FILES="/dev/somedevice /data/somefile"
diff --git a/test/connect.c b/test/connect.c
new file mode 100644
index 0000000..0ba3ee6
--- /dev/null
+++ b/test/connect.c
@@ -0,0 +1,395 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Check that IORING_OP_CONNECT works, with and without other side
+ * being open.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+
+#include "liburing.h"
+
+static int no_connect;
+static int use_port;
+
+/*
+ * Create an IPv4 TCP socket.
+ * Returns the socket fd on success, -1 on failure (after perror).
+ */
+static int create_socket(void)
+{
+	int fd;
+
+	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+	if (fd == -1) {
+		perror("socket()");
+		return -1;
+	}
+
+	return fd;
+}
+
+/*
+ * Submit all queued SQEs and wait for exactly one completion. On success
+ * the CQE result is stored in *res and the CQE is marked seen.
+ * Returns 0 on success, 1 on failure.
+ */
+static int submit_and_wait(struct io_uring *ring, int *res)
+{
+	struct io_uring_cqe *cqe;
+	int ret;
+
+	ret = io_uring_submit_and_wait(ring, 1);
+	if (ret != 1) {
+		/* fixed typo in error message: was "io_using_submit" */
+		fprintf(stderr, "io_uring_submit: got %d\n", ret);
+		return 1;
+	}
+
+	/* the wait above guarantees a CQE is available to peek */
+	ret = io_uring_peek_cqe(ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "io_uring_peek_cqe(): no cqe returned");
+		return 1;
+	}
+
+	*res = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return 0;
+}
+
+/*
+ * Issue a one-shot IORING_OP_POLL_ADD for 'mask' on 'fd' and wait for it.
+ * Returns the triggered event mask on success, -1 on failure.
+ */
+static int wait_for(struct io_uring *ring, int fd, int mask)
+{
+	struct io_uring_sqe *sqe;
+	int ret, res;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "unable to get sqe\n");
+		return -1;
+	}
+
+	io_uring_prep_poll_add(sqe, fd, mask);
+	sqe->user_data = 2;
+
+	ret = submit_and_wait(ring, &res);
+	if (ret)
+		return -1;
+
+	/* poll CQE res is the returned event mask, or a negative error */
+	if (res < 0) {
+		fprintf(stderr, "poll(): failed with %d\n", res);
+		return -1;
+	}
+
+	return res;
+}
+
+/*
+ * Bind 'fd' to 127.0.0.1:use_port and start listening.
+ * Returns 0 on success, -1 on failure.
+ */
+static int listen_on_socket(int fd)
+{
+	struct sockaddr_in addr;
+	int ret;
+
+	memset(&addr, 0, sizeof(addr));
+	addr.sin_family = AF_INET;
+	/* use_port is stored as-is (no htons); the connecting side uses the
+	 * same raw value, so both agree on whatever port that maps to */
+	addr.sin_port = use_port;
+	/* 0x0100007fU is 127.0.0.1 in network byte order on a little-endian
+	 * host — NOTE(review): assumes little-endian; confirm portability */
+	addr.sin_addr.s_addr = 0x0100007fU;
+
+	ret = bind(fd, (struct sockaddr*)&addr, sizeof(addr));
+	if (ret == -1) {
+		perror("bind()");
+		return -1;
+	}
+
+	ret = listen(fd, 128);
+	if (ret == -1) {
+		perror("listen()");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare 'fd' for connecting: enable SO_REUSEPORT/SO_REUSEADDR and fill
+ * *addr with 127.0.0.1:use_port.
+ * Returns -1 on setsockopt failure, otherwise the inet_aton() result
+ * (1 on success, 0 on parse failure). NOTE(review): callers only check
+ * for -1, so an inet_aton failure would go unnoticed — it cannot fail
+ * for the fixed literal "127.0.0.1", but worth confirming.
+ */
+static int configure_connect(int fd, struct sockaddr_in* addr)
+{
+	int ret, val = 1;
+
+	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
+	if (ret == -1) {
+		perror("setsockopt()");
+		return -1;
+	}
+
+	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+	if (ret == -1) {
+		perror("setsockopt()");
+		return -1;
+	}
+
+	memset(addr, 0, sizeof(*addr));
+	addr->sin_family = AF_INET;
+	addr->sin_port = use_port;
+	ret = inet_aton("127.0.0.1", &addr->sin_addr);
+	return ret;
+}
+
+/*
+ * Connect 'fd' to 127.0.0.1:use_port via IORING_OP_CONNECT. If the
+ * connect completes asynchronously (-EINPROGRESS), poll for the outcome
+ * and fetch the final status with SO_ERROR.
+ * On success returns 0 and stores the connect status in *code
+ * (0 or a negative errno); returns -1 on infrastructure failure.
+ */
+static int connect_socket(struct io_uring *ring, int fd, int *code)
+{
+	struct sockaddr_in addr;
+	int ret, res;
+	socklen_t code_len = sizeof(*code);
+	struct io_uring_sqe *sqe;
+
+	if (configure_connect(fd, &addr) == -1)
+		return -1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "unable to get sqe\n");
+		return -1;
+	}
+
+	io_uring_prep_connect(sqe, (struct sockaddr*)&addr, sizeof(addr)) used below
+	io_uring_prep_connect(sqe, fd, (struct sockaddr*)&addr, sizeof(addr));
+	sqe->user_data = 1;
+
+	ret = submit_and_wait(ring, &res);
+	if (ret)
+		return -1;
+
+	if (res == -EINPROGRESS) {
+		/* connection still in flight; wait for writability or error */
+		ret = wait_for(ring, fd, POLLOUT | POLLHUP | POLLERR);
+		if (ret == -1)
+			return -1;
+
+		int ev = (ret & POLLOUT) || (ret & POLLHUP) || (ret & POLLERR);
+		if (!ev) {
+			fprintf(stderr, "poll(): returned invalid value %#x\n", ret);
+			return -1;
+		}
+
+		/* retrieve the final connect(2) status */
+		ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, code, &code_len);
+		if (ret == -1) {
+			perror("getsockopt()");
+			return -1;
+		}
+	} else
+		*code = res;
+	return 0;
+}
+
+/*
+ * Connect with no listener present: expect -ECONNREFUSED. If the kernel
+ * lacks IORING_OP_CONNECT (-EINVAL/-EBADF/-EOPNOTSUPP), set no_connect
+ * so the caller can skip the remaining tests.
+ * Returns 0 on success/skip, -1 on failure.
+ */
+static int test_connect_with_no_peer(struct io_uring *ring)
+{
+	int connect_fd;
+	int ret, code;
+
+	connect_fd = create_socket();
+	if (connect_fd == -1)
+		return -1;
+
+	ret = connect_socket(ring, connect_fd, &code);
+	if (ret == -1)
+		goto err;
+
+	if (code != -ECONNREFUSED) {
+		if (code == -EINVAL || code == -EBADF || code == -EOPNOTSUPP) {
+			fprintf(stdout, "No connect support, skipping\n");
+			no_connect = 1;
+			goto out;
+		}
+		fprintf(stderr, "connect failed with %d\n", code);
+		goto err;
+	}
+
+out:
+	close(connect_fd);
+	return 0;
+
+err:
+	close(connect_fd);
+	return -1;
+}
+
+/*
+ * Happy path: create a listener on 127.0.0.1:use_port, then connect to
+ * it via io_uring and expect a status of 0.
+ * Returns 0 on success, -1 on failure.
+ */
+static int test_connect(struct io_uring *ring)
+{
+	int accept_fd;
+	int connect_fd;
+	int ret, code;
+
+	accept_fd = create_socket();
+	if (accept_fd == -1)
+		return -1;
+
+	ret = listen_on_socket(accept_fd);
+	if (ret == -1)
+		goto err1;
+
+	connect_fd = create_socket();
+	if (connect_fd == -1)
+		goto err1;
+
+	ret = connect_socket(ring, connect_fd, &code);
+	if (ret == -1)
+		goto err2;
+
+	if (code != 0) {
+		fprintf(stderr, "connect failed with %d\n", code);
+		goto err2;
+	}
+
+	close(connect_fd);
+	close(accept_fd);
+
+	return 0;
+
+err2:
+	close(connect_fd);
+
+err1:
+	close(accept_fd);
+	return -1;
+}
+
+/*
+ * Verify that a linked timeout cancels a CONNECT that cannot complete:
+ * the listener's backlog is filled by a first client so the second
+ * connect stalls in the kernel, then the CONNECT should complete with
+ * -ECANCELED and the LINK_TIMEOUT with -ETIME.
+ * Returns 0 on success, -1 on failure.
+ */
+static int test_connect_timeout(struct io_uring *ring)
+{
+	int connect_fd[2] = {-1, -1};
+	int accept_fd = -1;
+	int ret, code;
+	struct sockaddr_in addr;
+	struct io_uring_sqe *sqe;
+	struct __kernel_timespec ts = {.tv_sec = 0, .tv_nsec = 100000};
+
+	connect_fd[0] = create_socket();
+	if (connect_fd[0] == -1)
+		return -1;
+
+	connect_fd[1] = create_socket();
+	if (connect_fd[1] == -1)
+		goto err;
+
+	accept_fd = create_socket();
+	if (accept_fd == -1)
+		goto err;
+
+	if (configure_connect(connect_fd[0], &addr) == -1)
+		goto err;
+
+	if (configure_connect(connect_fd[1], &addr) == -1)
+		goto err;
+
+	ret = bind(accept_fd, (struct sockaddr*)&addr, sizeof(addr));
+	if (ret == -1) {
+		perror("bind()");
+		goto err;
+	}
+
+	ret = listen(accept_fd, 0);  // no backlog in order to block connect_fd[1]
+	if (ret == -1) {
+		perror("listen()");
+		goto err;
+	}
+
+	// We first connect with one client socket in order to fill the accept queue.
+	ret = connect_socket(ring, connect_fd[0], &code);
+	if (ret == -1 || code != 0) {
+		fprintf(stderr, "unable to connect\n");
+		goto err;
+	}
+
+	// We deliberately do not accept on the listening socket; this leaves
+	// the second connect request stalled by the OS.
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "unable to get sqe\n");
+		goto err;
+	}
+
+	io_uring_prep_connect(sqe, connect_fd[1], (struct sockaddr*)&addr, sizeof(addr));
+	sqe->user_data = 1;
+	sqe->flags |= IOSQE_IO_LINK;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "unable to get sqe\n");
+		goto err;
+	}
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret != 2) {
+		fprintf(stderr, "submitted %d\n", ret);
+		/* was "return -1", which leaked all three sockets */
+		goto err;
+	}
+
+	for (int i = 0; i < 2; i++) {
+		int expected;
+		struct io_uring_cqe *cqe;
+
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			/* was "return -1", which leaked all three sockets */
+			goto err;
+		}
+
+		expected = (cqe->user_data == 1) ? -ECANCELED : -ETIME;
+		if (expected != cqe->res) {
+			fprintf(stderr, "cqe %d, res %d, wanted %d\n",
+					(int)cqe->user_data, cqe->res, expected);
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	close(connect_fd[0]);
+	close(connect_fd[1]);
+	close(accept_fd);
+	return 0;
+
+err:
+	if (connect_fd[0] != -1)
+		close(connect_fd[0]);
+	if (connect_fd[1] != -1)
+		close(connect_fd[1]);
+	if (accept_fd != -1)
+		close(accept_fd);
+	return -1;
+}
+
+/*
+ * Driver: pick a random port, run the no-peer test (which may detect a
+ * kernel without CONNECT support and skip), then the connect and
+ * connect-timeout tests. Returns 0 on success/skip, 1 on failure.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "io_uring_queue_setup() = %d\n", ret);
+		return 1;
+	}
+
+	/* random port in [4096, 65535] to avoid collisions across runs */
+	srand(getpid());
+	use_port = (rand() % 61440) + 4096;
+
+	ret = test_connect_with_no_peer(&ring);
+	if (ret == -1) {
+		fprintf(stderr, "test_connect_with_no_peer(): failed\n");
+		return 1;
+	}
+	if (no_connect)
+		return 0;
+
+	ret = test_connect(&ring);
+	if (ret == -1) {
+		fprintf(stderr, "test_connect(): failed\n");
+		return 1;
+	}
+
+	ret = test_connect_timeout(&ring);
+	if (ret == -1) {
+		fprintf(stderr, "test_connect_timeout(): failed\n");
+		return 1;
+	}
+
+	io_uring_queue_exit(&ring);
+	return 0;
+}
diff --git a/test/cq-full.c b/test/cq-full.c
new file mode 100644
index 0000000..5c4041b
--- /dev/null
+++ b/test/cq-full.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test CQ ring overflow
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+/*
+ * Queue and submit 'n' NOP requests on 'ring'.
+ * Returns 0 on success, 1 on failure.
+ */
+static int queue_n_nops(struct io_uring *ring, int n)
+{
+	struct io_uring_sqe *sqe;
+	int i, ret;
+
+	for (i = 0; i < n; i++) {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			printf("get sqe failed\n");
+			goto err;
+		}
+
+		io_uring_prep_nop(sqe);
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		/* check the error case first; in the previous ordering this
+		 * branch was unreachable because ret < 0 implies ret < n */
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	} else if (ret < n) {
+		printf("Submitted only %d\n", ret);
+		goto err;
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Submit 12 NOPs into a ring whose CQ holds 8 entries, drain the CQ and
+ * verify either 4 overflows were counted, or (on IORING_FEAT_NODROP
+ * kernels) no completion was dropped. Returns 0 on success, 1 on failure.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_params p;
+	struct io_uring ring;
+	int i, ret;
+
+	if (argc > 1)
+		return 0;
+
+	memset(&p, 0, sizeof(p));
+	ret = io_uring_queue_init_params(4, &ring, &p);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+
+	}
+
+	if (queue_n_nops(&ring, 4))
+		goto err;
+	if (queue_n_nops(&ring, 4))
+		goto err;
+	if (queue_n_nops(&ring, 4))
+		goto err;
+
+	/* drain every available completion, counting them in i */
+	i = 0;
+	do {
+		ret = io_uring_peek_cqe(&ring, &cqe);
+		if (ret < 0) {
+			if (ret == -EAGAIN)
+				break;
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+		/* NOTE(review): cqe is non-NULL whenever peek returns 0, so
+		 * this check never fires; the -EAGAIN break above ends the loop */
+		if (!cqe)
+			break;
+		i++;
+	} while (1);
+
+	if (i < 8 ||
+	    ((*ring.cq.koverflow != 4) && !(p.features & IORING_FEAT_NODROP))) {
+		printf("CQ overflow fail: %d completions, %u overflow\n", i,
+				*ring.cq.koverflow);
+		goto err;
+	}
+
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
diff --git a/test/cq-overflow.c b/test/cq-overflow.c
new file mode 100644
index 0000000..945dc93
--- /dev/null
+++ b/test/cq-overflow.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various CQ ring overflow tests
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define FILE_SIZE	(256 * 1024)
+#define BS		4096
+#define BUFFERS		(FILE_SIZE / BS)
+
+static struct iovec *vecs;
+
+#define ENTRIES	8
+
+/*
+ * Flood an 8-entry ring with O_DIRECT reads to force CQ overflow, then
+ * reap and verify completions. *drops must be zeroed by the caller; it
+ * is set when overflow (or -EBUSY on NODROP kernels) is observed. If
+ * 'fault' is set, one iovec gets a NULL base so an -EFAULT completion is
+ * tolerated. Returns 0 on success, 1 on failure.
+ */
+static int test_io(const char *file, unsigned long usecs, unsigned *drops, int fault)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring_params p;
+	unsigned reaped, total;
+	struct io_uring ring;
+	int nodrop, i, fd, ret;
+
+	fd = open(file, O_RDONLY | O_DIRECT);
+	if (fd < 0) {
+		perror("file open");
+		/* was "goto err", which ran io_uring_queue_exit() on an
+		 * uninitialized ring */
+		return 1;
+	}
+
+	memset(&p, 0, sizeof(p));
+	ret = io_uring_queue_init_params(ENTRIES, &ring, &p);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		close(fd);
+		return 1;
+	}
+	nodrop = 0;
+	if (p.features & IORING_FEAT_NODROP)
+		nodrop = 1;
+
+	/* phase 1: submit half the buffers one at a time */
+	total = 0;
+	for (i = 0; i < BUFFERS / 2; i++) {
+		off_t offset;
+
+		sqe = io_uring_get_sqe(&ring);
+		if (!sqe) {
+			fprintf(stderr, "sqe get failed\n");
+			goto err;
+		}
+		offset = BS * (rand() % BUFFERS);
+		if (fault && i == ENTRIES + 4)
+			vecs[i].iov_base = NULL;
+		io_uring_prep_readv(sqe, fd, &vecs[i], 1, offset);
+
+		ret = io_uring_submit(&ring);
+		if (nodrop && ret == -EBUSY) {
+			*drops = 1;
+			total = i;
+			break;
+		} else if (ret != 1) {
+			fprintf(stderr, "submit got %d, wanted %d\n", ret, 1);
+			total = i;
+			break;
+		}
+		total++;
+	}
+
+	if (*drops)
+		goto reap_it;
+
+	/* let the first batch complete and overflow the CQ ring */
+	usleep(usecs);
+
+	/* phase 2: submit the remaining buffers */
+	for (i = total; i < BUFFERS; i++) {
+		off_t offset;
+
+		sqe = io_uring_get_sqe(&ring);
+		if (!sqe) {
+			fprintf(stderr, "sqe get failed\n");
+			goto err;
+		}
+		offset = BS * (rand() % BUFFERS);
+		io_uring_prep_readv(sqe, fd, &vecs[i], 1, offset);
+
+		ret = io_uring_submit(&ring);
+		if (nodrop && ret == -EBUSY) {
+			*drops = 1;
+			break;
+		} else if (ret != 1) {
+			fprintf(stderr, "submit got %d, wanted %d\n", ret, 1);
+			break;
+		}
+		total++;
+	}
+
+reap_it:
+	reaped = 0;
+	do {
+		if (nodrop) {
+			/* nodrop should never lose events */
+			if (reaped == total)
+				break;
+		} else {
+			if (reaped + *ring.cq.koverflow == total)
+				break;
+		}
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			goto err;
+		}
+		if (cqe->res != BS) {
+			if (!(fault && cqe->res == -EFAULT)) {
+				fprintf(stderr, "cqe res %d, wanted %d\n",
+						cqe->res, BS);
+				goto err;
+			}
+		}
+		io_uring_cqe_seen(&ring, cqe);
+		reaped++;
+	} while (1);
+
+	/* everything should have been reaped by now */
+	if (!io_uring_peek_cqe(&ring, &cqe)) {
+		fprintf(stderr, "found unexpected completion\n");
+		goto err;
+	}
+
+	if (!nodrop) {
+		*drops = *ring.cq.koverflow;
+	} else if (*ring.cq.koverflow) {
+		fprintf(stderr, "Found %u overflows\n", *ring.cq.koverflow);
+		goto err;
+	}
+
+	io_uring_queue_exit(&ring);
+	close(fd);
+	return 0;
+err:
+	close(fd);
+	io_uring_queue_exit(&ring);
+	return 1;
+}
+
+/*
+ * Reap up to 'nr_events' CQEs (blocking per-CQE if 'do_wait'), checking
+ * that user_data arrives in sequence 0, 1, 2, ...
+ * Returns the number reaped if any, otherwise the last peek/wait error
+ * (e.g. -EAGAIN when nothing is pending), or -EINVAL on sequence error.
+ */
+static int reap_events(struct io_uring *ring, unsigned nr_events, int do_wait)
+{
+	struct io_uring_cqe *cqe;
+	/* NOTE(review): signed i compared against unsigned nr_events; fine
+	 * for the small counts used here */
+	int i, ret = 0, seq = 0;
+
+	for (i = 0; i < nr_events; i++) {
+		if (do_wait)
+			ret = io_uring_wait_cqe(ring, &cqe);
+		else
+			ret = io_uring_peek_cqe(ring, &cqe);
+		if (ret) {
+			if (ret != -EAGAIN)
+				fprintf(stderr, "cqe peek failed: %d\n", ret);
+			break;
+		}
+		if (cqe->user_data != seq) {
+			fprintf(stderr, "cqe sequence out-of-order\n");
+			fprintf(stderr, "got %d, wanted %d\n", (int) cqe->user_data,
+					seq);
+			return -EINVAL;
+		}
+		seq++;
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return i ? i : ret;
+}
+
+/*
+ * Submit some NOPs and watch if the overflow is correct
+ */
+/*
+ * Submit some NOPs and watch if the overflow is correct
+ */
+/*
+ * Submits 4 batches of 4 NOPs into a 4-entry ring (CQ size 8), so 8
+ * completions should overflow. Verifies koverflow == 8 on legacy
+ * kernels; on IORING_FEAT_NODROP kernels -EBUSY ends submission early.
+ * Returns 0 on success, 1 on failure.
+ */
+static int test_overflow(void)
+{
+	struct io_uring ring;
+	struct io_uring_params p;
+	struct io_uring_sqe *sqe;
+	unsigned pending;
+	int ret, i, j;
+
+	memset(&p, 0, sizeof(p));
+	ret = io_uring_queue_init_params(4, &ring, &p);
+	if (ret) {
+		fprintf(stderr, "io_uring_queue_init failed %d\n", ret);
+		return 1;
+	}
+
+	/* submit 4x4 SQEs, should overflow the ring by 8 */
+	pending = 0;
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 4; j++) {
+			sqe = io_uring_get_sqe(&ring);
+			if (!sqe) {
+				fprintf(stderr, "get sqe failed\n");
+				goto err;
+			}
+
+			io_uring_prep_nop(sqe);
+			sqe->user_data = (i * 4) + j;
+		}
+
+		ret = io_uring_submit(&ring);
+		if (ret == 4) {
+			pending += 4;
+			continue;
+		}
+		if (p.features & IORING_FEAT_NODROP) {
+			if (ret == -EBUSY)
+				break;
+		}
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	/* we should now have 8 completions ready */
+	ret = reap_events(&ring, pending, 0);
+	if (ret < 0)
+		goto err;
+
+	if (!(p.features & IORING_FEAT_NODROP)) {
+		if (*ring.cq.koverflow != 8) {
+			fprintf(stderr, "cq ring overflow %d, expected 8\n",
+					*ring.cq.koverflow);
+			goto err;
+		}
+	}
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
+
+/*
+ * Driver: run the NOP overflow test, then repeat the file-backed I/O
+ * overflow test with an increasing delay until drops are observed (max
+ * 40 iterations), and finally once more with an injected -EFAULT.
+ * Returns 0 on success, 1 on failure.
+ */
+int main(int argc, char *argv[])
+{
+	unsigned iters, drops;
+	unsigned long usecs;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = test_overflow();
+	if (ret) {
+		printf("test_overflow failed\n");
+		return ret;
+	}
+
+	t_create_file(".basic-rw", FILE_SIZE);
+
+	vecs = t_create_buffers(BUFFERS, BS);
+
+	/* grow the sleep 20% per iteration until an overflow is provoked */
+	iters = 0;
+	usecs = 1000;
+	do {
+		drops = 0;
+
+		if (test_io(".basic-rw", usecs, &drops, 0)) {
+			fprintf(stderr, "test_io nofault failed\n");
+			goto err;
+		}
+		if (drops)
+			break;
+		usecs = (usecs * 12) / 10;
+		iters++;
+	} while (iters < 40);
+
+	if (test_io(".basic-rw", usecs, &drops, 0)) {
+		fprintf(stderr, "test_io nofault failed\n");
+		goto err;
+	}
+
+	if (test_io(".basic-rw", usecs, &drops, 1)) {
+		fprintf(stderr, "test_io fault failed\n");
+		goto err;
+	}
+
+	unlink(".basic-rw");
+	return 0;
+err:
+	unlink(".basic-rw");
+	return 1;
+}
diff --git a/test/cq-peek-batch.c b/test/cq-peek-batch.c
new file mode 100644
index 0000000..6c47bec
--- /dev/null
+++ b/test/cq-peek-batch.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test CQ peek-batch
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+/*
+ * Queue and submit 'n' NOPs with user_data set to offset, offset+1, ...
+ * Returns 0 on success, 1 on failure.
+ */
+static int queue_n_nops(struct io_uring *ring, int n, int offset)
+{
+	struct io_uring_sqe *sqe;
+	int i, ret;
+
+	for (i = 0; i < n; i++) {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			printf("get sqe failed\n");
+			goto err;
+		}
+
+		io_uring_prep_nop(sqe);
+		sqe->user_data = i + offset;
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		/* check the error case first; in the previous ordering this
+		 * branch was unreachable because ret < 0 implies ret < n */
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	} else if (ret < n) {
+		printf("Submitted only %d\n", ret);
+		goto err;
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+#define CHECK_BATCH(ring, got, cqes, count, expected) do {\
+	got = io_uring_peek_batch_cqe((ring), cqes, count);\
+	if (got != expected) {\
+		printf("Got %d CQs, expected %d\n", got, expected);\
+		goto err;\
+	}\
+} while(0)
+
+/*
+ * Verify io_uring_peek_batch_cqe(): an empty ring yields 0, then two
+ * rounds of 4 NOPs each yield batches of 4 with sequential user_data.
+ * NOTE(review): uses PRIu64 but <inttypes.h> is not included directly —
+ * presumably pulled in transitively via liburing.h; confirm.
+ * Returns 0 on success, 1 on failure.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring_cqe *cqes[8];
+	struct io_uring ring;
+	int ret, i;
+	unsigned got;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(4, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+
+	}
+
+	CHECK_BATCH(&ring, got, cqes, 4, 0);
+	if (queue_n_nops(&ring, 4, 0))
+		goto err;
+
+	CHECK_BATCH(&ring, got, cqes, 4, 4);
+	for (i=0;i<4;i++) {
+		if (i != cqes[i]->user_data) {
+			printf("Got user_data %" PRIu64 ", expected %d\n",
+				(uint64_t) cqes[i]->user_data, i);
+			goto err;
+		}
+	}
+
+	if (queue_n_nops(&ring, 4, 4))
+		goto err;
+
+	io_uring_cq_advance(&ring, 4);
+	CHECK_BATCH(&ring, got, cqes, 4, 4);
+	for (i=0;i<4;i++) {
+		if (i + 4 != cqes[i]->user_data) {
+			printf("Got user_data %" PRIu64 ", expected %d\n",
+				(uint64_t) cqes[i]->user_data, i + 4);
+			goto err;
+		}
+	}
+
+	io_uring_cq_advance(&ring, 8);
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
diff --git a/test/cq-ready.c b/test/cq-ready.c
new file mode 100644
index 0000000..7af7e54
--- /dev/null
+++ b/test/cq-ready.c
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test CQ ready
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+/*
+ * Queue and submit 'n' NOP requests on 'ring'.
+ * Returns 0 on success, 1 on failure.
+ */
+static int queue_n_nops(struct io_uring *ring, int n)
+{
+	struct io_uring_sqe *sqe;
+	int i, ret;
+
+	for (i = 0; i < n; i++) {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			printf("get sqe failed\n");
+			goto err;
+		}
+
+		io_uring_prep_nop(sqe);
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		/* check the error case first; in the previous ordering this
+		 * branch was unreachable because ret < 0 implies ret < n */
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	} else if (ret < n) {
+		printf("Submitted only %d\n", ret);
+		goto err;
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+#define CHECK_READY(ring, expected) do {\
+	ready = io_uring_cq_ready((ring));\
+	if (ready != expected) {\
+		printf("Got %d CQs ready, expected %d\n", ready, expected);\
+		goto err;\
+	}\
+} while(0)
+
+/*
+ * Verify io_uring_cq_ready() tracking as NOPs complete and the CQ is
+ * advanced in varying step sizes. Returns 0 on success, 1 on failure.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret;
+	unsigned ready;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(4, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+
+	}
+
+	CHECK_READY(&ring, 0);
+	if (queue_n_nops(&ring, 4))
+		goto err;
+
+	CHECK_READY(&ring, 4);
+	io_uring_cq_advance(&ring, 4);
+	CHECK_READY(&ring, 0);
+	if (queue_n_nops(&ring, 4))
+		goto err;
+
+	CHECK_READY(&ring, 4);
+
+	/* partial advances must decrement the ready count exactly */
+	io_uring_cq_advance(&ring, 1);
+	CHECK_READY(&ring, 3);
+
+	io_uring_cq_advance(&ring, 2);
+	CHECK_READY(&ring, 1);
+
+	io_uring_cq_advance(&ring, 1);
+	CHECK_READY(&ring, 0);
+
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
diff --git a/test/cq-size.c b/test/cq-size.c
new file mode 100644
index 0000000..b7dd5b4
--- /dev/null
+++ b/test/cq-size.c
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test CQ ring sizing
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+/*
+ * Test IORING_SETUP_CQSIZE: a 64-entry CQ request must yield at least 64
+ * entries, and a zero-sized CQ must be rejected with -EINVAL.
+ * Returns 0 on success/skip, 1 on failure.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring_params p;
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	memset(&p, 0, sizeof(p));
+	p.flags = IORING_SETUP_CQSIZE;
+	p.cq_entries = 64;
+
+	ret = io_uring_queue_init_params(4, &ring, &p);
+	if (ret) {
+		if (ret == -EINVAL) {
+			printf("Skipped, not supported on this kernel\n");
+			goto done;
+		}
+		printf("ring setup failed\n");
+		return 1;
+	}
+
+	if (p.cq_entries < 64) {
+		printf("cq entries invalid (%d)\n", p.cq_entries);
+		goto err;
+	}
+	io_uring_queue_exit(&ring);
+
+	memset(&p, 0, sizeof(p));
+	p.flags = IORING_SETUP_CQSIZE;
+	p.cq_entries = 0;
+
+	/*
+	 * liburing returns the negated error code directly; errno is not
+	 * reliably set by it, so the old "errno != EINVAL" check was wrong.
+	 * Also only goto err (which calls io_uring_queue_exit) when the init
+	 * actually succeeded — the ring is uninitialized on failure.
+	 */
+	ret = io_uring_queue_init_params(4, &ring, &p);
+	if (ret >= 0) {
+		printf("zero sized cq ring succeeded\n");
+		goto err;
+	}
+	if (ret != -EINVAL) {
+		printf("zero sized cq ring failed with %d\n", ret);
+		return 1;
+	}
+
+done:
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
diff --git a/test/d4ae271dfaae-test.c b/test/d4ae271dfaae-test.c
new file mode 100644
index 0000000..80d3f71
--- /dev/null
+++ b/test/d4ae271dfaae-test.c
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Test case for SQPOLL missing a 'ret' clear in case of busy.
+ *
+ * Heavily based on a test case from
+ * Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define FILE_SIZE	(128 * 1024)
+
+/*
+ * Regression test for SQPOLL missing a 'ret' clear when busy: submit 10
+ * fixed-file reads with small delays and verify each completes with 4096
+ * bytes. Requires root (SQPOLL); skipped otherwise.
+ * Returns 0 on success/skip, non-zero on failure.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int i, fd, ret;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct iovec *iovecs;
+	struct io_uring_params p;
+	char *fname;
+	void *buf;
+
+	if (geteuid()) {
+		fprintf(stdout, "Test requires root, skipping\n");
+		return 0;
+	}
+
+	memset(&p, 0, sizeof(p));
+	p.flags = IORING_SETUP_SQPOLL;
+	ret = t_create_ring_params(4, &ring, &p);
+	if (ret == T_SETUP_SKIP)
+		return 0;
+	else if (ret < 0)
+		return 1;
+
+	if (argc > 1) {
+		fname = argv[1];
+	} else {
+		fname = ".sqpoll.tmp";
+		t_create_file(fname, FILE_SIZE);
+	}
+
+	fd = open(fname, O_RDONLY | O_DIRECT);
+	if (fd < 0) {
+		perror("open");
+		/* ret was 0 from ring setup here; without this the test
+		 * exited successfully even though the open failed */
+		ret = 1;
+		goto out;
+	}
+
+	iovecs = t_calloc(10, sizeof(struct iovec));
+	for (i = 0; i < 10; i++) {
+		t_posix_memalign(&buf, 4096, 4096);
+		iovecs[i].iov_base = buf;
+		iovecs[i].iov_len = 4096;
+	}
+
+	ret = io_uring_register_files(&ring, &fd, 1);
+	if (ret < 0) {
+		fprintf(stderr, "register files %d\n", ret);
+		goto out;
+	}
+
+	/* submit one read at a time, pausing so the sq thread can idle */
+	for (i = 0; i < 10; i++) {
+		sqe = io_uring_get_sqe(&ring);
+		if (!sqe)
+			break;
+
+		io_uring_prep_readv(sqe, 0, &iovecs[i], 1, 0);
+		sqe->flags |= IOSQE_FIXED_FILE;
+
+		ret = io_uring_submit(&ring);
+		usleep(1000);
+	}
+
+	for (i = 0; i < 10; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			break;
+		}
+		if (cqe->res != 4096) {
+			fprintf(stderr, "ret=%d, wanted 4096\n", cqe->res);
+			ret = 1;
+			break;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	close(fd);
+out:
+	if (fname != argv[1])
+		unlink(fname);
+	io_uring_queue_exit(&ring);
+	return ret;
+}
diff --git a/test/d77a67ed5f27-test.c b/test/d77a67ed5f27-test.c
new file mode 100644
index 0000000..e56fdcd
--- /dev/null
+++ b/test/d77a67ed5f27-test.c
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <signal.h>
+#include <stdlib.h>
+#include "liburing.h"
+#include "helpers.h"
+
+/* SIGALRM handler: the sq thread failed to wake up in time — abort. */
+static void sig_alrm(int sig)
+{
+	fprintf(stderr, "Timed out!\n");
+	exit(1);
+}
+
+/*
+ * Regression test: verify a submission wakes an idle SQPOLL thread. Let
+ * the sq thread go to sleep (idle is 100ms, we wait 150ms), then submit
+ * a NOP under a 1s alarm; the handler aborts if nothing completes.
+ * Returns 0 on success/skip, 1 on failure.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring_params p;
+	struct io_uring ring;
+	int ret, data;
+
+	if (argc > 1)
+		return 0;
+
+	signal(SIGALRM, sig_alrm);
+
+	memset(&p, 0, sizeof(p));
+	p.sq_thread_idle = 100;
+	p.flags = IORING_SETUP_SQPOLL;
+	ret = t_create_ring_params(4, &ring, &p);
+	if (ret == T_SETUP_SKIP)
+		return 0;
+	else if (ret < 0)
+		return 1;
+
+	/* make sure sq thread is sleeping at this point */
+	usleep(150000);
+	alarm(1);
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "sqe get failed\n");
+		return 1;
+	}
+
+	io_uring_prep_nop(sqe);
+	io_uring_sqe_set_data(sqe, (void *) (unsigned long) 42);
+	io_uring_submit_and_wait(&ring, 1);
+
+	ret = io_uring_peek_cqe(&ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "cqe get failed\n");
+		return 1;
+	}
+
+	/* round-trip the user_data marker to prove it is our completion */
+	data = (unsigned long) io_uring_cqe_get_data(cqe);
+	if (data != 42) {
+		fprintf(stderr, "invalid data: %d\n", data);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/test/defer.c b/test/defer.c
new file mode 100644
index 0000000..885cf5c
--- /dev/null
+++ b/test/defer.c
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: MIT */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/uio.h>
+#include <stdbool.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+struct test_context {
+	struct io_uring *ring;
+	struct io_uring_sqe **sqes;
+	struct io_uring_cqe *cqes;
+	int nr;
+};
+
+/* Release the sqe/cqe arrays owned by ctx and zero the struct so it can
+ * be reused or freed twice safely. */
+static void free_context(struct test_context *ctx)
+{
+	free(ctx->sqes);
+	free(ctx->cqes);
+	memset(ctx, 0, sizeof(*ctx));
+}
+
+/*
+ * Prepare ctx with 'nr' NOP SQEs on 'ring' (user_data = index) plus a
+ * cqes array for wait_cqes() to fill. The SQEs are queued but NOT
+ * submitted; callers may tweak flags before submitting.
+ * Returns 0 on success, 1 on failure (ctx freed).
+ */
+static int init_context(struct test_context *ctx, struct io_uring *ring, int nr)
+{
+	struct io_uring_sqe *sqe;
+	int i;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->nr = nr;
+	ctx->ring = ring;
+	ctx->sqes = t_malloc(nr * sizeof(*ctx->sqes));
+	ctx->cqes = t_malloc(nr * sizeof(*ctx->cqes));
+
+	if (!ctx->sqes || !ctx->cqes)
+		goto err;
+
+	for (i = 0; i < nr; i++) {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe)
+			goto err;
+		io_uring_prep_nop(sqe);
+		sqe->user_data = i;
+		ctx->sqes[i] = sqe;
+	}
+
+	return 0;
+err:
+	free_context(ctx);
+	printf("init context failed\n");
+	return 1;
+}
+
+/*
+ * Wait for ctx->nr completions, copying each CQE into ctx->cqes[] in
+ * arrival order and marking it seen. Returns 0 on success, 1 on failure.
+ */
+static int wait_cqes(struct test_context *ctx)
+{
+	int ret, i;
+	struct io_uring_cqe *cqe;
+
+	for (i = 0; i < ctx->nr; i++) {
+		ret = io_uring_wait_cqe(ctx->ring, &cqe);
+
+		if (ret < 0) {
+			printf("wait_cqes: wait completion %d\n", ret);
+			return 1;
+		}
+		/* copy before seen(): the ring slot may be reused afterwards */
+		memcpy(&ctx->cqes[i], cqe, sizeof(*cqe));
+		io_uring_cqe_seen(ctx->ring, cqe);
+	}
+
+	return 0;
+}
+
+/*
+ * Link 100 NOPs and verify completions keep their user_data in order
+ * even if requests get cancelled along the link chain.
+ * Returns 0 on success, 1 on failure.
+ */
+static int test_cancelled_userdata(struct io_uring *ring)
+{
+	struct test_context ctx;
+	int ret, i, nr = 100;
+
+	if (init_context(&ctx, ring, nr))
+		return 1;
+
+	for (i = 0; i < nr; i++)
+		ctx.sqes[i]->flags |= IOSQE_IO_LINK;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	if (wait_cqes(&ctx))
+		goto err;
+
+	for (i = 0; i < nr; i++) {
+		if (i != ctx.cqes[i].user_data) {
+			printf("invalid user data\n");
+			goto err;
+		}
+	}
+
+	free_context(&ctx);
+	return 0;
+err:
+	free_context(&ctx);
+	return 1;
+}
+
+/*
+ * On an SQPOLL+IOPOLL ring, link 100 NOPs and check the failure
+ * cascade: the first request is expected to fail with -EINVAL and every
+ * subsequent linked request with -ECANCELED.
+ * NOTE(review): presumably NOP is rejected on IOPOLL rings, which is
+ * what produces the -EINVAL — confirm against the kernel behavior.
+ * Returns 0 on success, 1 on failure.
+ */
+static int test_thread_link_cancel(struct io_uring *ring)
+{
+	struct test_context ctx;
+	int ret, i, nr = 100;
+
+	if (init_context(&ctx, ring, nr))
+		return 1;
+
+	for (i = 0; i < nr; i++)
+		ctx.sqes[i]->flags |= IOSQE_IO_LINK;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	if (wait_cqes(&ctx))
+		goto err;
+
+	for (i = 0; i < nr; i++) {
+		bool fail = false;
+
+		if (i == 0)
+			fail = (ctx.cqes[i].res != -EINVAL);
+		else
+			fail = (ctx.cqes[i].res != -ECANCELED);
+
+		if (fail) {
+			printf("invalid status\n");
+			goto err;
+		}
+	}
+
+	free_context(&ctx);
+	return 0;
+err:
+	free_context(&ctx);
+	return 1;
+}
+
+/*
+ * Submit 3 pairs of (timeout + linked timeout) where each pair is also
+ * marked IOSQE_IO_DRAIN, and make sure all completions arrive — i.e.
+ * draining does not deadlock with linked timeouts.
+ * Returns 0 on success, 1 on failure.
+ */
+static int test_drain_with_linked_timeout(struct io_uring *ring)
+{
+	const int nr = 3;
+	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0, };
+	struct test_context ctx;
+	int ret, i;
+
+	if (init_context(&ctx, ring, nr * 2))
+		return 1;
+
+	/* overwrite the prepared NOPs: even slots become timeouts, odd
+	 * slots their linked timeouts */
+	for (i = 0; i < nr; i++) {
+		io_uring_prep_timeout(ctx.sqes[2 * i], &ts, 0, 0);
+		ctx.sqes[2 * i]->flags |= IOSQE_IO_LINK | IOSQE_IO_DRAIN;
+		io_uring_prep_link_timeout(ctx.sqes[2 * i + 1], &ts, 0);
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	if (wait_cqes(&ctx))
+		goto err;
+
+	free_context(&ctx);
+	return 0;
+err:
+	free_context(&ctx);
+	return 1;
+}
+
+/*
+ * Submit 'nr' NOPs all marked IOSQE_IO_DRAIN and wait for them; used to
+ * check that drained requests still complete after overflow/drop events.
+ * Returns 0 on success, 1 on failure.
+ */
+static int run_drained(struct io_uring *ring, int nr)
+{
+	struct test_context ctx;
+	int ret, i;
+
+	if (init_context(&ctx, ring, nr))
+		return 1;
+
+	for (i = 0; i < nr; i++)
+		ctx.sqes[i]->flags |= IOSQE_IO_DRAIN;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	if (wait_cqes(&ctx))
+		goto err;
+
+	free_context(&ctx);
+	return 0;
+err:
+	free_context(&ctx);
+	return 1;
+}
+
+/*
+ * Submit NOPs without reaping until the kernel-visible CQ overflow
+ * counter reaches 1000, then verify drained requests still complete
+ * (they must not hang on the overflowed completions).
+ * Only run on kernels without IORING_FEAT_NODROP. Returns 0/1.
+ */
+static int test_overflow_hung(struct io_uring *ring)
+{
+	struct io_uring_sqe *sqe;
+	int ret, nr = 10;
+
+	while (*ring->cq.koverflow != 1000) {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			printf("get sqe failed\n");
+			return 1;
+		}
+
+		io_uring_prep_nop(sqe);
+		ret = io_uring_submit(ring);
+		if (ret <= 0) {
+			printf("sqe submit failed: %d\n", ret);
+			return 1;
+		}
+	}
+
+	return run_drained(ring, nr);
+}
+
+/*
+ * Fake 1000 dropped SQEs by writing the shared kdropped counter
+ * directly, then verify drained requests still complete.
+ * Returns 0 on success, 1 on failure.
+ */
+static int test_dropped_hung(struct io_uring *ring)
+{
+	int nr = 10;
+
+	*ring->sq.kdropped = 1000;
+	return run_drained(ring, nr);
+}
+
+/*
+ * Driver: set up a default ring, an IOPOLL ring and an SQPOLL+IOPOLL
+ * ring, then run the drain/cancel regression tests.
+ * NOTE(review): no_sqthread is never set to non-zero here, so the
+ * "skipped, not root" branch is dead; t_create_ring handles the
+ * permission skip instead. Rings are not exited on the success path.
+ * Returns 0 on success/skip, non-zero on failure.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring, poll_ring, sqthread_ring;
+	struct io_uring_params p;
+	int ret, no_sqthread = 0;
+
+	if (argc > 1)
+		return 0;
+
+	memset(&p, 0, sizeof(p));
+	ret = io_uring_queue_init_params(1000, &ring, &p);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+	}
+
+	ret = io_uring_queue_init(1000, &poll_ring, IORING_SETUP_IOPOLL);
+	if (ret) {
+		printf("poll_ring setup failed\n");
+		return 1;
+	}
+
+	ret = t_create_ring(1000, &sqthread_ring,
+				IORING_SETUP_SQPOLL | IORING_SETUP_IOPOLL);
+	if (ret == T_SETUP_SKIP)
+		return 0;
+	else if (ret < 0)
+		return 1;
+
+	ret = test_cancelled_userdata(&poll_ring);
+	if (ret) {
+		printf("test_cancelled_userdata failed\n");
+		return ret;
+	}
+
+	if (no_sqthread) {
+		printf("test_thread_link_cancel: skipped, not root\n");
+	} else {
+		ret = test_thread_link_cancel(&sqthread_ring);
+		if (ret) {
+			printf("test_thread_link_cancel failed\n");
+			return ret;
+		}
+	}
+
+	/* overflow counting only exists on pre-NODROP kernels */
+	if (!(p.features & IORING_FEAT_NODROP)) {
+		ret = test_overflow_hung(&ring);
+		if (ret) {
+			printf("test_overflow_hung failed\n");
+			return ret;
+		}
+	}
+
+	ret = test_dropped_hung(&ring);
+	if (ret) {
+		printf("test_dropped_hung failed\n");
+		return ret;
+	}
+
+	ret = test_drain_with_linked_timeout(&ring);
+	if (ret) {
+		printf("test_drain_with_linked_timeout failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/double-poll-crash.c b/test/double-poll-crash.c
new file mode 100644
index 0000000..2a012e5
--- /dev/null
+++ b/test/double-poll-crash.c
@@ -0,0 +1,186 @@
+// https://syzkaller.appspot.com/bug?id=5c9918d20f771265ad0ffae3c8f3859d24850692
+// autogenerated by syzkaller (https://github.com/google/syzkaller)
+
+#include <endian.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include "liburing.h"
+#include "../src/syscall.h"
+
+#define SIZEOF_IO_URING_SQE 64
+#define SIZEOF_IO_URING_CQE 16
+#define SQ_HEAD_OFFSET 0
+#define SQ_TAIL_OFFSET 64
+#define SQ_RING_MASK_OFFSET 256
+#define SQ_RING_ENTRIES_OFFSET 264
+#define SQ_FLAGS_OFFSET 276
+#define SQ_DROPPED_OFFSET 272
+#define CQ_HEAD_OFFSET 128
+#define CQ_TAIL_OFFSET 192
+#define CQ_RING_MASK_OFFSET 260
+#define CQ_RING_ENTRIES_OFFSET 268
+#define CQ_RING_OVERFLOW_OFFSET 284
+#define CQ_FLAGS_OFFSET 280
+#define CQ_CQES_OFFSET 320
+
+/*
+ * syzkaller-generated helper: io_uring_setup() plus the two mmaps a
+ * real application would perform, at caller-chosen fixed addresses.
+ * The SQ and CQ rings share one mapping sized to the larger of the two
+ * (assumes the single-mmap ring layout — verify per kernel).
+ * Deliberately no error checking: this is a crash reproducer, not a
+ * library.
+ */
+static long syz_io_uring_setup(volatile long a0, volatile long a1,
+                               volatile long a2, volatile long a3,
+                               volatile long a4, volatile long a5)
+{
+  uint32_t entries = (uint32_t)a0;
+  struct io_uring_params* setup_params = (struct io_uring_params*)a1;
+  void* vma1 = (void*)a2;
+  void* vma2 = (void*)a3;
+  void** ring_ptr_out = (void**)a4;
+  void** sqes_ptr_out = (void**)a5;
+  uint32_t fd_io_uring = __sys_io_uring_setup(entries, setup_params);
+  /* SQ ring ends at its index array; CQ ring ends at its CQE array */
+  uint32_t sq_ring_sz =
+      setup_params->sq_off.array + setup_params->sq_entries * sizeof(uint32_t);
+  uint32_t cq_ring_sz = setup_params->cq_off.cqes +
+                        setup_params->cq_entries * SIZEOF_IO_URING_CQE;
+  uint32_t ring_sz = sq_ring_sz > cq_ring_sz ? sq_ring_sz : cq_ring_sz;
+  *ring_ptr_out = mmap(vma1, ring_sz, PROT_READ | PROT_WRITE,
+                       MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd_io_uring,
+                       IORING_OFF_SQ_RING);
+  uint32_t sqes_sz = setup_params->sq_entries * SIZEOF_IO_URING_SQE;
+  *sqes_ptr_out =
+      mmap(vma2, sqes_sz, PROT_READ | PROT_WRITE,
+           MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd_io_uring, IORING_OFF_SQES);
+  return fd_io_uring;
+}
+
+/*
+ * syzkaller-generated helper: copy one 64-byte SQE into SQE-array slot
+ * sqes_index and publish that index through the SQ ring, using a
+ * release store on the tail so the kernel observes the fully-written
+ * entry.
+ */
+static long syz_io_uring_submit(volatile long a0, volatile long a1,
+                                volatile long a2, volatile long a3)
+{
+  char* ring_ptr = (char*)a0;
+  char* sqes_ptr = (char*)a1;
+  char* sqe = (char*)a2;
+  uint32_t sqes_index = (uint32_t)a3;
+  uint32_t sq_ring_entries = *(uint32_t*)(ring_ptr + SQ_RING_ENTRIES_OFFSET);
+  uint32_t cq_ring_entries = *(uint32_t*)(ring_ptr + CQ_RING_ENTRIES_OFFSET);
+  /* assumes the SQ index array sits just past the CQEs in the shared
+   * mapping, rounded up to 64 bytes — consistent with the fixed offsets
+   * defined above; verify against the running kernel's ring layout */
+  uint32_t sq_array_off =
+      (CQ_CQES_OFFSET + cq_ring_entries * SIZEOF_IO_URING_CQE + 63) & ~63;
+  if (sq_ring_entries)
+    sqes_index %= sq_ring_entries;
+  char* sqe_dest = sqes_ptr + sqes_index * SIZEOF_IO_URING_SQE;
+  memcpy(sqe_dest, sqe, SIZEOF_IO_URING_SQE);
+  uint32_t sq_ring_mask = *(uint32_t*)(ring_ptr + SQ_RING_MASK_OFFSET);
+  uint32_t* sq_tail_ptr = (uint32_t*)(ring_ptr + SQ_TAIL_OFFSET);
+  uint32_t sq_tail = *sq_tail_ptr & sq_ring_mask;
+  uint32_t sq_tail_next = *sq_tail_ptr + 1;
+  uint32_t* sq_array = (uint32_t*)(ring_ptr + sq_array_off);
+  *(sq_array + sq_tail) = sqes_index;
+  __atomic_store_n(sq_tail_ptr, sq_tail_next, __ATOMIC_RELEASE);
+  return 0;
+}
+
+/*
+ * syzkaller-generated helper: open a device node.  a0 == 0xc or 0xb
+ * selects /dev/char/<maj>:<min> or /dev/block/<maj>:<min>, with major
+ * and minor taken from a1/a2 truncated to 8 bits.  Any other a0 is
+ * treated as a path template: each '#' is replaced with a decimal
+ * digit drawn from a1 (least significant first), then the path is
+ * opened with flags a2.
+ */
+static long syz_open_dev(volatile long a0, volatile long a1, volatile long a2)
+{
+  if (a0 == 0xc || a0 == 0xb) {
+    char buf[128];
+    sprintf(buf, "/dev/%s/%d:%d", a0 == 0xc ? "char" : "block", (uint8_t)a1,
+            (uint8_t)a2);
+    return open(buf, O_RDWR, 0);
+  } else {
+    char buf[1024];
+    char* hash;
+    strncpy(buf, (char*)a0, sizeof(buf) - 1);
+    buf[sizeof(buf) - 1] = 0;
+    while ((hash = strchr(buf, '#'))) {
+      *hash = '0' + (char)(a1 % 10);
+      a1 /= 10;
+    }
+    return open(buf, a2, 0);
+  }
+}
+
+#ifndef __NR_io_uring_enter
+#define __NR_io_uring_enter 426
+#endif
+
+uint64_t r[4] = {0xffffffffffffffff, 0x0, 0x0, 0xffffffffffffffff};
+
+int main(int argc, char *argv[])
+{
+
+  if (argc > 1)
+    return 0;
+
+  /* fixed VM layout the generated pokes below depend on */
+  mmap((void *)0x1ffff000ul, 0x1000ul, 0ul, 0x32ul, -1, 0ul);
+  mmap((void *)0x20000000ul, 0x1000000ul, 7ul, 0x32ul, -1, 0ul);
+  mmap((void *)0x21000000ul, 0x1000ul, 0ul, 0x32ul, -1, 0ul);
+  intptr_t res = 0;
+  /* io_uring_params at 0x20000480: zeroed fields, one slot set to -1 */
+  *(uint32_t*)0x20000484 = 0;
+  *(uint32_t*)0x20000488 = 0;
+  *(uint32_t*)0x2000048c = 0;
+  *(uint32_t*)0x20000490 = 0;
+  *(uint32_t*)0x20000498 = -1;
+  *(uint32_t*)0x2000049c = 0;
+  *(uint32_t*)0x200004a0 = 0;
+  *(uint32_t*)0x200004a4 = 0;
+  res = -1;
+  /* set up the ring and stash fd / ring base / SQE base in r[0..2] */
+  res = syz_io_uring_setup(0x6ad4, 0x20000480, 0x20ee7000, 0x20ffb000,
+                           0x20000180, 0x20000040);
+  if (res != -1) {
+    r[0] = res;
+    r[1] = *(uint64_t*)0x20000180;
+    r[2] = *(uint64_t*)0x20000040;
+  }
+  res = -1;
+  /* open /dev/char/4:21 (a tty) — the fd the poll request targets */
+  res = syz_open_dev(0xc, 4, 0x15);
+  if (res != -1)
+    r[3] = res;
+  /* hand-built SQE at 0x20000000: opcode 6 (IORING_OP_POLL_ADD —
+   * verify against the kernel ABI) on fd r[3], all other fields zero */
+  *(uint8_t*)0x20000000 = 6;
+  *(uint8_t*)0x20000001 = 0;
+  *(uint16_t*)0x20000002 = 0;
+  *(uint32_t*)0x20000004 = r[3];
+  *(uint64_t*)0x20000008 = 0;
+  *(uint64_t*)0x20000010 = 0;
+  *(uint32_t*)0x20000018 = 0;
+  *(uint16_t*)0x2000001c = 0;
+  *(uint16_t*)0x2000001e = 0;
+  *(uint64_t*)0x20000020 = 0;
+  *(uint16_t*)0x20000028 = 0;
+  *(uint16_t*)0x2000002a = 0;
+  *(uint8_t*)0x2000002c = 0;
+  *(uint8_t*)0x2000002d = 0;
+  *(uint8_t*)0x2000002e = 0;
+  *(uint8_t*)0x2000002f = 0;
+  *(uint8_t*)0x20000030 = 0;
+  *(uint8_t*)0x20000031 = 0;
+  *(uint8_t*)0x20000032 = 0;
+  *(uint8_t*)0x20000033 = 0;
+  *(uint8_t*)0x20000034 = 0;
+  *(uint8_t*)0x20000035 = 0;
+  *(uint8_t*)0x20000036 = 0;
+  *(uint8_t*)0x20000037 = 0;
+  *(uint8_t*)0x20000038 = 0;
+  *(uint8_t*)0x20000039 = 0;
+  *(uint8_t*)0x2000003a = 0;
+  *(uint8_t*)0x2000003b = 0;
+  *(uint8_t*)0x2000003c = 0;
+  *(uint8_t*)0x2000003d = 0;
+  *(uint8_t*)0x2000003e = 0;
+  *(uint8_t*)0x2000003f = 0;
+  syz_io_uring_submit(r[1], r[2], 0x20000000, 0);
+  /* deliberately oversized to_submit value from the fuzzer corpus */
+  __sys_io_uring_enter(r[0], 0x20450c, 0, 0ul, 0ul);
+  /* termios ioctl (0x5404 — presumably TCSETSF; confirm) racing the
+   * pending poll on the tty is what triggered the double-poll crash */
+  *(uint32_t*)0x20000080 = 0x7ff;
+  *(uint32_t*)0x20000084 = 0x8b7;
+  *(uint32_t*)0x20000088 = 3;
+  *(uint32_t*)0x2000008c = 0x101;
+  *(uint8_t*)0x20000090 = 9;
+  memcpy((void*)0x20000091, "\xaf\x09\x01\xbc\xf9\xc6\xe4\x92\x86\x51\x7d\x7f"
+                            "\xbd\x43\x7d\x16\x69\x3e\x05",
+         19);
+  ioctl(r[3], 0x5404, 0x20000080ul);
+  return 0;
+}
diff --git a/test/eeed8b54e0df-test.c b/test/eeed8b54e0df-test.c
new file mode 100644
index 0000000..b6e27cc
--- /dev/null
+++ b/test/eeed8b54e0df-test.c
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: -EAGAIN handling
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define BLOCK	4096
+
+#ifndef RWF_NOWAIT
+#define RWF_NOWAIT	8
+#endif
+
+/*
+ * Create "testfile", fill it with one BLOCK of zeroed data, sync it and
+ * evict it from the page cache so a later RWF_NOWAIT read is likely to
+ * see -EAGAIN.  Returns an O_RDWR fd, or -1 on failure.
+ */
+static int get_file_fd(void)
+{
+	ssize_t ret;
+	char *buf;
+	int fd;
+
+	fd = open("testfile", O_RDWR | O_CREAT, 0644);
+	if (fd < 0) {
+		perror("open file");
+		return -1;
+	}
+
+	buf = t_malloc(BLOCK);
+	/* zero the buffer: t_malloc() leaves it indeterminate, and writing
+	 * uninitialized heap bytes to the file is flagged by valgrind/MSan */
+	memset(buf, 0, BLOCK);
+	ret = write(fd, buf, BLOCK);
+	if (ret != BLOCK) {
+		if (ret < 0)
+			perror("write");
+		else
+			printf("Short write\n");
+		goto err;
+	}
+	fsync(fd);
+
+	/* drop the freshly written page so the NOWAIT read misses cache */
+	if (posix_fadvise(fd, 0, 4096, POSIX_FADV_DONTNEED)) {
+		perror("fadvise");
+err:
+		close(fd);
+		free(buf);
+		return -1;
+	}
+
+	free(buf);
+	return fd;
+}
+
+/* Close the test file opened by get_file_fd() and remove it from disk. */
+static void put_file_fd(int fd)
+{
+	close(fd);
+	unlink("testfile");
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct iovec iov;
+	int ret, fd;
+
+	if (argc > 1)
+		return 0;
+
+	/* read buffer; leaked on exit, which is fine for a one-shot test */
+	iov.iov_base = t_malloc(4096);
+	iov.iov_len = 4096;
+
+	ret = io_uring_queue_init(2, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		return 1;
+	}
+
+	/* file contents were written, synced and dropped from the page
+	 * cache by get_file_fd(), so an uncached read is likely */
+	fd = get_file_fd();
+	if (fd < 0)
+		return 1;
+
+	/* RWF_NOWAIT: request must complete inline instead of blocking */
+	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
+	sqe->rw_flags = RWF_NOWAIT;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		printf("Got submit %d, expected 1\n", ret);
+		goto err;
+	}
+
+	/* peek rather than wait: a NOWAIT request should already be done */
+	ret = io_uring_peek_cqe(&ring, &cqe);
+	if (ret) {
+		printf("Ring peek got %d\n", ret);
+		goto err;
+	}
+
+	/* either the provoked -EAGAIN or a full inline read is acceptable */
+	if (cqe->res != -EAGAIN && cqe->res != 4096) {
+		printf("cqe error: %d\n", cqe->res);
+		goto err;
+	}
+
+	put_file_fd(fd);
+	return 0;
+err:
+	put_file_fd(fd);
+	return 1;
+}
diff --git a/test/eventfd-disable.c b/test/eventfd-disable.c
new file mode 100644
index 0000000..f172fd7
--- /dev/null
+++ b/test/eventfd-disable.c
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test disable/enable notifications through eventfd
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+
+#include "liburing.h"
+
+int main(int argc, char *argv[])
+{
+	struct io_uring_params p = {};
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	uint64_t ptr;
+	/* iovec for reading the eventfd's 8-byte counter */
+	struct iovec vec = {
+		.iov_base = &ptr,
+		.iov_len = sizeof(ptr)
+	};
+	int ret, evfd, i;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init_params(64, &ring, &p);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+	}
+
+	evfd = eventfd(0, EFD_CLOEXEC);
+	if (evfd < 0) {
+		perror("eventfd");
+		return 1;
+	}
+
+	ret = io_uring_register_eventfd(&ring, evfd);
+	if (ret) {
+		fprintf(stderr, "failed to register evfd: %d\n", ret);
+		return 1;
+	}
+
+	/* registration must leave notifications enabled by default */
+	if (!io_uring_cq_eventfd_enabled(&ring)) {
+		fprintf(stderr, "eventfd disabled\n");
+		return 1;
+	}
+
+	/* disable notifications; failure means no CQ-flags support — skip */
+	ret = io_uring_cq_eventfd_toggle(&ring, false);
+	if (ret) {
+		fprintf(stdout, "Skipping, CQ flags not available!\n");
+		return 0;
+	}
+
+	/* read the eventfd through the ring itself; while notifications
+	 * are off this request (user_data 1) must never complete */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_readv(sqe, evfd, &vec, 1, 0);
+	sqe->user_data = 1;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	/* fill the remaining 63 entries of the 64-deep ring with nops;
+	 * get_sqe cannot fail here given the sizing */
+	for (i = 0; i < 63; i++) {
+		sqe = io_uring_get_sqe(&ring);
+		io_uring_prep_nop(sqe);
+		sqe->user_data = 2;
+	}
+
+	ret = io_uring_submit(&ring);
+	if (ret != 63) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	/* reap exactly the 63 nops; seeing user_data 1 would mean the
+	 * eventfd fired despite notifications being disabled */
+	for (i = 0; i < 63; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait: %d\n", ret);
+			return 1;
+		}
+
+		switch (cqe->user_data) {
+		case 1: /* eventfd */
+			fprintf(stderr, "eventfd unexpected: %d\n", (int)ptr);
+			return 1;
+		case 2:
+			if (cqe->res) {
+				fprintf(stderr, "nop: %d\n", cqe->res);
+				return 1;
+			}
+			break;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	/* re-enable notifications */
+	ret = io_uring_cq_eventfd_toggle(&ring, true);
+	if (ret) {
+		fprintf(stderr, "io_uring_cq_eventfd_toggle: %d\n", ret);
+		return 1;
+	}
+
+	/* one more nop: its completion now signals the eventfd, which in
+	 * turn completes the still-pending eventfd read */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_nop(sqe);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	/* expect two completions: the nop and the unblocked eventfd read */
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait: %d\n", ret);
+			return 1;
+		}
+
+		switch (cqe->user_data) {
+		case 1: /* eventfd */
+			if (cqe->res != sizeof(ptr)) {
+				fprintf(stderr, "read res: %d\n", cqe->res);
+				return 1;
+			}
+
+			/* exactly one notification was posted */
+			if (ptr != 1) {
+				fprintf(stderr, "eventfd: %d\n", (int)ptr);
+				return 1;
+			}
+			break;
+		case 2:
+			if (cqe->res) {
+				fprintf(stderr, "nop: %d\n", cqe->res);
+				return 1;
+			}
+			break;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	return 0;
+}
diff --git a/test/eventfd-ring.c b/test/eventfd-ring.c
new file mode 100644
index 0000000..67e102c
--- /dev/null
+++ b/test/eventfd-ring.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various nop tests
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+
+#include "liburing.h"
+
+int main(int argc, char *argv[])
+{
+	struct io_uring_params p = {};
+	struct io_uring ring1, ring2;
+	struct io_uring_sqe *sqe;
+	int ret, evfd1, evfd2;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init_params(8, &ring1, &p);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+	}
+	/* feature bit used as a kernel-version proxy here — presumably
+	 * gating on kernels new enough for this scenario; confirm */
+	if (!(p.features & IORING_FEAT_CUR_PERSONALITY)) {
+		fprintf(stdout, "Skipping\n");
+		return 0;
+	}
+	ret = io_uring_queue_init(8, &ring2, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+	}
+
+	evfd1 = eventfd(0, EFD_CLOEXEC);
+	if (evfd1 < 0) {
+		perror("eventfd");
+		return 1;
+	}
+
+	evfd2 = eventfd(0, EFD_CLOEXEC);
+	if (evfd2 < 0) {
+		perror("eventfd");
+		return 1;
+	}
+
+	/* each ring gets its own notification eventfd ... */
+	ret = io_uring_register_eventfd(&ring1, evfd1);
+	if (ret) {
+		fprintf(stderr, "failed to register evfd: %d\n", ret);
+		return 1;
+	}
+
+	ret = io_uring_register_eventfd(&ring2, evfd2);
+	if (ret) {
+		fprintf(stderr, "failed to register evfd: %d\n", ret);
+		return 1;
+	}
+
+	/* ... and each ring polls the OTHER ring's eventfd, creating a
+	 * notification cycle between the two rings */
+	sqe = io_uring_get_sqe(&ring1);
+	io_uring_prep_poll_add(sqe, evfd2, POLLIN);
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(&ring2);
+	io_uring_prep_poll_add(sqe, evfd1, POLLIN);
+	sqe->user_data = 1;
+
+	ret = io_uring_submit(&ring1);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	ret = io_uring_submit(&ring2);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	/* a nop on ring1 kicks the cycle; the test passes if submission
+	 * returns instead of hanging — no CQEs are reaped, and rings and
+	 * eventfds are reclaimed at process exit */
+	sqe = io_uring_get_sqe(&ring1);
+	io_uring_prep_nop(sqe);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(&ring1);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/test/eventfd.c b/test/eventfd.c
new file mode 100644
index 0000000..1a7e3f3
--- /dev/null
+++ b/test/eventfd.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various nop tests
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+
+#include "liburing.h"
+
+int main(int argc, char *argv[])
+{
+	struct io_uring_params p = {};
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	uint64_t ptr;
+	/* iovec for reading the eventfd's 8-byte counter */
+	struct iovec vec = {
+		.iov_base = &ptr,
+		.iov_len = sizeof(ptr)
+	};
+	int ret, evfd, i;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init_params(8, &ring, &p);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+	}
+	/* feature bit used as a kernel-version proxy — confirm intent */
+	if (!(p.features & IORING_FEAT_CUR_PERSONALITY)) {
+		fprintf(stdout, "Skipping\n");
+		return 0;
+	}
+
+	evfd = eventfd(0, EFD_CLOEXEC);
+	if (evfd < 0) {
+		perror("eventfd");
+		return 1;
+	}
+
+	/* the ring's own completion eventfd is also the poll/read target */
+	ret = io_uring_register_eventfd(&ring, evfd);
+	if (ret) {
+		fprintf(stderr, "failed to register evfd: %d\n", ret);
+		return 1;
+	}
+
+	/* link: poll for the eventfd becoming readable, then read it */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_poll_add(sqe, evfd, POLLIN);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_readv(sqe, evfd, &vec, 1, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 2) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	/* the nop's completion signals the registered eventfd, which
+	 * satisfies the linked poll and lets the read consume the count */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_nop(sqe);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait: %d\n", ret);
+			return 1;
+		}
+		switch (cqe->user_data) {
+		case 1:
+			/* POLLIN */
+			if (cqe->res != 1) {
+				fprintf(stderr, "poll: %d\n", cqe->res);
+				return 1;
+			}
+			break;
+		case 2:
+			/* full 8-byte eventfd counter read */
+			if (cqe->res != sizeof(ptr)) {
+				fprintf(stderr, "read: %d\n", cqe->res);
+				return 1;
+			}
+			break;
+		case 3:
+			if (cqe->res) {
+				fprintf(stderr, "nop: %d\n", cqe->res);
+				return 1;
+			}
+			break;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	return 0;
+}
diff --git a/test/fadvise.c b/test/fadvise.c
new file mode 100644
index 0000000..b6d4462
--- /dev/null
+++ b/test/fadvise.c
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: basic fadvise test
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/time.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define FILE_SIZE	(128 * 1024)
+#define LOOPS		100
+#define MIN_LOOPS	10
+
+/*
+ * Microseconds elapsed between timevals s and e (caller ensures
+ * e >= s).  Borrows from the seconds field when the usec delta is
+ * negative before combining into a single microsecond count.
+ */
+static unsigned long long utime_since(const struct timeval *s,
+				      const struct timeval *e)
+{
+	long long sec, usec;
+
+	sec = e->tv_sec - s->tv_sec;
+	usec = (e->tv_usec - s->tv_usec);
+	if (sec > 0 && usec < 0) {
+		sec--;
+		usec += 1000000;
+	}
+
+	sec *= 1000000;
+	return sec + usec;
+}
+
+/* Microseconds elapsed from *tv until now (gettimeofday). */
+static unsigned long long utime_since_now(struct timeval *tv)
+{
+	struct timeval end;
+
+	gettimeofday(&end, NULL);
+	return utime_since(tv, &end);
+}
+
+/*
+ * Issue one IORING_OP_FADVISE and wait for its completion; returns the
+ * cqe result (0 on success).  -EINVAL/-EBADF means the kernel lacks the
+ * opcode, so the whole test exits 0 ("skipped").
+ * NOTE(review): the skip path unlinks the hardcoded ".fadvise.tmp"
+ * even when main() was given a filename argument — harmless (unlink of
+ * a non-existent name just fails) but confirm intent.
+ */
+static int do_fadvise(struct io_uring *ring, int fd, off_t offset, off_t len,
+		      int advice)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "failed to get sqe\n");
+		return 1;
+	}
+
+	io_uring_prep_fadvise(sqe, fd, offset, len, advice);
+	/* tag the completion with the advice value for debugging */
+	sqe->user_data = advice;
+	ret = io_uring_submit_and_wait(ring, 1);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait: %d\n", ret);
+		return 1;
+	}
+
+	ret = cqe->res;
+	if (ret == -EINVAL || ret == -EBADF) {
+		fprintf(stdout, "Fadvise not supported, skipping\n");
+		unlink(".fadvise.tmp");
+		exit(0);
+	} else if (ret) {
+		fprintf(stderr, "cqe->res=%d\n", cqe->res);
+	}
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+}
+
+/*
+ * Rewind fd and read FILE_SIZE bytes into buf, timing the read.
+ * Returns the elapsed time in microseconds, or -1 on any error or
+ * short read.
+ */
+static long do_read(int fd, char *buf)
+{
+	struct timeval tv;
+	int ret;
+	long t;
+
+	ret = lseek(fd, 0, SEEK_SET);
+	if (ret) {
+		perror("lseek");
+		return -1;
+	}
+	
+	gettimeofday(&tv, NULL);
+	ret = read(fd, buf, FILE_SIZE);
+	t = utime_since_now(&tv);
+	if (ret < 0) {
+		perror("read");
+		return -1;
+	} else if (ret != FILE_SIZE) {
+		fprintf(stderr, "short read1: %d\n", ret);
+		return -1;
+	}
+
+	return t;
+}
+
+/*
+ * Time a cached read, evict the file with io_uring fadvise(DONTNEED)
+ * and time an uncached read, then fadvise(WILLNEED) and time a
+ * re-cached read.  Returns 0 when both cached passes beat the uncached
+ * one, 2 when the timings look suspicious, 1 on hard failure.
+ */
+static int test_fadvise(struct io_uring *ring, const char *filename)
+{
+	/* long, matching do_read()'s return type (was unsigned long) */
+	long cached_read, uncached_read, cached_read2;
+	int fd, ret, res = 1;
+	char *buf;
+
+	fd = open(filename, O_RDONLY);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	buf = t_malloc(FILE_SIZE);
+
+	cached_read = do_read(fd, buf);
+	if (cached_read == -1)
+		goto out;
+
+	ret = do_fadvise(ring, fd, 0, FILE_SIZE, POSIX_FADV_DONTNEED);
+	if (ret)
+		goto out;
+
+	uncached_read = do_read(fd, buf);
+	if (uncached_read == -1)
+		goto out;
+
+	ret = do_fadvise(ring, fd, 0, FILE_SIZE, POSIX_FADV_DONTNEED);
+	if (ret)
+		goto out;
+
+	ret = do_fadvise(ring, fd, 0, FILE_SIZE, POSIX_FADV_WILLNEED);
+	if (ret)
+		goto out;
+
+	fsync(fd);
+
+	cached_read2 = do_read(fd, buf);
+	if (cached_read2 == -1)
+		goto out;
+
+	if (cached_read < uncached_read &&
+	    cached_read2 < uncached_read)
+		res = 0;
+	else
+		res = 2;
+out:
+	/* previously leaked once per call — up to LOOPS fds and buffers */
+	free(buf);
+	close(fd);
+	return res;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret, i, good, bad;
+	char *fname;
+
+	/* operate on a caller-supplied file, or a 128K scratch file */
+	if (argc > 1) {
+		fname = argv[1];
+	} else {
+		fname = ".fadvise.tmp";
+		t_create_file(fname, FILE_SIZE);
+	}
+	if (io_uring_queue_init(8, &ring, 0)) {
+		fprintf(stderr, "ring creation failed\n");
+		goto err;
+	}
+
+	/* timing-based check: tally sane vs suspicious runs, stopping
+	 * early once MIN_LOOPS iterations have passed with no bad run */
+	good = bad = 0;
+	for (i = 0; i < LOOPS; i++) {
+		ret = test_fadvise(&ring, fname);
+		if (ret == 1) {
+			fprintf(stderr, "read_fadvise failed\n");
+			goto err;
+		} else if (!ret)
+			good++;
+		else if (ret == 2)
+			bad++;
+		if (i >= MIN_LOOPS && !bad)
+			break;
+	}
+	if (bad > good) {
+		fprintf(stderr, "Suspicious timings\n");
+		goto err;
+	}
+
+	/* only remove the scratch file we created ourselves (when
+	 * argc == 1, argv[1] is NULL so the compare is safe) */
+	if (fname != argv[1])
+		unlink(fname);
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	if (fname != argv[1])
+		unlink(fname);
+	return 1;
+}
diff --git a/test/fallocate.c b/test/fallocate.c
new file mode 100644
index 0000000..da90be8
--- /dev/null
+++ b/test/fallocate.c
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test io_uring fallocate
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/resource.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+static int no_fallocate;
+
+/*
+ * Lower RLIMIT_FSIZE to 64K and verify an io_uring fallocate of 128K is
+ * rejected with -EFBIG.  An -EINVAL completion means the kernel lacks
+ * IORING_OP_FALLOCATE: record it in no_fallocate and skip.
+ * NOTE(review): the reduced rlimit is never restored, so this should
+ * stay the last file-creating test — confirm ordering in main().
+ */
+static int test_fallocate_rlimit(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct rlimit rlim;
+	char buf[32];
+	int fd, ret;
+
+	if (getrlimit(RLIMIT_FSIZE, &rlim) < 0) {
+		perror("getrlimit");
+		return 1;
+	}
+	rlim.rlim_cur = 64 * 1024;
+	rlim.rlim_max = 64 * 1024;
+	if (setrlimit(RLIMIT_FSIZE, &rlim) < 0) {
+		perror("setrlimit");
+		return 1;
+	}
+
+	sprintf(buf, "./XXXXXX");
+	fd = mkstemp(buf);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_fallocate(sqe, fd, 0, 0, 128*1024);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+
+	if (cqe->res == -EINVAL) {
+		fprintf(stdout, "Fallocate not supported, skipping\n");
+		no_fallocate = 1;
+		io_uring_cqe_seen(ring, cqe);	/* was left unconsumed */
+		goto out;
+	} else if (cqe->res != -EFBIG) {
+		fprintf(stderr, "Expected -EFBIG: %d\n", cqe->res);
+		goto err;
+	}
+	io_uring_cqe_seen(ring, cqe);
+out:
+	close(fd);	/* mkstemp() fd was previously leaked */
+	unlink(buf);
+	return 0;
+err:
+	close(fd);
+	unlink(buf);
+	return 1;
+}
+
+/*
+ * Basic IORING_OP_FALLOCATE: allocate 128K in a fresh temp file and
+ * verify the resulting size via fstat.  Sets no_fallocate and skips if
+ * the kernel reports -EINVAL for the opcode.
+ */
+static int test_fallocate(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct stat st;
+	char buf[32];
+	int fd, ret;
+
+	sprintf(buf, "./XXXXXX");
+	fd = mkstemp(buf);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_fallocate(sqe, fd, 0, 0, 128*1024);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+
+	if (cqe->res == -EINVAL) {
+		fprintf(stdout, "Fallocate not supported, skipping\n");
+		no_fallocate = 1;
+		io_uring_cqe_seen(ring, cqe);	/* was left unconsumed */
+		goto out;
+	}
+	if (cqe->res) {
+		fprintf(stderr, "cqe->res=%d\n", cqe->res);
+		goto err;
+	}
+	io_uring_cqe_seen(ring, cqe);
+
+	if (fstat(fd, &st) < 0) {
+		perror("stat");
+		goto err;
+	}
+
+	if (st.st_size != 128*1024) {
+		fprintf(stderr, "Size mismatch: %llu\n",
+					(unsigned long long) st.st_size);
+		goto err;
+	}
+
+out:
+	close(fd);	/* mkstemp() fd was previously leaked */
+	unlink(buf);
+	return 0;
+err:
+	close(fd);
+	unlink(buf);
+	return 1;
+}
+
+/*
+ * Linked fallocate -> fsync chain: both must complete with res == 0,
+ * and the file must end up 128K.  Skipped entirely when an earlier
+ * test established that fallocate is unsupported.
+ */
+static int test_fallocate_fsync(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct stat st;
+	char buf[32];
+	int fd, ret, i;
+
+	if (no_fallocate)
+		return 0;
+
+	sprintf(buf, "./XXXXXX");
+	fd = mkstemp(buf);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_fallocate(sqe, fd, 0, 0, 128*1024);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_fsync(sqe, fd, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "wait completion %d\n", ret);
+			goto err;
+		}
+		if (cqe->res) {
+			fprintf(stderr, "cqe->res=%d,data=%" PRIu64 "\n", cqe->res,
+							(uint64_t) cqe->user_data);
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	if (fstat(fd, &st) < 0) {
+		perror("stat");
+		goto err;
+	}
+
+	if (st.st_size != 128*1024) {
+		fprintf(stderr, "Size mismatch: %llu\n",
+					(unsigned long long) st.st_size);
+		goto err;
+	}
+
+	close(fd);	/* mkstemp() fd was previously leaked */
+	unlink(buf);
+	return 0;
+err:
+	close(fd);
+	unlink(buf);
+	return 1;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	ret = test_fallocate(&ring);
+	if (ret) {
+		fprintf(stderr, "test_fallocate failed\n");
+		return ret;
+	}
+
+	ret = test_fallocate_fsync(&ring);
+	if (ret) {
+		fprintf(stderr, "test_fallocate_fsync failed\n");
+		return ret;
+	}
+
+	/* runs last — it lowers RLIMIT_FSIZE and does not restore it,
+	 * which would break the earlier 128K allocations; presumably
+	 * the ordering is deliberate */
+	ret = test_fallocate_rlimit(&ring);
+	if (ret) {
+		fprintf(stderr, "test_fallocate_rlimit failed\n");
+		return ret;
+	}
+
+	/* ring reclaimed at process exit */
+	return 0;
+}
diff --git a/test/fc2a85cb02ef-test.c b/test/fc2a85cb02ef-test.c
new file mode 100644
index 0000000..35addf5
--- /dev/null
+++ b/test/fc2a85cb02ef-test.c
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: MIT */
+// https://syzkaller.appspot.com/bug?id=1f2ecd7a23dba87e5ca3505ec44514a462cfe8c0
+// autogenerated by syzkaller (https://github.com/google/syzkaller)
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "liburing.h"
+#include "../src/syscall.h"
+
+/*
+ * printf-style write of a formatted string to an existing file (no
+ * O_CREAT).  Returns false on open failure or short write; the failing
+ * write's errno is preserved across the close().
+ */
+static bool write_file(const char* file, const char* what, ...)
+{
+  char buf[1024];
+  va_list args;
+  va_start(args, what);
+  vsnprintf(buf, sizeof(buf), what, args);
+  va_end(args);
+  buf[sizeof(buf) - 1] = 0;
+  int len = strlen(buf);
+  int fd = open(file, O_WRONLY | O_CLOEXEC);
+  if (fd == -1)
+    return false;
+  if (write(fd, buf, len) != len) {
+    int err = errno;
+    close(fd);
+    errno = err;
+    return false;
+  }
+  close(fd);
+  return true;
+}
+
+/*
+ * Arm per-thread fault injection: write nth+1 to fail-nth so the
+ * (nth+1)'th eligible fault site after this call fails.  Exits on any
+ * error since setup_fault() already confirmed the facility exists.
+ * The fd is returned and kept open by the caller — presumably the
+ * setting stays armed for the fd's lifetime; confirm against the
+ * kernel's fault-injection documentation.
+ */
+static int inject_fault(int nth)
+{
+  int fd;
+  fd = open("/proc/thread-self/fail-nth", O_RDWR);
+  if (fd == -1)
+    exit(1);
+  char buf[16];
+  sprintf(buf, "%d", nth + 1);
+  if (write(fd, buf, strlen(buf)) != (ssize_t)strlen(buf))
+    exit(1);
+  return fd;
+}
+
+/*
+ * Configure the kernel fault-injection knobs this reproducer relies
+ * on.  Returns 0 on success, 1 if the one required knob (failslab
+ * ignore-gfp-wait) cannot be written — the caller treats that as
+ * "facility unavailable, skip the test".
+ */
+static int setup_fault(void)
+{
+  static struct {
+    const char* file;
+    const char* val;
+    bool fatal;  /* required knob vs. best-effort tuning */
+  } files[] = {
+      {"/sys/kernel/debug/failslab/ignore-gfp-wait", "N", true},
+      {"/sys/kernel/debug/failslab/verbose", "0", false},
+      {"/sys/kernel/debug/fail_futex/ignore-private", "N", false},
+      {"/sys/kernel/debug/fail_page_alloc/verbose", "0", false},
+      {"/sys/kernel/debug/fail_page_alloc/ignore-gfp-highmem", "N", false},
+      {"/sys/kernel/debug/fail_page_alloc/ignore-gfp-wait", "N", false},
+      {"/sys/kernel/debug/fail_page_alloc/min-order", "0", false},
+  };
+  unsigned i;
+  for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
+    if (!write_file(files[i].file, files[i].val)) {
+      if (files[i].fatal)
+        return 1;
+    }
+  }
+  return 0;
+}
+
+#ifndef __NR_io_uring_register
+#define __NR_io_uring_register 427
+#endif
+#ifndef __NR_io_uring_setup
+#define __NR_io_uring_setup 425
+#endif
+
+uint64_t r[2] = {0xffffffffffffffff, 0xffffffffffffffff};
+
+int main(int argc, char *argv[])
+{
+  if (argc > 1)
+    return 0;
+  mmap((void *) 0x20000000ul, 0x1000000ul, 3ul, 0x32ul, -1, 0);
+  if (setup_fault()) {
+    printf("Test needs failslab/fail_futex/fail_page_alloc enabled, skipped\n");
+    return 0;
+  }
+  intptr_t res = 0;
+  /* zero out the io_uring_params struct at 0x20000000 field by field */
+  *(uint32_t*)0x20000000 = 0;
+  *(uint32_t*)0x20000004 = 0;
+  *(uint32_t*)0x20000008 = 0;
+  *(uint32_t*)0x2000000c = 0;
+  *(uint32_t*)0x20000010 = 0;
+  *(uint32_t*)0x20000014 = 0;
+  *(uint32_t*)0x20000018 = 0;
+  *(uint32_t*)0x2000001c = 0;
+  *(uint32_t*)0x20000020 = 0;
+  *(uint32_t*)0x20000024 = 0;
+  *(uint32_t*)0x20000028 = 0;
+  *(uint32_t*)0x2000002c = 0;
+  *(uint32_t*)0x20000030 = 0;
+  *(uint32_t*)0x20000034 = 0;
+  *(uint32_t*)0x20000038 = 0;
+  *(uint32_t*)0x2000003c = 0;
+  *(uint32_t*)0x20000040 = 0;
+  *(uint32_t*)0x20000044 = 0;
+  *(uint64_t*)0x20000048 = 0;
+  *(uint32_t*)0x20000050 = 0;
+  *(uint32_t*)0x20000054 = 0;
+  *(uint32_t*)0x20000058 = 0;
+  *(uint32_t*)0x2000005c = 0;
+  *(uint32_t*)0x20000060 = 0;
+  *(uint32_t*)0x20000064 = 0;
+  *(uint32_t*)0x20000068 = 0;
+  *(uint32_t*)0x2000006c = 0;
+  *(uint64_t*)0x20000070 = 0;
+  res = __sys_io_uring_setup(0x6a6, (struct io_uring_params *) 0x20000000ul);
+  if (res != -1)
+    r[0] = res;
+  /* socket(0x11, 2, 0x300) — presumably AF_PACKET/SOCK_DGRAM on Linux */
+  res = socket(0x11ul, 2ul, 0x300ul);
+  if (res != -1)
+    r[1] = res;
+  *(uint32_t*)0x20000080 = r[1];
+  /* arm fault injection, then register the socket as a fixed file
+   * (opcode 2 — presumably IORING_REGISTER_FILES; verify) so the
+   * registration path fails mid-way, which used to crash */
+  inject_fault(1);
+  __sys_io_uring_register(r[0], 2ul, (const void *) 0x20000080ul, 1ul);
+  return 0;
+}
diff --git a/test/file-register.c b/test/file-register.c
new file mode 100644
index 0000000..c5c5507
--- /dev/null
+++ b/test/file-register.c
@@ -0,0 +1,842 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various file registration tests
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+static int no_update = 0;
+
+/*
+ * Close the fds in 'files' (if non-NULL) and unlink the backing test files:
+ * ".reg.N" when add == 0, ".add.N+add" otherwise.  Frees 'files'.
+ */
+static void close_files(int *files, int nr_files, int add)
+{
+	char fname[32];
+	int i;
+
+	for (i = 0; i < nr_files; i++) {
+		if (files)
+			close(files[i]);
+		if (!add)
+			sprintf(fname, ".reg.%d", i);
+		else
+			sprintf(fname, ".add.%d", i + add);
+		unlink(fname);
+	}
+	if (files)
+		free(files);
+}
+
+/*
+ * Create nr_files test files named ".reg.N" (add == 0) or ".add.N+add",
+ * plus 'extra' trailing slots set to -1 so callers can register a sparse
+ * tail.  Returns a t_calloc()ed fd array, or NULL if any open fails.
+ * NOTE(review): on failure the fds opened so far leak and the files
+ * already created are not unlinked; callers also never check for NULL.
+ */
+static int *open_files(int nr_files, int extra, int add)
+{
+	char fname[32];
+	int *files;
+	int i;
+
+	files = t_calloc(nr_files + extra, sizeof(int));
+
+	for (i = 0; i < nr_files; i++) {
+		if (!add)
+			sprintf(fname, ".reg.%d", i);
+		else
+			sprintf(fname, ".add.%d", i + add);
+		files[i] = open(fname, O_RDWR | O_CREAT, 0644);
+		if (files[i] < 0) {
+			perror("open");
+			free(files);
+			files = NULL;
+			break;
+		}
+	}
+	if (extra) {
+		for (i = nr_files; i < nr_files + extra; i++)
+			files[i] = -1;
+	}
+
+	return files;
+}
+
+/*
+ * Register 50 files, then shrink the set by replacing each slot with -1
+ * until the kernel rejects the out-of-range offset (50) with -EINVAL.
+ */
+static int test_shrink(struct io_uring *ring)
+{
+	int ret, off, fd;
+	int *files;
+
+	files = open_files(50, 0, 0);
+	ret = io_uring_register_files(ring, files, 50);
+	if (ret) {
+		fprintf(stderr, "%s: register ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	off = 0;
+	do {
+		fd = -1;
+		ret = io_uring_register_files_update(ring, off, &fd, 1);
+		if (ret != 1) {
+			if (off == 50 && ret == -EINVAL)
+				break;
+			fprintf(stderr, "%s: update ret=%d\n", __FUNCTION__, ret);
+			break;
+		}
+		off++;
+	} while (1);
+
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "%s: unregister ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	close_files(files, 50, 0);
+	return 0;
+err:
+	close_files(files, 50, 0);
+	return 1;
+}
+
+
+/*
+ * Register 50 real files in a 300-slot set, then grow into the sparse tail
+ * one slot at a time until offset 300 is rejected with -EINVAL.
+ * NOTE(review): each loop iteration leaks the one-entry array (and fd)
+ * returned by open_files(); the trailing close_files(NULL, 251, 50) only
+ * unlinks the on-disk ".add.50".."add.300" names.
+ */
+static int test_grow(struct io_uring *ring)
+{
+	int ret, off;
+	int *files, *fds = NULL;
+
+	files = open_files(50, 250, 0);
+	ret = io_uring_register_files(ring, files, 300);
+	if (ret) {
+		fprintf(stderr, "%s: register ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	off = 50;
+	do {
+		fds = open_files(1, 0, off);
+		ret = io_uring_register_files_update(ring, off, fds, 1);
+		if (ret != 1) {
+			if (off == 300 && ret == -EINVAL)
+				break;
+			fprintf(stderr, "%s: update ret=%d\n", __FUNCTION__, ret);
+			break;
+		}
+		if (off >= 300) {
+			fprintf(stderr, "%s: Succeeded beyond end-of-list?\n", __FUNCTION__);
+			goto err;
+		}
+		off++;
+	} while (1);
+
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "%s: unregister ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	close_files(files, 100, 0);
+	close_files(NULL, 251, 50);
+	return 0;
+err:
+	close_files(files, 100, 0);
+	close_files(NULL, 251, 50);
+	return 1;
+}
+
+/*
+ * Register 100 real files, then clear every slot in one update call with
+ * an all -1 array.
+ */
+static int test_replace_all(struct io_uring *ring)
+{
+	int *files, *fds = NULL;
+	int ret, i;
+
+	files = open_files(100, 0, 0);
+	ret = io_uring_register_files(ring, files, 100);
+	if (ret) {
+		fprintf(stderr, "%s: register ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	fds = t_malloc(100 * sizeof(int));
+	for (i = 0; i < 100; i++)
+		fds[i] = -1;
+
+	ret = io_uring_register_files_update(ring, 0, fds, 100);
+	if (ret != 100) {
+		fprintf(stderr, "%s: update ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "%s: unregister ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	close_files(files, 100, 0);
+	if (fds)
+		free(fds);
+	return 0;
+err:
+	close_files(files, 100, 0);
+	if (fds)
+		free(fds);
+	return 1;
+}
+
+/*
+ * Register 100 files and replace the last 10 slots with freshly opened
+ * ".add" files in a single update.
+ */
+static int test_replace(struct io_uring *ring)
+{
+	int *files, *fds = NULL;
+	int ret;
+
+	files = open_files(100, 0, 0);
+	ret = io_uring_register_files(ring, files, 100);
+	if (ret) {
+		fprintf(stderr, "%s: register ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	fds = open_files(10, 0, 1);
+	ret = io_uring_register_files_update(ring, 90, fds, 10);
+	if (ret != 10) {
+		fprintf(stderr, "%s: update ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "%s: unregister ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	close_files(files, 100, 0);
+	if (fds)
+		close_files(fds, 10, 1);
+	return 0;
+err:
+	close_files(files, 100, 0);
+	if (fds)
+		close_files(fds, 10, 1);
+	return 1;
+}
+
+/*
+ * Register 100 files and punch a 10-slot hole in the middle (offset 50)
+ * by updating with -1 entries.
+ */
+static int test_removals(struct io_uring *ring)
+{
+	int *files, *fds = NULL;
+	int ret, i;
+
+	files = open_files(100, 0, 0);
+	ret = io_uring_register_files(ring, files, 100);
+	if (ret) {
+		fprintf(stderr, "%s: register ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	fds = t_calloc(10, sizeof(int));
+	for (i = 0; i < 10; i++)
+		fds[i] = -1;
+
+	ret = io_uring_register_files_update(ring, 50, fds, 10);
+	if (ret != 10) {
+		fprintf(stderr, "%s: update ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "%s: unregister ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	close_files(files, 100, 0);
+	if (fds)
+		free(fds);
+	return 0;
+err:
+	close_files(files, 100, 0);
+	if (fds)
+		free(fds);
+	return 1;
+}
+
+/*
+ * Register 100 real files plus a 100-slot sparse tail, then fill two of
+ * the sparse slots at offset 100.
+ */
+static int test_additions(struct io_uring *ring)
+{
+	int *files, *fds = NULL;
+	int ret;
+
+	files = open_files(100, 100, 0);
+	ret = io_uring_register_files(ring, files, 200);
+	if (ret) {
+		fprintf(stderr, "%s: register ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	fds = open_files(2, 0, 1);
+	ret = io_uring_register_files_update(ring, 100, fds, 2);
+	if (ret != 2) {
+		fprintf(stderr, "%s: update ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "%s: unregister ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	close_files(files, 100, 0);
+	if (fds)
+		close_files(fds, 2, 1);
+	return 0;
+err:
+	close_files(files, 100, 0);
+	if (fds)
+		close_files(fds, 2, 1);
+	return 1;
+}
+
+/*
+ * Probe sparse-set support: register 100 real files plus 100 sparse (-1)
+ * entries.  -EBADF means the kernel predates sparse registration; set
+ * no_update so main() skips all update-based tests.
+ */
+static int test_sparse(struct io_uring *ring)
+{
+	int *files;
+	int ret;
+
+	files = open_files(100, 100, 0);
+	ret = io_uring_register_files(ring, files, 200);
+	if (ret) {
+		if (ret == -EBADF) {
+			fprintf(stdout, "Sparse files not supported\n");
+			no_update = 1;
+			goto done;
+		}
+		fprintf(stderr, "%s: register ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "%s: unregister ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+done:
+	close_files(files, 100, 0);
+	return 0;
+err:
+	close_files(files, 100, 0);
+	return 1;
+}
+
+/*
+ * Register and unregister a larger (768 entry) fully populated set.
+ */
+static int test_basic_many(struct io_uring *ring)
+{
+	int *files;
+	int ret;
+
+	files = open_files(768, 0, 0);
+	ret = io_uring_register_files(ring, files, 768);
+	if (ret) {
+		fprintf(stderr, "%s: register %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "%s: unregister %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+	close_files(files, 768, 0);
+	return 0;
+err:
+	close_files(files, 768, 0);
+	return 1;
+}
+
+/*
+ * Register a plain 100-entry set and unregister it.  With 'fail' set,
+ * only 10 files are actually opened while 100 are registered, so the
+ * registration is expected to fail with -EBADF or -EFAULT.
+ */
+static int test_basic(struct io_uring *ring, int fail)
+{
+	int *files;
+	int ret;
+
+	files = open_files(fail ? 10 : 100, 0, 0);
+	ret = io_uring_register_files(ring, files, 100);
+	if (ret) {
+		if (fail) {
+			if (ret == -EBADF || ret == -EFAULT) {
+				/* expected failure: clean up the 10 files we
+				 * did create instead of leaking them */
+				close_files(files, 10, 0);
+				return 0;
+			}
+		}
+		fprintf(stderr, "%s: register %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+	if (fail) {
+		fprintf(stderr, "Registration succeeded, but expected fail\n");
+		goto err;
+	}
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "%s: unregister %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+	close_files(files, 100, 0);
+	return 0;
+err:
+	close_files(files, 100, 0);
+	return 1;
+}
+
+/*
+ * Register 0 files, but reserve space for 10.  Then add one file.
+ */
+/*
+ * Register a fully sparse 10-slot set (no real files), then update slot 0
+ * with a real fd -- verifies updates work on a set registered empty.
+ */
+static int test_zero(struct io_uring *ring)
+{
+	int *files, *fds = NULL;
+	int ret;
+
+	files = open_files(0, 10, 0);
+	ret = io_uring_register_files(ring, files, 10);
+	if (ret) {
+		fprintf(stderr, "%s: register ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	fds = open_files(1, 0, 1);
+	ret = io_uring_register_files_update(ring, 0, fds, 1);
+	if (ret != 1) {
+		fprintf(stderr, "%s: update ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "%s: unregister ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	if (fds)
+		close_files(fds, 1, 1);
+	free(files);
+	return 0;
+err:
+	if (fds)
+		close_files(fds, 1, 1);
+	free(files);
+	return 1;
+}
+
+/*
+ * Write 4k of 0x5a through fixed-file slot 'index', read it back through
+ * the same slot, and verify the contents round-trip.
+ * NOTE(review): the two 4k buffers leak on the error returns; only the
+ * success path frees them (fine for a short-lived test process).
+ */
+static int test_fixed_read_write(struct io_uring *ring, int index)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct iovec iov[2];
+	int ret;
+
+	iov[0].iov_base = t_malloc(4096);
+	iov[0].iov_len = 4096;
+	memset(iov[0].iov_base, 0x5a, 4096);
+
+	iov[1].iov_base = t_malloc(4096);
+	iov[1].iov_len = 4096;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
+		return 1;
+	}
+	io_uring_prep_writev(sqe, index, &iov[0], 1, 0);
+	sqe->flags |= IOSQE_FIXED_FILE;
+	sqe->user_data = 1;
+
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "%s: io_uring_wait_cqe=%d\n", __FUNCTION__, ret);
+		return 1;
+	}
+	if (cqe->res != 4096) {
+		fprintf(stderr, "%s: write cqe->res=%d\n", __FUNCTION__, cqe->res);
+		return 1;
+	}
+	io_uring_cqe_seen(ring, cqe);
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
+		return 1;
+	}
+	io_uring_prep_readv(sqe, index, &iov[1], 1, 0);
+	sqe->flags |= IOSQE_FIXED_FILE;
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "%s: io_uring_wait_cqe=%d\n", __FUNCTION__, ret);
+		return 1;
+	}
+	if (cqe->res != 4096) {
+		fprintf(stderr, "%s: read cqe->res=%d\n", __FUNCTION__, cqe->res);
+		return 1;
+	}
+	io_uring_cqe_seen(ring, cqe);
+
+	if (memcmp(iov[1].iov_base, iov[0].iov_base, 4096)) {
+		fprintf(stderr, "%s: data mismatch\n", __FUNCTION__);
+		return 1;
+	}
+
+	free(iov[0].iov_base);
+	free(iov[1].iov_base);
+	return 0;
+}
+
+/*
+ * Register 8K of sparse files, update one at a random spot, then do some
+ * file IO to verify it works.
+ */
+static int test_huge(struct io_uring *ring)
+{
+	int *files;
+	int ret;
+
+	files = open_files(0, 8192, 0);
+	ret = io_uring_register_files(ring, files, 8192);
+	if (ret) {
+		/* huge sets not supported */
+		if (ret == -EMFILE) {
+			fprintf(stdout, "%s: No huge file set support, skipping\n", __FUNCTION__);
+			goto out;
+		}
+		fprintf(stderr, "%s: register ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	/* populate one slot deep into the sparse set */
+	files[7193] = open(".reg.7193", O_RDWR | O_CREAT, 0644);
+	if (files[7193] < 0) {
+		fprintf(stderr, "%s: open=%d\n", __FUNCTION__, errno);
+		goto err;
+	}
+
+	ret = io_uring_register_files_update(ring, 7193, &files[7193], 1);
+	if (ret != 1) {
+		fprintf(stderr, "%s: update ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	/* verify real fixed-file I/O works through the updated slot */
+	if (test_fixed_read_write(ring, 7193))
+		goto err;
+
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "%s: unregister ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	if (files[7193] != -1) {
+		close(files[7193]);
+		unlink(".reg.7193");
+	}
+out:
+	free(files);
+	return 0;
+err:
+	if (files[7193] != -1) {
+		close(files[7193]);
+		unlink(".reg.7193");
+	}
+	free(files);
+	return 1;
+}
+
+/*
+ * Verify IORING_REGISTER_FILES_SKIP leaves a slot untouched: update slot
+ * 90 with the skip sentinel, then confirm fixed I/O through index 90 still
+ * works.  -EBADF from the update means no kernel support -- skip the test.
+ */
+static int test_skip(struct io_uring *ring)
+{
+	int *files;
+	int ret;
+
+	files = open_files(100, 0, 0);
+	ret = io_uring_register_files(ring, files, 100);
+	if (ret) {
+		fprintf(stderr, "%s: register ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	files[90] = IORING_REGISTER_FILES_SKIP;
+	ret = io_uring_register_files_update(ring, 90, &files[90], 1);
+	if (ret != 1) {
+		if (ret == -EBADF) {
+			fprintf(stdout, "Skipping files not supported\n");
+			goto done;
+		}
+		fprintf(stderr, "%s: update ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	/* verify can still use file index 90 */
+	if (test_fixed_read_write(ring, 90))
+		goto err;
+
+	ret = io_uring_unregister_files(ring);
+	if (ret) {
+		fprintf(stderr, "%s: unregister ret=%d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+done:
+	close_files(files, 100, 0);
+	return 0;
+err:
+	close_files(files, 100, 0);
+	return 1;
+}
+
+/*
+ * On a private ring: register 256 sparse (-1) entries and update each to a
+ * real fd (fd 1), then register 256 real entries and update each back to
+ * -1 -- exercising both fill and clear update directions.
+ * NOTE(review): 'fds' is never freed and the error paths leak the ring;
+ * acceptable for a one-shot test process.
+ */
+static int test_sparse_updates(void)
+{
+	struct io_uring ring;
+	int ret, i, *fds, newfd;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "queue_init: %d\n", ret);
+		return ret;
+	}
+
+	fds = t_malloc(256 * sizeof(int));
+	for (i = 0; i < 256; i++)
+		fds[i] = -1;
+
+	ret = io_uring_register_files(&ring, fds, 256);
+	if (ret) {
+		fprintf(stderr, "file_register: %d\n", ret);
+		return ret;
+	}
+
+	newfd = 1;
+	for (i = 0; i < 256; i++) {
+		ret = io_uring_register_files_update(&ring, i, &newfd, 1);
+		if (ret != 1) {
+			fprintf(stderr, "file_update: %d\n", ret);
+			return ret;
+		}
+	}
+	io_uring_unregister_files(&ring);
+
+	for (i = 0; i < 256; i++)
+		fds[i] = 1;
+
+	ret = io_uring_register_files(&ring, fds, 256);
+	if (ret) {
+		fprintf(stderr, "file_register: %d\n", ret);
+		return ret;
+	}
+
+	newfd = -1;
+	for (i = 0; i < 256; i++) {
+		ret = io_uring_register_files_update(&ring, i, &newfd, 1);
+		if (ret != 1) {
+			fprintf(stderr, "file_update: %d\n", ret);
+			return ret;
+		}
+	}
+	io_uring_unregister_files(&ring);
+
+	io_uring_queue_exit(&ring);
+	return 0;
+}
+
+/*
+ * Regression test for removal ordering: register both pipe ends, close
+ * the originals (the ring holds its own references), queue a 3s timeout
+ * hard-linked ahead of a fixed-file pipe write, then remove both fixed
+ * slots while the requests are still in flight.  Both CQEs must arrive
+ * without the ring hanging.
+ */
+static int test_fixed_removal_ordering(void)
+{
+	char buffer[128];
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts;
+	int ret, fd, i, fds[2];
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret < 0) {
+		fprintf(stderr, "failed to init io_uring: %s\n", strerror(-ret));
+		return ret;
+	}
+	if (pipe(fds)) {
+		perror("pipe");
+		return -1;
+	}
+	ret = io_uring_register_files(&ring, fds, 2);
+	if (ret) {
+		fprintf(stderr, "file_register: %d\n", ret);
+		return ret;
+	}
+	/* ring should have fds referenced, can close them */
+	close(fds[0]);
+	close(fds[1]);
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		return 1;
+	}
+	/* outwait file recycling delay */
+	ts.tv_sec = 3;
+	ts.tv_nsec = 0;
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->flags |= IOSQE_IO_LINK | IOSQE_IO_HARDLINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		return -1;
+	}
+	/* fixed-file write to the pipe's write end (slot 1) */
+	io_uring_prep_write(sqe, 1, buffer, sizeof(buffer), 0);
+	sqe->flags |= IOSQE_FIXED_FILE;
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 2) {
+		fprintf(stderr, "%s: got %d, wanted 2\n", __FUNCTION__, ret);
+		return -1;
+	}
+
+	/* remove unused pipe end */
+	fd = -1;
+	ret = io_uring_register_files_update(&ring, 0, &fd, 1);
+	if (ret != 1) {
+		fprintf(stderr, "update off=0 failed\n");
+		return -1;
+	}
+
+	/* remove used pipe end */
+	fd = -1;
+	ret = io_uring_register_files_update(&ring, 1, &fd, 1);
+	if (ret != 1) {
+		fprintf(stderr, "update off=1 failed\n");
+		return -1;
+	}
+
+	for (i = 0; i < 2; ++i) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "%s: io_uring_wait_cqe=%d\n", __FUNCTION__, ret);
+			return 1;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	io_uring_queue_exit(&ring);
+	return 0;
+}
+
+
+
+/*
+ * Entry point: run the full file-registration matrix on one ring.  Tests
+ * depending on IORING_REGISTER_FILES_UPDATE are skipped when test_sparse()
+ * detects no sparse-set support (no_update set).
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+	}
+
+	ret = test_basic(&ring, 0);
+	if (ret) {
+		printf("test_basic failed\n");
+		return ret;
+	}
+
+	ret = test_basic(&ring, 1);
+	if (ret) {
+		printf("test_basic failed\n");
+		return ret;
+	}
+
+	ret = test_basic_many(&ring);
+	if (ret) {
+		printf("test_basic_many failed\n");
+		return ret;
+	}
+
+	ret = test_sparse(&ring);
+	if (ret) {
+		printf("test_sparse failed\n");
+		return ret;
+	}
+
+	if (no_update)
+		return 0;
+
+	ret = test_additions(&ring);
+	if (ret) {
+		printf("test_additions failed\n");
+		return ret;
+	}
+
+	ret = test_removals(&ring);
+	if (ret) {
+		printf("test_removals failed\n");
+		return ret;
+	}
+
+	ret = test_replace(&ring);
+	if (ret) {
+		printf("test_replace failed\n");
+		return ret;
+	}
+
+	ret = test_replace_all(&ring);
+	if (ret) {
+		printf("test_replace_all failed\n");
+		return ret;
+	}
+
+	ret = test_grow(&ring);
+	if (ret) {
+		printf("test_grow failed\n");
+		return ret;
+	}
+
+	ret = test_shrink(&ring);
+	if (ret) {
+		printf("test_shrink failed\n");
+		return ret;
+	}
+
+	ret = test_zero(&ring);
+	if (ret) {
+		printf("test_zero failed\n");
+		return ret;
+	}
+
+	ret = test_huge(&ring);
+	if (ret) {
+		printf("test_huge failed\n");
+		return ret;
+	}
+
+	ret = test_skip(&ring);
+	if (ret) {
+		printf("test_skip failed\n");
+		return 1;
+	}
+
+	ret = test_sparse_updates();
+	if (ret) {
+		printf("test_sparse_updates failed\n");
+		return ret;
+	}
+
+	ret = test_fixed_removal_ordering();
+	if (ret) {
+		printf("test_fixed_removal_ordering failed\n");
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/test/file-update.c b/test/file-update.c
new file mode 100644
index 0000000..38059d4
--- /dev/null
+++ b/test/file-update.c
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various file registration tests
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+/*
+ * Close the fds in 'files' (if non-NULL), unlink the ".reg.N"/".add.N+add"
+ * backing files, and free 'files'.  Mirrors test/file-register.c.
+ */
+static void close_files(int *files, int nr_files, int add)
+{
+	char fname[32];
+	int i;
+
+	for (i = 0; i < nr_files; i++) {
+		if (files)
+			close(files[i]);
+		if (!add)
+			sprintf(fname, ".reg.%d", i);
+		else
+			sprintf(fname, ".add.%d", i + add);
+		unlink(fname);
+	}
+	if (files)
+		free(files);
+}
+
+/*
+ * Create nr_files test files plus 'extra' -1 sparse slots; returns a
+ * t_calloc()ed fd array or NULL on open failure.  Mirrors the helper in
+ * test/file-register.c (same leak-on-failure caveat).
+ */
+static int *open_files(int nr_files, int extra, int add)
+{
+	char fname[32];
+	int *files;
+	int i;
+
+	files = t_calloc(nr_files + extra, sizeof(int));
+
+	for (i = 0; i < nr_files; i++) {
+		if (!add)
+			sprintf(fname, ".reg.%d", i);
+		else
+			sprintf(fname, ".add.%d", i + add);
+		files[i] = open(fname, O_RDWR | O_CREAT, 0644);
+		if (files[i] < 0) {
+			perror("open");
+			free(files);
+			files = NULL;
+			break;
+		}
+	}
+	if (extra) {
+		for (i = nr_files; i < nr_files + extra; i++)
+			files[i] = -1;
+	}
+
+	return files;
+}
+
+/*
+ * Register the same 10-file set on three independent rings, update all 10
+ * slots on each, and (optionally) unregister -- verifies updates on one
+ * ring don't interfere with another.
+ * NOTE(review): fds/newfds from open_files() are not NULL-checked.
+ */
+static int test_update_multiring(struct io_uring *r1, struct io_uring *r2,
+				 struct io_uring *r3, int do_unreg)
+{
+	int *fds, *newfds;
+
+	fds = open_files(10, 0, 0);
+	newfds = open_files(10, 0, 1);
+
+	if (io_uring_register_files(r1, fds, 10) ||
+	    io_uring_register_files(r2, fds, 10) ||
+	    io_uring_register_files(r3, fds, 10)) {
+		fprintf(stderr, "%s: register files failed\n", __FUNCTION__);
+		goto err;
+	}
+
+	if (io_uring_register_files_update(r1, 0, newfds, 10) != 10 ||
+	    io_uring_register_files_update(r2, 0, newfds, 10) != 10 ||
+	    io_uring_register_files_update(r3, 0, newfds, 10) != 10) {
+		fprintf(stderr, "%s: update files failed\n", __FUNCTION__);
+		goto err;
+	}
+
+	if (!do_unreg)
+		goto done;
+
+	if (io_uring_unregister_files(r1) ||
+	    io_uring_unregister_files(r2) ||
+	    io_uring_unregister_files(r3)) {
+		fprintf(stderr, "%s: unregister files failed\n", __FUNCTION__);
+		goto err;
+	}
+
+done:
+	close_files(fds, 10, 0);
+	close_files(newfds, 10, 1);
+	return 0;
+err:
+	close_files(fds, 10, 0);
+	close_files(newfds, 10, 1);
+	return 1;
+}
+
+/*
+ * Exercise the async IORING_OP_FILES_UPDATE opcode: submit an update of 10
+ * sparse (-1) entries and expect a CQE result of 10, or -EINVAL on kernels
+ * without the opcode (treated as a skip).
+ * NOTE(review): 'fds' is never freed -- small one-shot leak in test code.
+ */
+static int test_sqe_update(struct io_uring *ring)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int *fds, i, ret;
+
+	fds = t_malloc(sizeof(int) * 10);
+	for (i = 0; i < 10; i++)
+		fds[i] = -1;
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_files_update(sqe, fds, 10, 0);
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait: %d\n", ret);
+		return 1;
+	}
+
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	if (ret == -EINVAL) {
+		fprintf(stdout, "IORING_OP_FILES_UPDATE not supported, skipping\n");
+		return 0;
+	}
+	return ret != 10;
+}
+
+/*
+ * Entry point: run the multi-ring registration/update matrix (with and
+ * without unregister), then the SQE-based files-update test on ring 1.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring r1, r2, r3;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	if (io_uring_queue_init(8, &r1, 0) ||
+	    io_uring_queue_init(8, &r2, 0) ||
+	    io_uring_queue_init(8, &r3, 0)) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	ret = test_update_multiring(&r1, &r2, &r3, 1);
+	if (ret) {
+		fprintf(stderr, "test_update_multiring w/unreg\n");
+		return ret;
+	}
+
+	ret = test_update_multiring(&r1, &r2, &r3, 0);
+	if (ret) {
+		fprintf(stderr, "test_update_multiring wo/unreg\n");
+		return ret;
+	}
+
+	ret = test_sqe_update(&r1);
+	if (ret) {
+		fprintf(stderr, "test_sqe_update failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/files-exit-hang-poll.c b/test/files-exit-hang-poll.c
new file mode 100644
index 0000000..c3f7fb7
--- /dev/null
+++ b/test/files-exit-hang-poll.c
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Based on a test case from Josef Grieb - test that we can exit without
+ * hanging if we have the task file table pinned by a request that is linked
+ * to another request that doesn't finish.
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <netinet/in.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <poll.h>
+#include "liburing.h"
+
+#define BACKLOG 512
+
+#define PORT 9100
+
+static struct io_uring ring;
+
+/* Queue a POLLIN poll on 'fd', linked to the next SQE.
+ * NOTE(review): io_uring_get_sqe() result is not checked; fine here since
+ * the 16-entry ring cannot be full at this point. */
+static void add_poll(struct io_uring *ring, int fd)
+{
+	struct io_uring_sqe *sqe;
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_poll_add(sqe, fd, POLLIN);
+	sqe->flags |= IOSQE_IO_LINK;
+}
+
+/* Queue a nonblocking accept on 'fd'; it completes only when a connection
+ * arrives, which never happens in this test (that's the point). */
+static void add_accept(struct io_uring *ring, int fd)
+{
+	struct io_uring_sqe *sqe;
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_accept(sqe, fd, 0, 0, SOCK_NONBLOCK | SOCK_CLOEXEC);
+}
+
+/* Initialize the global 16-entry ring; returns 0 on success, 1 on error. */
+static int setup_io_uring(void)
+{
+	int ret;
+
+	ret = io_uring_queue_init(16, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "Unable to setup io_uring: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	return 0;
+}
+
+/* SIGALRM handler: the test passes if we can exit cleanly while the linked
+ * requests are still pending. */
+static void alarm_sig(int sig)
+{
+	exit(0);
+}
+
+/*
+ * Bind a listening TCP socket (first free port in [PORT, PORT+99]), queue
+ * a linked poll+accept that never completes, and verify process exit does
+ * not hang with the task file table pinned -- SIGALRM after 1s exits 0.
+ */
+int main(int argc, char *argv[])
+{
+	struct sockaddr_in serv_addr;
+	struct io_uring_cqe *cqe;
+	int ret, sock_listen_fd;
+	const int val = 1;
+	int i;
+
+	if (argc > 1)
+		return 0;
+
+	sock_listen_fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
+	if (sock_listen_fd < 0) {
+		perror("socket");
+		return 1;
+	}
+
+	setsockopt(sock_listen_fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+
+	memset(&serv_addr, 0, sizeof(serv_addr));
+	serv_addr.sin_family = AF_INET;
+	serv_addr.sin_addr.s_addr = INADDR_ANY;
+
+	for (i = 0; i < 100; i++) {
+		serv_addr.sin_port = htons(PORT + i);
+
+		ret = bind(sock_listen_fd, (struct sockaddr *)&serv_addr, sizeof(serv_addr));
+		if (!ret)
+			break;
+		if (errno != EADDRINUSE) {
+			fprintf(stderr, "bind: %s\n", strerror(errno));
+			return 1;
+		}
+		if (i == 99) {
+			printf("Gave up on finding a port, skipping\n");
+			goto out;
+		}
+	}
+
+	if (listen(sock_listen_fd, BACKLOG) < 0) {
+		perror("Error listening on socket\n");
+		return 1;
+	}
+
+	if (setup_io_uring())
+		return 1;
+
+	add_poll(&ring, sock_listen_fd);
+	add_accept(&ring, sock_listen_fd);
+
+	ret = io_uring_submit(&ring);
+	if (ret != 2) {
+		fprintf(stderr, "submit=%d\n", ret);
+		return 1;
+	}
+
+	/* wait_cqe below is expected to block forever; exiting via the
+	 * SIGALRM handler after 1s is the success path */
+	signal(SIGALRM, alarm_sig);
+	alarm(1);
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait_cqe=%d\n", ret);
+		return 1;
+	}
+
+out:
+	io_uring_queue_exit(&ring);
+	return 0;
+}
diff --git a/test/files-exit-hang-timeout.c b/test/files-exit-hang-timeout.c
new file mode 100644
index 0000000..09efc4f
--- /dev/null
+++ b/test/files-exit-hang-timeout.c
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Based on a test case from Josef Grieb - test that we can exit without
+ * hanging if we have the task file table pinned by a request that is linked
+ * to another request that doesn't finish.
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <netinet/in.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <poll.h>
+#include "liburing.h"
+
+#define BACKLOG 512
+
+#define PORT 9100
+
+struct io_uring ring;
+
+struct __kernel_timespec ts = {
+	.tv_sec		= 300,
+	.tv_nsec	= 0,
+};
+
+/* Queue a 300s timeout (count 100) linked to the following SQE.  The 'fd'
+ * argument is unused; presumably kept for symmetry with add_accept(). */
+static void add_timeout(struct io_uring *ring, int fd)
+{
+	struct io_uring_sqe *sqe;
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_timeout(sqe, &ts, 100, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+}
+
+/* Queue a nonblocking accept on 'fd'.
+ * NOTE(review): unlike the poll variant of this test, IOSQE_IO_LINK is set
+ * here even though no request follows -- presumably intentional for the
+ * hang scenario; confirm against the original report. */
+static void add_accept(struct io_uring *ring, int fd)
+{
+	struct io_uring_sqe *sqe;
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_accept(sqe, fd, 0, 0, SOCK_NONBLOCK | SOCK_CLOEXEC);
+	sqe->flags |= IOSQE_IO_LINK;
+}
+
+static int setup_io_uring(void)
+{
+	int ret;
+       
+	ret = io_uring_queue_init(16, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "Unable to setup io_uring: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	return 0;
+}
+
+/* SIGALRM handler: the test passes if we can exit cleanly while the linked
+ * requests are still pending. */
+static void alarm_sig(int sig)
+{
+	exit(0);
+}
+
+/*
+ * Same scenario as files-exit-hang-poll.c but with a long timeout linked
+ * ahead of the accept: queue requests that cannot complete, then verify
+ * process exit does not hang -- SIGALRM after 1s exits 0.
+ */
+int main(int argc, char *argv[])
+{
+	struct sockaddr_in serv_addr;
+	struct io_uring_cqe *cqe;
+	int ret, sock_listen_fd;
+	const int val = 1;
+	int i;
+
+	if (argc > 1)
+		return 0;
+
+	sock_listen_fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
+	if (sock_listen_fd < 0) {
+		perror("socket");
+		return 1;
+	}
+
+	setsockopt(sock_listen_fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+
+	memset(&serv_addr, 0, sizeof(serv_addr));
+	serv_addr.sin_family = AF_INET;
+	serv_addr.sin_addr.s_addr = INADDR_ANY;
+
+	for (i = 0; i < 100; i++) {
+		serv_addr.sin_port = htons(PORT + i);
+
+		ret = bind(sock_listen_fd, (struct sockaddr *)&serv_addr, sizeof(serv_addr));
+		if (!ret)
+			break;
+		if (errno != EADDRINUSE) {
+			fprintf(stderr, "bind: %s\n", strerror(errno));
+			return 1;
+		}
+		if (i == 99) {
+			printf("Gave up on finding a port, skipping\n");
+			goto out;
+		}
+	}
+
+	if (listen(sock_listen_fd, BACKLOG) < 0) {
+		perror("Error listening on socket\n");
+		return 1;
+	}
+
+	if (setup_io_uring())
+		return 1;
+
+	add_timeout(&ring, sock_listen_fd);
+	add_accept(&ring, sock_listen_fd);
+
+	ret = io_uring_submit(&ring);
+	if (ret != 2) {
+		fprintf(stderr, "submit=%d\n", ret);
+		return 1;
+	}
+
+	/* wait_cqe below is expected to block; exiting via the SIGALRM
+	 * handler after 1s is the success path */
+	signal(SIGALRM, alarm_sig);
+	alarm(1);
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait_cqe=%d\n", ret);
+		return 1;
+	}
+
+out:
+	io_uring_queue_exit(&ring);
+	return 0;
+}
diff --git a/test/fixed-link.c b/test/fixed-link.c
new file mode 100644
index 0000000..60d96ec
--- /dev/null
+++ b/test/fixed-link.c
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: MIT */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define IOVECS_LEN 2
+
+/*
+ * Register IOVECS_LEN fixed buffers, link IOVECS_LEN fixed-buffer reads
+ * from /dev/zero (IOSQE_IO_LINK on the first SQE), and verify every
+ * completion returns the requested length.
+ */
+int main(int argc, char *argv[])
+{
+	struct iovec iovecs[IOVECS_LEN];
+	struct io_uring ring;
+	int i, fd, ret;
+
+	if (argc > 1)
+		return 0;
+
+	fd = open("/dev/zero", O_RDONLY);
+	if (fd < 0) {
+		fprintf(stderr, "Failed to open /dev/zero\n");
+		return 1;
+	}
+
+	if (io_uring_queue_init(32, &ring, 0) < 0) {
+		fprintf(stderr, "Failed to init io_uring\n");
+		close(fd);
+		return 1;
+	}
+
+	for (i = 0; i < IOVECS_LEN; ++i) {
+		iovecs[i].iov_base = t_malloc(64);
+		iovecs[i].iov_len = 64;
+	}
+
+	ret = io_uring_register_buffers(&ring, iovecs, IOVECS_LEN);
+	if (ret) {
+		fprintf(stderr, "Failed to register buffers\n");
+		return 1;
+	}
+
+	for (i = 0; i < IOVECS_LEN; ++i) {
+		struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
+		const char *str = "#include <errno.h>";
+
+		iovecs[i].iov_len = strlen(str);
+		io_uring_prep_read_fixed(sqe, fd, iovecs[i].iov_base, strlen(str), 0, i);
+		if (i == 0)
+			io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);
+		io_uring_sqe_set_data(sqe, (void *)str);
+	}
+
+	ret = io_uring_submit_and_wait(&ring, IOVECS_LEN);
+	if (ret < 0) {
+		fprintf(stderr, "Failed to submit IO\n");
+		return 1;
+	} else if (ret < IOVECS_LEN) {
+		fprintf(stderr, "Submitted %d, wanted %d\n", ret, IOVECS_LEN);
+		return 1;
+	}
+
+	for (i = 0; i < IOVECS_LEN; i++) {
+		struct io_uring_cqe *cqe;
+
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			return 1;
+		}
+		/* cast avoids signed/unsigned comparison; res is never larger
+		 * than the requested (small) length here */
+		if (cqe->res != (int) iovecs[i].iov_len) {
+			fprintf(stderr, "read: wanted %ld, got %d\n",
+					(long) iovecs[i].iov_len, cqe->res);
+			return 1;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	close(fd);
+	io_uring_queue_exit(&ring);
+
+	for (i = 0; i < IOVECS_LEN; ++i)
+		free(iovecs[i].iov_base);
+
+	return 0;
+}
diff --git a/test/fsync.c b/test/fsync.c
new file mode 100644
index 0000000..7e93ecc
--- /dev/null
+++ b/test/fsync.c
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test io_uring fsync handling
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+/*
+ * Queue a single fsync against a fresh mkstemp() file and wait for its
+ * completion.  NOTE(review): the CQE result is not checked, only that a
+ * CQE arrives.
+ */
+static int test_single_fsync(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	char buf[32];
+	int fd, ret;
+
+	sprintf(buf, "./XXXXXX");
+	fd = mkstemp(buf);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_fsync(sqe, fd, 0);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+
+	io_uring_cqe_seen(ring, cqe);
+	unlink(buf);
+	return 0;
+err:
+	unlink(buf);
+	return 1;
+}
+
+/*
+ * Queue four 4k writes followed by an IOSQE_IO_DRAIN fsync, and verify the
+ * fsync (user_data 1) completes only after all writes (user_data 0).  A
+ * cqe->res of -EINVAL means the kernel lacks IOSQE_IO_DRAIN -- bail out.
+ * NOTE(review): the four 4k buffers are never freed (one-shot test).
+ */
+static int test_barrier_fsync(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct iovec iovecs[4];
+	int i, fd, ret;
+	off_t off;
+
+	fd = open("testfile", O_WRONLY | O_CREAT, 0644);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	for (i = 0; i < 4; i++) {
+		iovecs[i].iov_base = t_malloc(4096);
+		iovecs[i].iov_len = 4096;
+	}
+
+	off = 0;
+	for (i = 0; i < 4; i++) {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			fprintf(stderr, "get sqe failed\n");
+			goto err;
+		}
+
+		io_uring_prep_writev(sqe, fd, &iovecs[i], 1, off);
+		sqe->user_data = 0;
+		off += 4096;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
+	sqe->user_data = 1;
+	io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	} else if (ret < 5) {
+		fprintf(stderr, "Submitted only %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 5; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "wait completion %d\n", ret);
+			goto err;
+		}
+		/* kernel doesn't support IOSQE_IO_DRAIN */
+		if (cqe->res == -EINVAL)
+			break;
+		if (i <= 3) {
+			if (cqe->user_data) {
+				fprintf(stderr, "Got fsync early?\n");
+				goto err;
+			}
+		} else {
+			if (!cqe->user_data) {
+				fprintf(stderr, "Got write late?\n");
+				goto err;
+			}
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	unlink("testfile");
+	return 0;
+err:
+	unlink("testfile");
+	return 1;
+}
+
+#define FILE_SIZE 1024
+
+/*
+ * Issue a zero-range IORING_OP_SYNC_FILE_RANGE against a FILE_SIZE temp
+ * file and require a zero CQE result.  The file is unlinked immediately
+ * after open; errno is preserved across the unlink for the perror report.
+ */
+static int test_sync_file_range(struct io_uring *ring)
+{
+	int ret, fd, save_errno;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+
+	t_create_file(".sync_file_range", FILE_SIZE);
+
+	fd = open(".sync_file_range", O_RDWR);
+	save_errno = errno;
+	unlink(".sync_file_range");
+	errno = save_errno;
+	if (fd < 0) {
+		perror("file open");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "sqe get failed\n");
+		return 1;
+	}
+	io_uring_prep_sync_file_range(sqe, fd, 0, 0, 0);
+	sqe->user_data = 1;
+
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit failed: %d\n", ret);
+		return 1;
+	}
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait_cqe failed: %d\n", ret);
+		return 1;
+	}
+	if (cqe->res) {
+		fprintf(stderr, "sfr failed: %d\n", cqe->res);
+		return 1;
+	}
+
+	io_uring_cqe_seen(ring, cqe);
+	return 0;
+}
+
+/*
+ * Entry point: run the three fsync variants on one shared 8-entry ring.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+
+	}
+
+	ret = test_single_fsync(&ring);
+	if (ret) {
+		fprintf(stderr, "test_single_fsync failed\n");
+		return ret;
+	}
+
+	ret = test_barrier_fsync(&ring);
+	if (ret) {
+		fprintf(stderr, "test_barrier_fsync failed\n");
+		return ret;
+	}
+
+	ret = test_sync_file_range(&ring);
+	if (ret) {
+		fprintf(stderr, "test_sync_file_range failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/hardlink.c b/test/hardlink.c
new file mode 100644
index 0000000..1c73424
--- /dev/null
+++ b/test/hardlink.c
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test io_uring linkat handling
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "liburing.h"
+
+static int do_linkat(struct io_uring *ring, const char *oldname, const char *newname)
+{
+	int ret;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "sqe get failed\n");
+		goto err;
+	}
+	io_uring_prep_linkat(sqe, AT_FDCWD, oldname, AT_FDCWD, newname, 0);	/* both paths relative to CWD */
+
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqes(ring, &cqe, 1, 0, 0);	/* no timeout, no sigmask */
+	if (ret) {
+		fprintf(stderr, "wait_cqe failed: %d\n", ret);
+		goto err;
+	}
+	ret = cqe->res;	/* 0 on success, -errno from the kernel */
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+err:
+	return 1;	/* setup failure: positive, distinct from -errno results */
+}
+
+int files_linked_ok(const char* fn1, const char *fn2)
+{
+	struct stat s1, s2;
+
+	if (stat(fn1, &s1)) {
+		fprintf(stderr, "stat(%s): %s\n", fn1, strerror(errno));
+		return 0;
+	}
+	if (stat(fn2, &s2)) {
+		fprintf(stderr, "stat(%s): %s\n", fn2, strerror(errno));
+		return 0;
+	}
+	if (s1.st_dev != s2.st_dev || s1.st_ino != s2.st_ino) {	/* hard links share device and inode */
+		fprintf(stderr, "linked files have different device / inode numbers\n");
+		return 0;
+	}
+	if (s1.st_nlink != 2 || s2.st_nlink != 2) {	/* exactly target + the one new link */
+		fprintf(stderr, "linked files have unexpected links count\n");
+		return 0;
+	}
+	return 1;	/* 1 == linked OK (boolean, not an error code) */
+}
+
+int main(int argc, char *argv[])
+{
+	static const char target[] = "io_uring-linkat-test-target";
+	static const char linkname[] = "io_uring-linkat-test-link";
+	int ret;
+	struct io_uring ring;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "queue init failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = open(target, O_CREAT | O_RDWR | O_EXCL, 0600);
+	if (ret < 0) {
+		perror("open");
+		goto err;
+	}
+	if (write(ret, "linktest", 8) != 8) {
+		close(ret);
+		goto err1;
+	}
+	close(ret);
+
+	ret = do_linkat(&ring, target, linkname);
+	if (ret < 0) {
+		if (ret == -EBADF || ret == -EINVAL) {
+			fprintf(stdout, "linkat not supported, skipping\n");
+			goto out;
+		}
+		fprintf(stderr, "linkat: %s\n", strerror(-ret));
+		goto err1;
+	} else if (ret) {
+		goto err1;
+	}
+
+	if (!files_linked_ok(linkname, target))
+		goto err2;
+
+	ret = do_linkat(&ring, target, linkname);
+	if (ret != -EEXIST) {
+		fprintf(stderr, "test_linkat linkname already exists failed: %d\n", ret);
+		goto err2;
+	}
+
+	ret = do_linkat(&ring, target, "surely/this/does/not/exist");
+	if (ret != -ENOENT) {
+		fprintf(stderr, "test_linkat no parent failed: %d\n", ret);
+		goto err2;
+	}
+
+out:
+	unlinkat(AT_FDCWD, linkname, 0);
+	unlinkat(AT_FDCWD, target, 0);
+	io_uring_queue_exit(&ring);
+	return 0;
+err2:
+	unlinkat(AT_FDCWD, linkname, 0);
+err1:
+	unlinkat(AT_FDCWD, target, 0);
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
+
diff --git a/test/helpers.c b/test/helpers.c
new file mode 100644
index 0000000..930d82a
--- /dev/null
+++ b/test/helpers.c
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: Helpers for tests.
+ */
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+/*
+ * Helper for allocating memory in tests.
+ */
+void *t_malloc(size_t size)
+{
+	void *ret;
+	ret = malloc(size);
+	assert(ret);	/* tests treat OOM as fatal rather than handling it */
+	return ret;
+}
+
+/*
+ * Helper for allocating size bytes aligned on a boundary.
+ */
+void t_posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+	int ret;
+	ret = posix_memalign(memptr, alignment, size);
+	assert(!ret);
+}
+
+/*
+ * Helper for allocating space for an array of nmemb elements
+ * with size bytes for each element.
+ */
+void *t_calloc(size_t nmemb, size_t size)
+{
+	void *ret;
+	ret = calloc(nmemb, size);
+	assert(ret);
+	return ret;
+}
+
+/*
+ * Helper for creating file and write @size byte buf with 0xaa value in the file.
+ */
+void t_create_file(const char *file, size_t size)
+{
+	ssize_t ret;
+	char *buf;
+	int fd;
+
+	buf = t_malloc(size);
+	memset(buf, 0xaa, size);
+
+	fd = open(file, O_WRONLY | O_CREAT, 0644);
+	assert(fd >= 0);
+
+	ret = write(fd, buf, size);
+	fsync(fd);
+	close(fd);
+	free(buf);
+	assert(ret == (ssize_t) size);	/* whole buffer must have been written */
+}
+
+/*
+ * Helper for creating @buf_num number of iovec
+ * with @buf_size bytes buffer of each iovec.
+ */
+struct iovec *t_create_buffers(size_t buf_num, size_t buf_size)
+{
+	struct iovec *vecs;
+	size_t i;
+
+	vecs = t_malloc(buf_num * sizeof(struct iovec));
+	for (i = 0; i < buf_num; i++) {
+		t_posix_memalign(&vecs[i].iov_base, buf_size, buf_size);	/* alignment == buf_size; assumes a power of two (e.g. 4k) -- confirm callers */
+		vecs[i].iov_len = buf_size;
+	}
+	return vecs;
+}
+
+/*
+ * Helper for setting up an io_uring instance, skipping if the given user isn't
+ * allowed to.
+ */
+enum t_setup_ret t_create_ring_params(int depth, struct io_uring *ring,
+				      struct io_uring_params *p)
+{
+	int ret;
+
+	ret = io_uring_queue_init_params(depth, ring, p);
+	if (!ret)
+		return T_SETUP_OK;
+	if ((p->flags & IORING_SETUP_SQPOLL) && ret == -EPERM && geteuid()) {
+		fprintf(stdout, "SQPOLL skipped for regular user\n");
+		return T_SETUP_SKIP;
+	}
+
+	fprintf(stderr, "queue_init: %s\n", strerror(-ret));
+	return ret;
+}
+
+enum t_setup_ret t_create_ring(int depth, struct io_uring *ring,
+			       unsigned int flags)
+{
+	struct io_uring_params p = { };
+
+	p.flags = flags;
+	return t_create_ring_params(depth, ring, &p);
+}
diff --git a/test/helpers.h b/test/helpers.h
new file mode 100644
index 0000000..74fe162
--- /dev/null
+++ b/test/helpers.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: Helpers for tests.
+ */
+#ifndef LIBURING_HELPERS_H
+#define LIBURING_HELPERS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "liburing.h"
+
+enum t_setup_ret {
+	T_SETUP_OK	= 0,
+	T_SETUP_SKIP,
+};
+
+/*
+ * Helper for allocating memory in tests.
+ */
+void *t_malloc(size_t size);
+
+
+/*
+ * Helper for allocating size bytes aligned on a boundary.
+ */
+void t_posix_memalign(void **memptr, size_t alignment, size_t size);
+
+
+/*
+ * Helper for allocating space for an array of nmemb elements
+ * with size bytes for each element.
+ */
+void *t_calloc(size_t nmemb, size_t size);
+
+
+/*
+ * Helper for creating file and write @size byte buf with 0xaa value in the file.
+ */
+void t_create_file(const char *file, size_t size);
+
+/*
+ * Helper for creating @buf_num number of iovec
+ * with @buf_size bytes buffer of each iovec.
+ */
+struct iovec *t_create_buffers(size_t buf_num, size_t buf_size);
+
+/*
+ * Helper for setting up a ring and checking for user privs
+ */
+enum t_setup_ret t_create_ring_params(int depth, struct io_uring *ring,
+				      struct io_uring_params *p);
+enum t_setup_ret t_create_ring(int depth, struct io_uring *ring,
+			       unsigned int flags);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/test/io-cancel.c b/test/io-cancel.c
new file mode 100644
index 0000000..9a36dd9
--- /dev/null
+++ b/test/io-cancel.c
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: Basic IO cancel test
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/wait.h>
+#include <sys/poll.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define FILE_SIZE	(128 * 1024)
+#define BS		4096
+#define BUFFERS		(FILE_SIZE / BS)
+
+static struct iovec *vecs;
+
+static unsigned long long utime_since(const struct timeval *s,
+				      const struct timeval *e)
+{
+	long long sec, usec;
+
+	sec = e->tv_sec - s->tv_sec;
+	usec = (e->tv_usec - s->tv_usec);
+	if (sec > 0 && usec < 0) {
+		sec--;
+		usec += 1000000;
+	}
+
+	sec *= 1000000;
+	return sec + usec;
+}
+
+static unsigned long long utime_since_now(struct timeval *tv)
+{
+	struct timeval end;
+
+	gettimeofday(&end, NULL);
+	return utime_since(tv, &end);
+}
+
+static int start_io(struct io_uring *ring, int fd, int do_write)
+{
+	struct io_uring_sqe *sqe;
+	int i, ret;
+
+	for (i = 0; i < BUFFERS; i++) {
+		off_t offset;
+
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			fprintf(stderr, "sqe get failed\n");
+			goto err;
+		}
+		offset = BS * (rand() % BUFFERS);
+		if (do_write) {
+			io_uring_prep_writev(sqe, fd, &vecs[i], 1, offset);
+		} else {
+			io_uring_prep_readv(sqe, fd, &vecs[i], 1, offset);
+		}
+		sqe->user_data = i + 1;
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret != BUFFERS) {
+		fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
+		goto err;
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+static int wait_io(struct io_uring *ring, unsigned nr_io, int do_partial)
+{
+	struct io_uring_cqe *cqe;
+	int i, ret;
+
+	for (i = 0; i < nr_io; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			goto err;
+		}
+		if (do_partial && cqe->user_data) {	/* user_data == 0 marks a cancel sqe (see start_cancel) */
+			if (!(cqe->user_data & 1)) {	/* even user_data: IO never targeted by a cancel */
+				if (cqe->res != BS) {
+					fprintf(stderr, "IO %d wasn't cancelled but got error %d\n", (unsigned) cqe->user_data, cqe->res);
+					goto err;
+				}
+			}
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+	return 0;
+err:
+	return 1;
+
+}
+
+static int do_io(struct io_uring *ring, int fd, int do_write)
+{
+	if (start_io(ring, fd, do_write))
+		return 1;
+	if (wait_io(ring, BUFFERS, 0))
+		return 1;
+	return 0;
+}
+
+static int start_cancel(struct io_uring *ring, int do_partial)
+{
+	struct io_uring_sqe *sqe;
+	int i, ret, submitted = 0;
+
+	for (i = 0; i < BUFFERS; i++) {
+		if (do_partial && (i & 1))
+			continue;
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			fprintf(stderr, "sqe get failed\n");
+			goto err;
+		}
+		io_uring_prep_cancel(sqe, (void *) (unsigned long) (i + 1), 0);	/* matches user_data i + 1 set in start_io() */
+		sqe->user_data = 0;	/* 0 marks cancel requests for wait_io() */
+		submitted++;
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret != submitted) {
+		fprintf(stderr, "submit got %d, wanted %d\n", ret, submitted);
+		goto err;
+	}
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test cancels. If 'do_partial' is set, then we only attempt to cancel half of
+ * the submitted IO. This is done to verify that cancelling one piece of IO doesn't
+ * impact others.
+ */
+static int test_io_cancel(const char *file, int do_write, int do_partial)
+{
+	struct io_uring ring;
+	struct timeval start_tv;
+	unsigned long usecs;
+	unsigned to_wait;
+	int fd, ret;
+
+	fd = open(file, O_RDWR | O_DIRECT);
+	if (fd < 0) {
+		perror("file open");
+		goto err;
+	}
+
+	ret = io_uring_queue_init(4 * BUFFERS, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		goto err;
+	}
+
+	if (do_io(&ring, fd, do_write))
+		goto err;
+	gettimeofday(&start_tv, NULL);
+	if (do_io(&ring, fd, do_write))
+		goto err;
+	usecs = utime_since_now(&start_tv);
+
+	if (start_io(&ring, fd, do_write))
+		goto err;
+	/* sleep for 1/3 of the total time, to allow some to start/complete */
+	usleep(usecs / 3);
+	if (start_cancel(&ring, do_partial))
+		goto err;
+	to_wait = BUFFERS;
+	if (do_partial)
+		to_wait += BUFFERS / 2;
+	else
+		to_wait += BUFFERS;
+	if (wait_io(&ring, to_wait, do_partial))
+		goto err;
+
+	io_uring_queue_exit(&ring);
+	close(fd);
+	return 0;
+err:
+	if (fd != -1)
+		close(fd);
+	return 1;
+}
+
+static int test_dont_cancel_another_ring(void)
+{
+	struct io_uring ring1, ring2;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	char buffer[128];
+	int ret, fds[2];
+	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100000000, };
+
+	ret = io_uring_queue_init(8, &ring1, 0);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		return 1;
+	}
+	ret = io_uring_queue_init(8, &ring2, 0);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		return 1;
+	}
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(&ring1);
+	if (!sqe) {
+		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
+		return 1;
+	}
+	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
+	sqe->flags |= IOSQE_ASYNC;
+	sqe->user_data = 1;
+
+	ret = io_uring_submit(&ring1);
+	if (ret != 1) {
+		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
+		return 1;
+	}
+
+	/* make sure it doesn't cancel requests of the other ctx */
+	sqe = io_uring_get_sqe(&ring2);
+	if (!sqe) {
+		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
+		return 1;
+	}
+	io_uring_prep_cancel(sqe, (void *) (unsigned long)1, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(&ring2);
+	if (ret != 1) {
+		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe(&ring2, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait_cqe=%d\n", ret);
+		return 1;
+	}
+	if (cqe->user_data != 2 || cqe->res != -ENOENT) {
+		fprintf(stderr, "error: cqe %i: res=%i, but expected -ENOENT\n",
+			(int)cqe->user_data, (int)cqe->res);
+		return 1;
+	}
+	io_uring_cqe_seen(&ring2, cqe);
+
+	ret = io_uring_wait_cqe_timeout(&ring1, &cqe, &ts);
+	if (ret != -ETIME) {
+		fprintf(stderr, "read got cancelled or wait failed\n");
+		return 1;
+	}
+	io_uring_cqe_seen(&ring1, cqe);
+
+	close(fds[0]);
+	close(fds[1]);
+	io_uring_queue_exit(&ring1);
+	io_uring_queue_exit(&ring2);
+	return 0;
+}
+
+static int test_cancel_req_across_fork(void)
+{
+	struct io_uring ring;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	char buffer[128];
+	int ret, i, fds[2];
+	pid_t p;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		return 1;
+	}
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
+		return 1;
+	}
+	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
+	sqe->flags |= IOSQE_ASYNC;
+	sqe->user_data = 1;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
+		return 1;
+	}
+
+	p = fork();
+	if (p == -1) {
+		fprintf(stderr, "fork() failed\n");
+		return 1;
+	}
+
+	if (p == 0) {
+		sqe = io_uring_get_sqe(&ring);
+		if (!sqe) {
+			fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
+			exit(1);	/* child must not return into main() */
+		}
+		io_uring_prep_cancel(sqe, (void *) (unsigned long)1, 0);
+		sqe->user_data = 2;
+
+		ret = io_uring_submit(&ring);
+		if (ret != 1) {
+			fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
+			exit(1);	/* child must not return into main() */
+		}
+
+		for (i = 0; i < 2; ++i) {
+			ret = io_uring_wait_cqe(&ring, &cqe);
+			if (ret) {
+				fprintf(stderr, "wait_cqe=%d\n", ret);
+				exit(1);	/* child must not return into main() */
+			}
+			if ((cqe->user_data == 1 && cqe->res != -EINTR) ||
+			    (cqe->user_data == 2 && cqe->res != -EALREADY)) {
+				fprintf(stderr, "%i %i\n", (int)cqe->user_data, cqe->res);
+				exit(1);
+			}
+
+			io_uring_cqe_seen(&ring, cqe);
+		}
+		exit(0);
+	} else {
+		int wstatus;
+
+		if (waitpid(p, &wstatus, 0) == (pid_t)-1) {
+			perror("waitpid()");
+			return 1;
+		}
+		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
+			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
+			return 1;
+		}
+	}
+
+	close(fds[0]);
+	close(fds[1]);
+	io_uring_queue_exit(&ring);
+	return 0;
+}
+
+static int test_cancel_inflight_exit(void)
+{
+	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0, };
+	struct io_uring ring;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i;
+	pid_t p;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		return 1;
+	}
+	p = fork();
+	if (p == -1) {
+		fprintf(stderr, "fork() failed\n");
+		return 1;
+	}
+
+	if (p == 0) {
+		sqe = io_uring_get_sqe(&ring);
+		io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
+		sqe->user_data = 1;
+		sqe->flags |= IOSQE_IO_LINK;
+
+		sqe = io_uring_get_sqe(&ring);
+		io_uring_prep_timeout(sqe, &ts, 0, 0);
+		sqe->user_data = 2;
+
+		sqe = io_uring_get_sqe(&ring);
+		io_uring_prep_timeout(sqe, &ts, 0, 0);
+		sqe->user_data = 3;
+
+		ret = io_uring_submit(&ring);
+		if (ret != 3) {
+			fprintf(stderr, "io_uring_submit() failed %s, ret %i\n", __FUNCTION__, ret);
+			exit(1);
+		}
+		exit(0);
+	} else {
+		int wstatus;
+
+		if (waitpid(p, &wstatus, 0) == (pid_t)-1) {
+			perror("waitpid()");
+			return 1;
+		}
+		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
+			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
+			return 1;
+		}
+	}
+
+	for (i = 0; i < 3; ++i) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			return 1;
+		}
+		if ((cqe->user_data == 1 && cqe->res != -ECANCELED) ||
+		    (cqe->user_data == 2 && cqe->res != -ECANCELED) ||
+		    (cqe->user_data == 3 && cqe->res != -ETIME)) {
+			fprintf(stderr, "%i %i\n", (int)cqe->user_data, cqe->res);
+			return 1;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	io_uring_queue_exit(&ring);
+	return 0;
+}
+
+static int test_sqpoll_cancel_iowq_requests(void)
+{
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	int ret, fds[2];
+	char buffer[16];
+
+	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SQPOLL);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		return 1;
+	}
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+	/* pin both pipe ends via io-wq */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
+	sqe->flags |= IOSQE_ASYNC | IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_write(sqe, fds[1], buffer, 10, 0);
+	sqe->flags |= IOSQE_ASYNC;
+	sqe->user_data = 2;
+	ret = io_uring_submit(&ring);
+	if (ret != 2) {
+		fprintf(stderr, "%s: got %d, wanted 2\n", __FUNCTION__, ret);
+		return 1;
+	}
+
+	/* wait for sqpoll to kick in and submit before exit */
+	sleep(1);
+	io_uring_queue_exit(&ring);
+
+	/* close the write end, so if ring is cancelled properly read() fails*/
+	close(fds[1]);
+	ret = read(fds[0], buffer, 10);	/* result intentionally unchecked */
+	close(fds[0]);
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	int i, ret;
+
+	if (argc > 1)
+		return 0;
+
+	if (test_dont_cancel_another_ring()) {
+		fprintf(stderr, "test_dont_cancel_another_ring() failed\n");
+		return 1;
+	}
+
+	if (test_cancel_req_across_fork()) {
+		fprintf(stderr, "test_cancel_req_across_fork() failed\n");
+		return 1;
+	}
+
+	if (test_cancel_inflight_exit()) {
+		fprintf(stderr, "test_cancel_inflight_exit() failed\n");
+		return 1;
+	}
+
+	if (test_sqpoll_cancel_iowq_requests()) {
+		fprintf(stderr, "test_sqpoll_cancel_iowq_requests() failed\n");
+		return 1;
+	}
+
+	t_create_file(".basic-rw", FILE_SIZE);
+
+	vecs = t_create_buffers(BUFFERS, BS);
+
+	for (i = 0; i < 4; i++) {
+		int v1 = (i & 1) != 0;
+		int v2 = (i & 2) != 0;
+
+		ret = test_io_cancel(".basic-rw", v1, v2);
+		if (ret) {
+			fprintf(stderr, "test_io_cancel %d %d failed\n", v1, v2);
+			goto err;
+		}
+	}
+
+	unlink(".basic-rw");
+	return 0;
+err:
+	unlink(".basic-rw");
+	return 1;
+}
diff --git a/test/io_uring_enter.c b/test/io_uring_enter.c
new file mode 100644
index 0000000..a6bb8f5
--- /dev/null
+++ b/test/io_uring_enter.c
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * io_uring_enter.c
+ *
+ * Description: Unit tests for the io_uring_enter system call.
+ *
+ * Copyright 2019, Red Hat, Inc.
+ * Author: Jeff Moyer <jmoyer@redhat.com>
+ */
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/sysinfo.h>
+#include <poll.h>
+#include <assert.h>
+#include <sys/uio.h>
+#include <sys/mman.h>
+#include <linux/mman.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <limits.h>
+#include <sys/time.h>
+
+#include "helpers.h"
+#include "liburing.h"
+#include "liburing/barrier.h"
+#include "../src/syscall.h"
+
+#define IORING_MAX_ENTRIES 4096
+
+int
+expect_failed_submit(struct io_uring *ring, int error)
+{
+	int ret;
+
+	ret = io_uring_submit(ring);
+	if (ret == 1) {
+		printf("expected failure, but io_uring_submit succeeded.\n");
+		return 1;
+	}
+
+	if (errno != error) {	/* NOTE(review): io_uring_submit() reports errors via a negative return, not errno -- verify this check */
+		printf("expected %d, got %d\n", error, errno);
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+expect_fail(int fd, unsigned int to_submit, unsigned int min_complete,
+	    unsigned int flags, sigset_t *sig, int error)
+{
+	int ret;
+
+	ret = __sys_io_uring_enter(fd, to_submit, min_complete, flags, sig);
+	if (ret != -1) {
+		printf("expected %s, but call succeeded\n", strerror(error));
+		return 1;
+	}
+
+	if (errno != error) {
+		printf("expected %d, got %d\n", error, errno);
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+try_io_uring_enter(int fd, unsigned int to_submit, unsigned int min_complete,
+		   unsigned int flags, sigset_t *sig, int expect, int error)
+{
+	int ret;
+
+	printf("io_uring_enter(%d, %u, %u, %u, %p)\n", fd, to_submit,
+	       min_complete, flags, sig);
+
+	if (expect == -1)
+		return expect_fail(fd, to_submit, min_complete,
+				   flags, sig, error);
+
+	ret = __sys_io_uring_enter(fd, to_submit, min_complete, flags, sig);
+	if (ret != expect) {
+		printf("Expected %d, got %d\n", expect, errno);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * prep a read I/O.  index is treated like a block number.
+ */
+int
+setup_file(char *template, off_t len)
+{
+	int fd, ret;
+	char buf[4096];
+
+	fd = mkstemp(template);
+	if (fd < 0) {
+		perror("mkstemp");
+		exit(1);
+	}
+	ret = ftruncate(fd, len);
+	if (ret < 0) {
+		perror("ftruncate");
+		exit(1);
+	}
+
+	ret = read(fd, buf, 4096);
+	if (ret != 4096) {
+		printf("read returned %d, expected 4096\n", ret);
+		exit(1);
+	}
+
+	return fd;
+}
+
+void
+io_prep_read(struct io_uring_sqe *sqe, int fd, off_t offset, size_t len)
+{
+	struct iovec *iov;
+
+	iov = t_malloc(sizeof(*iov));
+	assert(iov);
+
+	iov->iov_base = t_malloc(len);
+	assert(iov->iov_base);
+	iov->iov_len = len;
+
+	io_uring_prep_readv(sqe, fd, iov, 1, offset);
+	io_uring_sqe_set_data(sqe, iov); // free on completion
+}
+
+void
+reap_events(struct io_uring *ring, unsigned nr)
+{
+	int ret;
+	unsigned left = nr;
+	struct io_uring_cqe *cqe;
+	struct iovec *iov;
+	struct timeval start, now, elapsed;
+
+	printf("Reaping %u I/Os\n", nr);
+	gettimeofday(&start, NULL);
+	while (left) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("io_uring_wait_cqe returned %d\n", ret);
+			printf("expected success\n");
+			exit(1);
+		}
+		if (cqe->res != 4096)
+			printf("cqe->res: %d, expected 4096\n", cqe->res);
+		iov = io_uring_cqe_get_data(cqe);
+		free(iov->iov_base);
+		free(iov);
+		left--;
+		io_uring_cqe_seen(ring, cqe);
+
+		gettimeofday(&now, NULL);
+		timersub(&now, &start, &elapsed);
+		if (elapsed.tv_sec > 10) {
+			printf("Timed out waiting for I/Os to complete.\n");
+			printf("%u expected, %u completed\n", nr, left);
+			break;
+		}
+	}
+}
+
+void
+submit_io(struct io_uring *ring, unsigned nr)
+{
+	int fd, ret;
+	off_t file_len;
+	unsigned i;
+	static char template[32] = "/tmp/io_uring_enter-test.XXXXXX";
+	struct io_uring_sqe *sqe;
+
+	printf("Allocating %u sqes\n", nr);
+	file_len = nr * 4096;
+	fd = setup_file(template, file_len);
+	for (i = 0; i < nr; i++) {
+		/* allocate an sqe */
+		sqe = io_uring_get_sqe(ring);
+		/* fill it in */
+		io_prep_read(sqe, fd, i * 4096, 4096);
+	}
+
+	/* submit the I/Os */
+	printf("Submitting %u I/Os\n", nr);
+	ret = io_uring_submit(ring);
+	unlink(template);
+	if (ret < 0) {
+		perror("io_uring_enter");
+		exit(1);
+	}
+	printf("Done\n");
+}
+
+int
+main(int argc, char **argv)
+{
+	int ret;
+	unsigned int status = 0;
+	struct io_uring ring;
+	struct io_uring_sq *sq = &ring.sq;
+	unsigned ktail, mask, index;
+	unsigned sq_entries;
+	unsigned completed, dropped;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(IORING_MAX_ENTRIES, &ring, 0);
+	if (ret < 0) {
+		perror("io_uring_queue_init");
+		exit(1);
+	}
+	mask = *sq->kring_mask;
+
+	/* invalid flags */
+	status |= try_io_uring_enter(ring.ring_fd, 1, 0, ~0U, NULL, -1, EINVAL);
+
+	/* invalid fd, EBADF */
+	status |= try_io_uring_enter(-1, 0, 0, 0, NULL, -1, EBADF);
+
+	/* valid, non-ring fd, EOPNOTSUPP */
+	status |= try_io_uring_enter(0, 0, 0, 0, NULL, -1, EOPNOTSUPP);
+
+	/* to_submit: 0, flags: 0;  should get back 0. */
+	status |= try_io_uring_enter(ring.ring_fd, 1, 0, 0, NULL, 0, 0);
+
+	/* fill the sq ring */
+	sq_entries = *ring.sq.kring_entries;
+	submit_io(&ring, sq_entries);
+	printf("Waiting for %u events\n", sq_entries);
+	ret = __sys_io_uring_enter(ring.ring_fd, 0, sq_entries,
+					IORING_ENTER_GETEVENTS, NULL);
+	if (ret < 0) {
+		perror("io_uring_enter");
+		status = 1;
+	} else {
+		/*
+		 * This is a non-IOPOLL ring, which means that io_uring_enter
+		 * should not return until min_complete events are available
+		 * in the completion queue.
+		 */
+		completed = *ring.cq.ktail - *ring.cq.khead;
+		if (completed != sq_entries) {
+			printf("Submitted %u I/Os, but only got %u completions\n",
+			       sq_entries, completed);
+			status = 1;
+		}
+		reap_events(&ring, sq_entries);
+	}
+
+	/*
+	 * Add an invalid index to the submission queue.  This should
+	 * result in the dropped counter increasing.
+	 */
+	printf("Submitting invalid sqe index.\n");
+	index = *sq->kring_entries + 1; // invalid index
+	dropped = *sq->kdropped;
+	ktail = *sq->ktail;
+	sq->array[ktail & mask] = index;
+	++ktail;
+	/*
+	 * Ensure that the kernel sees the SQE update before it sees the tail
+	 * update.
+	 */
+	io_uring_smp_store_release(sq->ktail, ktail);
+
+	ret = __sys_io_uring_enter(ring.ring_fd, 1, 0, 0, NULL);
+	/* now check to see if our sqe was dropped */
+	if (*sq->kdropped == dropped) {
+		printf("dropped counter did not increase\n");
+		status = 1;
+	}
+
+	if (!status) {
+		printf("PASS\n");
+		return 0;
+	}
+
+	printf("FAIL\n");
+	return -1;
+}
diff --git a/test/io_uring_register.c b/test/io_uring_register.c
new file mode 100644
index 0000000..53e3987
--- /dev/null
+++ b/test/io_uring_register.c
@@ -0,0 +1,658 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * io_uring_register.c
+ *
+ * Description: Unit tests for the io_uring_register system call.
+ *
+ * Copyright 2019, Red Hat, Inc.
+ * Author: Jeff Moyer <jmoyer@redhat.com>
+ */
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/sysinfo.h>
+#include <poll.h>
+#include <assert.h>
+#include <sys/uio.h>
+#include <sys/mman.h>
+#include <linux/mman.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <limits.h>
+
+#include "helpers.h"
+#include "liburing.h"
+#include "../src/syscall.h"
+
+static int pagesize;
+static rlim_t mlock_limit;
+static int devnull;
+
+int
+expect_fail(int fd, unsigned int opcode, void *arg,
+	    unsigned int nr_args, int error)
+{
+	int ret;
+
+	printf("io_uring_register(%d, %u, %p, %u)\n",
+	       fd, opcode, arg, nr_args);
+	ret = __sys_io_uring_register(fd, opcode, arg, nr_args);
+	if (ret != -1) {
+		int ret2 = 0;
+
+		printf("expected %s, but call succeeded\n", strerror(error));
+		if (opcode == IORING_REGISTER_BUFFERS) {
+			ret2 = __sys_io_uring_register(fd,
+					IORING_UNREGISTER_BUFFERS, 0, 0);
+		} else if (opcode == IORING_REGISTER_FILES) {
+			ret2 = __sys_io_uring_register(fd,
+					IORING_UNREGISTER_FILES, 0, 0);
+		}
+		if (ret2) {
+			printf("internal error: failed to unregister\n");
+			exit(1);
+		}
+		return 1;
+	}
+
+	if (errno != error) {
+		printf("expected %d, got %d\n", error, errno);
+		return 1;
+	}
+	return 0;
+}
+
+int
+new_io_uring(int entries, struct io_uring_params *p)
+{
+	int fd;
+
+	fd = __sys_io_uring_setup(entries, p);
+	if (fd < 0) {
+		perror("io_uring_setup");
+		exit(1);
+	}
+	return fd;
+}
+
+#define MAXFDS (UINT_MAX * sizeof(int))
+
+void *
+map_filebacked(size_t size)
+{
+	int fd, ret;
+	void *addr;
+	char template[32] = "io_uring_register-test-XXXXXXXX";
+
+	fd = mkstemp(template);
+	if (fd < 0) {
+		perror("mkstemp");
+		return NULL;
+	}
+	unlink(template);
+
+	ret = ftruncate(fd, size);
+	if (ret < 0) {
+		perror("ftruncate");
+		close(fd);
+		return NULL;
+	}
+
+	addr = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+	if (addr == MAP_FAILED) {
+		perror("mmap");
+		close(fd);
+		return NULL;
+	}
+
+	close(fd);
+	return addr;
+}
+
+/*
+ * NOTE: this is now limited by SCM_MAX_FD (253).  Keep the code for now,
+ * but probably should augment it to test 253 and 254, specifically.
+ */
+int
+test_max_fds(int uring_fd)
+{
+	int status = 1;
+	int ret;
+	void *fd_as; /* file descriptor address space */
+	int fdtable_fd; /* fd for the file that will be mapped over and over */
+	int io_fd; /* the valid fd for I/O -- /dev/null */
+	int *fds; /* used to map the file into the address space */
+	char template[32] = "io_uring_register-test-XXXXXXXX";
+	unsigned long long i, nr_maps, nr_fds;
+
+	/*
+	 * First, mmap anonymous the full size.  That will guarantee the
+	 * mapping will fit in the memory area selected by mmap.  Then,
+	 * over-write that mapping using a file-backed mapping, 128MiB at
+	 * a time using MAP_FIXED.
+	 */
+	fd_as = mmap(NULL, UINT_MAX * sizeof(int), PROT_READ|PROT_WRITE,
+		     MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+	if (fd_as == MAP_FAILED) {
+		if (errno == ENOMEM) {
+			printf("Not enough memory for this test, skipping\n");
+			return 0;
+		}
+		perror("mmap fd_as");
+		exit(1);
+	}
+	printf("allocated %zu bytes of address space\n", UINT_MAX * sizeof(int));
+
+	fdtable_fd = mkstemp(template);
+	if (fdtable_fd < 0) {
+		perror("mkstemp");
+		exit(1);
+	}
+	unlink(template);
+	ret = ftruncate(fdtable_fd, 128*1024*1024);
+	if (ret < 0) {
+		perror("ftruncate");
+		exit(1);
+	}
+
+	io_fd = open("/dev/null", O_RDWR);
+	if (io_fd < 0) {
+		perror("open /dev/null");
+		exit(1);
+	}
+	fds = mmap(fd_as, 128*1024*1024, PROT_READ|PROT_WRITE,
+		   MAP_SHARED|MAP_FIXED, fdtable_fd, 0);
+	if (fds == MAP_FAILED) {
+		perror("mmap fdtable");
+		exit(1);
+	}
+
+	/* fill the fd table */
+	nr_fds = 128*1024*1024 / sizeof(int);
+	for (i = 0; i < nr_fds; i++)
+		fds[i] = io_fd;
+
+	/* map the file through the rest of the address space */
+	nr_maps = (UINT_MAX * sizeof(int)) / (128*1024*1024);
+	for (i = 0; i < nr_maps; i++) {
+		fds = &fds[nr_fds]; /* advance fds by 128MiB */
+		fds = mmap(fds, 128*1024*1024, PROT_READ|PROT_WRITE,
+			   MAP_SHARED|MAP_FIXED, fdtable_fd, 0);
+		if (fds == MAP_FAILED) {
+			printf("mmap failed at offset %lu\n",
+			       (unsigned long)((char *)fd_as - (char *)fds));
+			exit(1);
+		}
+	}
+
+	/* Now fd_as points to the file descriptor array. */
+	/*
+	 * We may not be able to map all of these files.  Let's back off
+	 * until success.
+	 */
+	nr_fds = UINT_MAX;
+	while (nr_fds) {
+		ret = __sys_io_uring_register(uring_fd, IORING_REGISTER_FILES,
+						fd_as, nr_fds);
+		if (ret != 0) {
+			nr_fds /= 2;
+			continue;
+		}
+		printf("io_uring_register(%d, IORING_REGISTER_FILES, %p, %llu)"
+		       "...succeeded\n", uring_fd, fd_as, nr_fds);
+		status = 0;
+		printf("io_uring_register(%d, IORING_UNREGISTER_FILES, 0, 0)...",
+		       uring_fd);
+		ret = __sys_io_uring_register(uring_fd, IORING_UNREGISTER_FILES,
+						0, 0);
+		if (ret < 0) {
+			ret = errno;
+			printf("failed\n");
+			errno = ret;
+			perror("io_uring_register UNREGISTER_FILES");
+			exit(1);
+		}
+		printf("succeeded\n");
+		break;
+	}
+
+	close(io_fd);
+	close(fdtable_fd);
+	ret = munmap(fd_as, UINT_MAX * sizeof(int));
+	if (ret != 0) {
+		printf("munmap(%zu) failed\n", UINT_MAX * sizeof(int));
+		exit(1);
+	}
+
+	return status;
+}
+
+int
+test_memlock_exceeded(int fd)
+{
+	int ret;
+	void *buf;
+	struct iovec iov;
+
+	/* if limit is larger than 2gb, just skip this test */
+	if (mlock_limit >= 2 * 1024 * 1024 * 1024ULL)
+		return 0;
+
+	iov.iov_len = mlock_limit * 2;
+	buf = t_malloc(iov.iov_len);
+	iov.iov_base = buf;
+
+	while (iov.iov_len) {
+		ret = __sys_io_uring_register(fd, IORING_REGISTER_BUFFERS, &iov, 1);
+		if (ret < 0) {
+			if (errno == ENOMEM) {
+				printf("io_uring_register of %zu bytes failed "
+				       "with ENOMEM (expected).\n", iov.iov_len);
+				iov.iov_len /= 2;
+				continue;
+			}
+			printf("expected success or EFAULT, got %d\n", errno);
+			free(buf);
+			return 1;
+		}
+		printf("successfully registered %zu bytes (%d).\n",
+		       iov.iov_len, ret);
+		ret = __sys_io_uring_register(fd, IORING_UNREGISTER_BUFFERS,
+						NULL, 0);
+		if (ret != 0) {
+			printf("error: unregister failed with %d\n", errno);
+			free(buf);
+			return 1;
+		}
+		break;
+	}
+	if (!iov.iov_len)
+		printf("Unable to register buffers.  Check memlock rlimit.\n");
+
+	free(buf);
+	return 0;
+}
+
+int
+test_iovec_nr(int fd)
+{
+	int i, ret, status = 0;
+	unsigned int nr = 1000000;
+	struct iovec *iovs;
+	void *buf;
+
+	iovs = malloc(nr * sizeof(struct iovec));
+	if (!iovs) {
+		fprintf(stdout, "can't allocate iovecs, skip\n");
+		return 0;
+	}
+	buf = t_malloc(pagesize);
+
+	for (i = 0; i < nr; i++) {
+		iovs[i].iov_base = buf;
+		iovs[i].iov_len = pagesize;
+	}
+
+	status |= expect_fail(fd, IORING_REGISTER_BUFFERS, iovs, nr, EINVAL);
+
+	/* reduce to UIO_MAXIOV */
+	nr = UIO_MAXIOV;
+	printf("io_uring_register(%d, %u, %p, %u)\n",
+	       fd, IORING_REGISTER_BUFFERS, iovs, nr);
+	ret = __sys_io_uring_register(fd, IORING_REGISTER_BUFFERS, iovs, nr);
+	if (ret != 0) {
+		printf("expected success, got %d\n", errno);
+		status = 1;
+	} else
+		__sys_io_uring_register(fd, IORING_UNREGISTER_BUFFERS, 0, 0);
+
+	free(buf);
+	free(iovs);
+	return status;
+}
+
+/*
+ * io_uring limit is 1G.  iov_len limit is ~0UL, I think
+ */
+int
+test_iovec_size(int fd)
+{
+	unsigned int status = 0;
+	int ret;
+	struct iovec iov;
+	void *buf;
+
+	/* NULL pointer for base */
+	iov.iov_base = 0;
+	iov.iov_len = 4096;
+	status |= expect_fail(fd, IORING_REGISTER_BUFFERS, &iov, 1, EFAULT);
+
+	/* valid base, 0 length */
+	iov.iov_base = &buf;
+	iov.iov_len = 0;
+	status |= expect_fail(fd, IORING_REGISTER_BUFFERS, &iov, 1, EFAULT);
+
+	/* valid base, length exceeds size */
+	/* this requires an unmapped page directly after buf */
+	buf = mmap(NULL, 2 * pagesize, PROT_READ|PROT_WRITE,
+		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+	assert(buf != MAP_FAILED);
+	ret = munmap(buf + pagesize, pagesize);
+	assert(ret == 0);
+	iov.iov_base = buf;
+	iov.iov_len = 2 * pagesize;
+	status |= expect_fail(fd, IORING_REGISTER_BUFFERS, &iov, 1, EFAULT);
+	munmap(buf, pagesize);
+
+	/* huge page */
+	buf = mmap(NULL, 2*1024*1024, PROT_READ|PROT_WRITE,
+		   MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_2MB | MAP_ANONYMOUS,
+		   -1, 0);
+	if (buf == MAP_FAILED) {
+		printf("Unable to map a huge page.  Try increasing "
+		       "/proc/sys/vm/nr_hugepages by at least 1.\n");
+		printf("Skipping the hugepage test\n");
+	} else {
+		/*
+		 * This should succeed, so long as RLIMIT_MEMLOCK is
+		 * not exceeded
+		 */
+		iov.iov_base = buf;
+		iov.iov_len = 2*1024*1024;
+		ret = __sys_io_uring_register(fd, IORING_REGISTER_BUFFERS, &iov, 1);
+		if (ret < 0) {
+			if (errno == ENOMEM)
+				printf("Unable to test registering of a huge "
+				       "page.  Try increasing the "
+				       "RLIMIT_MEMLOCK resource limit by at "
+				       "least 2MB.");
+			else {
+				printf("expected success, got %d\n", errno);
+				status = 1;
+			}
+		} else {
+			printf("Success!\n");
+			ret = __sys_io_uring_register(fd,
+					IORING_UNREGISTER_BUFFERS, 0, 0);
+			if (ret < 0) {
+				perror("io_uring_unregister");
+				status = 1;
+			}
+		}
+	}
+	ret = munmap(iov.iov_base, iov.iov_len);
+	assert(ret == 0);
+
+	/* file-backed buffers -- not supported */
+	buf = map_filebacked(2*1024*1024);
+	if (!buf)
+		status = 1;
+	iov.iov_base = buf;
+	iov.iov_len = 2*1024*1024;
+	printf("reserve file-backed buffers\n");
+	status |= expect_fail(fd, IORING_REGISTER_BUFFERS, &iov, 1, EOPNOTSUPP);
+	munmap(buf, 2*1024*1024);
+
+	/* bump up against the soft limit and make sure we get EFAULT
+	 * or whatever we're supposed to get.  NOTE: this requires
+	 * running the test as non-root. */
+	if (getuid() != 0)
+		status |= test_memlock_exceeded(fd);
+
+	return status;
+}
+
+void
+dump_sqe(struct io_uring_sqe *sqe)
+{
+	printf("\topcode: %d\n", sqe->opcode);
+	printf("\tflags:  0x%.8x\n", sqe->flags);
+	printf("\tfd:     %d\n", sqe->fd);
+	if (sqe->opcode == IORING_OP_POLL_ADD)
+		printf("\tpoll_events: 0x%.8x\n", sqe->poll_events);
+}
+
+int
+ioring_poll(struct io_uring *ring, int fd, int fixed)
+{
+	int ret;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+
+	sqe = io_uring_get_sqe(ring);
+	memset(sqe, 0, sizeof(*sqe));
+	sqe->opcode = IORING_OP_POLL_ADD;
+	if (fixed)
+		sqe->flags = IOSQE_FIXED_FILE;
+	sqe->fd = fd;
+	sqe->poll_events = POLLIN|POLLOUT;
+
+	printf("io_uring_submit:\n");
+	dump_sqe(sqe);
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		printf("failed to submit poll sqe: %d.\n", errno);
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		printf("io_uring_wait_cqe failed with %d\n", ret);
+		return 1;
+	}
+	ret = 0;
+	if (cqe->res != POLLOUT) {
+		printf("io_uring_wait_cqe: expected 0x%.8x, got 0x%.8x\n",
+		       POLLOUT, cqe->res);
+		ret = 1;
+	}
+
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+}
+
+int
+test_poll_ringfd(void)
+{
+	int status = 0;
+	int ret;
+	int fd;
+	struct io_uring ring;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		perror("io_uring_queue_init");
+		return 1;
+	}
+	fd = ring.ring_fd;
+
+	/* try polling the ring fd */
+	status = ioring_poll(&ring, fd, 0);
+
+	/*
+	 * now register the ring fd, and try the poll again.  This should
+	 * fail, because the kernel does not allow registering of the
+	 * ring_fd.
+	 */
+	status |= expect_fail(fd, IORING_REGISTER_FILES, &fd, 1, EBADF);
+
+	/* tear down queue */
+	io_uring_queue_exit(&ring);
+
+	return status;
+}
+
+static int test_shmem(void)
+{
+	const char pattern = 0xEA;
+	const int len = 4096;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	struct iovec iov;
+	int memfd, ret, i;
+	char *mem;
+	int pipefd[2] = {-1, -1};
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret)
+		return 1;
+
+	if (pipe(pipefd)) {
+		perror("pipe");
+		return 1;
+	}
+	memfd = memfd_create("uring-shmem-test", 0);
+	if (memfd < 0) {
+		fprintf(stderr, "memfd_create() failed %i\n", -errno);
+		return 1;
+	}
+	if (ftruncate(memfd, len)) {
+		fprintf(stderr, "can't truncate memfd\n");
+		return 1;
+	}
+	mem = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, memfd, 0);
+	if (!mem) {
+		fprintf(stderr, "mmap failed\n");
+		return 1;
+	}
+	for (i = 0; i < len; i++)
+		mem[i] = pattern;
+
+	iov.iov_base = mem;
+	iov.iov_len = len;
+	ret = io_uring_register_buffers(&ring, &iov, 1);
+	if (ret) {
+		if (ret == -EOPNOTSUPP) {
+			fprintf(stdout, "memfd registration isn't supported, "
+					"skip\n");
+			goto out;
+		}
+
+		fprintf(stderr, "buffer reg failed: %d\n", ret);
+		return 1;
+	}
+
+	/* check that we can read and write from/to shmem reg buffer */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_write_fixed(sqe, pipefd[1], mem, 512, 0, 0);
+	sqe->user_data = 1;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit write failed\n");
+		return 1;
+	}
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret < 0 || cqe->user_data != 1 || cqe->res != 512) {
+		fprintf(stderr, "reading from shmem failed\n");
+		return 1;
+	}
+	io_uring_cqe_seen(&ring, cqe);
+
+	/* clean it, should be populated with the pattern back from the pipe */
+	memset(mem, 0, 512);
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_read_fixed(sqe, pipefd[0], mem, 512, 0, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit write failed\n");
+		return 1;
+	}
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret < 0 || cqe->user_data != 2 || cqe->res != 512) {
+		fprintf(stderr, "reading from shmem failed\n");
+		return 1;
+	}
+	io_uring_cqe_seen(&ring, cqe);
+
+	for (i = 0; i < 512; i++) {
+		if (mem[i] != pattern) {
+			fprintf(stderr, "data integrity fail\n");
+			return 1;
+		}
+	}
+
+	ret = io_uring_unregister_buffers(&ring);
+	if (ret) {
+		fprintf(stderr, "buffer unreg failed: %d\n", ret);
+		return 1;
+	}
+out:
+	io_uring_queue_exit(&ring);
+	close(pipefd[0]);
+	close(pipefd[1]);
+	munmap(mem, len);
+	close(memfd);
+	return 0;
+}
+
+int
+main(int argc, char **argv)
+{
+	int fd, ret;
+	unsigned int status = 0;
+	struct io_uring_params p;
+	struct rlimit rlim;
+
+	if (argc > 1)
+		return 0;
+
+	/* setup globals */
+	pagesize = getpagesize();
+	ret = getrlimit(RLIMIT_MEMLOCK, &rlim);
+	if (ret < 0) {
+		perror("getrlimit");
+		return 1;
+	}
+	mlock_limit = rlim.rlim_cur;
+	printf("RELIMIT_MEMLOCK: %lu (%lu)\n", rlim.rlim_cur, rlim.rlim_max);
+	devnull = open("/dev/null", O_RDWR);
+	if (devnull < 0) {
+		perror("open /dev/null");
+		exit(1);
+	}
+
+	/* invalid fd */
+	status |= expect_fail(-1, 0, NULL, 0, EBADF);
+	/* valid fd that is not an io_uring fd */
+	status |= expect_fail(devnull, 0, NULL, 0, EOPNOTSUPP);
+
+	/* invalid opcode */
+	memset(&p, 0, sizeof(p));
+	fd = new_io_uring(1, &p);
+	ret = expect_fail(fd, ~0U, NULL, 0, EINVAL);
+	if (ret) {
+		/* if this succeeds, tear down the io_uring instance
+		 * and start clean for the next test. */
+		close(fd);
+		fd = new_io_uring(1, &p);
+	}
+
+	/* IORING_REGISTER_BUFFERS */
+	status |= test_iovec_size(fd);
+	status |= test_iovec_nr(fd);
+	/* IORING_REGISTER_FILES */
+	status |= test_max_fds(fd);
+	close(fd);
+	/* uring poll on the uring fd */
+	status |= test_poll_ringfd();
+
+	if (!status)
+		printf("PASS\n");
+	else
+		printf("FAIL\n");
+
+	ret = test_shmem();
+	if (ret) {
+		fprintf(stderr, "test_shmem() failed\n");
+		status |= 1;
+	}
+
+	return status;
+}
diff --git a/test/io_uring_setup.c b/test/io_uring_setup.c
new file mode 100644
index 0000000..a0709a7
--- /dev/null
+++ b/test/io_uring_setup.c
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * io_uring_setup.c
+ *
+ * Description: Unit tests for the io_uring_setup system call.
+ *
+ * Copyright 2019, Red Hat, Inc.
+ * Author: Jeff Moyer <jmoyer@redhat.com>
+ */
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/sysinfo.h>
+#include "liburing.h"
+
+#include "../syscall.h"
+
+char *features_string(struct io_uring_params *p)
+{
+	static char flagstr[64];
+
+	if (!p || !p->features)
+		return "none";
+
+	if (p->features & ~IORING_FEAT_SINGLE_MMAP) {
+		snprintf(flagstr, 64, "0x%.8x", p->features);
+		return flagstr;
+	}
+
+	if (p->features & IORING_FEAT_SINGLE_MMAP)
+		strncat(flagstr, "IORING_FEAT_SINGLE_MMAP", 64 - strlen(flagstr));
+
+	return flagstr;
+}
+
+/*
+ * Attempt the call with the given args.  Return 0 when expect matches
+ * the return value of the system call, 1 otherwise.
+ */
+char *
+flags_string(struct io_uring_params *p)
+{
+	static char flagstr[64];
+	int add_pipe = 0;
+
+	memset(flagstr, 0, sizeof(flagstr));
+
+	if (!p || p->flags == 0)
+		return "none";
+
+	/*
+	 * If unsupported flags are present, just print the bitmask.
+	 */
+	if (p->flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
+			 IORING_SETUP_SQ_AFF)) {
+		snprintf(flagstr, 64, "0x%.8x", p->flags);
+		return flagstr;
+	}
+
+	if (p->flags & IORING_SETUP_IOPOLL) {
+		strncat(flagstr, "IORING_SETUP_IOPOLL", 64 - strlen(flagstr));
+		add_pipe = 1;
+	}
+	if (p->flags & IORING_SETUP_SQPOLL) {
+		if (add_pipe)
+			strncat(flagstr, "|", 64 - strlen(flagstr));
+		else
+			add_pipe = 1;
+		strncat(flagstr, "IORING_SETUP_SQPOLL", 64 - strlen(flagstr));
+	}
+	if (p->flags & IORING_SETUP_SQ_AFF) {
+		if (add_pipe)
+			strncat(flagstr, "|", 64 - strlen(flagstr));
+		strncat(flagstr, "IORING_SETUP_SQ_AFF", 64 - strlen(flagstr));
+	}
+
+	return flagstr;
+}
+
+char *
+dump_resv(struct io_uring_params *p)
+{
+	static char resvstr[4096];
+
+	if (!p)
+		return "";
+
+	sprintf(resvstr, "0x%.8x 0x%.8x 0x%.8x", p->resv[0],
+		p->resv[1], p->resv[2]);
+
+	return resvstr;
+}
+
+/* bogus: setup returns a valid fd on success... expect can't predict the
+   fd we'll get, so this really only takes 1 parameter: error */
+int
+try_io_uring_setup(unsigned entries, struct io_uring_params *p, int expect, int error)
+{
+	int ret, __errno;
+
+	printf("io_uring_setup(%u, %p), flags: %s, feat: %s, resv: %s, sq_thread_cpu: %u\n",
+	       entries, p, flags_string(p), features_string(p), dump_resv(p),
+	       p ? p->sq_thread_cpu : 0);
+
+	ret = __sys_io_uring_setup(entries, p);
+	if (ret != expect) {
+		printf("expected %d, got %d\n", expect, ret);
+		/* if we got a valid uring, close it */
+		if (ret > 0)
+			close(ret);
+		return 1;
+	}
+	__errno = errno;
+	if (expect == -1 && error != __errno) {
+		if (__errno == EPERM && geteuid() != 0) {
+			printf("Needs root, not flagging as an error\n");
+			return 0;
+		}
+		printf("expected errno %d, got %d\n", error, __errno);
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+main(int argc, char **argv)
+{
+	int fd;
+	unsigned int status = 0;
+	struct io_uring_params p;
+
+	if (argc > 1)
+		return 0;
+
+	memset(&p, 0, sizeof(p));
+	status |= try_io_uring_setup(0, &p, -1, EINVAL);
+	status |= try_io_uring_setup(1, NULL, -1, EFAULT);
+
+	/* resv array is non-zero */
+	memset(&p, 0, sizeof(p));
+	p.resv[0] = p.resv[1] = p.resv[2] = 1;
+	status |= try_io_uring_setup(1, &p, -1, EINVAL);
+
+	/* invalid flags */
+	memset(&p, 0, sizeof(p));
+	p.flags = ~0U;
+	status |= try_io_uring_setup(1, &p, -1, EINVAL);
+
+	/* IORING_SETUP_SQ_AFF set but not IORING_SETUP_SQPOLL */
+	memset(&p, 0, sizeof(p));
+	p.flags = IORING_SETUP_SQ_AFF;
+	status |= try_io_uring_setup(1, &p, -1, EINVAL);
+
+	/* attempt to bind to invalid cpu */
+	memset(&p, 0, sizeof(p));
+	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
+	p.sq_thread_cpu = get_nprocs_conf();
+	status |= try_io_uring_setup(1, &p, -1, EINVAL);
+
+	/* I think we can limit a process to a set of cpus.  I assume
+	 * we shouldn't be able to setup a kernel thread outside of that.
+	 * try to do that. (task->cpus_allowed) */
+
+	/* read/write on io_uring_fd */
+	memset(&p, 0, sizeof(p));
+	fd = __sys_io_uring_setup(1, &p);
+	if (fd < 0) {
+		printf("io_uring_setup failed with %d, expected success\n",
+		       errno);
+		status = 1;
+	} else {
+		char buf[4096];
+		int ret;
+		ret = read(fd, buf, 4096);
+		if (ret >= 0) {
+			printf("read from io_uring fd succeeded.  expected fail\n");
+			status = 1;
+		}
+	}
+
+	if (!status) {
+		printf("PASS\n");
+		return 0;
+	}
+
+	printf("FAIL\n");
+	return -1;
+}
diff --git a/test/iopoll.c b/test/iopoll.c
new file mode 100644
index 0000000..3d94dfe
--- /dev/null
+++ b/test/iopoll.c
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: basic read/write tests with polled IO
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+#include <sys/resource.h>
+#include "helpers.h"
+#include "liburing.h"
+#include "../src/syscall.h"
+
+#define FILE_SIZE	(128 * 1024)
+#define BS		4096
+#define BUFFERS		(FILE_SIZE / BS)
+
+static struct iovec *vecs;
+static int no_buf_select;
+static int no_iopoll;
+
+static int provide_buffers(struct io_uring *ring)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int ret, i;
+
+	for (i = 0; i < BUFFERS; i++) {
+		sqe = io_uring_get_sqe(ring);
+		io_uring_prep_provide_buffers(sqe, vecs[i].iov_base,
+						vecs[i].iov_len, 1, 1, i);
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret != BUFFERS) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	for (i = 0; i < BUFFERS; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (cqe->res < 0) {
+			fprintf(stderr, "cqe->res=%d\n", cqe->res);
+			return 1;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+}
+
+static int __test_io(const char *file, struct io_uring *ring, int write, int sqthread,
+		     int fixed, int buf_select)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int open_flags;
+	int i, fd, ret;
+	off_t offset;
+
+	if (buf_select && write)
+		write = 0;
+	if (buf_select && fixed)
+		fixed = 0;
+
+	if (buf_select && provide_buffers(ring))
+		return 1;
+
+	if (write)
+		open_flags = O_WRONLY;
+	else
+		open_flags = O_RDONLY;
+	open_flags |= O_DIRECT;
+
+	fd = open(file, open_flags);
+	if (fd < 0) {
+		perror("file open");
+		goto err;
+	}
+
+	if (fixed) {
+		ret = io_uring_register_buffers(ring, vecs, BUFFERS);
+		if (ret) {
+			fprintf(stderr, "buffer reg failed: %d\n", ret);
+			goto err;
+		}
+	}
+	if (sqthread) {
+		ret = io_uring_register_files(ring, &fd, 1);
+		if (ret) {
+			fprintf(stderr, "file reg failed: %d\n", ret);
+			goto err;
+		}
+	}
+
+	offset = 0;
+	for (i = 0; i < BUFFERS; i++) {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			fprintf(stderr, "sqe get failed\n");
+			goto err;
+		}
+		offset = BS * (rand() % BUFFERS);
+		if (write) {
+			int do_fixed = fixed;
+			int use_fd = fd;
+
+			if (sqthread)
+				use_fd = 0;
+			if (fixed && (i & 1))
+				do_fixed = 0;
+			if (do_fixed) {
+				io_uring_prep_write_fixed(sqe, use_fd, vecs[i].iov_base,
+								vecs[i].iov_len,
+								offset, i);
+			} else {
+				io_uring_prep_writev(sqe, use_fd, &vecs[i], 1,
+								offset);
+			}
+		} else {
+			int do_fixed = fixed;
+			int use_fd = fd;
+
+			if (sqthread)
+				use_fd = 0;
+			if (fixed && (i & 1))
+				do_fixed = 0;
+			if (do_fixed) {
+				io_uring_prep_read_fixed(sqe, use_fd, vecs[i].iov_base,
+								vecs[i].iov_len,
+								offset, i);
+			} else {
+				io_uring_prep_readv(sqe, use_fd, &vecs[i], 1,
+								offset);
+			}
+
+		}
+		if (sqthread)
+			sqe->flags |= IOSQE_FIXED_FILE;
+		if (buf_select) {
+			sqe->flags |= IOSQE_BUFFER_SELECT;
+			sqe->buf_group = buf_select;
+			sqe->user_data = i;
+		}
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret != BUFFERS) {
+		fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
+		goto err;
+	}
+
+	for (i = 0; i < BUFFERS; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			goto err;
+		} else if (cqe->res == -EOPNOTSUPP) {
+			fprintf(stdout, "File/device/fs doesn't support polled IO\n");
+			no_iopoll = 1;
+			goto out;
+		} else if (cqe->res != BS) {
+			fprintf(stderr, "cqe res %d, wanted %d\n", cqe->res, BS);
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	if (fixed) {
+		ret = io_uring_unregister_buffers(ring);
+		if (ret) {
+			fprintf(stderr, "buffer unreg failed: %d\n", ret);
+			goto err;
+		}
+	}
+	if (sqthread) {
+		ret = io_uring_unregister_files(ring);
+		if (ret) {
+			fprintf(stderr, "file unreg failed: %d\n", ret);
+			goto err;
+		}
+	}
+
+out:
+	close(fd);
+	return 0;
+err:
+	if (fd != -1)
+		close(fd);
+	return 1;
+}
+
+extern int __io_uring_flush_sq(struct io_uring *ring);
+
+/*
+ * if we are polling io_uring_submit needs to always enter the
+ * kernel to fetch events
+ */
+static int test_io_uring_submit_enters(const char *file)
+{
+	struct io_uring ring;
+	int fd, i, ret, ring_flags, open_flags;
+	unsigned head;
+	struct io_uring_cqe *cqe;
+
+	if (no_iopoll)
+		return 0;
+
+	ring_flags = IORING_SETUP_IOPOLL;
+	ret = io_uring_queue_init(64, &ring, ring_flags);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		return 1;
+	}
+
+	open_flags = O_WRONLY | O_DIRECT;
+	fd = open(file, open_flags);
+	if (fd < 0) {
+		perror("file open");
+		goto err;
+	}
+
+	for (i = 0; i < BUFFERS; i++) {
+		struct io_uring_sqe *sqe;
+		off_t offset = BS * (rand() % BUFFERS);
+
+		sqe = io_uring_get_sqe(&ring);
+		io_uring_prep_writev(sqe, fd, &vecs[i], 1, offset);
+		sqe->user_data = 1;
+	}
+
+	/* submit manually to avoid adding IORING_ENTER_GETEVENTS */
+	ret = __sys_io_uring_enter(ring.ring_fd, __io_uring_flush_sq(&ring), 0,
+						0, NULL);
+	if (ret < 0)
+		goto err;
+
+	for (i = 0; i < 500; i++) {
+		ret = io_uring_submit(&ring);
+		if (ret != 0) {
+			fprintf(stderr, "still had %d sqes to submit, this is unexpected", ret);
+			goto err;
+		}
+
+		io_uring_for_each_cqe(&ring, head, cqe) {
+			/* runs after test_io so should not have happened */
+			if (cqe->res == -EOPNOTSUPP) {
+				fprintf(stdout, "File/device/fs doesn't support polled IO\n");
+				goto err;
+			}
+			goto ok;
+		}
+		usleep(10000);
+	}
+err:
+	ret = 1;
+	if (fd != -1)
+		close(fd);
+
+ok:
+	io_uring_queue_exit(&ring);
+	return ret;
+}
+
+static int test_io(const char *file, int write, int sqthread, int fixed,
+		   int buf_select)
+{
+	struct io_uring ring;
+	int ret, ring_flags;
+
+	if (no_iopoll)
+		return 0;
+
+	ring_flags = IORING_SETUP_IOPOLL;
+	if (sqthread) {
+		static int warned;
+
+		if (geteuid()) {
+			if (!warned)
+				fprintf(stdout, "SQPOLL requires root, skipping\n");
+			warned = 1;
+			return 0;
+		}
+	}
+
+	ret = io_uring_queue_init(64, &ring, ring_flags);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		return 1;
+	}
+
+	ret = __test_io(file, &ring, write, sqthread, fixed, buf_select);
+
+	io_uring_queue_exit(&ring);
+	return ret;
+}
+
+static int probe_buf_select(void)
+{
+	struct io_uring_probe *p;
+	struct io_uring ring;
+	int ret;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		return 1;
+	}
+
+	p = io_uring_get_probe_ring(&ring);
+	if (!p || !io_uring_opcode_supported(p, IORING_OP_PROVIDE_BUFFERS)) {
+		no_buf_select = 1;
+		fprintf(stdout, "Buffer select not supported, skipping\n");
+		return 0;
+	}
+	free(p);
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	int i, ret, nr;
+	char *fname;
+
+	if (probe_buf_select())
+		return 1;
+
+	if (argc > 1) {
+		fname = argv[1];
+	} else {
+		fname = ".iopoll-rw";
+		t_create_file(fname, FILE_SIZE);
+	}
+
+	vecs = t_create_buffers(BUFFERS, BS);
+
+	nr = 16;
+	if (no_buf_select)
+		nr = 8;
+	for (i = 0; i < nr; i++) {
+		int v1, v2, v3, v4;
+
+		v1 = (i & 1) != 0;
+		v2 = (i & 2) != 0;
+		v3 = (i & 4) != 0;
+		v4 = (i & 8) != 0;
+		ret = test_io(fname, v1, v2, v3, v4);
+		if (ret) {
+			fprintf(stderr, "test_io failed %d/%d/%d/%d\n", v1, v2, v3, v4);
+			goto err;
+		}
+		if (no_iopoll)
+			break;
+	}
+
+	ret = test_io_uring_submit_enters(fname);
+	if (ret) {
+	    fprintf(stderr, "test_io_uring_submit_enters failed\n");
+	    goto err;
+	}
+
+	if (fname != argv[1])
+		unlink(fname);
+	return 0;
+err:
+	if (fname != argv[1])
+		unlink(fname);
+	return 1;
+}
diff --git a/test/lfs-openat-write.c b/test/lfs-openat-write.c
new file mode 100644
index 0000000..ac35e1b
--- /dev/null
+++ b/test/lfs-openat-write.c
@@ -0,0 +1,117 @@
+#define _LARGEFILE_SOURCE
+#define _FILE_OFFSET_BITS 64
+
+#include <liburing.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/resource.h>
+#include <unistd.h>
+
+static const int RSIZE = 2;
+static const int OPEN_FLAGS = O_RDWR | O_CREAT;
+static const mode_t OPEN_MODE = S_IRUSR | S_IWUSR;
+
+#define DIE(...) do {\
+		fprintf(stderr, __VA_ARGS__);\
+		abort();\
+	} while(0);
+
+static int do_write(struct io_uring *ring, int fd, off_t offset)
+{
+	char buf[] = "some test write buf";
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int res, ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "failed to get sqe\n");
+		return 1;
+	}
+	io_uring_prep_write(sqe, fd, buf, sizeof(buf), offset);
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		fprintf(stderr, "failed to submit write: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait_cqe failed: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	res = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	if (res < 0) {
+		fprintf(stderr, "write failed: %s\n", strerror(-res));
+		return 1;
+	}
+
+	return 0;
+}
+
+static int test_open_write(struct io_uring *ring, int dfd, const char *fn)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int ret, fd = -1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "failed to get sqe\n");
+		return 1;
+	}
+	io_uring_prep_openat(sqe, dfd, fn, OPEN_FLAGS, OPEN_MODE);
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		fprintf(stderr, "failed to submit openat: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait_cqe failed: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	fd = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	if (fd < 0) {
+		fprintf(stderr, "openat failed: %s\n", strerror(-fd));
+		return 1;
+	}
+
+	return do_write(ring, fd, 1ULL << 32);
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int dfd, ret;
+
+	if (argc > 1)
+		return 0;
+
+	dfd = open("/tmp", O_RDONLY | O_DIRECTORY);
+	if (dfd < 0)
+		DIE("open /tmp: %s\n", strerror(errno));
+
+	ret = io_uring_queue_init(RSIZE, &ring, 0);
+	if (ret < 0)
+		DIE("failed to init io_uring: %s\n", strerror(-ret));
+
+	ret = test_open_write(&ring, dfd, "io_uring_openat_write_test1");
+
+	io_uring_queue_exit(&ring);
+	close(dfd);
+	unlink("/tmp/io_uring_openat_write_test1");
+	return ret;
+}
diff --git a/test/lfs-openat.c b/test/lfs-openat.c
new file mode 100644
index 0000000..b14238a
--- /dev/null
+++ b/test/lfs-openat.c
@@ -0,0 +1,273 @@
+#define _LARGEFILE_SOURCE
+#define _FILE_OFFSET_BITS 64
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/resource.h>
+#include <unistd.h>
+
+#include "liburing.h"
+
+/*
+ * Print a formatted error message and abort. Using do { } while (0)
+ * WITHOUT a trailing semicolon lets DIE(...); behave like a single
+ * statement, including in unbraced if/else branches (the original
+ * trailing ';' would inject an extra empty statement there).
+ */
+#define DIE(...) do {\
+		fprintf(stderr, __VA_ARGS__);\
+		abort();\
+	} while (0)
+
+/* ring size handed to io_uring_queue_init() */
+static const int RSIZE = 2;
+static const int OPEN_FLAGS = O_RDWR | O_CREAT;
+static const mode_t OPEN_MODE = S_IRUSR | S_IWUSR;
+
+/*
+ * Submit an openat of fn relative to dfd through the ring and wait for
+ * its completion. The opened fd is closed again before returning.
+ * Returns 0 on success, 1 on any failure.
+ */
+static int open_io_uring(struct io_uring *ring, int dfd, const char *fn)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int ret, fd;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "failed to get sqe\n");
+		return 1;
+	}
+	io_uring_prep_openat(sqe, dfd, fn, OPEN_FLAGS, OPEN_MODE);
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		fprintf(stderr, "failed to submit openat: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		/* cqe is only valid when the wait succeeded; check ret first */
+		fprintf(stderr, "wait_cqe failed: %s\n", strerror(-ret));
+		return 1;
+	}
+	fd = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	if (fd < 0) {
+		fprintf(stderr, "io_uring openat failed: %s\n", strerror(-fd));
+		return 1;
+	}
+
+	close(fd);
+	return 0;
+}
+
+/*
+ * Create fn under dfd and write a few bytes at a 4GB offset so the
+ * later openat tests operate on a large (sparse) file.
+ * Returns 0 on success, negative on error.
+ */
+static int prepare_file(int dfd, const char* fn)
+{
+	const char buf[] = "foo";
+	int fd, res;
+
+	fd = openat(dfd, fn, OPEN_FLAGS, OPEN_MODE);
+	if (fd < 0) {
+		fprintf(stderr, "prepare/open: %s\n", strerror(errno));
+		return -1;
+	}
+
+	/* offset > 32 bits forces large-file handling */
+	res = pwrite(fd, buf, sizeof(buf), 1ull << 32);
+	if (res < 0)
+		fprintf(stderr, "prepare/pwrite: %s\n", strerror(errno));
+
+	close(fd);
+	return res < 0 ? res : 0;
+}
+
+/*
+ * Link a read from an empty pipe (which will block) to an openat, submit
+ * both, then dup+close the ring fd to exercise the io_uring ->flush()
+ * path while the link is still pending. Passes if ring teardown returns
+ * without hanging.
+ * NOTE(review): the pipe fds are never closed and the early-error paths
+ * leave the ring initialized — likely acceptable for a short-lived test
+ * process, but worth confirming upstream.
+ */
+static int test_linked_files(int dfd, const char *fn, bool async)
+{
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	char buffer[128];
+	struct iovec iov = {.iov_base = buffer, .iov_len = sizeof(buffer), };
+	int ret, fd;
+	int fds[2];
+
+	ret = io_uring_queue_init(10, &ring, 0);
+	if (ret < 0)
+		DIE("failed to init io_uring: %s\n", strerror(-ret));
+
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+
+	/* read from the read end of the empty pipe: stays pending */
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		return -1;
+	}
+	io_uring_prep_readv(sqe, fds[0], &iov, 1, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	if (async)
+		sqe->flags |= IOSQE_ASYNC;
+
+	/* openat linked behind the blocked read */
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "failed to get sqe\n");
+		return 1;
+	}
+	io_uring_prep_openat(sqe, dfd, fn, OPEN_FLAGS, OPEN_MODE);
+
+	ret = io_uring_submit(&ring);
+	if (ret != 2) {
+		fprintf(stderr, "failed to submit openat: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	fd = dup(ring.ring_fd);
+	if (fd < 0) {
+		fprintf(stderr, "dup() failed: %s\n", strerror(-fd));
+		return 1;
+	}
+
+	/* io_uring->flush() */
+	close(fd);
+
+	io_uring_queue_exit(&ring);
+	return 0;
+}
+
+/*
+ * Queue a blocking pipe read, optionally IO_DRAIN (and IO_LINK) nops
+ * around it, then an IO_DRAIN openat that must wait behind the read.
+ * Closing a dup of the ring fd triggers ->flush(); teardown must cancel
+ * the drained requests and not hang.
+ */
+static int test_drained_files(int dfd, const char *fn, bool linked, bool prepend)
+{
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	char buffer[128];
+	struct iovec iov = {.iov_base = buffer, .iov_len = sizeof(buffer), };
+	/* to_cancel counts the drain/link requests queued besides the read */
+	int ret, fd, fds[2], to_cancel = 0;
+
+	ret = io_uring_queue_init(10, &ring, 0);
+	if (ret < 0)
+		DIE("failed to init io_uring: %s\n", strerror(-ret));
+
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+
+	/* read from the empty pipe: stays pending until flush/teardown */
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		return -1;
+	}
+	io_uring_prep_readv(sqe, fds[0], &iov, 1, 0);
+	sqe->user_data = 0;
+
+	if (prepend) {
+		sqe = io_uring_get_sqe(&ring);
+		if (!sqe) {
+			fprintf(stderr, "failed to get sqe\n");
+			return 1;
+		}
+		io_uring_prep_nop(sqe);
+		sqe->flags |= IOSQE_IO_DRAIN;
+		to_cancel++;
+		sqe->user_data = to_cancel;
+	}
+
+	if (linked) {
+		sqe = io_uring_get_sqe(&ring);
+		if (!sqe) {
+			fprintf(stderr, "failed to get sqe\n");
+			return 1;
+		}
+		io_uring_prep_nop(sqe);
+		sqe->flags |= IOSQE_IO_DRAIN | IOSQE_IO_LINK;
+		to_cancel++;
+		sqe->user_data = to_cancel;
+	}
+
+	/* drained openat that must wait behind everything above */
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "failed to get sqe\n");
+		return 1;
+	}
+	io_uring_prep_openat(sqe, dfd, fn, OPEN_FLAGS, OPEN_MODE);
+	sqe->flags |= IOSQE_IO_DRAIN;
+	to_cancel++;
+	sqe->user_data = to_cancel;
+
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1 + to_cancel) {
+		fprintf(stderr, "failed to submit openat: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	fd = dup(ring.ring_fd);
+	if (fd < 0) {
+		fprintf(stderr, "dup() failed: %s\n", strerror(-fd));
+		return 1;
+	}
+
+	/*
+	 * close(), which triggers ->flush(), and io_uring_queue_exit()
+	 * should successfully return and not hang.
+	 */
+	close(fd);
+	io_uring_queue_exit(&ring);
+	return 0;
+}
+
+/*
+ * Entry point: prepare a large test file under /tmp, then run the openat
+ * and flush/drain regression tests. All exit paths after ring setup tear
+ * down the ring, close the directory fd and remove the test file.
+ */
+int main(int argc, char *argv[])
+{
+	const char *fn = "io_uring_openat_test";
+	struct io_uring ring;
+	int ret, dfd;
+
+	if (argc > 1)
+		return 0;
+
+	dfd = open("/tmp", O_PATH);
+	if (dfd < 0)
+		DIE("open /tmp: %s\n", strerror(errno));
+
+	ret = io_uring_queue_init(RSIZE, &ring, 0);
+	if (ret < 0)
+		DIE("failed to init io_uring: %s\n", strerror(-ret));
+
+	/* don't skip teardown if file setup fails */
+	if (prepare_file(dfd, fn)) {
+		ret = 1;
+		goto out;
+	}
+
+	ret = open_io_uring(&ring, dfd, fn);
+	if (ret) {
+		fprintf(stderr, "open_io_uring() failed\n");
+		goto out;
+	}
+
+	ret = test_linked_files(dfd, fn, false);
+	if (ret) {
+		fprintf(stderr, "test_linked_files() !async failed\n");
+		goto out;
+	}
+
+	ret = test_linked_files(dfd, fn, true);
+	if (ret) {
+		fprintf(stderr, "test_linked_files() async failed\n");
+		goto out;
+	}
+
+	ret = test_drained_files(dfd, fn, false, false);
+	if (ret) {
+		fprintf(stderr, "test_drained_files() failed\n");
+		goto out;
+	}
+
+	ret = test_drained_files(dfd, fn, false, true);
+	if (ret) {
+		fprintf(stderr, "test_drained_files() middle failed\n");
+		goto out;
+	}
+
+	ret = test_drained_files(dfd, fn, true, false);
+	if (ret) {
+		fprintf(stderr, "test_drained_files() linked failed\n");
+		goto out;
+	}
+out:
+	io_uring_queue_exit(&ring);
+	close(dfd);
+	unlink("/tmp/io_uring_openat_test");
+	return ret;
+}
diff --git a/test/link-timeout.c b/test/link-timeout.c
new file mode 100644
index 0000000..5d8417f
--- /dev/null
+++ b/test/link-timeout.c
@@ -0,0 +1,1096 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various linked timeout cases
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/poll.h>
+
+#include "liburing.h"
+
+/*
+ * A link timeout submitted with nothing ahead of it in the chain is
+ * invalid and must complete with -EINVAL rather than arm a timer.
+ */
+static int test_fail_lone_link_timeouts(struct io_uring *ring)
+{
+	struct __kernel_timespec ts;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	/* the SQE only stores &ts; filling it before submit is fine */
+	ts.tv_sec = 1;
+	ts.tv_nsec = 0;
+	sqe->user_data = 1;
+	sqe->flags |= IOSQE_IO_LINK;
+
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		printf("wait completion %d\n", ret);
+		goto err;
+	}
+
+	if (cqe->user_data != 1) {
+		/* report the unexpected user_data itself, not the result code */
+		fprintf(stderr, "invalid user data %d\n", (int) cqe->user_data);
+		goto err;
+	}
+	if (cqe->res != -EINVAL) {
+		fprintf(stderr, "got %d, wanted -EINVAL\n", cqe->res);
+		goto err;
+	}
+	io_uring_cqe_seen(ring, cqe);
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Chain a write that fails at issue time (NULL iovec) with one valid and
+ * two invalid link timeouts; all four must complete with the expected
+ * failure/cancellation codes.
+ */
+static int test_fail_two_link_timeouts(struct io_uring *ring)
+{
+	struct __kernel_timespec ts;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i;
+
+	ts.tv_sec = 1;
+	ts.tv_nsec = 0;
+
+	/*
+	 * sqe_1: write destined to fail
+	 * use buf=NULL, to do that during the issuing stage
+	 */
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_writev(sqe, 0, NULL, 1, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+
+	/* sqe_2: valid linked timeout */
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->user_data = 2;
+	sqe->flags |= IOSQE_IO_LINK;
+
+
+	/* sqe_3: invalid linked timeout */
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 3;
+
+	/* sqe_4: invalid linked timeout */
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 4;
+
+	ret = io_uring_submit(ring);
+	if (ret != 4) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 4; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+
+		switch (cqe->user_data) {
+		case 1:
+			if (cqe->res != -EFAULT && cqe->res != -ECANCELED) {
+				fprintf(stderr, "write got %d, wanted -EFAULT "
+						"or -ECANCELED\n", cqe->res);
+				goto err;
+			}
+			break;
+		case 2:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "Link timeout got %d, wanted -ECANCELED\n", cqe->res);
+				goto err;
+			}
+			break;
+		case 3:
+			/* fall through */
+		case 4:
+			if (cqe->res != -ECANCELED && cqe->res != -EINVAL) {
+				fprintf(stderr, "Invalid link timeout got %d"
+					", wanted -ECANCELED || -EINVAL\n", cqe->res);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test linked timeout with timeout (timeoutception): a normal timeout
+ * request with a link timeout chained behind it. Older kernels reject
+ * the link with -EINVAL, newer ones let the timeout fire (-ETIME);
+ * the link timeout itself must be cancelled either way.
+ */
+static int test_single_link_timeout_ception(struct io_uring *ring)
+{
+	struct __kernel_timespec ts1, ts2;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	/* regular 1s timeout, count -1U so it only completes by expiring */
+	ts1.tv_sec = 1;
+	ts1.tv_nsec = 0;
+	io_uring_prep_timeout(sqe, &ts1, -1U, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	/* 2s link timeout guarding the 1s timeout above */
+	ts2.tv_sec = 2;
+	ts2.tv_nsec = 0;
+	io_uring_prep_link_timeout(sqe, &ts2, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret != 2) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		switch (cqe->user_data) {
+		case 1:
+			/* newer kernels allow timeout links */
+			if (cqe->res != -EINVAL && cqe->res != -ETIME) {
+				fprintf(stderr, "Timeout got %d, wanted "
+					"-EINVAL or -ETIME\n", cqe->res);
+				goto err;
+			}
+			break;
+		case 2:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "Link timeout got %d, wanted -ECANCELED\n", cqe->res);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test linked timeout with NOP: the nop completes immediately with 0,
+ * so the link timeout never fires and must be cancelled (-ECANCELED).
+ */
+static int test_single_link_timeout_nop(struct io_uring *ring)
+{
+	struct __kernel_timespec ts;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	ts.tv_sec = 1;
+	ts.tv_nsec = 0;
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret != 2) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		switch (cqe->user_data) {
+		case 1:
+			if (cqe->res) {
+				fprintf(stderr, "NOP got %d, wanted 0\n", cqe->res);
+				goto err;
+			}
+			break;
+		case 2:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "Link timeout got %d, wanted -ECANCELED\n", cqe->res);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test read that will not complete, with a linked timeout behind it that
+ * has errors in the SQE: the timeout carries a bogus ioprio, so it must
+ * fail with -EINVAL and the read must be cancelled.
+ */
+static int test_single_link_timeout_error(struct io_uring *ring)
+{
+	struct __kernel_timespec ts;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int fds[2], ret, i;
+	struct iovec iov;
+	char buffer[128];
+
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	/* read from the empty pipe: never becomes ready */
+	iov.iov_base = buffer;
+	iov.iov_len = sizeof(buffer);
+	io_uring_prep_readv(sqe, fds[0], &iov, 1, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	ts.tv_sec = 1;
+	ts.tv_nsec = 0;
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	/* set invalid field, it'll get failed */
+	sqe->ioprio = 89;
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret != 2) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		switch (cqe->user_data) {
+		case 1:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "Read got %d, wanted -ECANCELED\n",
+						cqe->res);
+				goto err;
+			}
+			break;
+		case 2:
+			if (cqe->res != -EINVAL) {
+				fprintf(stderr, "Link timeout got %d, wanted -EINVAL\n", cqe->res);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	/* don't leak the pipe (matches test_single_link_timeout) */
+	close(fds[0]);
+	close(fds[1]);
+	return 0;
+err:
+	close(fds[0]);
+	close(fds[1]);
+	return 1;
+}
+
+/*
+ * Test read that will complete, with a linked timeout behind it: a write
+ * to the other pipe end satisfies the read, so both transfer the full
+ * buffer and the link timeout is cancelled.
+ */
+static int test_single_link_no_timeout(struct io_uring *ring)
+{
+	struct __kernel_timespec ts;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int fds[2], ret, i;
+	struct iovec iov;
+	char buffer[128];
+
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	iov.iov_base = buffer;
+	iov.iov_len = sizeof(buffer);
+	io_uring_prep_readv(sqe, fds[0], &iov, 1, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	ts.tv_sec = 1;
+	ts.tv_nsec = 0;
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->user_data = 2;
+
+	/* unlinked write that makes the read complete */
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	iov.iov_base = buffer;
+	iov.iov_len = sizeof(buffer);
+	io_uring_prep_writev(sqe, fds[1], &iov, 1, 0);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(ring);
+	if (ret != 3) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		switch (cqe->user_data) {
+		case 1:
+		case 3:
+			if (cqe->res != sizeof(buffer)) {
+				fprintf(stderr, "R/W got %d, wanted %d\n", cqe->res,
+						(int) sizeof(buffer));
+				goto err;
+			}
+			break;
+		case 2:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "Link timeout %d, wanted -ECANCELED\n",
+						cqe->res);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	/* don't leak the pipe (matches test_single_link_timeout) */
+	close(fds[0]);
+	close(fds[1]);
+	return 0;
+err:
+	close(fds[0]);
+	close(fds[1]);
+	return 1;
+}
+
+/*
+ * Test read that will not complete, with a linked timeout behind it.
+ * nsec controls how long the timeout waits before cancelling the read.
+ * NOTE(review): the err path leaks fds[0]/fds[1] — confirm upstream.
+ */
+static int test_single_link_timeout(struct io_uring *ring, unsigned nsec)
+{
+	struct __kernel_timespec ts;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int fds[2], ret, i;
+	struct iovec iov;
+	char buffer[128];
+
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	/* read from the empty pipe: never becomes ready */
+	iov.iov_base = buffer;
+	iov.iov_len = sizeof(buffer);
+	io_uring_prep_readv(sqe, fds[0], &iov, 1, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	ts.tv_sec = 0;
+	ts.tv_nsec = nsec;
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret != 2) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		switch (cqe->user_data) {
+		case 1:
+			if (cqe->res != -EINTR && cqe->res != -ECANCELED) {
+				fprintf(stderr, "Read got %d\n", cqe->res);
+				goto err;
+			}
+			break;
+		case 2:
+			if (cqe->res != -EALREADY && cqe->res != -ETIME &&
+			    cqe->res != 0) {
+				fprintf(stderr, "Link timeout got %d\n", cqe->res);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	close(fds[0]);
+	close(fds[1]);
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Chain: blocking pipe read -> link timeout -> nop. The timeout fires,
+ * failing the read and cancelling the trailing nop.
+ * NOTE(review): fds are not closed on any path — confirm upstream.
+ */
+static int test_timeout_link_chain1(struct io_uring *ring)
+{
+	struct __kernel_timespec ts;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int fds[2], ret, i;
+	struct iovec iov;
+	char buffer[128];
+
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	iov.iov_base = buffer;
+	iov.iov_len = sizeof(buffer);
+	io_uring_prep_readv(sqe, fds[0], &iov, 1, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	/* 1ms link timeout guarding the read */
+	ts.tv_sec = 0;
+	ts.tv_nsec = 1000000;
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 2;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_nop(sqe);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(ring);
+	if (ret != 3) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		switch (cqe->user_data) {
+		case 1:
+			if (cqe->res != -EINTR && cqe->res != -ECANCELED) {
+				fprintf(stderr, "Req %" PRIu64 " got %d\n", (uint64_t) cqe->user_data,
+						cqe->res);
+				goto err;
+			}
+			break;
+		case 2:
+			/* FASTPOLL kernels can cancel successfully */
+			if (cqe->res != -EALREADY && cqe->res != -ETIME) {
+				fprintf(stderr, "Req %" PRIu64 " got %d\n", (uint64_t) cqe->user_data,
+						cqe->res);
+				goto err;
+			}
+			break;
+		case 3:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "Req %" PRIu64 " got %d\n", (uint64_t) cqe->user_data,
+						cqe->res);
+				goto err;
+			}
+			break;
+		}
+
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Chain: poll on an empty pipe -> link timeout -> nop -> nop, all linked.
+ * The timeout fires (-ETIME); the poll and both nops end -ECANCELED.
+ */
+static int test_timeout_link_chain2(struct io_uring *ring)
+{
+	struct __kernel_timespec ts;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int fds[2], ret, i;
+
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	/* 1ms link timeout guarding the poll */
+	ts.tv_sec = 0;
+	ts.tv_nsec = 1000000;
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 2;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_nop(sqe);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 3;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_nop(sqe);
+	sqe->user_data = 4;
+
+	ret = io_uring_submit(ring);
+	if (ret != 4) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 4; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		switch (cqe->user_data) {
+		/* poll cancel really should return -ECANCEL... */
+		case 1:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "Req %" PRIu64 " got %d\n", (uint64_t) cqe->user_data,
+						cqe->res);
+				goto err;
+			}
+			break;
+		case 2:
+			if (cqe->res != -ETIME) {
+				fprintf(stderr, "Req %" PRIu64 " got %d\n", (uint64_t) cqe->user_data,
+						cqe->res);
+				goto err;
+			}
+			break;
+		case 3:
+		case 4:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "Req %" PRIu64 " got %d\n", (uint64_t) cqe->user_data,
+						cqe->res);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Two chains plus a lone nop: poll -> link timeout -> nop, then
+ * poll -> link timeout, then an unlinked nop. The first timeout fires
+ * (-ETIME) cancelling everything linked (1, 3, 4, 5); the lone nop (6)
+ * succeeds.
+ */
+static int test_timeout_link_chain3(struct io_uring *ring)
+{
+	struct __kernel_timespec ts;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int fds[2], ret, i;
+
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	ts.tv_sec = 0;
+	ts.tv_nsec = 1000000;
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 2;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_nop(sqe);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 3;
+
+	/* POLL -> TIMEOUT -> NOP */
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 4;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	ts.tv_sec = 0;
+	ts.tv_nsec = 1000000;
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->user_data = 5;
+
+	/* poll on pipe + timeout */
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_nop(sqe);
+	sqe->user_data = 6;
+
+	/* nop */
+
+	ret = io_uring_submit(ring);
+	if (ret != 6) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 6; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		switch (cqe->user_data) {
+		case 2:
+			if (cqe->res != -ETIME) {
+				fprintf(stderr, "Req %" PRIu64 " got %d\n", (uint64_t) cqe->user_data,
+						cqe->res);
+				goto err;
+			}
+			break;
+		case 1:
+		case 3:
+		case 4:
+		case 5:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "Req %" PRIu64 " got %d\n", (uint64_t) cqe->user_data,
+						cqe->res);
+				goto err;
+			}
+			break;
+		case 6:
+			if (cqe->res) {
+				fprintf(stderr, "Req %" PRIu64 " got %d\n", (uint64_t) cqe->user_data,
+						cqe->res);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Chain: nop -> poll on empty pipe -> link timeout. The nop succeeds,
+ * the timeout fires (-ETIME) and the poll ends -ECANCELED.
+ */
+static int test_timeout_link_chain4(struct io_uring *ring)
+{
+	struct __kernel_timespec ts;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int fds[2], ret, i;
+
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_nop(sqe);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 2;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	/* 1ms link timeout guarding the poll */
+	ts.tv_sec = 0;
+	ts.tv_nsec = 1000000;
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(ring);
+	if (ret != 3) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		switch (cqe->user_data) {
+		/* poll cancel really should return -ECANCEL... */
+		case 1:
+			if (cqe->res) {
+				fprintf(stderr, "Req %" PRIu64 " got %d\n", (uint64_t) cqe->user_data,
+						cqe->res);
+				goto err;
+			}
+			break;
+		case 2:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "Req %" PRIu64 " got %d\n", (uint64_t) cqe->user_data,
+						cqe->res);
+				goto err;
+			}
+			break;
+		case 3:
+			if (cqe->res != -ETIME) {
+				fprintf(stderr, "Req %" PRIu64 " got %d\n", (uint64_t) cqe->user_data,
+						cqe->res);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Chain: nop -> link timeout -> link timeout. The nop completes with 0
+ * and the first link timeout is cancelled. The third CQE (user_data 3)
+ * is reaped but its result is intentionally not validated here.
+ */
+static int test_timeout_link_chain5(struct io_uring *ring)
+{
+	struct __kernel_timespec ts1, ts2;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_nop(sqe);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	ts1.tv_sec = 1;
+	ts1.tv_nsec = 0;
+	io_uring_prep_link_timeout(sqe, &ts1, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 2;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+	ts2.tv_sec = 2;
+	ts2.tv_nsec = 0;
+	io_uring_prep_link_timeout(sqe, &ts2, 0);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(ring);
+	if (ret != 3) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		switch (cqe->user_data) {
+		case 1:
+			/* case 1 is the nop, which must complete with 0 */
+			if (cqe->res) {
+				fprintf(stderr, "Nop got %d, wanted 0\n",
+						cqe->res);
+				goto err;
+			}
+			break;
+		case 2:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "Link timeout got %d, wanted -ECANCELED\n", cqe->res);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Entry point: run all linked-timeout regression tests against one
+ * shared ring. Each test reaps its own completions, so the ring is
+ * reusable between tests.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret;
+
+	/* no-op when invoked with any argument */
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+	}
+
+	ret = test_timeout_link_chain1(&ring);
+	if (ret) {
+		printf("test_single_link_chain1 failed\n");
+		return ret;
+	}
+
+	ret = test_timeout_link_chain2(&ring);
+	if (ret) {
+		printf("test_single_link_chain2 failed\n");
+		return ret;
+	}
+
+	ret = test_timeout_link_chain3(&ring);
+	if (ret) {
+		printf("test_single_link_chain3 failed\n");
+		return ret;
+	}
+
+	ret = test_timeout_link_chain4(&ring);
+	if (ret) {
+		printf("test_single_link_chain4 failed\n");
+		return ret;
+	}
+
+	ret = test_timeout_link_chain5(&ring);
+	if (ret) {
+		printf("test_single_link_chain5 failed\n");
+		return ret;
+	}
+
+	/* cover short, medium and long timeout windows */
+	ret = test_single_link_timeout(&ring, 10);
+	if (ret) {
+		printf("test_single_link_timeout 10 failed\n");
+		return ret;
+	}
+
+	ret = test_single_link_timeout(&ring, 100000ULL);
+	if (ret) {
+		printf("test_single_link_timeout 100000 failed\n");
+		return ret;
+	}
+
+	ret = test_single_link_timeout(&ring, 500000000ULL);
+	if (ret) {
+		printf("test_single_link_timeout 500000000 failed\n");
+		return ret;
+	}
+
+	ret = test_single_link_no_timeout(&ring);
+	if (ret) {
+		printf("test_single_link_no_timeout failed\n");
+		return ret;
+	}
+
+	ret = test_single_link_timeout_error(&ring);
+	if (ret) {
+		printf("test_single_link_timeout_error failed\n");
+		return ret;
+	}
+
+	ret = test_single_link_timeout_nop(&ring);
+	if (ret) {
+		printf("test_single_link_timeout_nop failed\n");
+		return ret;
+	}
+
+	ret = test_single_link_timeout_ception(&ring);
+	if (ret) {
+		printf("test_single_link_timeout_ception failed\n");
+		return ret;
+	}
+
+	ret = test_fail_lone_link_timeouts(&ring);
+	if (ret) {
+		printf("test_fail_lone_link_timeouts failed\n");
+		return ret;
+	}
+
+	ret = test_fail_two_link_timeouts(&ring);
+	if (ret) {
+		printf("test_fail_two_link_timeouts failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/link.c b/test/link.c
new file mode 100644
index 0000000..c89d6b2
--- /dev/null
+++ b/test/link.c
@@ -0,0 +1,496 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various linked sqe tests
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+static int no_hardlink;
+
+/*
+ * Hard-linked timeout -> nop chain: the timeout is expected to complete
+ * with -ETIME, but because the link uses IOSQE_IO_HARDLINK the dependent
+ * nop must still run and succeed. If the kernel rejects the hardlink
+ * flag with -EINVAL, sets the global no_hardlink and skips.
+ * Returns 0 on success, 1 on failure.
+ */
+static int test_single_hardlink(struct io_uring *ring)
+{
+	struct __kernel_timespec ts;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	/* 10ms timeout, head of the hard-linked chain */
+	ts.tv_sec = 0;
+	ts.tv_nsec = 10000000ULL;
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->flags |= IOSQE_IO_LINK | IOSQE_IO_HARDLINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_nop(sqe);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "wait completion %d\n", ret);
+			goto err;
+		}
+		if (!cqe) {
+			fprintf(stderr, "failed to get cqe\n");
+			goto err;
+		}
+		if (no_hardlink)
+			goto next;
+		/* kernel without hardlink support fails the head with -EINVAL */
+		if (cqe->user_data == 1 && cqe->res == -EINVAL) {
+			fprintf(stdout, "Hard links not supported, skipping\n");
+			no_hardlink = 1;
+			goto next;
+		}
+		if (cqe->user_data == 1 && cqe->res != -ETIME) {
+			fprintf(stderr, "timeout failed with %d\n", cqe->res);
+			goto err;
+		}
+		if (cqe->user_data == 2 && cqe->res) {
+			fprintf(stderr, "nop failed with %d\n", cqe->res);
+			goto err;
+		}
+next:
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Two hard-linked timeouts followed by a nop: both timeouts are expected
+ * to expire with -ETIME and the trailing nop must still complete, since
+ * IOSQE_IO_HARDLINK keeps the chain alive across -ETIME. No-op if the
+ * earlier hardlink probe set no_hardlink. Returns 0 on success.
+ */
+static int test_double_hardlink(struct io_uring *ring)
+{
+	struct __kernel_timespec ts1, ts2;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i;
+
+	if (no_hardlink)
+		return 0;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	/* first timeout: 10ms */
+	ts1.tv_sec = 0;
+	ts1.tv_nsec = 10000000ULL;
+	io_uring_prep_timeout(sqe, &ts1, 0, 0);
+	sqe->flags |= IOSQE_IO_LINK | IOSQE_IO_HARDLINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	/* second timeout: 15ms, hard-linked to the first */
+	ts2.tv_sec = 0;
+	ts2.tv_nsec = 15000000ULL;
+	io_uring_prep_timeout(sqe, &ts2, 0, 0);
+	sqe->flags |= IOSQE_IO_LINK | IOSQE_IO_HARDLINK;
+	sqe->user_data = 2;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_nop(sqe);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "wait completion %d\n", ret);
+			goto err;
+		}
+		if (!cqe) {
+			fprintf(stderr, "failed to get cqe\n");
+			goto err;
+		}
+		if (cqe->user_data == 1 && cqe->res != -ETIME) {
+			fprintf(stderr, "timeout failed with %d\n", cqe->res);
+			goto err;
+		}
+		if (cqe->user_data == 2 && cqe->res != -ETIME) {
+			fprintf(stderr, "timeout failed with %d\n", cqe->res);
+			goto err;
+		}
+		if (cqe->user_data == 3 && cqe->res) {
+			fprintf(stderr, "nop failed with %d\n", cqe->res);
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+
+}
+
+/*
+ * Test failing head of chain, and dependent getting -ECANCELED.
+ * NOTE(review): main() runs this on the IORING_SETUP_IOPOLL ring, where
+ * the head nop is expected to fail with -EINVAL — presumably because nop
+ * is not supported under IOPOLL; confirm against kernel behavior.
+ * Uses peek (not wait) since both completions are posted at submit time.
+ */
+static int test_single_link_fail(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+	sqe->flags |= IOSQE_IO_LINK;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_peek_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		if (!cqe) {
+			printf("failed to get cqe\n");
+			goto err;
+		}
+		if (i == 0 && cqe->res != -EINVAL) {
+			printf("sqe0 failed with %d, wanted -EINVAL\n", cqe->res);
+			goto err;
+		}
+		if (i == 1 && cqe->res != -ECANCELED) {
+			printf("sqe1 failed with %d, wanted -ECANCELED\n", cqe->res);
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test two independent link chains (nop->nop twice) submitted together;
+ * all four completions must arrive. Results are not inspected, only
+ * reaped. Returns 0 on success, 1 on failure.
+ */
+static int test_double_chain(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i;
+
+	/* chain 1: linked nop -> nop */
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+	sqe->flags |= IOSQE_IO_LINK;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+
+	/* chain 2: linked nop -> nop */
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+	sqe->flags |= IOSQE_IO_LINK;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 4; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test a three-deep chain (nop -link-> nop -link-> nop): multiple
+ * dependents hanging off the same chain. Completions are reaped but
+ * their results are not inspected. Returns 0 on success.
+ */
+static int test_double_link(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+	sqe->flags |= IOSQE_IO_LINK;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+	sqe->flags |= IOSQE_IO_LINK;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test a single dependency: nop -link-> nop. Both completions must
+ * arrive; results are not inspected. Returns 0 on success.
+ */
+static int test_single_link(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+	sqe->flags |= IOSQE_IO_LINK;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Submit a chain whose head (readv on an invalid fd with a zero-length
+ * iovec) fails early, then submit_and_wait for both completions. This
+ * exercises the submit-time failure path combined with waiting; -EAGAIN
+ * from submit is tolerated. Uses a private ring since the test leaves
+ * the ring dirty. Returns 0 on success, 1 on failure.
+ */
+static int test_early_fail_and_wait(void)
+{
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	int ret, invalid_fd = 42;
+	struct iovec iov = { .iov_base = NULL, .iov_len = 0 };
+
+	/* create a new ring as it leaves it dirty */
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_readv(sqe, invalid_fd, &iov, 1, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		printf("get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+
+	ret = io_uring_submit_and_wait(&ring, 2);
+	if (ret <= 0 && ret != -EAGAIN) {
+		printf("sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
+
+/*
+ * Entry point: runs the link tests on a normal ring, plus the
+ * single-link-fail test on an IOPOLL ring. Any argument skips the test
+ * (runner convention for "no file argument supported").
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring, poll_ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+
+	}
+
+	ret = io_uring_queue_init(8, &poll_ring, IORING_SETUP_IOPOLL);
+	if (ret) {
+		printf("poll_ring setup failed\n");
+		return 1;
+	}
+
+	ret = test_single_link(&ring);
+	if (ret) {
+		printf("test_single_link failed\n");
+		return ret;
+	}
+
+	ret = test_double_link(&ring);
+	if (ret) {
+		printf("test_double_link failed\n");
+		return ret;
+	}
+
+	ret = test_double_chain(&ring);
+	if (ret) {
+		printf("test_double_chain failed\n");
+		return ret;
+	}
+
+	/* run on the IOPOLL ring: the nop head is expected to fail there */
+	ret = test_single_link_fail(&poll_ring);
+	if (ret) {
+		printf("test_single_link_fail failed\n");
+		return ret;
+	}
+
+	ret = test_single_hardlink(&ring);
+	if (ret) {
+		fprintf(stderr, "test_single_hardlink\n");
+		return ret;
+	}
+
+	ret = test_double_hardlink(&ring);
+	if (ret) {
+		fprintf(stderr, "test_double_hardlink\n");
+		return ret;
+	}
+
+	ret = test_early_fail_and_wait();
+	if (ret) {
+		fprintf(stderr, "test_early_fail_and_wait\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/link_drain.c b/test/link_drain.c
new file mode 100644
index 0000000..a50fe88
--- /dev/null
+++ b/test/link_drain.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test io_uring link io with drain io
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+/*
+ * One drain inside a link chain: heavy write, then nop(link) ->
+ * nop(link|drain) -> nop, then a plain nop. The drain forces everything
+ * before it to finish first, so completions must arrive exactly in
+ * submission order 0..4 (checked via user_data recorded per CQE).
+ * Returns 0 on success, 1 on failure.
+ */
+static int test_link_drain_one(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe[5];
+	struct iovec iovecs;
+	int i, fd, ret;
+	off_t off = 0;
+	char data[5] = {0};
+	char expect[5] = {0, 1, 2, 3, 4};
+
+	fd = open("testfile", O_WRONLY | O_CREAT, 0644);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	iovecs.iov_base = t_malloc(4096);
+	iovecs.iov_len = 4096;
+
+	for (i = 0; i < 5; i++) {
+		sqe[i] = io_uring_get_sqe(ring);
+		if (!sqe[i]) {
+			printf("get sqe failed\n");
+			goto err;
+		}
+	}
+
+	/* normal heavy io */
+	io_uring_prep_writev(sqe[0], fd, &iovecs, 1, off);
+	sqe[0]->user_data = 0;
+
+	/* link io */
+	io_uring_prep_nop(sqe[1]);
+	sqe[1]->flags |= IOSQE_IO_LINK;
+	sqe[1]->user_data = 1;
+
+	/* link drain io */
+	io_uring_prep_nop(sqe[2]);
+	sqe[2]->flags |= (IOSQE_IO_LINK | IOSQE_IO_DRAIN);
+	sqe[2]->user_data = 2;
+
+	/* link io */
+	io_uring_prep_nop(sqe[3]);
+	sqe[3]->user_data = 3;
+
+	/* normal nop io */
+	io_uring_prep_nop(sqe[4]);
+	sqe[4]->user_data = 4;
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		printf("sqe submit failed\n");
+		goto err;
+	} else if (ret < 5) {
+		printf("Submitted only %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 5; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("child: wait completion %d\n", ret);
+			goto err;
+		}
+
+		/* record arrival order by user_data (values all fit in char) */
+		data[i] = cqe->user_data;
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	/* completions must be in strict submission order */
+	if (memcmp(data, expect, 5) != 0)
+		goto err;
+
+	free(iovecs.iov_base);
+	close(fd);
+	unlink("testfile");
+	return 0;
+err:
+	free(iovecs.iov_base);
+	close(fd);
+	unlink("testfile");
+	return 1;
+}
+
+/*
+ * Two link chains each containing a drain: chain1 is nop(link) ->
+ * nop(link|drain) -> nop, chain2 is nop(link) -> nop(link) ->
+ * writev(link|drain) -> nop, bracketed by a heavy write and a plain
+ * nop. All nine completions must arrive in submission order 0..8.
+ * Returns 0 on success, 1 on failure.
+ */
+int test_link_drain_multi(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe[9];
+	struct iovec iovecs;
+	int i, fd, ret;
+	off_t off = 0;
+	char data[9] = {0};
+	char expect[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+
+	fd = open("testfile", O_WRONLY | O_CREAT, 0644);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	iovecs.iov_base = t_malloc(4096);
+	iovecs.iov_len = 4096;
+
+	for (i = 0; i < 9; i++) {
+		sqe[i] = io_uring_get_sqe(ring);
+		if (!sqe[i]) {
+			printf("get sqe failed\n");
+			goto err;
+		}
+	}
+
+	/* normal heavy io */
+	io_uring_prep_writev(sqe[0], fd, &iovecs, 1, off);
+	sqe[0]->user_data = 0;
+
+	/* link1 io head */
+	io_uring_prep_nop(sqe[1]);
+	sqe[1]->flags |= IOSQE_IO_LINK;
+	sqe[1]->user_data = 1;
+
+	/* link1 drain io */
+	io_uring_prep_nop(sqe[2]);
+	sqe[2]->flags |= (IOSQE_IO_LINK | IOSQE_IO_DRAIN);
+	sqe[2]->user_data = 2;
+
+	/* link1 io end */
+	io_uring_prep_nop(sqe[3]);
+	sqe[3]->user_data = 3;
+
+	/* link2 io head */
+	io_uring_prep_nop(sqe[4]);
+	sqe[4]->flags |= IOSQE_IO_LINK;
+	sqe[4]->user_data = 4;
+
+	/* link2 io */
+	io_uring_prep_nop(sqe[5]);
+	sqe[5]->flags |= IOSQE_IO_LINK;
+	sqe[5]->user_data = 5;
+
+	/* link2 drain io */
+	io_uring_prep_writev(sqe[6], fd, &iovecs, 1, off);
+	sqe[6]->flags |= (IOSQE_IO_LINK | IOSQE_IO_DRAIN);
+	sqe[6]->user_data = 6;
+
+	/* link2 io end */
+	io_uring_prep_nop(sqe[7]);
+	sqe[7]->user_data = 7;
+
+	/* normal io */
+	io_uring_prep_nop(sqe[8]);
+	sqe[8]->user_data = 8;
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		printf("sqe submit failed\n");
+		goto err;
+	} else if (ret < 9) {
+		printf("Submitted only %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 9; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("child: wait completion %d\n", ret);
+			goto err;
+		}
+
+		/* record arrival order by user_data */
+		data[i] = cqe->user_data;
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	/* completions must be in strict submission order */
+	if (memcmp(data, expect, 9) != 0)
+		goto err;
+
+	free(iovecs.iov_base);
+	close(fd);
+	unlink("testfile");
+	return 0;
+err:
+	free(iovecs.iov_base);
+	close(fd);
+	unlink("testfile");
+	return 1;
+
+}
+
+/*
+ * Entry point: repeats both link-drain orderings 1000 times on one ring
+ * to shake out racy misorderings. Any argument skips the test.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int i, ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(100, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+	}
+
+	for (i = 0; i < 1000; i++) {
+		ret = test_link_drain_one(&ring);
+		if (ret) {
+			fprintf(stderr, "test_link_drain_one failed\n");
+			break;
+		}
+		ret = test_link_drain_multi(&ring);
+		if (ret) {
+			fprintf(stderr, "test_link_drain_multi failed\n");
+			break;
+		}
+	}
+
+	return ret;
+}
diff --git a/test/madvise.c b/test/madvise.c
new file mode 100644
index 0000000..89057af
--- /dev/null
+++ b/test/madvise.c
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: basic madvise test
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/mman.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define FILE_SIZE	(128 * 1024)
+
+#define LOOPS		100
+#define MIN_LOOPS	10
+
+/*
+ * Return elapsed microseconds from *s to *e, normalizing a negative
+ * usec delta by borrowing from the seconds field.
+ */
+static unsigned long long utime_since(const struct timeval *s,
+				      const struct timeval *e)
+{
+	long long sec, usec;
+
+	sec = e->tv_sec - s->tv_sec;
+	usec = (e->tv_usec - s->tv_usec);
+	if (sec > 0 && usec < 0) {
+		sec--;
+		usec += 1000000;
+	}
+
+	sec *= 1000000;
+	return sec + usec;
+}
+
+/* Return microseconds elapsed from *tv until now. */
+static unsigned long long utime_since_now(struct timeval *tv)
+{
+	struct timeval end;
+
+	gettimeofday(&end, NULL);
+	return utime_since(tv, &end);
+}
+
+/*
+ * Issue a single IORING_OP_MADVISE for [addr, addr+len) with the given
+ * advice and return the CQE result (0 on success). Exits the whole test
+ * with status 0 if the kernel reports the opcode unsupported (-EINVAL /
+ * -EBADF). NOTE(review): unlinks the hardcoded ".madvise.tmp" on skip
+ * even when main() was given a user-supplied filename — confirm intent.
+ */
+static int do_madvise(struct io_uring *ring, void *addr, off_t len, int advice)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "failed to get sqe\n");
+		return 1;
+	}
+
+	io_uring_prep_madvise(sqe, addr, len, advice);
+	sqe->user_data = advice;
+	ret = io_uring_submit_and_wait(ring, 1);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait: %d\n", ret);
+		return 1;
+	}
+
+	ret = cqe->res;
+	if (ret == -EINVAL || ret == -EBADF) {
+		fprintf(stdout, "Madvise not supported, skipping\n");
+		unlink(".madvise.tmp");
+		exit(0);
+	} else if (ret) {
+		fprintf(stderr, "cqe->res=%d\n", cqe->res);
+	}
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+}
+
+/*
+ * memcpy FILE_SIZE bytes from the mapping into buf and return the copy
+ * time in microseconds. The fd parameter is unused here; kept for
+ * signature symmetry with the callers' context.
+ */
+static long do_copy(int fd, char *buf, void *ptr)
+{
+	struct timeval tv;
+
+	gettimeofday(&tv, NULL);
+	memcpy(buf, ptr, FILE_SIZE);
+	return utime_since_now(&tv);
+}
+
+/*
+ * Time copies from an mmap'd file around MADV_DONTNEED/MADV_WILLNEED to
+ * check that madvise-via-io_uring takes effect: an uncached copy (after
+ * DONTNEED) should be slower than cached ones (warmed, or after
+ * WILLNEED). Returns 0 if timings look right, 2 if suspicious (caller
+ * tolerates some of these), 1 on hard error.
+ * NOTE(review): do_copy() never returns -1, so the == -1 checks below
+ * are dead; also buf and ptr are never freed/unmapped — leak per call.
+ */
+static int test_madvise(struct io_uring *ring, const char *filename)
+{
+	unsigned long cached_read, uncached_read, cached_read2;
+	int fd, ret;
+	char *buf;
+	void *ptr;
+
+	fd = open(filename, O_RDONLY);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	buf = t_malloc(FILE_SIZE);
+
+	ptr = mmap(NULL, FILE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
+	if (ptr == MAP_FAILED) {
+		perror("mmap");
+		return 1;
+	}
+
+	/* copy twice so the second timing reflects a fully warmed cache */
+	cached_read = do_copy(fd, buf, ptr);
+	if (cached_read == -1)
+		return 1;
+
+	cached_read = do_copy(fd, buf, ptr);
+	if (cached_read == -1)
+		return 1;
+
+	ret = do_madvise(ring, ptr, FILE_SIZE, MADV_DONTNEED);
+	if (ret)
+		return 1;
+
+	uncached_read = do_copy(fd, buf, ptr);
+	if (uncached_read == -1)
+		return 1;
+
+	ret = do_madvise(ring, ptr, FILE_SIZE, MADV_DONTNEED);
+	if (ret)
+		return 1;
+
+	ret = do_madvise(ring, ptr, FILE_SIZE, MADV_WILLNEED);
+	if (ret)
+		return 1;
+
+	msync(ptr, FILE_SIZE, MS_SYNC);
+
+	cached_read2 = do_copy(fd, buf, ptr);
+	if (cached_read2 == -1)
+		return 1;
+
+	if (cached_read < uncached_read &&
+	    cached_read2 < uncached_read)
+		return 0;
+
+	return 2;
+}
+
+/*
+ * Entry point: runs test_madvise up to LOOPS times (at least MIN_LOOPS),
+ * tallying good vs suspicious timing runs; only warns if suspicious
+ * outnumber good, since timings are inherently noisy. An optional
+ * argument names the test file; otherwise a temp file is created.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret, i, good, bad;
+	char *fname;
+
+	if (argc > 1) {
+		fname = argv[1];
+	} else {
+		fname = ".madvise.tmp";
+		t_create_file(fname, FILE_SIZE);
+	}
+
+	if (io_uring_queue_init(8, &ring, 0)) {
+		fprintf(stderr, "ring creation failed\n");
+		goto err;
+	}
+
+	good = bad = 0;
+	for (i = 0; i < LOOPS; i++) {
+		ret = test_madvise(&ring, fname);
+		if (ret == 1) {
+			fprintf(stderr, "test_madvise failed\n");
+			goto err;
+		} else if (!ret)
+			good++;
+		else if (ret == 2)
+			bad++;
+		/* stop early once we have enough clean runs */
+		if (i >= MIN_LOOPS && !bad)
+			break;
+	}
+
+	if (bad > good)
+		fprintf(stderr, "Suspicious timings (%u > %u)\n", bad, good);
+	/* only remove the file if we created it ourselves */
+	if (fname != argv[1])
+		unlink(fname);
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	if (fname != argv[1])
+		unlink(fname);
+	return 1;
+}
diff --git a/test/mkdir.c b/test/mkdir.c
new file mode 100644
index 0000000..c044652
--- /dev/null
+++ b/test/mkdir.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test io_uring mkdirat handling
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "liburing.h"
+
+/*
+ * Issue a single IORING_OP_MKDIRAT for fn (mode 0700, relative to CWD)
+ * and return the CQE result: 0 on success, negative errno from the
+ * kernel, or 1 on ring/plumbing failure.
+ */
+static int do_mkdirat(struct io_uring *ring, const char *fn)
+{
+	int ret;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "sqe get failed\n");
+		goto err;
+	}
+	io_uring_prep_mkdirat(sqe, AT_FDCWD, fn, 0700);
+
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqes(ring, &cqe, 1, 0, 0);
+	if (ret) {
+		fprintf(stderr, "wait_cqe failed: %d\n", ret);
+		goto err;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+err:
+	return 1;
+}
+
+/*
+ * Return 0 if fn exists (stat succeeds), else the stat errno.
+ * NOTE(review): uses errno but <errno.h> is not among this file's
+ * visible includes — presumably pulled in transitively; confirm.
+ */
+static int stat_file(const char *fn)
+{
+	struct stat sb;
+
+	if (!stat(fn, &sb))
+		return 0;
+
+	return errno;
+}
+
+/*
+ * Entry point: creates a directory via io_uring mkdirat, verifies it
+ * exists, then checks the -EEXIST and -ENOENT error paths. Skips
+ * cleanly if the kernel lacks the opcode (-EBADF / -EINVAL).
+ */
+int main(int argc, char *argv[])
+{
+	static const char fn[] = "io_uring-mkdirat-test";
+	int ret;
+	struct io_uring ring;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "queue init failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = do_mkdirat(&ring, fn);
+	if (ret < 0) {
+		if (ret == -EBADF || ret == -EINVAL) {
+			fprintf(stdout, "mkdirat not supported, skipping\n");
+			goto out;
+		}
+		fprintf(stderr, "mkdirat: %s\n", strerror(-ret));
+		goto err;
+	} else if (ret) {
+		goto err;
+	}
+
+	if (stat_file(fn)) {
+		perror("stat");
+		goto err;
+	}
+
+	/* creating the same dir again must report -EEXIST */
+	ret = do_mkdirat(&ring, fn);
+	if (ret != -EEXIST) {
+		fprintf(stderr, "do_mkdirat already exists failed: %d\n", ret);
+		goto err1;
+	}
+
+	/* missing parent directories must report -ENOENT */
+	ret = do_mkdirat(&ring, "surely/this/wont/exist");
+	if (ret != -ENOENT) {
+		fprintf(stderr, "do_mkdirat no parent failed: %d\n", ret);
+		goto err1;
+	}
+
+out:
+	unlinkat(AT_FDCWD, fn, AT_REMOVEDIR);
+	io_uring_queue_exit(&ring);
+	return 0;
+err1:
+	unlinkat(AT_FDCWD, fn, AT_REMOVEDIR);
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
diff --git a/test/multicqes_drain.c b/test/multicqes_drain.c
new file mode 100644
index 0000000..609d583
--- /dev/null
+++ b/test/multicqes_drain.c
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: generic tests for io_uring drain io
+ *
+ * The main idea is to randomly generate different type of sqe to
+ * challenge the drain logic. There are some restrictions for the
+ * generated sqes, details in io_uring maillist:
+ * https://lore.kernel.org/io-uring/39a49b4c-27c2-1035-b250-51daeccaab9b@linux.alibaba.com/
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <sys/poll.h>
+
+#include "liburing.h"
+
+enum {
+	multi,
+	single,
+	nop,
+	cancel,
+	op_last,
+};
+
+struct sqe_info {
+	__u8 op;
+	unsigned flags;
+};
+
+#define max_entry 50
+
+/*
+ * sqe_flags: combination of sqe flags
+ * multi_sqes: record the user_data/index of all the multishot sqes
+ * cnt: how many entries there are in multi_sqes
+ * we can leverage multi_sqes array for cancellation: we randomly pick
+ * up an entry in multi_sqes when form a cancellation sqe.
+ * multi_cap: limitation of number of multishot sqes
+ */
+const unsigned sqe_flags[4] = {0, IOSQE_IO_LINK, IOSQE_IO_DRAIN,
+	IOSQE_IO_LINK | IOSQE_IO_DRAIN};
+int multi_sqes[max_entry], cnt = 0;
+int multi_cap = max_entry / 5;
+
+/*
+ * Write 3 bytes of str to the pipe fd, retrying on EINTR.
+ * Returns the write(2) result (3 on success).
+ */
+int write_pipe(int pipe, char *str)
+{
+	int ret;
+	do {
+		errno = 0;
+		ret = write(pipe, str, 3);
+	} while (ret == -1 && errno == EINTR);
+	return ret;
+}
+
+/* Drain the 3 bytes previously written to the pipe; best-effort. */
+void read_pipe(int pipe)
+{
+	char str[4] = {0};
+	int ret;
+
+	ret = read(pipe, &str, 3);
+	if (ret < 0)
+		perror("read");
+}
+
+/*
+ * Make pipe p[] readable to fire a pending poll, then drain it again so
+ * the pipe is left empty. Returns 0 on success, 1 on short/failed write.
+ */
+int trigger_event(int p[])
+{
+	int ret;
+	if ((ret = write_pipe(p[1], "foo")) != 3) {
+		fprintf(stderr, "bad write return %d\n", ret);
+		return 1;
+	}
+	read_pipe(p[0]);
+	return 0;
+}
+
+/*
+ * Prepare sqe according to the generated opcode: multishot poll, single
+ * poll, nop, or poll-remove (arg is the fd to poll, or the user_data of
+ * the poll to cancel). Overwrites sqe->flags with sqe_flags.
+ */
+void io_uring_sqe_prep(int op, struct io_uring_sqe *sqe, unsigned sqe_flags, int arg)
+{
+	switch (op) {
+		case multi:
+			io_uring_prep_poll_add(sqe, arg, POLLIN);
+			sqe->len |= IORING_POLL_ADD_MULTI;
+			break;
+		case single:
+			io_uring_prep_poll_add(sqe, arg, POLLIN);
+			break;
+		case nop:
+			io_uring_prep_nop(sqe);
+			break;
+		case cancel:
+			io_uring_prep_poll_remove(sqe, (void *)(long)arg);
+			break;
+	}
+	sqe->flags = sqe_flags;
+}
+
+/*
+ * Pick a random sqe flag combination for the given opcode, honoring the
+ * drain/link restrictions described at the top of this file.
+ */
+__u8 generate_flags(int sqe_op)
+{
+	__u8 flags = 0;
+	/*
+	 * drain sqe must be put after multishot sqes cancelled
+	 */
+	do {
+		flags = sqe_flags[rand() % 4];
+	} while ((flags & IOSQE_IO_DRAIN) && cnt);
+
+	/*
+	 * cancel req cannot have drain or link flag
+	 */
+	if (sqe_op == cancel) {
+		flags &= ~(IOSQE_IO_DRAIN | IOSQE_IO_LINK);
+	}
+	/*
+	 * avoid below case:
+	 * sqe0(multishot, link)->sqe1(nop, link)->sqe2(nop)->sqe3(cancel_sqe0)
+	 * sqe3 may execute before sqe0 so that sqe0 isn't cancelled
+	 */
+	if (sqe_op == multi)
+		flags &= ~IOSQE_IO_LINK;
+
+	return flags;
+
+}
+
+/*
+ * function to generate opcode of a sqe
+ * several restrictions here:
+ * - cancel all the previous multishot sqes as soon as possible when
+ *   we reach high watermark.
+ * - ensure there is some multishot sqe when generating a cancel sqe
+ * - ensure a cancel/multishot sqe is not in a link chain
+ * - ensure number of multishot sqes doesn't exceed multi_cap
+ * - don't generate multishot sqes after high watermark
+ */
+int generate_opcode(int i, int pre_flags)
+{
+	int sqe_op;
+	int high_watermark = max_entry - max_entry / 5;
+	bool retry0 = false, retry1 = false, retry2 = false;
+
+	/* past the watermark, force cancels until no multishots remain */
+	if ((i >= high_watermark) && cnt) {
+		sqe_op = cancel;
+	} else {
+		do {
+			sqe_op = rand() % op_last;
+			retry0 = (sqe_op == cancel) && (!cnt || (pre_flags & IOSQE_IO_LINK));
+			retry1 = (sqe_op == multi) && ((multi_cap - 1 < 0) || i >= high_watermark);
+			retry2 = (sqe_op == multi) && (pre_flags & IOSQE_IO_LINK);
+		} while (retry0 || retry1 || retry2);
+	}
+
+	if (sqe_op == multi)
+		multi_cap--;
+	return sqe_op;
+}
+
+/* Record the index of a newly issued multishot sqe for later cancel. */
+inline void add_multishot_sqe(int index)
+{
+	multi_sqes[cnt++] = index;
+}
+
+/*
+ * Pick a random outstanding multishot sqe, remove it from the tracking
+ * array (swap-with-last) and return its index. Caller must ensure
+ * cnt > 0 before calling.
+ */
+int remove_multishot_sqe()
+{
+	int ret;
+
+	int rem_index = rand() % cnt;
+	ret = multi_sqes[rem_index];
+	multi_sqes[rem_index] = multi_sqes[cnt - 1];
+	cnt--;
+
+	return ret;
+}
+
+/*
+ * Randomized drain stress: build max_entry sqes with random opcodes and
+ * flags (per generate_opcode/generate_flags), submit them all, trigger
+ * the poll events, then reap every CQE and verify that each drain sqe
+ * only completed after everything submitted before it had completed
+ * (multishot polls count as complete only once cancelled).
+ * Returns 0 on success, 1 on failure.
+ * NOTE(review): the pipe pairs opened at the top are never closed — fd
+ * leak across the 5 iterations main() runs; confirm acceptable for a test.
+ */
+static int test_generic_drain(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe[max_entry];
+	struct sqe_info si[max_entry];
+	int cqe_data[max_entry << 1], cqe_res[max_entry << 1];
+	int i, j, ret, arg = 0;
+	int pipes[max_entry][2];
+	int pre_flags = 0;
+
+	for (i = 0; i < max_entry; i++) {
+		if (pipe(pipes[i]) != 0) {
+			perror("pipe");
+			return 1;
+		}
+	}
+
+	srand((unsigned)time(NULL));
+	for (i = 0; i < max_entry; i++) {
+		sqe[i] = io_uring_get_sqe(ring);
+		if (!sqe[i]) {
+			printf("get sqe failed\n");
+			goto err;
+		}
+
+		int sqe_op = generate_opcode(i, pre_flags);
+		__u8 flags = generate_flags(sqe_op);
+
+		/* cancel targets a random outstanding multishot's user_data */
+		if (sqe_op == cancel)
+			arg = remove_multishot_sqe();
+		if (sqe_op == multi || sqe_op == single)
+			arg = pipes[i][0];
+		io_uring_sqe_prep(sqe_op, sqe[i], flags, arg);
+		sqe[i]->user_data = i;
+		si[i].op = sqe_op;
+		si[i].flags = flags;
+		pre_flags = flags;
+		if (sqe_op == multi)
+			add_multishot_sqe(i);
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		printf("sqe submit failed\n");
+		goto err;
+	} else if (ret < max_entry) {
+		printf("Submitted only %d\n", ret);
+		goto err;
+	}
+
+	sleep(4);
+	// TODO: randomize event triggering order
+	for (i = 0; i < max_entry; i++) {
+		if (si[i].op != multi && si[i].op != single)
+			continue;
+
+		if (trigger_event(pipes[i]))
+			goto err;
+	}
+	sleep(5);
+	/* reap everything that completed, preserving arrival order */
+	i = 0;
+	while (!io_uring_peek_cqe(ring, &cqe)) {
+		cqe_data[i] = cqe->user_data;
+		cqe_res[i++] = cqe->res;
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	/*
+	 * compl_bits is a bit map to record completions.
+	 * eg. sqe[0], sqe[1], sqe[2] fully completed
+	 * then compl_bits is 000...00111b
+	 */
+	unsigned long long compl_bits = 0;
+	for (j = 0; j < i; j++) {
+		int index = cqe_data[j];
+		if ((si[index].flags & IOSQE_IO_DRAIN) && index) {
+			/* all lower-indexed sqes must already be complete */
+			if ((~compl_bits) & ((1ULL << index) - 1)) {
+				printf("drain failed\n");
+				goto err;
+			}
+		}
+		/*
+		 * for multishot sqes, record them only when it is cancelled
+		 */
+		if ((si[index].op != multi) || (cqe_res[j] == -ECANCELED))
+			compl_bits |= (1ULL << index);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Fixed-scenario drain test: a multishot poll and a single poll, fire
+ * the multishot twice and the single once, then a poll-remove plus a
+ * drained nop. The drained nop must be the last (6th) completion.
+ * Returns 0 on success, 1 on failure.
+ * NOTE(review): the last loop reads cqe->user_data after
+ * io_uring_cqe_seen() has released the CQE — read-after-seen; the pipes
+ * are also never closed. Confirm both are acceptable for a test.
+ */
+static int test_simple_drain(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe[2];
+	int i, ret;
+	int pipe1[2], pipe2[2];
+
+	if (pipe(pipe1) != 0 || pipe(pipe2) != 0) {
+		perror("pipe");
+		return 1;
+	}
+
+	for (i = 0; i < 2; i++) {
+		sqe[i] = io_uring_get_sqe(ring);
+		if (!sqe[i]) {
+			printf("get sqe failed\n");
+			goto err;
+		}
+	}
+
+	/* multishot poll on pipe1, single poll on pipe2 */
+	io_uring_prep_poll_add(sqe[0], pipe1[0], POLLIN);
+	sqe[0]->len |= IORING_POLL_ADD_MULTI;
+	sqe[0]->user_data = 0;
+	io_uring_prep_poll_add(sqe[1], pipe2[0], POLLIN);
+	sqe[1]->user_data = 1;
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		printf("sqe submit failed\n");
+		goto err;
+	} else if (ret < 2) {
+		printf("Submitted only %d\n", ret);
+		goto err;
+	}
+
+	/* fire the multishot twice, the single poll once */
+	for (i = 0; i < 2; i++) {
+		if (trigger_event(pipe1))
+			goto err;
+	}
+	if (trigger_event(pipe2))
+			goto err;
+
+	for (i = 0; i < 2; i++) {
+		sqe[i] = io_uring_get_sqe(ring);
+		if (!sqe[i]) {
+			printf("get sqe failed\n");
+			goto err;
+		}
+	}
+
+	/* cancel the multishot (user_data 0), then a drained nop */
+	io_uring_prep_poll_remove(sqe[0], 0);
+	sqe[0]->user_data = 2;
+	io_uring_prep_nop(sqe[1]);
+	sqe[1]->flags |= IOSQE_IO_DRAIN;
+	sqe[1]->user_data = 3;
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		printf("sqe submit failed\n");
+		goto err;
+	} else if (ret < 2) {
+		printf("Submitted only %d\n", ret);
+		goto err;
+	}
+
+
+	for (i = 0; i < 6; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			printf("wait completion %d\n", ret);
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+		/* the drained nop must complete last */
+		if ((i == 5) && (cqe->user_data != 3))
+			goto err;
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Entry point: runs the fixed drain scenario and then the randomized
+ * drain stress, five rounds each on one large ring. Any argument skips
+ * the test.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int i, ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(1024, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+	}
+
+	for (i = 0; i < 5; i++) {
+		ret = test_simple_drain(&ring);
+		if (ret) {
+			fprintf(stderr, "test_simple_drain failed\n");
+			break;
+		}
+	}
+
+	for (i = 0; i < 5; i++) {
+		ret = test_generic_drain(&ring);
+		if (ret) {
+			fprintf(stderr, "test_generic_drain failed\n");
+			break;
+		}
+	}
+	return ret;
+}
diff --git a/test/nop-all-sizes.c b/test/nop-all-sizes.c
new file mode 100644
index 0000000..49b8642
--- /dev/null
+++ b/test/nop-all-sizes.c
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: exercise full filling of SQ and CQ ring
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+#define MAX_ENTRIES	32768
+
+/*
+ * Fill the SQ ring completely with nop sqes; returns how many fit
+ * (i.e. the number prepared before io_uring_get_sqe() returns NULL).
+ */
+static int fill_nops(struct io_uring *ring)
+{
+	struct io_uring_sqe *sqe;
+	int filled = 0;
+
+	do {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe)
+			break;
+
+		io_uring_prep_nop(sqe);
+		filled++;
+	} while (1);
+
+	return filled;
+}
+
+/*
+ * Fill and submit the SQ ring twice back-to-back (so the CQ ring also
+ * gets fully exercised), then reap every completion. Returns 0 on
+ * success, 1 on failure.
+ */
+static int test_nops(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	int ret, nr, total = 0, i;
+
+	nr = fill_nops(ring);
+	if (nr < 0) {
+		fprintf(stderr, "Fill: %d\n", nr);
+		goto err;
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret != nr) {
+		fprintf(stderr, "submit %d, wanted %d\n", ret, nr);
+		goto err;
+	}
+	total += ret;
+
+	nr = fill_nops(ring);
+	if (nr < 0) {
+		fprintf(stderr, "Fill: %d\n", nr);
+		goto err;
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret != nr) {
+		fprintf(stderr, "submit %d, wanted %d\n", ret, nr);
+		goto err;
+	}
+	total += ret;
+
+	for (i = 0; i < total; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "wait completion %d\n", ret);
+			goto err;
+		}
+
+		io_uring_cqe_seen(ring, cqe);
+	}
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Entry point: runs the fill test at every power-of-two ring depth from
+ * 1 up to MAX_ENTRIES, stopping quietly if a depth fails to allocate
+ * (-ENOMEM). Any argument skips the test.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret, depth;
+
+	if (argc > 1)
+		return 0;
+
+	depth = 1;
+	while (depth <= MAX_ENTRIES) {
+		ret = io_uring_queue_init(depth, &ring, 0);
+		if (ret) {
+			if (ret == -ENOMEM)
+				break;
+			fprintf(stderr, "ring setup failed: %d\n", ret);
+			return 1;
+		}
+
+		ret = test_nops(&ring);
+		if (ret) {
+			fprintf(stderr, "test_single_nop failed\n");
+			return ret;
+		}
+		depth <<= 1;
+		io_uring_queue_exit(&ring);
+	}
+
+	return 0;
+}
diff --git a/test/nop.c b/test/nop.c
new file mode 100644
index 0000000..82201bd
--- /dev/null
+++ b/test/nop.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various nop tests
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+static int test_single_nop(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+
+	io_uring_cqe_seen(ring, cqe);
+	return 0;
+err:
+	return 1;
+}
+
+static int test_barrier_nop(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i;
+
+	for (i = 0; i < 8; i++) {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			fprintf(stderr, "get sqe failed\n");
+			goto err;
+		}
+
+		io_uring_prep_nop(sqe);
+		if (i == 4)
+			sqe->flags = IOSQE_IO_DRAIN;
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	} else if (ret < 8) {
+		fprintf(stderr, "Submitted only %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < 8; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "wait completion %d\n", ret);
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+	}
+
+	ret = test_single_nop(&ring);
+	if (ret) {
+		fprintf(stderr, "test_single_nop failed\n");
+		return ret;
+	}
+
+	ret = test_barrier_nop(&ring);
+	if (ret) {
+		fprintf(stderr, "test_barrier_nop failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/open-close.c b/test/open-close.c
new file mode 100644
index 0000000..648737c
--- /dev/null
+++ b/test/open-close.c
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various openat(2) and close(2) tests
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+static int test_close(struct io_uring *ring, int fd, int is_ring_fd)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_close(sqe, fd);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		if (!(is_ring_fd && ret == -EBADF)) {
+			fprintf(stderr, "wait completion %d\n", ret);
+			goto err;
+		}
+		return ret;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+err:
+	return -1;
+}
+
+static int test_openat(struct io_uring *ring, const char *path, int dfd)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_openat(sqe, dfd, path, O_RDONLY, 0);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+err:
+	return -1;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	const char *path, *path_rel;
+	int ret, do_unlink;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	if (argc > 1) {
+		path = "/tmp/.open.close";
+		path_rel = argv[1];
+		do_unlink = 0;
+	} else {
+		path = "/tmp/.open.close";
+		path_rel = ".open.close";
+		do_unlink = 1;
+	}
+
+	t_create_file(path, 4096);
+
+	if (do_unlink)
+		t_create_file(path_rel, 4096);
+
+	ret = test_openat(&ring, path, -1);
+	if (ret < 0) {
+		if (ret == -EINVAL) {
+			fprintf(stdout, "Open not supported, skipping\n");
+			goto done;
+		}
+		fprintf(stderr, "test_openat absolute failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = test_openat(&ring, path_rel, AT_FDCWD);
+	if (ret < 0) {
+		fprintf(stderr, "test_openat relative failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = test_close(&ring, ret, 0);
+	if (ret) {
+		fprintf(stderr, "test_close normal failed\n");
+		goto err;
+	}
+
+	ret = test_close(&ring, ring.ring_fd, 1);
+	if (ret != -EBADF) {
+		fprintf(stderr, "test_close ring_fd failed\n");
+		goto err;
+	}
+
+done:
+	unlink(path);
+	if (do_unlink)
+		unlink(path_rel);
+	return 0;
+err:
+	unlink(path);
+	if (do_unlink)
+		unlink(path_rel);
+	return 1;
+}
diff --git a/test/openat2.c b/test/openat2.c
new file mode 100644
index 0000000..65f81b1
--- /dev/null
+++ b/test/openat2.c
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various openat2(2) tests
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+static int test_openat2(struct io_uring *ring, const char *path, int dfd)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct open_how how;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	memset(&how, 0, sizeof(how));
+	how.flags = O_RDONLY;
+	io_uring_prep_openat2(sqe, dfd, path, &how);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+err:
+	return -1;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	const char *path, *path_rel;
+	int ret, do_unlink;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	if (argc > 1) {
+		path = "/tmp/.open.close";
+		path_rel = argv[1];
+		do_unlink = 0;
+	} else {
+		path = "/tmp/.open.close";
+		path_rel = ".open.close";
+		do_unlink = 1;
+	}
+
+	t_create_file(path, 4096);
+
+	if (do_unlink)
+		t_create_file(path_rel, 4096);
+
+	ret = test_openat2(&ring, path, -1);
+	if (ret < 0) {
+		if (ret == -EINVAL) {
+			fprintf(stdout, "openat2 not supported, skipping\n");
+			goto done;
+		}
+		fprintf(stderr, "test_openat2 absolute failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = test_openat2(&ring, path_rel, AT_FDCWD);
+	if (ret < 0) {
+		fprintf(stderr, "test_openat2 relative failed: %d\n", ret);
+		goto err;
+	}
+
+done:
+	unlink(path);
+	if (do_unlink)
+		unlink(path_rel);
+	return 0;
+err:
+	unlink(path);
+	if (do_unlink)
+		unlink(path_rel);
+	return 1;
+}
diff --git a/test/personality.c b/test/personality.c
new file mode 100644
index 0000000..591ec83
--- /dev/null
+++ b/test/personality.c
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test if personalities work
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+#define FNAME	"/tmp/.tmp.access"
+#define USE_UID	1000
+
+static int no_personality;
+
+static int open_file(struct io_uring *ring, int cred_id, int with_link)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret, i, to_submit = 1;
+
+	if (with_link) {
+		sqe = io_uring_get_sqe(ring);
+		io_uring_prep_nop(sqe);
+		sqe->flags |= IOSQE_IO_LINK;
+		sqe->user_data = 1;
+		to_submit++;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_openat(sqe, -1, FNAME, O_RDONLY, 0);
+	sqe->user_data = 2;
+
+	if (cred_id != -1)
+		sqe->personality = cred_id;
+
+	ret = io_uring_submit(ring);
+	if (ret != to_submit) {
+		fprintf(stderr, "submit got: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < to_submit; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "wait completion %d\n", ret);
+			goto err;
+		}
+
+		ret = cqe->res;
+		io_uring_cqe_seen(ring, cqe);
+	}
+err:
+	return ret;
+}
+
+static int test_personality(struct io_uring *ring)
+{
+	int ret, cred_id;
+
+	ret = io_uring_register_personality(ring);
+	if (ret < 0) {
+		if (ret == -EINVAL) {
+			fprintf(stdout, "Personalities not supported, skipping\n");
+			no_personality = 1;
+			goto out;
+		}
+		fprintf(stderr, "register_personality: %d\n", ret);
+		goto err;
+	}
+	cred_id = ret;
+
+	/* create file only owner can open */
+	ret = open(FNAME, O_RDONLY | O_CREAT, 0600);
+	if (ret < 0) {
+		perror("open");
+		goto err;
+	}
+	close(ret);
+
+	/* verify we can open it */
+	ret = open_file(ring, -1, 0);
+	if (ret < 0) {
+		fprintf(stderr, "current open got: %d\n", ret);
+		goto err;
+	}
+
+	if (seteuid(USE_UID) < 0) {
+		fprintf(stdout, "Can't switch to UID %u, skipping\n", USE_UID);
+		goto out;
+	}
+
+	/* verify we can't open it with current credentials */
+	ret = open_file(ring, -1, 0);
+	if (ret != -EACCES) {
+		fprintf(stderr, "open got: %d\n", ret);
+		goto err;
+	}
+
+	/* verify we can open with registered credentials */
+	ret = open_file(ring, cred_id, 0);
+	if (ret < 0) {
+		fprintf(stderr, "credential open: %d\n", ret);
+		goto err;
+	}
+	close(ret);
+
+	/* verify we can open with registered credentials and as a link */
+	ret = open_file(ring, cred_id, 1);
+	if (ret < 0) {
+		fprintf(stderr, "credential open: %d\n", ret);
+		goto err;
+	}
+
+	if (seteuid(0))
+		perror("seteuid");
+
+	ret = io_uring_unregister_personality(ring, cred_id);
+	if (ret) {
+		fprintf(stderr, "register_personality: %d\n", ret);
+		goto err;
+	}
+
+out:
+	unlink(FNAME);
+	return 0;
+err:
+	unlink(FNAME);
+	return 1;
+}
+
+static int test_invalid_personality(struct io_uring *ring)
+{
+	int ret;
+
+	ret = open_file(ring, 2, 0);
+	if (ret != -EINVAL) {
+		fprintf(stderr, "invalid personality got: %d\n", ret);
+		goto err;
+	}
+	return 0;
+err:
+	return 1;
+}
+
+static int test_invalid_unregister(struct io_uring *ring)
+{
+	int ret;
+
+	ret = io_uring_unregister_personality(ring, 2);
+	if (ret != -EINVAL) {
+		fprintf(stderr, "invalid personality unregister got: %d\n", ret);
+		goto err;
+	}
+	return 0;
+err:
+	return 1;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	if (geteuid()) {
+		fprintf(stderr, "Not root, skipping\n");
+		return 0;
+	}
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+	}
+
+	ret = test_personality(&ring);
+	if (ret) {
+		fprintf(stderr, "test_personality failed\n");
+		return ret;
+	}
+	if (no_personality)
+		return 0;
+
+	ret = test_invalid_personality(&ring);
+	if (ret) {
+		fprintf(stderr, "test_invalid_personality failed\n");
+		return ret;
+	}
+
+	ret = test_invalid_unregister(&ring);
+	if (ret) {
+		fprintf(stderr, "test_invalid_unregister failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/pipe-eof.c b/test/pipe-eof.c
new file mode 100644
index 0000000..4c98de9
--- /dev/null
+++ b/test/pipe-eof.c
@@ -0,0 +1,81 @@
+/*
+ * Test that reads from a closed pipe return 0, instead of waiting for
+ * more data.
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <string.h>
+#include "liburing.h"
+
+#define BUFSIZE	512
+
+struct data {
+	char *str;
+	int fds[2];
+};
+
+static void *t(void *data)
+{
+	struct data *d = data;
+	int ret;
+
+	strcpy(d->str, "This is a test string");
+	ret = write(d->fds[1], d->str, strlen(d->str));
+	close(d->fds[1]);
+	if (ret < 0)
+		perror("write");
+
+	return NULL;
+}
+
+int main(int argc, char *argv[])
+{
+	static char buf[BUFSIZE];
+	struct io_uring ring;
+	pthread_t thread;
+	struct data d;
+	int ret;
+
+	if (pipe(d.fds) < 0) {
+		perror("pipe");
+		return 1;
+	}
+	d.str = buf;
+
+	io_uring_queue_init(8, &ring, 0);
+
+	pthread_create(&thread, NULL, t, &d);
+
+	while (1) {
+		struct io_uring_sqe *sqe;
+		struct io_uring_cqe *cqe;
+
+		sqe = io_uring_get_sqe(&ring);
+		io_uring_prep_read(sqe, d.fds[0], buf, BUFSIZE, 0);
+		ret = io_uring_submit(&ring);
+		if (ret != 1) {
+			fprintf(stderr, "submit: %d\n", ret);
+			return 1;
+		}
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait: %d\n", ret);
+			return 1;
+		}
+
+		if (cqe->res < 0) {
+			fprintf(stderr, "Read error: %s\n", strerror(-cqe->res));
+			return 1;
+		}
+		if (cqe->res == 0)
+			break;
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	pthread_join(thread, NULL);
+	io_uring_queue_exit(&ring);
+	return 0;
+}
diff --git a/test/pipe-reuse.c b/test/pipe-reuse.c
new file mode 100644
index 0000000..255bc2a
--- /dev/null
+++ b/test/pipe-reuse.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Check that a split-up read is handled correctly
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <string.h>
+#include "liburing.h"
+
+#define BUFSIZE	16384
+#define BUFFERS	16
+
+int main(int argc, char *argv[])
+{
+	char buf[BUFSIZE], wbuf[BUFSIZE];
+	struct iovec iov[BUFFERS];
+	struct io_uring_params p = { };
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int ret, i, fds[2];
+	void *ptr;
+
+	if (pipe(fds) < 0) {
+		perror("pipe");
+		return 1;
+	}
+
+	ptr = buf;
+	for (i = 0; i < BUFFERS; i++) {
+		unsigned bsize = BUFSIZE / BUFFERS;
+
+		iov[i].iov_base = ptr;
+		iov[i].iov_len = bsize;
+		ptr += bsize;
+	}
+
+	ret = io_uring_queue_init_params(8, &ring, &p);
+	if (ret) {
+		fprintf(stderr, "queue_init: %d\n", ret);
+		return 1;
+	}
+	if (!(p.features & IORING_FEAT_SUBMIT_STABLE)) {
+		fprintf(stdout, "FEAT_SUBMIT_STABLE not there, skipping\n");
+		return 0;
+	}
+
+	ptr = wbuf;
+	memset(ptr, 0x11, sizeof(wbuf) / 2);
+	ptr += sizeof(wbuf) / 2;
+	memset(ptr, 0x22, sizeof(wbuf) / 2);
+
+	ret = write(fds[1], wbuf, sizeof(wbuf) / 2);
+	if (ret != sizeof(wbuf) / 2) {
+		fprintf(stderr, "Bad write\n");
+		ret = 1;
+		goto err;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_readv(sqe, fds[0], iov, BUFFERS, 0);
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	for (i = 0; i < BUFFERS; i++) {
+		iov[i].iov_base = NULL;
+		iov[i].iov_len = 1000000;
+	}
+
+	ret = write(fds[1], ptr, sizeof(wbuf) / 2);
+	if (ret != sizeof(wbuf) / 2) {
+		fprintf(stderr, "Bad write\n");
+		ret = 1;
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait: %d\n", ret);
+		return 1;
+	}
+
+	if (cqe->res < 0) {
+		fprintf(stderr, "Read error: %s\n", strerror(-cqe->res));
+		return 1;
+	} else if (cqe->res != sizeof(wbuf)) {
+		/* ignore short read, not a failure */
+		goto err;
+	}
+	io_uring_cqe_seen(&ring, cqe);
+
+	ret = memcmp(wbuf, buf, sizeof(wbuf));
+	if (ret)
+		fprintf(stderr, "Read data mismatch\n");
+
+err:
+	io_uring_queue_exit(&ring);
+	return ret;
+}
diff --git a/test/poll-cancel-ton.c b/test/poll-cancel-ton.c
new file mode 100644
index 0000000..e9d612e
--- /dev/null
+++ b/test/poll-cancel-ton.c
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test massive amounts of poll with cancel
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <sys/poll.h>
+#include <sys/wait.h>
+#include <sys/signal.h>
+
+#include "liburing.h"
+
+#define POLL_COUNT	30000
+
+static void *sqe_index[POLL_COUNT];
+
+static int reap_events(struct io_uring *ring, unsigned nr_events, int nowait)
+{
+	struct io_uring_cqe *cqe;
+	int i, ret = 0;
+
+	for (i = 0; i < nr_events; i++) {
+		if (!i && !nowait)
+			ret = io_uring_wait_cqe(ring, &cqe);
+		else
+			ret = io_uring_peek_cqe(ring, &cqe);
+		if (ret) {
+			if (ret != -EAGAIN)
+				fprintf(stderr, "cqe peek failed: %d\n", ret);
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return i ? i : ret;
+}
+
+static int del_polls(struct io_uring *ring, int fd, int nr)
+{
+	int batch, i, ret;
+	struct io_uring_sqe *sqe;
+
+	while (nr) {
+		batch = 1024;
+		if (batch > nr)
+			batch = nr;
+
+		for (i = 0; i < batch; i++) {
+			void *data;
+
+			sqe = io_uring_get_sqe(ring);
+			data = sqe_index[lrand48() % nr];
+			io_uring_prep_poll_remove(sqe, data);
+		}
+
+		ret = io_uring_submit(ring);
+		if (ret != batch) {
+			fprintf(stderr, "%s: failed submit, %d\n", __FUNCTION__, ret);
+			return 1;
+		}
+		nr -= batch;
+		ret = reap_events(ring, 2 * batch, 0);
+	}
+	return 0;
+}
+
+static int add_polls(struct io_uring *ring, int fd, int nr)
+{
+	int pending, batch, i, count, ret;
+	struct io_uring_sqe *sqe;
+
+	pending = count = 0;
+	while (nr) {
+		batch = 1024;
+		if (batch > nr)
+			batch = nr;
+
+		for (i = 0; i < batch; i++) {
+			sqe = io_uring_get_sqe(ring);
+			io_uring_prep_poll_add(sqe, fd, POLLIN);
+			sqe_index[count++] = sqe;
+			sqe->user_data = (unsigned long) sqe;
+		}
+
+		ret = io_uring_submit(ring);
+		if (ret != batch) {
+			fprintf(stderr, "%s: failed submit, %d\n", __FUNCTION__, ret);
+			return 1;
+		}
+		nr -= batch;
+		pending += batch;
+		reap_events(ring, batch, 1);
+	}
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	struct io_uring_params p = { };
+	int pipe1[2];
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	if (pipe(pipe1) != 0) {
+		perror("pipe");
+		return 1;
+	}
+
+	p.flags = IORING_SETUP_CQSIZE;
+	p.cq_entries = 16384;
+	ret = io_uring_queue_init_params(1024, &ring, &p);
+	if (ret) {
+		if (ret == -EINVAL) {
+			fprintf(stdout, "No CQSIZE, trying without\n");
+			ret = io_uring_queue_init(1024, &ring, 0);
+			if (ret) {
+				fprintf(stderr, "ring setup failed: %d\n", ret);
+				return 1;
+			}
+		}
+	}
+
+	add_polls(&ring, pipe1[0], 30000);
+#if 0
+	usleep(1000);
+#endif
+	del_polls(&ring, pipe1[0], 30000);
+
+	io_uring_queue_exit(&ring);
+	return 0;
+}
diff --git a/test/poll-cancel.c b/test/poll-cancel.c
new file mode 100644
index 0000000..a74e915
--- /dev/null
+++ b/test/poll-cancel.c
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test io_uring poll cancel handling
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <sys/poll.h>
+#include <sys/wait.h>
+#include <sys/signal.h>
+
+#include "liburing.h"
+
+struct poll_data {
+	unsigned is_poll;
+	unsigned is_cancel;
+};
+
+static void sig_alrm(int sig)
+{
+	fprintf(stderr, "Timed out!\n");
+	exit(1);
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int pipe1[2];
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct poll_data *pd, pds[2];
+	struct sigaction act;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	if (pipe(pipe1) != 0) {
+		perror("pipe");
+		return 1;
+	}
+
+	ret = io_uring_queue_init(2, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+	}
+
+	memset(&act, 0, sizeof(act));
+	act.sa_handler = sig_alrm;
+	act.sa_flags = SA_RESTART;
+	sigaction(SIGALRM, &act, NULL);
+	alarm(1);
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		return 1;
+	}
+
+	io_uring_prep_poll_add(sqe, pipe1[0], POLLIN);
+
+	pds[0].is_poll = 1;
+	pds[0].is_cancel = 0;
+	io_uring_sqe_set_data(sqe, &pds[0]);
+
+	ret = io_uring_submit(&ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed\n");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		return 1;
+	}
+
+	pds[1].is_poll = 0;
+	pds[1].is_cancel = 1;
+	io_uring_prep_poll_remove(sqe, &pds[0]);
+	io_uring_sqe_set_data(sqe, &pds[1]);
+
+	ret = io_uring_submit(&ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait cqe failed: %d\n", ret);
+		return 1;
+	}
+
+	pd = io_uring_cqe_get_data(cqe);
+	if (pd->is_poll && cqe->res != -ECANCELED) {
+		fprintf(stderr ,"sqe (add=%d/remove=%d) failed with %ld\n",
+					pd->is_poll, pd->is_cancel,
+					(long) cqe->res);
+		return 1;
+	} else if (pd->is_cancel && cqe->res) {
+		fprintf(stderr, "sqe (add=%d/remove=%d) failed with %ld\n",
+					pd->is_poll, pd->is_cancel,
+					(long) cqe->res);
+		return 1;
+	}
+	io_uring_cqe_seen(&ring, cqe);
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait_cqe: %d\n", ret);
+		return 1;
+	}
+
+	pd = io_uring_cqe_get_data(cqe);
+	if (pd->is_poll && cqe->res != -ECANCELED) {
+		fprintf(stderr, "sqe (add=%d/remove=%d) failed with %ld\n",
+					pd->is_poll, pd->is_cancel,
+					(long) cqe->res);
+		return 1;
+	} else if (pd->is_cancel && cqe->res) {
+		fprintf(stderr, "sqe (add=%d/remove=%d) failed with %ld\n",
+					pd->is_poll, pd->is_cancel,
+					(long) cqe->res);
+		return 1;
+	}
+
+	io_uring_cqe_seen(&ring, cqe);
+	return 0;
+}
diff --git a/test/poll-link.c b/test/poll-link.c
new file mode 100644
index 0000000..4b4f9aa
--- /dev/null
+++ b/test/poll-link.c
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: MIT */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <pthread.h>
+#include <sys/socket.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+#include <poll.h>
+
+#include "liburing.h"
+
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+
+static int recv_thread_ready = 0;
+static int recv_thread_done = 0;
+
+static void signal_var(int *var)
+{
+        pthread_mutex_lock(&mutex);
+        *var = 1;
+        pthread_cond_signal(&cond);
+        pthread_mutex_unlock(&mutex);
+}
+
+static void wait_for_var(int *var)
+{
+        pthread_mutex_lock(&mutex);
+
+        while (!*var)
+                pthread_cond_wait(&cond, &mutex);
+
+        pthread_mutex_unlock(&mutex);
+}
+
+struct data {
+	unsigned expected[2];
+	unsigned is_mask[2];
+	unsigned long timeout;
+	int port;
+	int stop;
+};
+
+static void *send_thread(void *arg)
+{
+	struct data *data = arg;
+
+	wait_for_var(&recv_thread_ready);
+
+	int s0 = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+	assert(s0 != -1);
+
+	struct sockaddr_in addr;
+
+	addr.sin_family = AF_INET;
+	addr.sin_port = data->port;
+	addr.sin_addr.s_addr = 0x0100007fU;
+
+	if (connect(s0, (struct sockaddr*)&addr, sizeof(addr)) != -1)
+		wait_for_var(&recv_thread_done);
+
+	close(s0);
+	return 0;
+}
+
+void *recv_thread(void *arg)
+{
+	struct data *data = arg;
+	struct io_uring_sqe *sqe;
+	struct io_uring ring;
+	int i, ret;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	assert(ret == 0);
+
+	int s0 = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+	assert(s0 != -1);
+
+	int32_t val = 1;
+	ret = setsockopt(s0, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
+	assert(ret != -1);
+	ret = setsockopt(s0, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+	assert(ret != -1);
+
+	struct sockaddr_in addr;
+
+	addr.sin_family = AF_INET;
+	addr.sin_addr.s_addr = 0x0100007fU;
+
+	i = 0;
+	do {
+		data->port = 1025 + (rand() % 64510);
+		addr.sin_port = data->port;
+
+		if (bind(s0, (struct sockaddr*)&addr, sizeof(addr)) != -1)
+			break;
+	} while (++i < 100);
+
+	if (i >= 100) {
+		fprintf(stderr, "Can't find good port, skipped\n");
+		data->stop = 1;
+		signal_var(&recv_thread_ready);
+		goto out;
+	}
+
+	ret = listen(s0, 128);
+	assert(ret != -1);
+
+	signal_var(&recv_thread_ready);
+
+	sqe = io_uring_get_sqe(&ring);
+	assert(sqe != NULL);
+
+	io_uring_prep_poll_add(sqe, s0, POLLIN | POLLHUP | POLLERR);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(&ring);
+	assert(sqe != NULL);
+
+	struct __kernel_timespec ts;
+	ts.tv_sec = data->timeout / 1000000000;
+	ts.tv_nsec = data->timeout % 1000000000;
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(&ring);
+	assert(ret == 2);
+
+	for (i = 0; i < 2; i++) {
+		struct io_uring_cqe *cqe;
+		int idx;
+
+		if (io_uring_wait_cqe(&ring, &cqe)) {
+			fprintf(stderr, "wait cqe failed\n");
+			goto err;
+		}
+		idx = cqe->user_data - 1;
+		if (data->is_mask[idx] && !(data->expected[idx] & cqe->res)) {
+			fprintf(stderr, "cqe %" PRIu64 " got %x, wanted mask %x\n",
+					(uint64_t) cqe->user_data, cqe->res,
+					data->expected[idx]);
+			goto err;
+		} else if (!data->is_mask[idx] && cqe->res != data->expected[idx]) {
+			fprintf(stderr, "cqe %" PRIu64 " got %d, wanted %d\n",
+					(uint64_t) cqe->user_data, cqe->res,
+					data->expected[idx]);
+			goto err;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+out:
+	signal_var(&recv_thread_done);
+	close(s0);
+	io_uring_queue_exit(&ring);
+	return NULL;
+err:
+	signal_var(&recv_thread_done);
+	close(s0);
+	io_uring_queue_exit(&ring);
+	return (void *) 1;
+}
+
+static int test_poll_timeout(int do_connect, unsigned long timeout)
+{
+	pthread_t t1, t2;
+	struct data d;
+	void *tret;
+	int ret = 0;
+
+	recv_thread_ready = 0;
+	recv_thread_done = 0;
+
+	memset(&d, 0, sizeof(d));
+	d.timeout = timeout;
+	if (!do_connect) {
+		d.expected[0] = -ECANCELED;
+		d.expected[1] = -ETIME;
+	} else {
+		d.expected[0] = POLLIN;
+		d.is_mask[0] = 1;
+		d.expected[1] = -ECANCELED;
+	}
+
+	pthread_create(&t1, NULL, recv_thread, &d);
+
+	if (do_connect)
+		pthread_create(&t2, NULL, send_thread, &d);
+
+	pthread_join(t1, &tret);
+	if (tret)
+		ret++;
+
+	if (do_connect) {
+		pthread_join(t2, &tret);
+		if (tret)
+			ret++;
+	}
+
+	return ret;
+}
+
+int main(int argc, char *argv[])
+{
+	if (argc > 1)
+		return 0;
+
+	srand(getpid());
+
+	if (test_poll_timeout(0, 200000000)) {
+		fprintf(stderr, "poll timeout 0 failed\n");
+		return 1;
+	}
+
+	if (test_poll_timeout(1, 1000000000)) {
+		fprintf(stderr, "poll timeout 1 failed\n");
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/test/poll-many.c b/test/poll-many.c
new file mode 100644
index 0000000..3f8d08d
--- /dev/null
+++ b/test/poll-many.c
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test many files being polled for
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <sys/poll.h>
+#include <sys/resource.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+#define	NFILES	5000
+#define BATCH	500
+#define NLOOPS	1000
+
+#define RING_SIZE	512
+
+struct p {
+	int fd[2];
+	int triggered;
+};
+
+static struct p p[NFILES];
+
+static int arm_poll(struct io_uring *ring, int off)
+{
+	struct io_uring_sqe *sqe;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "failed getting sqe\n");
+		return 1;
+	}
+
+	io_uring_prep_poll_add(sqe, p[off].fd[0], POLLIN);
+	sqe->user_data = off;
+	return 0;
+}
+
+static int reap_polls(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	int i, ret, off;
+	char c;
+
+	for (i = 0; i < BATCH; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait cqe %d\n", ret);
+			return ret;
+		}
+		off = cqe->user_data;
+		p[off].triggered = 0;
+		ret = read(p[off].fd[0], &c, 1);
+		if (ret != 1) {
+			fprintf(stderr, "read got %d/%d\n", ret, errno);
+			break;
+		}
+		if (arm_poll(ring, off))
+			break;
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	if (i != BATCH) {
+		fprintf(stderr, "gave up at %d\n", i);
+		return 1;
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret != BATCH) {
+		fprintf(stderr, "submitted %d, %d\n", ret, BATCH);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int trigger_polls(void)
+{
+	char c = 89;
+	int i, ret;
+
+	for (i = 0; i < BATCH; i++) {
+		int off;
+
+		do {
+			off = rand() % NFILES;
+			if (!p[off].triggered)
+				break;
+		} while (1);
+
+		p[off].triggered = 1;
+		ret = write(p[off].fd[1], &c, 1);
+		if (ret != 1) {
+			fprintf(stderr, "write got %d/%d\n", ret, errno);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static int arm_polls(struct io_uring *ring)
+{
+	int ret, to_arm = NFILES, i, off;
+
+	off = 0;
+	while (to_arm) {
+		int this_arm;
+
+		this_arm = to_arm;
+		if (this_arm > RING_SIZE)
+			this_arm = RING_SIZE;
+
+		for (i = 0; i < this_arm; i++) {
+			if (arm_poll(ring, off)) {
+				fprintf(stderr, "arm failed at %d\n", off);
+				return 1;
+			}
+			off++;
+		}
+
+		ret = io_uring_submit(ring);
+		if (ret != this_arm) {
+			fprintf(stderr, "submitted %d, %d\n", ret, this_arm);
+			return 1;
+		}
+		to_arm -= this_arm;
+	}
+
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	struct io_uring_params params = { };
+	struct rlimit rlim;
+	int i, ret;
+
+	if (argc > 1)
+		return 0;
+
+	if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) {
+		perror("getrlimit");
+		goto err_noring;
+	}
+
+	if (rlim.rlim_cur < (2 * NFILES + 5)) {
+		rlim.rlim_cur = (2 * NFILES + 5);
+		rlim.rlim_max = rlim.rlim_cur;
+		if (setrlimit(RLIMIT_NOFILE, &rlim) < 0) {
+			if (errno == EPERM)
+				goto err_nofail;
+			perror("setrlimit");
+			goto err_noring;
+		}
+	}
+
+	for (i = 0; i < NFILES; i++) {
+		if (pipe(p[i].fd) < 0) {
+			perror("pipe");
+			goto err_noring;
+		}
+	}
+
+	params.flags = IORING_SETUP_CQSIZE;
+	params.cq_entries = 4096;
+	ret = io_uring_queue_init_params(RING_SIZE, &ring, &params);
+	if (ret) {
+		if (ret == -EINVAL) {
+			fprintf(stdout, "No CQSIZE, trying without\n");
+			ret = io_uring_queue_init(RING_SIZE, &ring, 0);
+			if (ret) {
+				fprintf(stderr, "ring setup failed: %d\n", ret);
+				return 1;
+			}
+		}
+	}
+
+	if (arm_polls(&ring))
+		goto err;
+
+	for (i = 0; i < NLOOPS; i++) {
+		trigger_polls();
+		ret = reap_polls(&ring);
+		if (ret)
+			goto err;
+	}
+
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+err_noring:
+	fprintf(stderr, "poll-many failed\n");
+	return 1;
+err_nofail:
+	fprintf(stderr, "poll-many: not enough files available (and not root), "
+			"skipped\n");
+	return 0;
+}
diff --git a/test/poll-mshot-update.c b/test/poll-mshot-update.c
new file mode 100644
index 0000000..1a9ea0a
--- /dev/null
+++ b/test/poll-mshot-update.c
@@ -0,0 +1,255 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test many files being polled for and updated
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <sys/poll.h>
+#include <sys/resource.h>
+#include <fcntl.h>
+#include <pthread.h>
+
+#include "liburing.h"
+
+#define	NFILES	5000
+#define BATCH	500
+#define NLOOPS	1000
+
+#define RING_SIZE	512
+
+/* one pipe pair per polled file; 'triggered' marks a pending unread byte */
+struct p {
+	int fd[2];
+	int triggered;
+};
+
+static struct p p[NFILES];
+/* set when the kernel rejects poll update requests with -EINVAL */
+static int no_update;
+
+/* Queue (without submitting) a POLLIN poll request for pipe slot 'off' */
+static int arm_poll(struct io_uring *ring, int off)
+{
+	struct io_uring_sqe *sqe;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "failed getting sqe\n");
+		return 1;
+	}
+
+	io_uring_prep_poll_add(sqe, p[off].fd[0], POLLIN);
+	/* poll flags live in sqe->len; presumably requests multishot mode
+	 * -- TODO confirm against the kernel's poll flag encoding */
+	sqe->len = 1;
+	sqe->user_data = off;
+	return 0;
+}
+
+/*
+ * Submit BATCH poll-update requests, then reap 2 * BATCH completions:
+ * one per update request plus one per poll that fired. Any CQE tagged
+ * 0x12345678 is an update completion; everything else is a triggered
+ * poll whose user_data is the pipe slot index.
+ */
+static int reap_polls(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	int i, ret, off;
+	char c;
+
+	for (i = 0; i < BATCH; i++) {
+		struct io_uring_sqe *sqe;
+
+		sqe = io_uring_get_sqe(ring);
+		/* update event */
+		io_uring_prep_poll_update(sqe, (void *)(unsigned long)i, NULL,
+					  POLLIN, 2);
+		sqe->user_data = 0x12345678;
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret != BATCH) {
+		fprintf(stderr, "submitted %d, %d\n", ret, BATCH);
+		return 1;
+	}
+
+	for (i = 0; i < 2 * BATCH; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait cqe %d\n", ret);
+			return ret;
+		}
+		off = cqe->user_data;
+		if (off == 0x12345678)
+			goto seen;
+		p[off].triggered = 0;
+		/* drain the byte that made the poll fire; pipes are
+		 * O_NONBLOCK, so a racing reap may see EAGAIN */
+		ret = read(p[off].fd[0], &c, 1);
+		if (ret != 1) {
+			if (ret == -1 && errno == EAGAIN)
+				goto seen;
+			fprintf(stderr, "read got %d/%d\n", ret, errno);
+			break;
+		}
+seen:
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	if (i != 2 * BATCH) {
+		fprintf(stderr, "gave up at %d\n", i);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Write one byte to BATCH distinct, randomly chosen pipes that are not
+ * already pending, so their armed polls fire.
+ */
+static int trigger_polls(void)
+{
+	char c = 89;
+	int i, ret;
+
+	for (i = 0; i < BATCH; i++) {
+		int off;
+
+		/* retry until we land on a slot without a pending byte */
+		do {
+			off = rand() % NFILES;
+			if (!p[off].triggered)
+				break;
+		} while (1);
+
+		p[off].triggered = 1;
+		ret = write(p[off].fd[1], &c, 1);
+		if (ret != 1) {
+			fprintf(stderr, "write got %d/%d\n", ret, errno);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/* pthread entry point wrapping trigger_polls(); result is ignored */
+static void *trigger_polls_fn(void *data)
+{
+	trigger_polls();
+	return NULL;
+}
+
+/*
+ * After a short submit, check whether the first completion failed with
+ * -EINVAL, which indicates the kernel doesn't support poll update.
+ */
+static int check_no_update(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	int ret;
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret)
+		return 0;
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return ret == -EINVAL;
+}
+
+/*
+ * Arm poll requests for all NFILES pipes, submitting in chunks of at
+ * most RING_SIZE. A short submit whose first CQE is -EINVAL means the
+ * kernel lacks poll update support; flag it via no_update and succeed.
+ */
+static int arm_polls(struct io_uring *ring)
+{
+	int ret, to_arm = NFILES, i, off;
+
+	off = 0;
+	while (to_arm) {
+		int this_arm;
+
+		this_arm = to_arm;
+		if (this_arm > RING_SIZE)
+			this_arm = RING_SIZE;
+
+		for (i = 0; i < this_arm; i++) {
+			if (arm_poll(ring, off)) {
+				fprintf(stderr, "arm failed at %d\n", off);
+				return 1;
+			}
+			off++;
+		}
+
+		ret = io_uring_submit(ring);
+		if (ret != this_arm) {
+			if (ret > 0 && check_no_update(ring)) {
+				no_update = 1;
+				return 0;
+			}
+			fprintf(stderr, "submitted %d, %d\n", ret, this_arm);
+			return 1;
+		}
+		to_arm -= this_arm;
+	}
+
+	return 0;
+}
+
+/*
+ * Arm multishot polls on NFILES pipes, then run NLOOPS iterations where a
+ * trigger thread writes to BATCH pipes while the main thread submits poll
+ * updates and reaps completions.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	struct io_uring_params params = { };
+	struct rlimit rlim;
+	pthread_t thread;
+	int i, ret;
+
+	if (argc > 1)
+		return 0;
+
+	/* each pipe pair consumes two fds, so lift RLIMIT_NOFILE if needed */
+	if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) {
+		perror("getrlimit");
+		goto err_noring;
+	}
+
+	if (rlim.rlim_cur < (2 * NFILES + 5)) {
+		rlim.rlim_cur = (2 * NFILES + 5);
+		rlim.rlim_max = rlim.rlim_cur;
+		if (setrlimit(RLIMIT_NOFILE, &rlim) < 0) {
+			if (errno == EPERM)
+				goto err_nofail;
+			perror("setrlimit");
+			goto err_noring;
+		}
+	}
+
+	for (i = 0; i < NFILES; i++) {
+		if (pipe(p[i].fd) < 0) {
+			perror("pipe");
+			goto err_noring;
+		}
+		/* non-blocking reads let reap_polls tolerate racing drains */
+		fcntl(p[i].fd[0], F_SETFL, O_NONBLOCK);
+	}
+
+	params.flags = IORING_SETUP_CQSIZE;
+	params.cq_entries = 4096;
+	ret = io_uring_queue_init_params(RING_SIZE, &ring, &params);
+	if (ret) {
+		if (ret == -EINVAL) {
+			fprintf(stdout, "No CQSIZE, trying without\n");
+			ret = io_uring_queue_init(RING_SIZE, &ring, 0);
+			if (ret) {
+				fprintf(stderr, "ring setup failed: %d\n", ret);
+				return 1;
+			}
+		} else {
+			/*
+			 * Previously fell through here on errors other than
+			 * -EINVAL and used an uninitialized ring; fail instead.
+			 */
+			fprintf(stderr, "ring setup failed: %d\n", ret);
+			return 1;
+		}
+	}
+
+	if (arm_polls(&ring))
+		goto err;
+	if (no_update) {
+		printf("No poll update support, skipping\n");
+		goto done;
+	}
+
+	for (i = 0; i < NLOOPS; i++) {
+		pthread_create(&thread, NULL, trigger_polls_fn, NULL);
+		ret = reap_polls(&ring);
+		if (ret)
+			goto err;
+		pthread_join(thread, NULL);
+	}
+
+done:
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+err_noring:
+	/* messages previously said "poll-many" (copied from poll-many.c) */
+	fprintf(stderr, "poll-mshot-update failed\n");
+	return 1;
+err_nofail:
+	fprintf(stderr, "poll-mshot-update: not enough files available "
+			"(and not root), skipped\n");
+	return 0;
+}
diff --git a/test/poll-ring.c b/test/poll-ring.c
new file mode 100644
index 0000000..1f69e20
--- /dev/null
+++ b/test/poll-ring.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: Test poll against ring itself. A buggy kernel will end up
+ * 		having io_wq_* workers pending, as the circular reference
+ * 		will prevent full exit.
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/poll.h>
+
+#include "liburing.h"
+
+int main(int argc, char *argv[])
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "child: ring setup failed: %d\n", ret);
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		return 1;
+	}
+
+	/* poll the ring's own fd, creating the circular reference described
+	 * in the file header */
+	io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
+	io_uring_sqe_set_data(sqe, sqe);
+
+	ret = io_uring_submit(&ring);
+	if (ret <= 0) {
+		fprintf(stderr, "child: sqe submit failed: %d\n", ret);
+		return 1;
+	}
+
+	/* intentionally exit with the poll still pending; a correct kernel
+	 * must tear it down at ring exit without leaving io_wq workers */
+	return 0;
+}
diff --git a/test/poll-v-poll.c b/test/poll-v-poll.c
new file mode 100644
index 0000000..c8ba6f1
--- /dev/null
+++ b/test/poll-v-poll.c
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test io_uring poll handling
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <sys/poll.h>
+#include <sys/wait.h>
+#include <sys/select.h>
+#include <pthread.h>
+#include <sys/epoll.h>
+
+#include "liburing.h"
+
+/* shared state handed to the poller threads; out[0] holds the io_uring
+ * result mask, out[1] the poll(2)/epoll result mask */
+struct thread_data {
+	struct io_uring *ring;
+	int fd;
+	int events;
+	const char *test;
+	int out[2];
+};
+
+/* Thread body: block in epoll_wait() on td->fd until one event arrives */
+static void *epoll_wait_fn(void *data)
+{
+	struct thread_data *td = data;
+	struct epoll_event ev;
+
+	if (epoll_wait(td->fd, &ev, 1, -1) < 0) {
+		perror("epoll_wait");
+		goto err;
+	}
+
+	return NULL;
+err:
+	return (void *) 1;
+}
+
+/*
+ * Thread body: arm an io_uring poll on td->fd for td->events, wait for
+ * its completion, and record the resulting event mask in td->out[0].
+ */
+static void *iou_poll(void *data)
+{
+	struct thread_data *td = data;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int ret;
+
+	sqe = io_uring_get_sqe(td->ring);
+	io_uring_prep_poll_add(sqe, td->fd, td->events);
+
+	ret = io_uring_submit(td->ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit got %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(td->ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait_cqe: %d\n", ret);
+		goto err;
+	}
+
+	/* mask to the low event bits so both pollers report comparable
+	 * masks -- presumably drops extra status bits; TODO confirm */
+	td->out[0] = cqe->res & 0x3f;
+	io_uring_cqe_seen(td->ring, cqe);
+	return NULL;
+err:
+	return (void *) 1;
+}
+
+/*
+ * Thread body: wait with classic poll(2) on td->fd for td->events and
+ * record the returned revents mask in td->out[1].
+ */
+static void *poll_pipe(void *data)
+{
+	struct thread_data *td = data;
+	struct pollfd pfd;
+	int ret;
+
+	pfd.fd = td->fd;
+	pfd.events = td->events;
+
+	ret = poll(&pfd, 1, -1);
+	if (ret < 0)
+		perror("poll");
+
+	td->out[1] = pfd.revents;
+	return NULL;
+}
+
+/*
+ * Poll the read side of a pipe with both io_uring and poll(2), make it
+ * readable with a single write, and verify both report the same mask.
+ */
+static int do_pipe_pollin_test(struct io_uring *ring)
+{
+	struct thread_data td;
+	pthread_t threads[2];
+	int ret, pipe1[2];
+	char buf;
+
+	if (pipe(pipe1) < 0) {
+		perror("pipe");
+		return 1;
+	}
+
+	td.ring = ring;
+	td.fd = pipe1[0];
+	td.events = POLLIN;
+	td.test = __FUNCTION__;
+
+	pthread_create(&threads[1], NULL, iou_poll, &td);
+	pthread_create(&threads[0], NULL, poll_pipe, &td);
+	/* give both pollers time to block before triggering the event */
+	usleep(100000);
+
+	buf = 0x89;
+	ret = write(pipe1[1], &buf, sizeof(buf));
+	if (ret != sizeof(buf)) {
+		fprintf(stderr, "write failed: %d\n", ret);
+		return 1;
+	}
+
+	pthread_join(threads[0], NULL);
+	pthread_join(threads[1], NULL);
+
+	if (td.out[0] != td.out[1]) {
+		fprintf(stderr, "%s: res %x/%x differ\n", __FUNCTION__,
+							td.out[0], td.out[1]);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Poll the write side of a fresh pipe (immediately writable) with both
+ * io_uring and poll(2) and verify both report the same mask.
+ */
+static int do_pipe_pollout_test(struct io_uring *ring)
+{
+	struct thread_data td;
+	pthread_t threads[2];
+	int ret, pipe1[2];
+	char buf;
+
+	if (pipe(pipe1) < 0) {
+		perror("pipe");
+		return 1;
+	}
+
+	td.ring = ring;
+	td.fd = pipe1[1];
+	td.events = POLLOUT;
+	td.test = __FUNCTION__;
+
+	pthread_create(&threads[0], NULL, poll_pipe, &td);
+	pthread_create(&threads[1], NULL, iou_poll, &td);
+	/* give both pollers time to block before touching the pipe */
+	usleep(100000);
+
+	buf = 0x89;
+	ret = write(pipe1[1], &buf, sizeof(buf));
+	if (ret != sizeof(buf)) {
+		fprintf(stderr, "write failed: %d\n", ret);
+		return 1;
+	}
+
+	pthread_join(threads[0], NULL);
+	pthread_join(threads[1], NULL);
+
+	if (td.out[0] != td.out[1]) {
+		fprintf(stderr, "%s: res %x/%x differ\n", __FUNCTION__,
+							td.out[0], td.out[1]);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Poll a regular file with both poll(2) and io_uring for the given event
+ * mask and verify that the reported masks agree.
+ */
+static int do_fd_test(struct io_uring *ring, const char *fname, int events)
+{
+	struct thread_data td;
+	pthread_t threads[2];
+	int fd;
+
+	fd = open(fname, O_RDONLY);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	td.ring = ring;
+	td.fd = fd;
+	td.events = events;
+	td.test = __FUNCTION__;
+
+	pthread_create(&threads[0], NULL, poll_pipe, &td);
+	pthread_create(&threads[1], NULL, iou_poll, &td);
+
+	pthread_join(threads[0], NULL);
+	pthread_join(threads[1], NULL);
+
+	/* was previously leaked on every path */
+	close(fd);
+
+	if (td.out[0] != td.out[1]) {
+		fprintf(stderr, "%s: res %x/%x differ\n", __FUNCTION__,
+							td.out[0], td.out[1]);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Issue EPOLL_CTL_ADD through io_uring (IORING_OP_EPOLL_CTL) and return
+ * the CQE result; negative values are the operation's error code.
+ */
+static int iou_epoll_ctl(struct io_uring *ring, int epfd, int fd,
+			 struct epoll_event *ev)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "Failed to get sqe\n");
+		return 1;
+	}
+
+	io_uring_prep_epoll_ctl(sqe, epfd, fd, EPOLL_CTL_ADD, ev);
+
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return 1;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait_cqe: %d\n", ret);
+		return 1;
+	}
+
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+}
+
+/*
+ * Poll an epoll fd with io_uring while epoll_wait() runs in a second
+ * thread. When iou_epoll_add is set, the EPOLL_CTL_ADD itself is issued
+ * through io_uring; a kernel without IORING_OP_EPOLL_CTL skips the test.
+ * NOTE(review): the epoll fd and pipe fds are not closed here; the
+ * process exits soon after, but this leaks if more tests are added.
+ */
+static int do_test_epoll(struct io_uring *ring, int iou_epoll_add)
+{
+	struct epoll_event ev;
+	struct thread_data td;
+	pthread_t threads[2];
+	int ret, pipe1[2];
+	char buf;
+	int fd;
+
+	fd = epoll_create1(0);
+	if (fd < 0) {
+		perror("epoll_create");
+		return 1;
+	}
+
+	if (pipe(pipe1) < 0) {
+		perror("pipe");
+		return 1;
+	}
+
+	ev.events = EPOLLIN;
+	ev.data.fd = pipe1[0];
+
+	if (!iou_epoll_add) {
+		if (epoll_ctl(fd, EPOLL_CTL_ADD, pipe1[0], &ev) < 0) {
+			perror("epoll_ctrl");
+			return 1;
+		}
+	} else {
+		ret = iou_epoll_ctl(ring, fd, pipe1[0], &ev);
+		if (ret == -EINVAL) {
+			fprintf(stdout, "epoll not supported, skipping\n");
+			return 0;
+		} else if (ret < 0) {
+			return 1;
+		}
+	}
+
+	td.ring = ring;
+	td.fd = fd;
+	td.events = POLLIN;
+	td.test = __FUNCTION__;
+
+	pthread_create(&threads[0], NULL, iou_poll, &td);
+	pthread_create(&threads[1], NULL, epoll_wait_fn, &td);
+	/* give both waiters time to block before making the pipe readable */
+	usleep(100000);
+
+	buf = 0x89;
+	ret = write(pipe1[1], &buf, sizeof(buf));
+	if (ret != sizeof(buf)) {
+		fprintf(stderr, "write failed: %d\n", ret);
+		return 1;
+	}
+
+	pthread_join(threads[0], NULL);
+	pthread_join(threads[1], NULL);
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	const char *fname;
+	int ret;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	ret = do_pipe_pollin_test(&ring);
+	if (ret) {
+		fprintf(stderr, "pipe pollin test failed\n");
+		return ret;
+	}
+
+	ret = do_pipe_pollout_test(&ring);
+	if (ret) {
+		fprintf(stderr, "pipe pollout test failed\n");
+		return ret;
+	}
+
+	ret = do_test_epoll(&ring, 0);
+	if (ret) {
+		fprintf(stderr, "epoll test 0 failed\n");
+		return ret;
+	}
+
+	ret = do_test_epoll(&ring, 1);
+	if (ret) {
+		fprintf(stderr, "epoll test 1 failed\n");
+		return ret;
+	}
+
+	/* poll a regular file: use a caller-provided path, or fall back to
+	 * our own binary which is guaranteed to exist */
+	if (argc > 1)
+		fname = argv[1];
+	else
+		fname = argv[0];
+
+	ret = do_fd_test(&ring, fname, POLLIN);
+	if (ret) {
+		fprintf(stderr, "fd test IN failed\n");
+		return ret;
+	}
+
+	ret = do_fd_test(&ring, fname, POLLOUT);
+	if (ret) {
+		fprintf(stderr, "fd test OUT failed\n");
+		return ret;
+	}
+
+	ret = do_fd_test(&ring, fname, POLLOUT | POLLIN);
+	if (ret) {
+		fprintf(stderr, "fd test IN|OUT failed\n");
+		return ret;
+	}
+
+	return 0;
+
+}
diff --git a/test/poll.c b/test/poll.c
new file mode 100644
index 0000000..f9a89d0
--- /dev/null
+++ b/test/poll.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test io_uring poll handling
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <sys/poll.h>
+#include <sys/wait.h>
+
+#include "liburing.h"
+
+static void sig_alrm(int sig)
+{
+	fprintf(stderr, "Timed out!\n");
+	exit(1);
+}
+
+/*
+ * Fork a child that arms an io_uring POLLIN poll on a pipe, with a 1s
+ * SIGALRM watchdog; the parent writes to the pipe to fire the poll. The
+ * child verifies user_data round-trips and that POLLIN is reported.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct io_uring ring;
+	int pipe1[2];
+	pid_t p;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	if (pipe(pipe1) != 0) {
+		perror("pipe");
+		return 1;
+	}
+
+	p = fork();
+	switch (p) {
+	case -1:
+		perror("fork");
+		exit(2);
+	case 0: {
+		struct sigaction act;
+
+		ret = io_uring_queue_init(1, &ring, 0);
+		if (ret) {
+			fprintf(stderr, "child: ring setup failed: %d\n", ret);
+			return 1;
+		}
+
+		/* watchdog: abort the child if the poll never fires */
+		memset(&act, 0, sizeof(act));
+		act.sa_handler = sig_alrm;
+		act.sa_flags = SA_RESTART;
+		sigaction(SIGALRM, &act, NULL);
+		alarm(1);
+
+		sqe = io_uring_get_sqe(&ring);
+		if (!sqe) {
+			fprintf(stderr, "get sqe failed\n");
+			return 1;
+		}
+
+		io_uring_prep_poll_add(sqe, pipe1[0], POLLIN);
+		io_uring_sqe_set_data(sqe, sqe);
+
+		ret = io_uring_submit(&ring);
+		if (ret <= 0) {
+			fprintf(stderr, "child: sqe submit failed: %d\n", ret);
+			return 1;
+		}
+
+		do {
+			ret = io_uring_wait_cqe(&ring, &cqe);
+			if (ret < 0) {
+				fprintf(stderr, "child: wait completion %d\n", ret);
+				break;
+			}
+			io_uring_cqe_seen(&ring, cqe);
+		} while (ret != 0);
+
+		if (ret < 0)
+			return 1;
+		/* NOTE(review): cqe is inspected after cqe_seen(); safe here
+		 * only because nothing else consumes CQ entries -- confirm */
+		if (cqe->user_data != (unsigned long) sqe) {
+			fprintf(stderr, "child: cqe doesn't match sqe\n");
+			return 1;
+		}
+		if ((cqe->res & POLLIN) != POLLIN) {
+			fprintf(stderr, "child: bad return value %ld\n",
+							(long) cqe->res);
+			return 1;
+		}
+		exit(0);
+		}
+	default:
+		/* parent: make the pipe readable, retrying on EINTR */
+		do {
+			errno = 0;
+			ret = write(pipe1[1], "foo", 3);
+		} while (ret == -1 && errno == EINTR);
+
+		if (ret != 3) {
+			fprintf(stderr, "parent: bad write return %d\n", ret);
+			return 1;
+		}
+		return 0;
+	}
+}
diff --git a/test/probe.c b/test/probe.c
new file mode 100644
index 0000000..c7fc053
--- /dev/null
+++ b/test/probe.c
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test IORING_REGISTER_PROBE
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+static int no_probe;
+
+/*
+ * Sanity check a probe result. With 'full' set the probe had room for
+ * all opcodes, so a few must-have ops are validated as supported too.
+ */
+static int verify_probe(struct io_uring_probe *p, int full)
+{
+	if (!full && p->ops_len) {
+		fprintf(stderr, "Got ops_len=%u\n", p->ops_len);
+		return 1;
+	}
+	if (!p->last_op) {
+		fprintf(stderr, "Got last_op=%u\n", p->last_op);
+		return 1;
+	}
+	if (!full)
+		return 0;
+	/* check a few ops that must be supported */
+	if (!(p->ops[IORING_OP_NOP].flags & IO_URING_OP_SUPPORTED)) {
+		fprintf(stderr, "NOP not supported!?\n");
+		return 1;
+	}
+	if (!(p->ops[IORING_OP_READV].flags & IO_URING_OP_SUPPORTED)) {
+		fprintf(stderr, "READV not supported!?\n");
+		return 1;
+	}
+	if (!(p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED)) {
+		/* message previously said READV (copy-paste error) */
+		fprintf(stderr, "WRITE not supported!?\n");
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Probe via the io_uring_get_probe_ring() helper and verify the result */
+static int test_probe_helper(struct io_uring *ring)
+{
+	struct io_uring_probe *p;
+	int ret = 0;
+
+	p = io_uring_get_probe_ring(ring);
+	if (!p) {
+		fprintf(stderr, "Failed getting probe data\n");
+		return 1;
+	}
+
+	if (verify_probe(p, 1))
+		ret = 1;
+
+	/* p was previously leaked on the success path */
+	free(p);
+	return ret;
+}
+
+/*
+ * Exercise IORING_REGISTER_PROBE directly: first with room for zero ops
+ * (header only), then with room for all 256 opcodes. -EINVAL on the
+ * first call means probe is unsupported; no_probe is set and we skip.
+ */
+static int test_probe(struct io_uring *ring)
+{
+	struct io_uring_probe *p;
+	size_t len;
+	int ret;
+
+	len = sizeof(*p) + 256 * sizeof(struct io_uring_probe_op);
+	p = t_calloc(1, len);
+	ret = io_uring_register_probe(ring, p, 0);
+	if (ret == -EINVAL) {
+		fprintf(stdout, "Probe not supported, skipping\n");
+		no_probe = 1;
+		goto out;
+	} else if (ret) {
+		fprintf(stdout, "Probe returned %d\n", ret);
+		goto err;
+	}
+
+	if (verify_probe(p, 0))
+		goto err;
+
+	/* now grab for all entries */
+	memset(p, 0, len);
+	ret = io_uring_register_probe(ring, p, 256);
+	if (ret == -EINVAL) {
+		/* NOTE(review): prints "skipping" but goes to err and
+		 * returns failure -- should this be goto out? confirm */
+		fprintf(stdout, "Probe not supported, skipping\n");
+		goto err;
+	} else if (ret) {
+		fprintf(stdout, "Probe returned %d\n", ret);
+		goto err;
+	}
+
+	if (verify_probe(p, 1))
+		goto err;
+
+out:
+	free(p);
+	return 0;
+err:
+	free(p);
+	return 1;
+}
+
+/* Run the direct-probe test, then the helper-based probe test */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	ret = test_probe(&ring);
+	if (ret) {
+		fprintf(stderr, "test_probe failed\n");
+		return ret;
+	}
+	if (no_probe)
+		return 0;
+
+	ret = test_probe_helper(&ring);
+	if (ret) {
+		/* message previously said test_probe (copy-paste error) */
+		fprintf(stderr, "test_probe_helper failed\n");
+		return ret;
+	}
+
+	io_uring_queue_exit(&ring);
+	return 0;
+}
diff --git a/test/read-write.c b/test/read-write.c
new file mode 100644
index 0000000..d0a77fa
--- /dev/null
+++ b/test/read-write.c
@@ -0,0 +1,885 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: basic read/write tests with buffered, O_DIRECT, and SQPOLL
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+#include <sys/resource.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define FILE_SIZE	(128 * 1024)
+#define BS		4096
+#define BUFFERS		(FILE_SIZE / BS)
+
+static struct iovec *vecs;
+static int no_read;
+static int no_buf_select;
+static int warned;
+
+/*
+ * Rebuild vecs[] with deliberately misaligned bases and odd lengths to
+ * exercise the non-aligned buffered IO paths. Each 3*BS allocation is
+ * offset and truncated by random amounts within one block.
+ */
+static int create_nonaligned_buffers(void)
+{
+	int i;
+
+	vecs = t_malloc(BUFFERS * sizeof(struct iovec));
+	for (i = 0; i < BUFFERS; i++) {
+		char *p = t_malloc(3 * BS);
+
+		if (!p)
+			return 1;
+		vecs[i].iov_base = p + (rand() % BS);
+		vecs[i].iov_len = 1 + (rand() % BS);
+	}
+
+	return 0;
+}
+
+/*
+ * Core read/write exerciser. Issues BUFFERS requests against 'file' on
+ * the given ring, parameterized by direction (write), buffered vs
+ * O_DIRECT, SQPOLL with registered files (sqthread), fixed buffers
+ * (alternating per request when set), non-vectored ops (nonvec),
+ * provided-buffer selection (buf_select = group id), sequential vs
+ * random offsets (seq), and the expected per-op result (exp_len, or -1
+ * to expect each iovec's own length).
+ */
+static int __test_io(const char *file, struct io_uring *ring, int write,
+		     int buffered, int sqthread, int fixed, int nonvec,
+		     int buf_select, int seq, int exp_len)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int open_flags;
+	int i, fd, ret;
+	off_t offset;
+
+#ifdef VERBOSE
+	fprintf(stdout, "%s: start %d/%d/%d/%d/%d: ", __FUNCTION__, write,
+							buffered, sqthread,
+							fixed, nonvec);
+#endif
+	if (sqthread && geteuid()) {
+#ifdef VERBOSE
+		fprintf(stdout, "SKIPPED (not root)\n");
+#endif
+		return 0;
+	}
+
+	if (write)
+		open_flags = O_WRONLY;
+	else
+		open_flags = O_RDONLY;
+	if (!buffered)
+		open_flags |= O_DIRECT;
+
+	fd = open(file, open_flags);
+	if (fd < 0) {
+		perror("file open");
+		goto err;
+	}
+
+	if (fixed) {
+		ret = io_uring_register_buffers(ring, vecs, BUFFERS);
+		if (ret) {
+			fprintf(stderr, "buffer reg failed: %d\n", ret);
+			goto err;
+		}
+	}
+	if (sqthread) {
+		ret = io_uring_register_files(ring, &fd, 1);
+		if (ret) {
+			fprintf(stderr, "file reg failed: %d\n", ret);
+			goto err;
+		}
+	}
+
+	offset = 0;
+	for (i = 0; i < BUFFERS; i++) {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			fprintf(stderr, "sqe get failed\n");
+			goto err;
+		}
+		if (!seq)
+			offset = BS * (rand() % BUFFERS);
+		if (write) {
+			int do_fixed = fixed;
+			int use_fd = fd;
+
+			/* registered file index 0 when using SQPOLL */
+			if (sqthread)
+				use_fd = 0;
+			/* alternate fixed/non-fixed to cover both paths */
+			if (fixed && (i & 1))
+				do_fixed = 0;
+			if (do_fixed) {
+				io_uring_prep_write_fixed(sqe, use_fd, vecs[i].iov_base,
+								vecs[i].iov_len,
+								offset, i);
+			} else if (nonvec) {
+				io_uring_prep_write(sqe, use_fd, vecs[i].iov_base,
+							vecs[i].iov_len, offset);
+			} else {
+				io_uring_prep_writev(sqe, use_fd, &vecs[i], 1,
+								offset);
+			}
+		} else {
+			int do_fixed = fixed;
+			int use_fd = fd;
+
+			if (sqthread)
+				use_fd = 0;
+			if (fixed && (i & 1))
+				do_fixed = 0;
+			if (do_fixed) {
+				io_uring_prep_read_fixed(sqe, use_fd, vecs[i].iov_base,
+								vecs[i].iov_len,
+								offset, i);
+			} else if (nonvec) {
+				io_uring_prep_read(sqe, use_fd, vecs[i].iov_base,
+							vecs[i].iov_len, offset);
+			} else {
+				io_uring_prep_readv(sqe, use_fd, &vecs[i], 1,
+								offset);
+			}
+
+		}
+		sqe->user_data = i;
+		if (sqthread)
+			sqe->flags |= IOSQE_FIXED_FILE;
+		if (buf_select) {
+			/* kernel picks the buffer, so no address for nonvec */
+			if (nonvec)
+				sqe->addr = 0;
+			sqe->flags |= IOSQE_BUFFER_SELECT;
+			sqe->buf_group = buf_select;
+		}
+		if (seq)
+			offset += BS;
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret != BUFFERS) {
+		fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
+		goto err;
+	}
+
+	for (i = 0; i < BUFFERS; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			goto err;
+		}
+		if (cqe->res == -EINVAL && nonvec) {
+			/* old kernel without OP_READ/OP_WRITE: skip once */
+			if (!warned) {
+				fprintf(stdout, "Non-vectored IO not "
+					"supported, skipping\n");
+				warned = 1;
+				no_read = 1;
+			}
+		} else if (exp_len == -1) {
+			int iov_len = vecs[cqe->user_data].iov_len;
+
+			if (cqe->res != iov_len) {
+				fprintf(stderr, "cqe res %d, wanted %d\n",
+					cqe->res, iov_len);
+				goto err;
+			}
+		} else if (cqe->res != exp_len) {
+			fprintf(stderr, "cqe res %d, wanted %d\n", cqe->res, exp_len);
+			goto err;
+		}
+		if (buf_select && exp_len == BS) {
+			/* selected buffer id is in the CQE flags upper bits */
+			int bid = cqe->flags >> 16;
+			unsigned char *ptr = vecs[bid].iov_base;
+			int j;
+
+			/* writes stamped each block with its index */
+			for (j = 0; j < BS; j++) {
+				if (ptr[j] == cqe->user_data)
+					continue;
+
+				fprintf(stderr, "Data mismatch! bid=%d, "
+						"wanted=%d, got=%d\n", bid,
+						(int)cqe->user_data, ptr[j]);
+				return 1;
+			}
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	if (fixed) {
+		ret = io_uring_unregister_buffers(ring);
+		if (ret) {
+			fprintf(stderr, "buffer unreg failed: %d\n", ret);
+			goto err;
+		}
+	}
+	if (sqthread) {
+		ret = io_uring_unregister_files(ring);
+		if (ret) {
+			fprintf(stderr, "file unreg failed: %d\n", ret);
+			goto err;
+		}
+	}
+
+	close(fd);
+#ifdef VERBOSE
+	fprintf(stdout, "PASS\n");
+#endif
+	return 0;
+err:
+#ifdef VERBOSE
+	fprintf(stderr, "FAILED\n");
+#endif
+	if (fd != -1)
+		close(fd);
+	return 1;
+}
+/*
+ * Wrapper around __test_io() that creates its own ring (SQPOLL when
+ * sqthread is set, which requires root) and tears it down afterwards.
+ */
+static int test_io(const char *file, int write, int buffered, int sqthread,
+		   int fixed, int nonvec, int exp_len)
+{
+	struct io_uring ring;
+	int ret, ring_flags;
+
+	if (sqthread) {
+		if (geteuid()) {
+			if (!warned) {
+				fprintf(stderr, "SQPOLL requires root, skipping\n");
+				warned = 1;
+			}
+			return 0;
+		}
+		ring_flags = IORING_SETUP_SQPOLL;
+	} else {
+		ring_flags = 0;
+	}
+
+	ret = io_uring_queue_init(64, &ring, ring_flags);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		return 1;
+	}
+
+	ret = __test_io(file, &ring, write, buffered, sqthread, fixed, nonvec,
+			0, 0, exp_len);
+
+	io_uring_queue_exit(&ring);
+	return ret;
+}
+
+/*
+ * Submit a 3-deep link: writev -> poll_add -> link_timeout, and reap all
+ * three completions. Exercises linked request handling; individual CQE
+ * results are intentionally not validated.
+ * NOTE(review): fd, the pipe and the ring are not released here -- fine
+ * for a short-lived test process, but a leak if reused.
+ */
+static int read_poll_link(const char *file)
+{
+	struct __kernel_timespec ts;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int i, fd, ret, fds[2];
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret)
+		return ret;
+
+	fd = open(file, O_WRONLY);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	if (pipe(fds)) {
+		perror("pipe");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_writev(sqe, fd, &vecs[0], 1, 0);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
+	sqe->flags |= IOSQE_IO_LINK;
+	sqe->user_data = 2;
+
+	ts.tv_sec = 1;
+	ts.tv_nsec = 0;
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_link_timeout(sqe, &ts, 0);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 3) {
+		fprintf(stderr, "submitted %d\n", ret);
+		return 1;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			return 1;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	return 0;
+}
+
+/*
+ * Probe whether the kernel supports non-vectored IORING_OP_READ.
+ * Returns 1 if supported, 0 otherwise (including when probing itself
+ * is unavailable).
+ */
+static int has_nonvec_read(void)
+{
+	struct io_uring_probe *p;
+	struct io_uring ring;
+	int ret;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "queue init failed: %d\n", ret);
+		exit(ret);
+	}
+
+	p = t_calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
+	ret = io_uring_register_probe(&ring, p, 256);
+	/* if we don't have PROBE_REGISTER, we don't have OP_READ/WRITE */
+	if (ret == -EINVAL) {
+out:
+		/* p was previously leaked on every path */
+		free(p);
+		io_uring_queue_exit(&ring);
+		return 0;
+	} else if (ret) {
+		fprintf(stderr, "register_probe: %d\n", ret);
+		goto out;
+	}
+
+	if (p->ops_len <= IORING_OP_READ)
+		goto out;
+	if (!(p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED))
+		goto out;
+	free(p);
+	io_uring_queue_exit(&ring);
+	return 1;
+}
+
+/*
+ * Issue a non-vectored read against an eventfd and verify it returns a
+ * full eventfd_t. Skipped entirely when OP_READ is unavailable, and
+ * tolerated with a message when eventfd IO specifically is rejected.
+ */
+static int test_eventfd_read(void)
+{
+	struct io_uring ring;
+	int fd, ret;
+	eventfd_t event;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+
+	if (no_read)
+		return 0;
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret)
+		return ret;
+
+	/* initial count 1 so the fd is immediately readable */
+	fd = eventfd(1, 0);
+	if (fd < 0) {
+		perror("eventfd");
+		return 1;
+	}
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_read(sqe, fd, &event, sizeof(eventfd_t), 0);
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submitted %d\n", ret);
+		return 1;
+	}
+	eventfd_write(fd, 1);
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait_cqe=%d\n", ret);
+		return 1;
+	}
+	if (cqe->res == -EINVAL) {
+		fprintf(stdout, "eventfd IO not supported, skipping\n");
+	} else if (cqe->res != sizeof(eventfd_t)) {
+		fprintf(stderr, "cqe res %d, wanted %d\n", cqe->res,
+						(int) sizeof(eventfd_t));
+		return 1;
+	}
+	io_uring_cqe_seen(&ring, cqe);
+	return 0;
+}
+
+/*
+ * Provide buffers that are only half the iovec length, then run a read
+ * pass expecting each op to complete with the short (half) length.
+ */
+static int test_buf_select_short(const char *filename, int nonvec)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int ret, i, exp_len;
+
+	if (no_buf_select)
+		return 0;
+
+	ret = io_uring_queue_init(64, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		return 1;
+	}
+
+	exp_len = 0;
+	for (i = 0; i < BUFFERS; i++) {
+		sqe = io_uring_get_sqe(&ring);
+		io_uring_prep_provide_buffers(sqe, vecs[i].iov_base,
+						vecs[i].iov_len / 2, 1, 1, i);
+		/* all vecs are BS long here, so remember the first half-len */
+		if (!exp_len)
+			exp_len = vecs[i].iov_len / 2;
+	}
+
+	ret = io_uring_submit(&ring);
+	if (ret != BUFFERS) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return -1;
+	}
+
+	for (i = 0; i < BUFFERS; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		/* was previously dereferencing cqe without checking ret */
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			return 1;
+		}
+		if (cqe->res < 0) {
+			fprintf(stderr, "cqe->res=%d\n", cqe->res);
+			return 1;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	ret = __test_io(filename, &ring, 0, 0, 0, 0, nonvec, 1, 1, exp_len);
+
+	io_uring_queue_exit(&ring);
+	return ret;
+}
+
+/*
+ * Register every vecs[] entry as a provided buffer in group 'bgid'
+ * (buffer id = index) and reap all the provide completions.
+ */
+static int provide_buffers_iovec(struct io_uring *ring, int bgid)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int i, ret;
+
+	for (i = 0; i < BUFFERS; i++) {
+		sqe = io_uring_get_sqe(ring);
+		io_uring_prep_provide_buffers(sqe, vecs[i].iov_base,
+						vecs[i].iov_len, 1, bgid, i);
+	}
+
+	ret = io_uring_submit(ring);
+	if (ret != BUFFERS) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return -1;
+	}
+
+	for (i = 0; i < BUFFERS; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			return 1;
+		}
+		if (cqe->res < 0) {
+			fprintf(stderr, "cqe->res=%d\n", cqe->res);
+			return 1;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+}
+
+/*
+ * End-to-end provided-buffer test: write a known per-block pattern,
+ * scribble over the buffers, provide them to group 1, then read back
+ * with IOSQE_BUFFER_SELECT and let __test_io verify the pattern.
+ */
+static int test_buf_select(const char *filename, int nonvec)
+{
+	struct io_uring_probe *p;
+	struct io_uring ring;
+	int ret, i;
+
+	ret = io_uring_queue_init(64, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		return 1;
+	}
+
+	p = io_uring_get_probe_ring(&ring);
+	if (!p || !io_uring_opcode_supported(p, IORING_OP_PROVIDE_BUFFERS)) {
+		no_buf_select = 1;
+		fprintf(stdout, "Buffer select not supported, skipping\n");
+		/* p and the ring were previously leaked on this path;
+		 * free(NULL) is a no-op */
+		free(p);
+		io_uring_queue_exit(&ring);
+		return 0;
+	}
+	free(p);
+
+	/*
+	 * Write out data with known pattern
+	 */
+	for (i = 0; i < BUFFERS; i++)
+		memset(vecs[i].iov_base, i, vecs[i].iov_len);
+
+	ret = __test_io(filename, &ring, 1, 0, 0, 0, 0, 0, 1, BS);
+	if (ret) {
+		fprintf(stderr, "failed writing data\n");
+		return 1;
+	}
+
+	for (i = 0; i < BUFFERS; i++)
+		memset(vecs[i].iov_base, 0x55, vecs[i].iov_len);
+
+	ret = provide_buffers_iovec(&ring, 1);
+	if (ret)
+		return ret;
+
+	ret = __test_io(filename, &ring, 0, 0, 0, 0, nonvec, 1, 1, BS);
+	io_uring_queue_exit(&ring);
+	return ret;
+}
+
+/*
+ * Provide BUFFERS buffers, then remove them in chunks of 'batch' via
+ * IORING_OP_REMOVE_BUFFERS, applying 'sqe_flags' (e.g. IOSQE_IO_LINK)
+ * to each removal. Each CQE's res must equal the count requested, which
+ * was stashed in user_data.
+ */
+static int test_rem_buf(int batch, int sqe_flags)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int left, ret, nr = 0;
+	int bgid = 1;
+
+	if (no_buf_select)
+		return 0;
+
+	ret = io_uring_queue_init(64, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		return 1;
+	}
+
+	ret = provide_buffers_iovec(&ring, bgid);
+	if (ret)
+		return ret;
+
+	left = BUFFERS;
+	while (left) {
+		int to_rem = (left < batch) ? left : batch;
+
+		left -= to_rem;
+		sqe = io_uring_get_sqe(&ring);
+		io_uring_prep_remove_buffers(sqe, to_rem, bgid);
+		sqe->user_data = to_rem;
+		sqe->flags |= sqe_flags;
+		++nr;
+	}
+
+	ret = io_uring_submit(&ring);
+	if (ret != nr) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return -1;
+	}
+
+	for (; nr > 0; nr--) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			return 1;
+		}
+		if (cqe->res != cqe->user_data) {
+			fprintf(stderr, "cqe->res=%d\n", cqe->res);
+			return 1;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	io_uring_queue_exit(&ring);
+	return ret;
+}
+
+/*
+ * Submit 100 chains of 100 linked async writes each (10000 SQEs total)
+ * and reap all completions. A short submit with a first CQE of -EINVAL
+ * means IOSQE_ASYNC is unsupported and the test is skipped.
+ */
+static int test_io_link(const char *file)
+{
+	const int nr_links = 100;
+	const int link_len = 100;
+	const int nr_sqes = nr_links * link_len;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int i, j, fd, ret;
+
+	fd = open(file, O_WRONLY);
+	if (fd < 0) {
+		perror("file open");
+		goto err;
+	}
+
+	ret = io_uring_queue_init(nr_sqes, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < nr_links; ++i) {
+		for (j = 0; j < link_len; ++j) {
+			sqe = io_uring_get_sqe(&ring);
+			if (!sqe) {
+				fprintf(stderr, "sqe get failed\n");
+				goto err;
+			}
+			io_uring_prep_writev(sqe, fd, &vecs[0], 1, 0);
+			sqe->flags |= IOSQE_ASYNC;
+			/* last SQE of each chain terminates the link */
+			if (j != link_len - 1)
+				sqe->flags |= IOSQE_IO_LINK;
+		}
+	}
+
+	ret = io_uring_submit(&ring);
+	if (ret != nr_sqes) {
+		ret = io_uring_peek_cqe(&ring, &cqe);
+		if (!ret && cqe->res == -EINVAL) {
+			fprintf(stdout, "IOSQE_ASYNC not supported, skipped\n");
+			goto out;
+		}
+		fprintf(stderr, "submit got %d, wanted %d\n", ret, nr_sqes);
+		goto err;
+	}
+
+	for (i = 0; i < nr_sqes; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			goto err;
+		}
+		if (cqe->res == -EINVAL) {
+			/* old kernel without OP_READ/OP_WRITE: note once */
+			if (!warned) {
+				fprintf(stdout, "Non-vectored IO not "
+					"supported, skipping\n");
+				warned = 1;
+				no_read = 1;
+			}
+		} else if (cqe->res != BS) {
+			fprintf(stderr, "cqe res %d, wanted %d\n", cqe->res, BS);
+			goto err;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+out:
+	io_uring_queue_exit(&ring);
+	close(fd);
+	return 0;
+err:
+	if (fd != -1)
+		close(fd);
+	return 1;
+}
+
+static int test_write_efbig(void)	/* writes past a lowered RLIMIT_FSIZE must complete with -EFBIG */
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	struct rlimit rlim, old_rlim;
+	int i, fd, ret;
+	loff_t off;
+
+	if (geteuid()) {	/* raising the limit back requires privilege, so root only */
+		fprintf(stdout, "Not root, skipping %s\n", __FUNCTION__);
+		return 0;
+	}
+
+	if (getrlimit(RLIMIT_FSIZE, &old_rlim) < 0) {
+		perror("getrlimit");
+		return 1;
+	}
+	rlim = old_rlim;
+	rlim.rlim_cur = 64 * 1024;	/* cap file size at 64KB */
+	rlim.rlim_max = 64 * 1024;
+	if (setrlimit(RLIMIT_FSIZE, &rlim) < 0) {
+		perror("setrlimit");
+		return 1;
+	}
+
+	fd = open(".efbig", O_WRONLY | O_CREAT, 0644);
+	if (fd < 0) {
+		perror("file open");
+		goto err;
+	}
+
+	ret = io_uring_queue_init(32, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring create failed: %d\n", ret);
+		goto err;
+	}
+
+	off = 0;
+	for (i = 0; i < 32; i++) {	/* queue 32 sequential BS-sized writes */
+		sqe = io_uring_get_sqe(&ring);
+		if (!sqe) {
+			fprintf(stderr, "sqe get failed\n");
+			goto err;
+		}
+		io_uring_prep_writev(sqe, fd, &vecs[i], 1, off);
+		off += BS;
+	}
+
+	ret = io_uring_submit(&ring);
+	if (ret != 32) {
+		fprintf(stderr, "submit got %d, wanted %d\n", ret, 32);
+		goto err;
+	}
+
+	for (i = 0; i < 32; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			goto err;
+		}
+		if (i < 16) {	/* first 16 writes fit under the 64KB cap — assumes BS == 4KB and in-order completion; TODO confirm */
+			if (cqe->res != BS) {
+				fprintf(stderr, "bad write: %d\n", cqe->res);
+				goto err;
+			}
+		} else {
+			if (cqe->res != -EFBIG) {
+				fprintf(stderr, "Expected -EFBIG: %d\n", cqe->res);
+				goto err;
+			}
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	io_uring_queue_exit(&ring);
+	close(fd);
+	unlink(".efbig");
+
+	if (setrlimit(RLIMIT_FSIZE, &old_rlim) < 0) {	/* restore original limit */
+		perror("setrlimit");
+		return 1;
+	}
+	return 0;
+err:	/* NOTE(review): error path leaves RLIMIT_FSIZE lowered and the ring alive */
+	if (fd != -1)
+		close(fd);
+	unlink(".efbig");
+	return 1;
+}
+
+int main(int argc, char *argv[])
+{
+	int i, ret, nr;
+	char *fname;
+
+	if (argc > 1) {
+		fname = argv[1];
+	} else {
+		fname = ".basic-rw";
+		t_create_file(fname, FILE_SIZE);
+	}
+
+	vecs = t_create_buffers(BUFFERS, BS);
+
+	/* if we don't have nonvec read, skip testing that */
+	nr = has_nonvec_read() ? 32 : 16;
+
+	for (i = 0; i < nr; i++) {
+		int write = (i & 1) != 0;
+		int buffered = (i & 2) != 0;
+		int sqthread = (i & 4) != 0;
+		int fixed = (i & 8) != 0;
+		int nonvec = (i & 16) != 0;
+
+		ret = test_io(fname, write, buffered, sqthread, fixed, nonvec,
+			      BS);
+		if (ret) {
+			fprintf(stderr, "test_io failed %d/%d/%d/%d/%d\n",
+				write, buffered, sqthread, fixed, nonvec);
+			goto err;
+		}
+	}
+
+	ret = test_buf_select(fname, 1);
+	if (ret) {
+		fprintf(stderr, "test_buf_select nonvec failed\n");
+		goto err;
+	}
+
+	ret = test_buf_select(fname, 0);
+	if (ret) {
+		fprintf(stderr, "test_buf_select vec failed\n");
+		goto err;
+	}
+
+	ret = test_buf_select_short(fname, 1);
+	if (ret) {
+		fprintf(stderr, "test_buf_select_short nonvec failed\n");
+		goto err;
+	}
+
+	ret = test_buf_select_short(fname, 0);
+	if (ret) {
+		fprintf(stderr, "test_buf_select_short vec failed\n");
+		goto err;
+	}
+
+	ret = test_eventfd_read();
+	if (ret) {
+		fprintf(stderr, "test_eventfd_read failed\n");
+		goto err;
+	}
+
+	ret = read_poll_link(fname);
+	if (ret) {
+		fprintf(stderr, "read_poll_link failed\n");
+		goto err;
+	}
+
+	ret = test_io_link(fname);
+	if (ret) {
+		fprintf(stderr, "test_io_link failed\n");
+		goto err;
+	}
+
+	ret = test_write_efbig();
+	if (ret) {
+		fprintf(stderr, "test_write_efbig failed\n");
+		goto err;
+	}
+
+	ret = test_rem_buf(1, 0);
+	if (ret) {
+		fprintf(stderr, "test_rem_buf by 1 failed\n");
+		goto err;
+	}
+
+	ret = test_rem_buf(10, 0);
+	if (ret) {
+		fprintf(stderr, "test_rem_buf by 10 failed\n");
+		goto err;
+	}
+
+	ret = test_rem_buf(2, IOSQE_IO_LINK);
+	if (ret) {
+		fprintf(stderr, "test_rem_buf link failed\n");
+		goto err;
+	}
+
+	ret = test_rem_buf(2, IOSQE_ASYNC);
+	if (ret) {
+		fprintf(stderr, "test_rem_buf async failed\n");
+		goto err;
+	}
+
+	srand((unsigned)time(NULL));
+	if (create_nonaligned_buffers()) {
+		fprintf(stderr, "file creation failed\n");
+		goto err;
+	}
+
+	/* test fixed bufs with non-aligned len/offset */
+	for (i = 0; i < nr; i++) {
+		int write = (i & 1) != 0;
+		int buffered = (i & 2) != 0;
+		int sqthread = (i & 4) != 0;
+		int fixed = (i & 8) != 0;
+		int nonvec = (i & 16) != 0;
+
+		/* direct IO requires alignment, skip it */
+		if (!buffered || !fixed || nonvec)
+			continue;
+
+		ret = test_io(fname, write, buffered, sqthread, fixed, nonvec,
+			      -1);
+		if (ret) {
+			fprintf(stderr, "test_io failed %d/%d/%d/%d/%d\n",
+				write, buffered, sqthread, fixed, nonvec);
+			goto err;
+		}
+	}
+
+	if (fname != argv[1])
+		unlink(fname);
+	return 0;
+err:
+	if (fname != argv[1])
+		unlink(fname);
+	return 1;
+}
diff --git a/test/register-restrictions.c b/test/register-restrictions.c
new file mode 100644
index 0000000..bcae67c
--- /dev/null
+++ b/test/register-restrictions.c
@@ -0,0 +1,633 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test restrictions
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+
+#include "liburing.h"
+
+enum {
+	TEST_OK,
+	TEST_SKIPPED,
+	TEST_FAILED
+};
+
+static int test_restrictions_sqe_op(void)	/* restrict SQEs to WRITEV/WRITE; a READV must fail with -EACCES */
+{
+	struct io_uring_restriction res[2];
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int ret, pipe1[2];
+
+	uint64_t ptr;
+	struct iovec vec = {
+		.iov_base = &ptr,
+		.iov_len = sizeof(ptr)
+	};
+
+	if (pipe(pipe1) != 0) {
+		perror("pipe");
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_queue_init(8, &ring, IORING_SETUP_R_DISABLED);	/* restrictions may only be registered while disabled */
+	if (ret) {
+		if (ret == -EINVAL)	/* kernel without IORING_SETUP_R_DISABLED */
+			return TEST_SKIPPED;
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	res[0].opcode = IORING_RESTRICTION_SQE_OP;
+	res[0].sqe_op = IORING_OP_WRITEV;
+
+	res[1].opcode = IORING_RESTRICTION_SQE_OP;
+	res[1].sqe_op = IORING_OP_WRITE;
+
+	ret = io_uring_register_restrictions(&ring, res, 2);
+	if (ret) {
+		if (ret == -EINVAL)	/* restrictions not supported by this kernel */
+			return TEST_SKIPPED;
+
+		fprintf(stderr, "failed to register restrictions: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_enable_rings(&ring);
+	if (ret) {
+		fprintf(stderr, "ring enabling failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_writev(sqe, pipe1[1], &vec, 1, 0);	/* allowed opcode */
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_readv(sqe, pipe1[0], &vec, 1, 0);	/* restricted opcode */
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 2) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	for (int i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait: %d\n", ret);
+			return TEST_FAILED;
+		}
+
+		switch (cqe->user_data) {
+		case 1: /* writev */
+			if (cqe->res != sizeof(ptr)) {
+				fprintf(stderr, "write res: %d\n", cqe->res);
+				return TEST_FAILED;
+			}
+
+			break;
+		case 2: /* readv should be denied */
+			if (cqe->res != -EACCES) {
+				fprintf(stderr, "read res: %d\n", cqe->res);
+				return TEST_FAILED;
+			}
+			break;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	io_uring_queue_exit(&ring);	/* NOTE(review): pipe fds leak on all paths; tolerable in a short-lived test */
+	return TEST_OK;
+}
+
+static int test_restrictions_register_op(void)
+{
+	struct io_uring_restriction res[1];
+	struct io_uring ring;
+	int ret, pipe1[2];
+
+	uint64_t ptr;
+	struct iovec vec = {
+		.iov_base = &ptr,
+		.iov_len = sizeof(ptr)
+	};
+
+	if (pipe(pipe1) != 0) {
+		perror("pipe");
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_queue_init(8, &ring, IORING_SETUP_R_DISABLED);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	res[0].opcode = IORING_RESTRICTION_REGISTER_OP;
+	res[0].register_op = IORING_REGISTER_BUFFERS;
+
+	ret = io_uring_register_restrictions(&ring, res, 1);
+	if (ret) {
+		if (ret == -EINVAL)
+			return TEST_SKIPPED;
+
+		fprintf(stderr, "failed to register restrictions: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_enable_rings(&ring);
+	if (ret) {
+		fprintf(stderr, "ring enabling failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_register_buffers(&ring, &vec, 1);
+	if (ret) {
+		fprintf(stderr, "io_uring_register_buffers failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_register_files(&ring, pipe1, 2);
+	if (ret != -EACCES) {
+		fprintf(stderr, "io_uring_register_files ret: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	io_uring_queue_exit(&ring);
+	return TEST_OK;
+}
+
+static int test_restrictions_fixed_file(void)
+{
+	struct io_uring_restriction res[4];
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int ret, pipe1[2];
+
+	uint64_t ptr;
+	struct iovec vec = {
+		.iov_base = &ptr,
+		.iov_len = sizeof(ptr)
+	};
+
+	if (pipe(pipe1) != 0) {
+		perror("pipe");
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_queue_init(8, &ring, IORING_SETUP_R_DISABLED);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	res[0].opcode = IORING_RESTRICTION_SQE_OP;
+	res[0].sqe_op = IORING_OP_WRITEV;
+
+	res[1].opcode = IORING_RESTRICTION_SQE_OP;
+	res[1].sqe_op = IORING_OP_READV;
+
+	res[2].opcode = IORING_RESTRICTION_SQE_FLAGS_REQUIRED;
+	res[2].sqe_flags = IOSQE_FIXED_FILE;
+
+	res[3].opcode = IORING_RESTRICTION_REGISTER_OP;
+	res[3].register_op = IORING_REGISTER_FILES;
+
+	ret = io_uring_register_restrictions(&ring, res, 4);
+	if (ret) {
+		if (ret == -EINVAL)
+			return TEST_SKIPPED;
+
+		fprintf(stderr, "failed to register restrictions: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_enable_rings(&ring);
+	if (ret) {
+		fprintf(stderr, "ring enabling failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_register_files(&ring, pipe1, 2);
+	if (ret) {
+		fprintf(stderr, "io_uring_register_files ret: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_writev(sqe, 1, &vec, 1, 0);
+	io_uring_sqe_set_flags(sqe, IOSQE_FIXED_FILE);
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_readv(sqe, 0, &vec, 1, 0);
+	io_uring_sqe_set_flags(sqe, IOSQE_FIXED_FILE);
+	sqe->user_data = 2;
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_writev(sqe, pipe1[1], &vec, 1, 0);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 3) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	for (int i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait: %d\n", ret);
+			return TEST_FAILED;
+		}
+
+		switch (cqe->user_data) {
+		case 1: /* writev */
+			if (cqe->res != sizeof(ptr)) {
+				fprintf(stderr, "write res: %d\n", cqe->res);
+				return TEST_FAILED;
+			}
+
+			break;
+		case 2: /* readv */
+			if (cqe->res != sizeof(ptr)) {
+				fprintf(stderr, "read res: %d\n", cqe->res);
+				return TEST_FAILED;
+			}
+			break;
+		case 3: /* writev without fixed_file should be denied */
+			if (cqe->res != -EACCES) {
+				fprintf(stderr, "write res: %d\n", cqe->res);
+				return TEST_FAILED;
+			}
+			break;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	io_uring_queue_exit(&ring);
+	return TEST_OK;
+}
+
+static int test_restrictions_flags(void)
+{
+	struct io_uring_restriction res[3];
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int ret, pipe1[2];
+
+	uint64_t ptr;
+	struct iovec vec = {
+		.iov_base = &ptr,
+		.iov_len = sizeof(ptr)
+	};
+
+	if (pipe(pipe1) != 0) {
+		perror("pipe");
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_queue_init(8, &ring, IORING_SETUP_R_DISABLED);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	res[0].opcode = IORING_RESTRICTION_SQE_OP;
+	res[0].sqe_op = IORING_OP_WRITEV;
+
+	res[1].opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED;
+	res[1].sqe_flags = IOSQE_ASYNC | IOSQE_IO_LINK;
+
+	res[2].opcode = IORING_RESTRICTION_SQE_FLAGS_REQUIRED;
+	res[2].sqe_flags = IOSQE_FIXED_FILE;
+
+	ret = io_uring_register_restrictions(&ring, res, 3);
+	if (ret) {
+		if (ret == -EINVAL)
+			return TEST_SKIPPED;
+
+		fprintf(stderr, "failed to register restrictions: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_register_files(&ring, pipe1, 2);
+	if (ret) {
+		fprintf(stderr, "io_uring_register_files ret: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_enable_rings(&ring);
+	if (ret) {
+		fprintf(stderr, "ring enabling failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_writev(sqe, 1, &vec, 1, 0);
+	io_uring_sqe_set_flags(sqe, IOSQE_FIXED_FILE);
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_writev(sqe, 1, &vec, 1, 0);
+	io_uring_sqe_set_flags(sqe, IOSQE_FIXED_FILE | IOSQE_ASYNC);
+	sqe->user_data = 2;
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_writev(sqe, 1, &vec, 1, 0);
+	io_uring_sqe_set_flags(sqe, IOSQE_FIXED_FILE | IOSQE_IO_LINK);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 3) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_writev(sqe, 1, &vec, 1, 0);
+	io_uring_sqe_set_flags(sqe, IOSQE_FIXED_FILE | IOSQE_IO_DRAIN);
+	sqe->user_data = 4;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_writev(sqe, pipe1[1], &vec, 1, 0);
+	io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);
+	sqe->user_data = 5;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_writev(sqe, pipe1[1], &vec, 1, 0);
+	io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+	sqe->user_data = 6;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_writev(sqe, pipe1[1], &vec, 1, 0);
+	sqe->user_data = 7;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	for (int i = 0; i < 7; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait: %d\n", ret);
+			return TEST_FAILED;
+		}
+
+		switch (cqe->user_data) {
+		case 1: /* writev - flags = IOSQE_FIXED_FILE */
+		case 2: /* writev - flags = IOSQE_FIXED_FILE | IOSQE_ASYNC */
+		case 3: /* writev - flags = IOSQE_FIXED_FILE | IOSQE_IO_LINK */
+			if (cqe->res != sizeof(ptr)) {
+				fprintf(stderr, "write res: %d user_data %" PRIu64 "\n",
+					cqe->res, (uint64_t) cqe->user_data);
+				return TEST_FAILED;
+			}
+
+			break;
+		case 4: /* writev - flags = IOSQE_FIXED_FILE | IOSQE_IO_DRAIN */
+		case 5: /* writev - flags = IOSQE_IO_DRAIN */
+		case 6: /* writev - flags = IOSQE_ASYNC */
+		case 7: /* writev - flags = 0 */
+			if (cqe->res != -EACCES) {
+				fprintf(stderr, "write res: %d user_data %" PRIu64 "\n",
+					cqe->res, (uint64_t) cqe->user_data);
+				return TEST_FAILED;
+			}
+			break;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	io_uring_queue_exit(&ring);
+	return TEST_OK;
+}
+
+static int test_restrictions_empty(void)
+{
+	struct io_uring_restriction res[0];
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int ret, pipe1[2];
+
+	uint64_t ptr;
+	struct iovec vec = {
+		.iov_base = &ptr,
+		.iov_len = sizeof(ptr)
+	};
+
+	if (pipe(pipe1) != 0) {
+		perror("pipe");
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_queue_init(8, &ring, IORING_SETUP_R_DISABLED);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_register_restrictions(&ring, res, 0);
+	if (ret) {
+		if (ret == -EINVAL)
+			return TEST_SKIPPED;
+
+		fprintf(stderr, "failed to register restrictions: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_enable_rings(&ring);
+	if (ret) {
+		fprintf(stderr, "ring enabling failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_register_buffers(&ring, &vec, 1);
+	if (ret != -EACCES) {
+		fprintf(stderr, "io_uring_register_buffers ret: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_register_files(&ring, pipe1, 2);
+	if (ret != -EACCES) {
+		fprintf(stderr, "io_uring_register_files ret: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_writev(sqe, pipe1[1], &vec, 1, 0);
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	if (cqe->res != -EACCES) {
+		fprintf(stderr, "write res: %d\n", cqe->res);
+		return TEST_FAILED;
+	}
+
+	io_uring_cqe_seen(&ring, cqe);
+
+	io_uring_queue_exit(&ring);
+	return TEST_OK;
+}
+
+static int test_restrictions_rings_not_disabled(void)	/* registering restrictions on an already-enabled ring must fail */
+{
+	struct io_uring_restriction res[1];
+	struct io_uring ring;
+	int ret;
+
+	ret = io_uring_queue_init(8, &ring, 0);	/* no IORING_SETUP_R_DISABLED: ring starts enabled */
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	res[0].opcode = IORING_RESTRICTION_SQE_OP;
+	res[0].sqe_op = IORING_OP_WRITEV;
+
+	ret = io_uring_register_restrictions(&ring, res, 1);
+	if (ret != -EBADFD) {	/* kernel rejects restrictions once the ring is live */
+		fprintf(stderr, "io_uring_register_restrictions ret: %d\n",
+			ret);
+		return TEST_FAILED;
+	}
+
+	io_uring_queue_exit(&ring);
+	return TEST_OK;
+}
+
+static int test_restrictions_rings_disabled(void)	/* submitting on a still-disabled ring must fail with -EBADFD */
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring ring;
+	int ret;
+
+	ret = io_uring_queue_init(8, &ring, IORING_SETUP_R_DISABLED);	/* ring never enabled */
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_nop(sqe);
+
+	ret = io_uring_submit(&ring);
+	if (ret != -EBADFD) {	/* submit before io_uring_enable_rings() is rejected */
+		fprintf(stderr, "submit: %d\n", ret);
+		return TEST_FAILED;
+	}
+
+	io_uring_queue_exit(&ring);
+	return TEST_OK;
+}
+
+int main(int argc, char *argv[])
+{
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = test_restrictions_sqe_op();
+	if (ret == TEST_SKIPPED) {
+		printf("test_restrictions_sqe_op: skipped\n");
+		return 0;
+	} else if (ret == TEST_FAILED) {
+		fprintf(stderr, "test_restrictions_sqe_op failed\n");
+		return ret;
+	}
+
+	ret = test_restrictions_register_op();
+	if (ret == TEST_SKIPPED) {
+		printf("test_restrictions_register_op: skipped\n");
+	} else if (ret == TEST_FAILED) {
+		fprintf(stderr, "test_restrictions_register_op failed\n");
+		return ret;
+	}
+
+	ret = test_restrictions_fixed_file();
+	if (ret == TEST_SKIPPED) {
+		printf("test_restrictions_fixed_file: skipped\n");
+	} else if (ret == TEST_FAILED) {
+		fprintf(stderr, "test_restrictions_fixed_file failed\n");
+		return ret;
+	}
+
+	ret = test_restrictions_flags();
+	if (ret == TEST_SKIPPED) {
+		printf("test_restrictions_flags: skipped\n");
+	} else if (ret == TEST_FAILED) {
+		fprintf(stderr, "test_restrictions_flags failed\n");
+		return ret;
+	}
+
+	ret = test_restrictions_empty();
+	if (ret == TEST_SKIPPED) {
+		printf("test_restrictions_empty: skipped\n");
+	} else if (ret == TEST_FAILED) {
+		fprintf(stderr, "test_restrictions_empty failed\n");
+		return ret;
+	}
+
+	ret = test_restrictions_rings_not_disabled();
+	if (ret == TEST_SKIPPED) {
+		printf("test_restrictions_rings_not_disabled: skipped\n");
+	} else if (ret == TEST_FAILED) {
+		fprintf(stderr, "test_restrictions_rings_not_disabled failed\n");
+		return ret;
+	}
+
+	ret = test_restrictions_rings_disabled();
+	if (ret == TEST_SKIPPED) {
+		printf("test_restrictions_rings_disabled: skipped\n");
+	} else if (ret == TEST_FAILED) {
+		fprintf(stderr, "test_restrictions_rings_disabled failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/rename.c b/test/rename.c
new file mode 100644
index 0000000..af09d65
--- /dev/null
+++ b/test/rename.c
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run rename tests (IORING_OP_RENAMEAT)
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+static int test_rename(struct io_uring *ring, const char *old, const char *new)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+
+	memset(sqe, 0, sizeof(*sqe));
+	sqe->opcode = IORING_OP_RENAMEAT;
+	sqe->fd = AT_FDCWD;
+	sqe->addr2 = (unsigned long) new;
+	sqe->addr = (unsigned long) old;
+	sqe->len = AT_FDCWD;
+	
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+err:
+	return 1;
+}
+
+static int stat_file(const char *buf)	/* 0 if path exists (stat succeeds), otherwise the stat errno */
+{
+	struct stat sb;
+
+	if (!stat(buf, &sb))
+		return 0;
+
+	return errno;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	char src[32] = "./XXXXXX";
+	char dst[32] = "./XXXXXX";
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+	}
+
+	ret = mkstemp(src);
+	if (ret < 0) {
+		perror("mkstemp");
+		return 1;
+	}
+	close(ret);
+
+	ret = mkstemp(dst);
+	if (ret < 0) {
+		perror("mkstemp");
+		return 1;
+	}
+	close(ret);
+
+	if (stat_file(src) != 0) {
+		perror("stat");
+		return 1;
+	}
+	if (stat_file(dst) != 0) {
+		perror("stat");
+		return 1;
+	}
+
+	ret = test_rename(&ring, src, dst);
+	if (ret < 0) {
+		if (ret == -EBADF || ret == -EINVAL) {
+			fprintf(stdout, "Rename not supported, skipping\n");
+			goto out;
+		}
+		fprintf(stderr, "rename: %s\n", strerror(-ret));
+		goto err;
+	} else if (ret)
+		goto err;
+
+	if (stat_file(src) != ENOENT) {
+		fprintf(stderr, "stat got %s\n", strerror(ret));
+		return 1;
+	}
+
+	if (stat_file(dst) != 0) {
+		perror("stat");
+		return 1;
+	}
+
+	ret = test_rename(&ring, "/x/y/1/2", "/2/1/y/x");
+	if (ret != -ENOENT) {
+		fprintf(stderr, "test_rename invalid failed: %d\n", ret);
+		return ret;
+	}
+out:
+	unlink(dst);
+	return 0;
+err:
+	unlink(src);
+	unlink(dst);
+	return 1;
+}
diff --git a/test/ring-leak.c b/test/ring-leak.c
new file mode 100644
index 0000000..f8f043c
--- /dev/null
+++ b/test/ring-leak.c
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Based on description from Al Viro - this demonstrates a leak of the
+ * io_uring instance, by sending the io_uring fd over a UNIX socket.
+ *
+ * See:
+ *
+ * https://lore.kernel.org/linux-block/20190129192702.3605-1-axboe@kernel.dk/T/#m6c87fc64e4d063786af6ec6fadce3ac1e95d3184
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <signal.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <linux/fs.h>
+
+#include "liburing.h"
+#include "../src/syscall.h"
+
+static int __io_uring_register_files(int ring_fd, int fd1, int fd2)	/* raw IORING_REGISTER_FILES of two descriptors */
+{
+	__s32 fds[2] = { fd1, fd2 };
+
+	return __sys_io_uring_register(ring_fd, IORING_REGISTER_FILES, fds, 2);
+}
+
+static int get_ring_fd(void)	/* create a bare 2-entry ring via the raw setup syscall; fd or -1 */
+{
+	struct io_uring_params p;
+	int fd;
+
+	memset(&p, 0, sizeof(p));
+
+	fd = __sys_io_uring_setup(2, &p);
+	if (fd < 0) {
+		perror("io_uring_setup");
+		return -1;
+	}
+
+	return fd;
+}
+
+static void send_fd(int socket, int fd)	/* pass fd over a UNIX-domain socket via an SCM_RIGHTS cmsg */
+{
+	char buf[CMSG_SPACE(sizeof(fd))];
+	struct cmsghdr *cmsg;
+	struct msghdr msg;
+
+	memset(buf, 0, sizeof(buf));
+	memset(&msg, 0, sizeof(msg));
+
+	msg.msg_control = buf;
+	msg.msg_controllen = sizeof(buf);
+
+	cmsg = CMSG_FIRSTHDR(&msg);
+	cmsg->cmsg_level = SOL_SOCKET;
+	cmsg->cmsg_type = SCM_RIGHTS;	/* transfer the descriptor itself, not its value */
+	cmsg->cmsg_len = CMSG_LEN(sizeof(fd));
+
+	memmove(CMSG_DATA(cmsg), &fd, sizeof(fd));
+
+	msg.msg_controllen = CMSG_SPACE(sizeof(fd));
+
+	if (sendmsg(socket, &msg, 0) < 0)	/* best effort: failure only logged */
+		perror("sendmsg");
+}
+
+static int test_iowq_request_cancel(void)	/* verify requests parked in poll and io-wq are cancelled on ring exit */
+{
+	char buffer[128];
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	int ret, fds[2];
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret < 0) {
+		fprintf(stderr, "failed to init io_uring: %s\n", strerror(-ret));
+		return ret;
+	}
+	if (pipe(fds)) {
+		perror("pipe");
+		return -1;
+	}
+	ret = io_uring_register_files(&ring, fds, 2);
+	if (ret) {
+		fprintf(stderr, "file_register: %d\n", ret);
+		return ret;
+	}
+	close(fds[1]);	/* ring's registered file table still holds a reference to the write end */
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
+		return 1;
+	}
+	/* potentially sitting in internal polling */
+	io_uring_prep_read(sqe, 0, buffer, 10, 0);
+	sqe->flags |= IOSQE_FIXED_FILE;
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
+		return 1;
+	}
+	/* staying in io-wq */
+	io_uring_prep_read(sqe, 0, buffer, 10, 0);
+	sqe->flags |= IOSQE_FIXED_FILE | IOSQE_ASYNC;
+
+	ret = io_uring_submit(&ring);
+	if (ret != 2) {
+		/* message fixed: two SQEs were queued, so two are expected */
+		fprintf(stderr, "%s: got %d, wanted 2\n", __FUNCTION__, ret);
+		return 1;
+	}
+
+	/* should unregister files and close the write fd */
+	io_uring_queue_exit(&ring);
+
+	/*
+	 * We're trying to wait for the ring to "really" exit, that will be
+	 * done async. For that rely on the registered write end to be closed
+	 * after ring quiesce, so failing read from the other pipe end.
+	 */
+	ret = read(fds[0], buffer, 10);
+	if (ret < 0)
+		perror("read");
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	int sp[2], pid, ring_fd, ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = test_iowq_request_cancel();
+	if (ret) {
+		fprintf(stderr, "test_iowq_request_cancel() failed\n");
+		return 1;
+	}
+
+	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sp) != 0) {
+		perror("Failed to create Unix-domain socket pair\n");
+		return 1;
+	}
+
+	ring_fd = get_ring_fd();
+	if (ring_fd < 0)
+		return 1;
+
+	ret = __io_uring_register_files(ring_fd, sp[0], sp[1]);
+	if (ret < 0) {
+		perror("register files");
+		return 1;
+	}
+
+	pid = fork();
+	if (pid)
+		send_fd(sp[0], ring_fd);
+
+	close(ring_fd);
+	close(sp[0]);
+	close(sp[1]);
+	return 0;
+}
diff --git a/test/ring-leak2.c b/test/ring-leak2.c
new file mode 100644
index 0000000..d9bfe0f
--- /dev/null
+++ b/test/ring-leak2.c
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: Test two ring deadlock. A buggy kernel will end up
+ * 		having io_wq_* workers pending, as the circular reference
+ * 		will prevent full exit.
+ *
+ * Based on a test case from Josef <josef.grieb@gmail.com>
+ *
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <sys/poll.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <sys/eventfd.h>
+#include <pthread.h>
+
+#include "liburing.h"
+#include "../src/syscall.h"
+
+enum {
+	ACCEPT,
+	READ,
+	WRITE,
+	POLLING_IN,
+	POLLING_RDHUP,
+	CLOSE,
+	EVENTFD_READ,
+};
+
+typedef struct conn_info {
+	__u32 fd;
+	__u16 type;
+	__u16 bid;
+} conn_info;
+
+static char read_eventfd_buffer[8];
+
+static pthread_mutex_t lock;
+static struct io_uring *client_ring;
+
+static int client_eventfd = -1;
+
+static int setup_io_uring(struct io_uring *ring)	/* init an 8-entry ring; 0 on success, 1 on failure (static: file-local helper like the rest of this file) */
+{
+	struct io_uring_params p = { };
+	int ret;
+
+	ret = io_uring_queue_init_params(8, ring, &p);
+	if (ret) {
+		fprintf(stderr, "Unable to setup io_uring: %s\n",
+			strerror(-ret));
+		return 1;
+	}
+	return 0;
+}
+
+static void add_socket_eventfd_read(struct io_uring *ring, int fd)	/* queue an async 8-byte eventfd read */
+{
+	struct io_uring_sqe *sqe;
+	conn_info conn_i = {
+		.fd = fd,
+		.type = EVENTFD_READ,
+	};
+
+	sqe = io_uring_get_sqe(ring);	/* NOTE(review): NULL sqe unchecked — crashes if the SQ ring is full */
+	io_uring_prep_read(sqe, fd, &read_eventfd_buffer, 8, 0);
+	io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+
+	memcpy(&sqe->user_data, &conn_i, sizeof(conn_i));	/* pack conn_info into the 64-bit user_data */
+}
+
+static void add_socket_pollin(struct io_uring *ring, int fd)	/* queue a POLL_IN poll request on fd */
+{
+	struct io_uring_sqe *sqe;
+	conn_info conn_i = {
+		.fd = fd,
+		.type = POLLING_IN,
+	};
+
+	sqe = io_uring_get_sqe(ring);	/* NOTE(review): NULL sqe unchecked — crashes if the SQ ring is full */
+	io_uring_prep_poll_add(sqe, fd, POLL_IN);
+
+	memcpy(&sqe->user_data, &conn_i, sizeof(conn_i));	/* pack conn_info into the 64-bit user_data */
+}
+
+static void *server_thread(void *arg)
+{
+	struct sockaddr_in serv_addr;
+	int port = 0;
+	int sock_listen_fd, evfd;
+	const int val = 1;
+	struct io_uring ring;
+
+	sock_listen_fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
+	setsockopt(sock_listen_fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+
+	memset(&serv_addr, 0, sizeof(serv_addr));
+	serv_addr.sin_family = AF_INET;
+	serv_addr.sin_port = htons(port);
+	serv_addr.sin_addr.s_addr = INADDR_ANY;
+
+	evfd = eventfd(0, EFD_CLOEXEC);
+
+	// bind and listen
+	if (bind(sock_listen_fd, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) < 0) {
+		perror("Error binding socket...\n");
+		exit(1);
+	}
+	if (listen(sock_listen_fd, 1) < 0) {
+		perror("Error listening on socket...\n");
+		exit(1);
+	}
+
+	setup_io_uring(&ring);
+	add_socket_eventfd_read(&ring, evfd);
+	add_socket_pollin(&ring, sock_listen_fd);
+
+	while (1) {
+		struct io_uring_cqe *cqe;
+		unsigned head;
+		unsigned count = 0;
+
+		io_uring_submit_and_wait(&ring, 1);
+
+		io_uring_for_each_cqe(&ring, head, cqe) {
+			struct conn_info conn_i;
+
+			count++;
+			memcpy(&conn_i, &cqe->user_data, sizeof(conn_i));
+
+			if (conn_i.type == ACCEPT) {
+				int sock_conn_fd = cqe->res;
+				// only read when there is no error, >= 0
+				if (sock_conn_fd > 0) {
+					add_socket_pollin(&ring, sock_listen_fd);
+
+					pthread_mutex_lock(&lock);
+					io_uring_submit(client_ring);
+					pthread_mutex_unlock(&lock);
+
+				}
+			} else if (conn_i.type == POLLING_IN) {
+				break;
+			}
+		}
+		io_uring_cq_advance(&ring, count);
+	}
+}
+
+static void *client_thread(void *arg)	/* client ring loop: submit under 'lock', then reap CQEs */
+{
+	struct io_uring ring;
+	int ret;
+
+	setup_io_uring(&ring);
+	client_ring = &ring;	/* published so server_thread can submit on this ring too */
+
+	client_eventfd = eventfd(0, EFD_CLOEXEC);
+	pthread_mutex_lock(&lock);
+	add_socket_eventfd_read(&ring, client_eventfd);
+	pthread_mutex_unlock(&lock);
+
+	while (1) {
+		struct io_uring_cqe *cqe;
+		unsigned head;
+		unsigned count = 0;
+
+		pthread_mutex_lock(&lock);
+		io_uring_submit(&ring);
+		pthread_mutex_unlock(&lock);
+
+		ret = __sys_io_uring_enter(ring.ring_fd, 0, 1, IORING_ENTER_GETEVENTS, NULL);
+		if (ret < 0) {
+			perror("Error io_uring_enter...\n");
+			exit(1);
+		}
+
+		// go through all CQEs
+		io_uring_for_each_cqe(&ring, head, cqe) {
+			struct conn_info conn_i;
+			int type;
+
+			count++;
+			memcpy(&conn_i, &cqe->user_data, sizeof(conn_i));	/* unpack conn_info from user_data */
+
+			type = conn_i.type;
+			if (type == READ) {
+				pthread_mutex_lock(&lock);
+
+				if (cqe->res <= 0) {
+					// connection closed or error
+					shutdown(conn_i.fd, SHUT_RDWR);
+				} else {
+					break;	/* NOTE(review): exits the CQE loop with 'lock' still held, then relocks next iteration — verify */
+				}
+				add_socket_pollin(&ring, conn_i.fd);
+				pthread_mutex_unlock(&lock);
+			} else if (type == WRITE) {
+			} else if (type == POLLING_IN) {
+				break;
+			} else if (type == POLLING_RDHUP) {
+				break;
+			} else if (type == CLOSE) {
+			} else if (type == EVENTFD_READ) {
+				add_socket_eventfd_read(&ring, client_eventfd);	/* re-arm the eventfd read */
+			}
+		}
+
+		io_uring_cq_advance(&ring, count);
+	}
+}
+
+static void sig_alrm(int sig)	/* reaching the 1s alarm without deadlocking means the test passed */
+{
+	exit(0);
+}
+
+int main(int argc, char *argv[])	/* spawn server+client ring threads; SIGALRM after 1s ends the test if no deadlock */
+{
+	pthread_t server_thread_t, client_thread_t;
+	struct sigaction act;
+
+	if (argc > 1)	/* takes no file argument; skip when given one */
+		return 0;
+
+	if (pthread_mutex_init(&lock, NULL) != 0) {
+		printf("\n mutex init failed\n");
+		return 1;
+	}
+
+	pthread_create(&server_thread_t, NULL, &server_thread, NULL);
+	pthread_create(&client_thread_t, NULL, &client_thread, NULL);
+
+	memset(&act, 0, sizeof(act));
+	act.sa_handler = sig_alrm;
+	act.sa_flags = SA_RESTART;
+	sigaction(SIGALRM, &act, NULL);
+	alarm(1);	/* threads loop forever; the alarm handler exits 0 on success */
+
+	pthread_join(server_thread_t, NULL);	/* never returns normally — the alarm exits the process */
+	return 0;
+}
diff --git a/test/rsrc_tags.c b/test/rsrc_tags.c
new file mode 100644
index 0000000..2b4890b
--- /dev/null
+++ b/test/rsrc_tags.c
@@ -0,0 +1,441 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various file registration tests
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <assert.h>
+
+#include "../src/syscall.h"
+#include "helpers.h"
+#include "liburing.h"
+
+static int pipes[2];
+
+enum {
+	TEST_IORING_RSRC_FILE		= 0,
+	TEST_IORING_RSRC_BUFFER		= 1,
+};
+
+static bool check_cq_empty(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe = NULL;
+	int ret;
+
+	sleep(1); /* doesn't happen immediately, so wait */
+	ret = io_uring_peek_cqe(ring, &cqe); /* nothing should be there */
+	return ret == -EAGAIN;
+}
+
+static int register_rsrc(struct io_uring *ring, int type, int nr,
+			  const void *arg, const __u64 *tags)
+{
+	struct io_uring_rsrc_register reg;
+	int ret, reg_type;
+
+	memset(&reg, 0, sizeof(reg));
+	reg.nr = nr;
+	reg.data = (__u64)arg;
+	reg.tags = (__u64)tags;
+
+	reg_type = IORING_REGISTER_FILES2;
+	if (type != TEST_IORING_RSRC_FILE)
+		reg_type = IORING_REGISTER_BUFFERS2;
+
+	ret = __sys_io_uring_register(ring->ring_fd, reg_type,
+					&reg, sizeof(reg));
+	return ret ? -errno : 0;
+}
+
+static int update_rsrc(struct io_uring *ring, int type, int nr, int off,
+			const void *arg, const __u64 *tags)
+{
+	struct io_uring_rsrc_update2 up;
+	int ret, up_type;
+
+	memset(&up, 0, sizeof(up));
+	up.offset = off;
+	up.data = (__u64)arg;
+	up.tags = (__u64)tags;
+	up.nr = nr;
+
+	up_type = IORING_REGISTER_FILES_UPDATE2;
+	if (type != TEST_IORING_RSRC_FILE)
+		up_type = IORING_REGISTER_BUFFERS_UPDATE;
+	ret = __sys_io_uring_register(ring->ring_fd, up_type,
+				      &up, sizeof(up));
+	return ret < 0 ? -errno : ret;
+}
+
+static bool has_rsrc_update(void)
+{
+	struct io_uring ring;
+	char buf[1024];
+	struct iovec vec = {.iov_base = buf, .iov_len = sizeof(buf), };
+	int ret;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret)
+		return false;
+
+	ret = register_rsrc(&ring, TEST_IORING_RSRC_BUFFER, 1, &vec, NULL);
+	io_uring_queue_exit(&ring);
+	return ret != -EINVAL;
+}
+
+static int test_tags_generic(int nr, int type, void *rsrc, int ring_flags)
+{
+	struct io_uring_cqe *cqe = NULL;
+	struct io_uring ring;
+	int i, ret;
+	__u64 *tags;
+
+	tags = malloc(nr * sizeof(*tags));
+	if (!tags)
+		return 1;
+	for (i = 0; i < nr; i++)
+		tags[i] = i + 1;
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+	}
+
+	ret = register_rsrc(&ring, type, nr, rsrc, tags);
+	if (ret) {
+		fprintf(stderr, "rsrc register failed %i\n", ret);
+		return 1;
+	}
+
+	/* test that tags are set */
+	tags[0] = 666;
+	ret = update_rsrc(&ring, type, 1, 0, rsrc, &tags[0]);
+	assert(ret == 1);
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	assert(!ret && cqe->user_data == 1);
+	io_uring_cqe_seen(&ring, cqe);
+
+	/* test that tags are updated */
+	tags[0] = 0;
+	ret = update_rsrc(&ring, type, 1, 0, rsrc, &tags[0]);
+	assert(ret == 1);
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	assert(!ret && cqe->user_data == 666);
+	io_uring_cqe_seen(&ring, cqe);
+
+	/* test tag=0 doesn't emit CQE */
+	tags[0] = 1;
+	ret = update_rsrc(&ring, type, 1, 0, rsrc, &tags[0]);
+	assert(ret == 1);
+	assert(check_cq_empty(&ring));
+
+	free(tags);
+	io_uring_queue_exit(&ring);
+	return 0;
+}
+
+static int test_buffers_update(void)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe = NULL;
+	struct io_uring ring;
+	const int nr = 5;
+	int buf_idx = 1, i, ret;
+	int pipes[2];
+	char tmp_buf[1024];
+	char tmp_buf2[1024];
+	struct iovec vecs[nr];
+	__u64 tags[nr];
+
+	for (i = 0; i < nr; i++) {
+		vecs[i].iov_base = tmp_buf;
+		vecs[i].iov_len = 1024;
+		tags[i] = i + 1;
+	}
+
+	ret = test_tags_generic(nr, TEST_IORING_RSRC_BUFFER, vecs, 0);
+	if (ret)
+		return 1;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+	}
+	if (pipe(pipes) < 0) {
+		perror("pipe");
+		return 1;
+	}
+	ret = register_rsrc(&ring, TEST_IORING_RSRC_BUFFER, nr, vecs, tags);
+	if (ret) {
+		fprintf(stderr, "rsrc register failed %i\n", ret);
+		return 1;
+	}
+
+	/* test that a CQE is not emitted before we're done with a buffer */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_read_fixed(sqe, pipes[0], tmp_buf, 10, 0, 0);
+	sqe->user_data = 100;
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
+		return 1;
+	}
+	ret = io_uring_peek_cqe(&ring, &cqe);
+	assert(ret == -EAGAIN);
+
+	vecs[buf_idx].iov_base = tmp_buf2;
+	ret = update_rsrc(&ring, TEST_IORING_RSRC_BUFFER, 1, buf_idx,
+			  &vecs[buf_idx], &tags[buf_idx]);
+	if (ret != 1) {
+		fprintf(stderr, "rsrc update failed %i %i\n", ret, errno);
+		return 1;
+	}
+
+	ret = io_uring_peek_cqe(&ring, &cqe); /* nothing should be there */
+	assert(ret == -EAGAIN);
+	close(pipes[0]);
+	close(pipes[1]);
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	assert(!ret && cqe->user_data == 100);
+	io_uring_cqe_seen(&ring, cqe);
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	assert(!ret && cqe->user_data == buf_idx + 1);
+	io_uring_cqe_seen(&ring, cqe);
+
+	io_uring_queue_exit(&ring);
+	return 0;
+}
+
+static int test_buffers_empty_buffers(void)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe = NULL;
+	struct io_uring ring;
+	const int nr = 5;
+	int ret, i;
+	char tmp_buf[1024];
+	struct iovec vecs[nr];
+
+	for (i = 0; i < nr; i++) {
+		vecs[i].iov_base = 0;
+		vecs[i].iov_len = 0;
+	}
+	vecs[0].iov_base = tmp_buf;
+	vecs[0].iov_len = 10;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+	}
+
+	ret = register_rsrc(&ring, TEST_IORING_RSRC_BUFFER, nr, vecs, NULL);
+	if (ret) {
+		fprintf(stderr, "rsrc register failed %i\n", ret);
+		return 1;
+	}
+
+	/* empty to buffer */
+	vecs[1].iov_base = tmp_buf;
+	vecs[1].iov_len = 10;
+	ret = update_rsrc(&ring, TEST_IORING_RSRC_BUFFER, 1, 1, &vecs[1], NULL);
+	if (ret != 1) {
+		fprintf(stderr, "rsrc update failed %i %i\n", ret, errno);
+		return 1;
+	}
+
+	/* buffer to empty */
+	vecs[0].iov_base = 0;
+	vecs[0].iov_len = 0;
+	ret = update_rsrc(&ring, TEST_IORING_RSRC_BUFFER, 1, 0, &vecs[0], NULL);
+	if (ret != 1) {
+		fprintf(stderr, "rsrc update failed %i %i\n", ret, errno);
+		return 1;
+	}
+
+	/* zero to zero is ok */
+	ret = update_rsrc(&ring, TEST_IORING_RSRC_BUFFER, 1, 2, &vecs[2], NULL);
+	if (ret != 1) {
+		fprintf(stderr, "rsrc update failed %i %i\n", ret, errno);
+		return 1;
+	}
+
+	/* empty buf with non-zero len fails */
+	vecs[3].iov_base = 0;
+	vecs[3].iov_len = 1;
+	ret = update_rsrc(&ring, TEST_IORING_RSRC_BUFFER, 1, 3, &vecs[3], NULL);
+	if (ret >= 0) {
+		fprintf(stderr, "rsrc update failed %i %i\n", ret, errno);
+		return 1;
+	}
+
+	/* test that rw on an empty ubuf fails */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_read_fixed(sqe, pipes[0], tmp_buf, 10, 0, 2);
+	sqe->user_data = 100;
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
+		return 1;
+	}
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	assert(!ret && cqe->user_data == 100);
+	assert(cqe->res);
+	io_uring_cqe_seen(&ring, cqe);
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_read_fixed(sqe, pipes[0], tmp_buf, 0, 0, 2);
+	sqe->user_data = 100;
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
+		return 1;
+	}
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	assert(!ret && cqe->user_data == 100);
+	assert(cqe->res);
+	io_uring_cqe_seen(&ring, cqe);
+
+	io_uring_queue_exit(&ring);
+	return 0;
+}
+
+
+static int test_files(int ring_flags)
+{
+	struct io_uring_cqe *cqe = NULL;
+	struct io_uring ring;
+	const int nr = 50;
+	int off = 5, i, ret, fd;
+	int files[nr];
+	__u64 tags[nr], tag;
+
+	for (i = 0; i < nr; ++i) {
+		files[i] = pipes[0];
+		tags[i] = i + 1;
+	}
+
+	ret = test_tags_generic(nr, TEST_IORING_RSRC_FILE, files, ring_flags);
+	if (ret)
+		return 1;
+
+	ret = io_uring_queue_init(1, &ring, ring_flags);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+	}
+	ret = register_rsrc(&ring, TEST_IORING_RSRC_FILE, nr, files, tags);
+	if (ret) {
+		fprintf(stderr, "rsrc register failed %i\n", ret);
+		return 1;
+	}
+
+	/* check update did update tag */
+	fd = -1;
+	ret = io_uring_register_files_update(&ring, off, &fd, 1);
+	assert(ret == 1);
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	assert(!ret && cqe->user_data == tags[off]);
+	io_uring_cqe_seen(&ring, cqe);
+
+	/* remove removed file, shouldn't emit old tag */
+	ret = io_uring_register_files_update(&ring, off, &fd, 1);
+	assert(ret <= 1);
+	assert(check_cq_empty(&ring));
+
+	/* non-zero tag with remove update is disallowed */
+	tag = 1;
+	fd = -1;
+	ret = update_rsrc(&ring, TEST_IORING_RSRC_FILE, 1, off + 1, &fd, &tag);
+	assert(ret);
+
+	io_uring_queue_exit(&ring);
+	return 0;
+}
+
+static int test_notag(void)
+{
+	struct io_uring_cqe *cqe = NULL;
+	struct io_uring ring;
+	int i, ret, fd;
+	const int nr = 50;
+	int files[nr];
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		printf("ring setup failed\n");
+		return 1;
+	}
+	for (i = 0; i < nr; ++i)
+		files[i] = pipes[0];
+
+	ret = io_uring_register_files(&ring, files, nr);
+	assert(!ret);
+
+	/* default register, update shouldn't emit CQE */
+	fd = -1;
+	ret = io_uring_register_files_update(&ring, 0, &fd, 1);
+	assert(ret == 1);
+	assert(check_cq_empty(&ring));
+
+	ret = io_uring_unregister_files(&ring);
+	assert(!ret);
+	ret = io_uring_peek_cqe(&ring, &cqe); /* nothing should be there */
+	assert(ret);
+
+	io_uring_queue_exit(&ring);
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	int ring_flags[] = {0, IORING_SETUP_IOPOLL, IORING_SETUP_SQPOLL};
+	int i, ret;
+
+	if (argc > 1)
+		return 0;
+	if (!has_rsrc_update()) {
+		fprintf(stderr, "doesn't support rsrc tags, skip\n");
+		return 0;
+	}
+
+	if (pipe(pipes) < 0) {
+		perror("pipe");
+		return 1;
+	}
+
+	ret = test_notag();
+	if (ret) {
+		printf("test_notag failed\n");
+		return ret;
+	}
+
+	for (i = 0; i < sizeof(ring_flags) / sizeof(ring_flags[0]); i++) {
+		ret = test_files(ring_flags[i]);
+		if (ret) {
+			printf("test_tag failed, type %i\n", i);
+			return ret;
+		}
+	}
+
+	ret = test_buffers_update();
+	if (ret) {
+		printf("test_buffers_update failed\n");
+		return ret;
+	}
+
+	ret = test_buffers_empty_buffers();
+	if (ret) {
+		printf("test_buffers_empty_buffers failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/runtests-loop.sh b/test/runtests-loop.sh
new file mode 100755
index 0000000..4019eba
--- /dev/null
+++ b/test/runtests-loop.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+TESTS="$@"
+ITER=0
+
+while true; do
+	./runtests.sh "$TESTS"
+	RET="$?"
+	if [ "${RET}" -ne 0 ]; then
+		echo "Tests failed at loop $ITER"
+		break
+	fi
+	echo "Finished loop $ITER"
+	((ITER++))
+done
+
diff --git a/test/runtests.sh b/test/runtests.sh
new file mode 100755
index 0000000..e8f4ae5
--- /dev/null
+++ b/test/runtests.sh
@@ -0,0 +1,149 @@
+#!/bin/bash
+
+TESTS="$@"
+RET=0
+TIMEOUT=60
+DMESG_FILTER="cat"
+TEST_DIR=$(dirname $0)
+FAILED=""
+SKIPPED=""
+MAYBE_FAILED=""
+TEST_FILES=""
+declare -A TEST_MAP
+
+# Only use /dev/kmsg if running as root
+DO_KMSG="1"
+[ "$(id -u)" != "0" ] && DO_KMSG="0"
+
+# Include config.local if exists and check TEST_FILES for valid devices
+if [ -f "$TEST_DIR/config.local" ]; then
+	. $TEST_DIR/config.local
+	for dev in $TEST_FILES; do
+		if [ ! -e "$dev" ]; then
+			echo "Test file $dev not valid"
+			exit 1
+		fi
+	done
+	for dev in ${TEST_MAP[@]}; do
+		if [ ! -e "$dev" ]; then
+			echo "Test file in map $dev not valid"
+			exit 1
+		fi
+	done
+fi
+
+_check_dmesg()
+{
+	local dmesg_marker="$1"
+	local seqres="$2.seqres"
+
+	if [ $DO_KMSG -eq 0 ]; then
+		return 0
+	fi
+
+	dmesg | bash -c "$DMESG_FILTER" | grep -A 9999 "$dmesg_marker" >"${seqres}.dmesg"
+	grep -q -e "kernel BUG at" \
+	     -e "WARNING:" \
+	     -e "BUG:" \
+	     -e "Oops:" \
+	     -e "possible recursive locking detected" \
+	     -e "Internal error" \
+	     -e "INFO: suspicious RCU usage" \
+	     -e "INFO: possible circular locking dependency detected" \
+	     -e "general protection fault:" \
+	     -e "blktests failure" \
+	     "${seqres}.dmesg"
+	# shellcheck disable=SC2181
+	if [[ $? -eq 0 ]]; then
+		return 1
+	else
+		rm -f "${seqres}.dmesg"
+		return 0
+	fi
+}
+
+run_test()
+{
+	local test_name="$1"
+	local dev="$2"
+	local test_string=$test_name
+
+	# Specify test string to print
+	if [ -n "$dev" ]; then
+		test_string="$test_name $dev"
+	fi
+
+	# Log start of the test
+	if [ "$DO_KMSG" -eq 1 ]; then
+		local dmesg_marker="Running test $test_string:"
+		echo $dmesg_marker | tee /dev/kmsg
+	else
+		local dmesg_marker=""
+		echo Running test $test_name $dev
+	fi
+
+	# Do we have to exclude the test ?
+	echo $TEST_EXCLUDE | grep -w "$test_name" > /dev/null 2>&1
+	if [ $? -eq 0 ]; then
+		echo "Test skipped"
+		SKIPPED="$SKIPPED <$test_string>"
+		return
+	fi
+
+	# Run the test
+	timeout -s INT -k $TIMEOUT $TIMEOUT ./$test_name $dev
+	local status=$?
+
+	# Check test status
+	if [ "$status" -eq 124 ]; then
+		echo "Test $test_name timed out (may not be a failure)"
+	elif [ "$status" -ne 0 ]; then
+		echo "Test $test_name failed with ret $status"
+		FAILED="$FAILED <$test_string>"
+		RET=1
+	elif ! _check_dmesg "$dmesg_marker" "$test_name"; then
+		echo "Test $test_name failed dmesg check"
+		FAILED="$FAILED <$test_string>"
+		RET=1
+	elif [ -n "$dev" ]; then
+		sleep .1
+		ps aux | grep "\[io_wq_manager\]" > /dev/null
+		if [ $? -eq 0 ]; then
+			MAYBE_FAILED="$MAYBE_FAILED $test_string"
+		fi
+	fi
+}
+
+# Run all specified tests
+for tst in $TESTS; do
+	if [ ! -n "${TEST_MAP[$tst]}" ]; then
+		run_test $tst
+		if [ ! -z "$TEST_FILES" ]; then
+			for dev in $TEST_FILES; do
+				run_test $tst $dev
+			done
+		fi
+	else
+		run_test $tst ${TEST_MAP[$tst]}
+	fi
+done
+
+if [ -n "$SKIPPED" ]; then
+	echo "Tests skipped: $SKIPPED"
+fi
+
+if [ "${RET}" -ne 0 ]; then
+	echo "Tests failed: $FAILED"
+	exit $RET
+else
+	sleep 1
+	ps aux | grep "\[io_wq_manager\]" > /dev/null
+	if [ $? -ne 0 ]; then
+		MAYBE_FAILED=""
+	fi
+	if [ ! -z "$MAYBE_FAILED" ]; then
+		echo "Tests _maybe_ failed: $MAYBE_FAILED"
+	fi
+	echo "All tests passed"
+	exit 0
+fi
diff --git a/test/rw_merge_test.c b/test/rw_merge_test.c
new file mode 100644
index 0000000..43feed4
--- /dev/null
+++ b/test/rw_merge_test.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Regression test for incorrect async_list io_should_merge() logic
+ * Bug was fixed in 5.5 by commit 561fb04 ("io_uring: replace workqueue usage with io-wq")
+ * Affects 5.4 lts branch, at least 5.4.106 is affected.
+ */
+#include <stdio.h>
+#include <errno.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "liburing.h"
+#include "helpers.h"
+
+int main(int argc, char *argv[])
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int ret, fd, pipe1[2];
+	char buf[4096];
+	struct iovec vec = {
+		.iov_base = buf,
+		.iov_len = sizeof(buf)
+	};
+	struct __kernel_timespec ts = {.tv_sec = 3, .tv_nsec = 0};
+
+	if (argc > 1)
+		return 0;
+
+	ret = pipe(pipe1);
+	assert(!ret);
+
+	fd = open("testfile", O_RDWR | O_CREAT, 0644);
+	assert(ret >= 0);
+	ret = ftruncate(fd, 4096);
+	assert(!ret);
+
+	ret = t_create_ring(4, &ring, 0);
+	if (ret == T_SETUP_SKIP)
+		return 0;
+	else if (ret < 0)
+		return 1;
+
+	/* REQ1 */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_readv(sqe, pipe1[0], &vec, 1, 0);
+	sqe->user_data = 1;
+
+	/* REQ2 */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_readv(sqe, fd, &vec, 1, 4096);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(&ring);
+	assert(ret == 2);
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	assert(!ret);
+	assert(cqe->res == 0);
+	assert(cqe->user_data == 2);
+	io_uring_cqe_seen(&ring, cqe);
+
+	/*
+	 * REQ3
+	 * Prepare a request adjacent to the previous one, so the merge logic
+	 * may want to link it to the previous request, but because of a bug
+	 * in the merge logic it may instead be merged with the <REQ1> request
+	 */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_readv(sqe, fd, &vec, 1, 2048);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(&ring);
+	assert(ret == 1);
+
+	/*
+	 * Read may get stuck because of the bug, where the request was
+	 * incorrectly merged with the <REQ1> request
+	 */
+	ret = io_uring_wait_cqe_timeout(&ring, &cqe, &ts);
+	if (ret == -ETIME) {
+		printf("TEST_FAIL: readv req3 stuck\n");
+		return 1;
+	}
+	assert(!ret);
+
+	assert(cqe->res == 2048);
+	assert(cqe->user_data == 3);
+
+	io_uring_cqe_seen(&ring, cqe);
+	io_uring_queue_exit(&ring);
+	return 0;
+}
diff --git a/test/self.c b/test/self.c
new file mode 100644
index 0000000..422c9e3
--- /dev/null
+++ b/test/self.c
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test that pathname resolution works from async context when
+ * using /proc/self/ which should be the original submitting task, not the
+ * async worker.
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+static int io_openat2(struct io_uring *ring, const char *path, int dfd)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct open_how how;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	memset(&how, 0, sizeof(how));
+	how.flags = O_RDONLY;
+	io_uring_prep_openat2(sqe, dfd, path, &how);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+err:
+	return -1;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	char buf[64];
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	ret = io_openat2(&ring, "/proc/self/comm", -1);
+	if (ret < 0) {
+		if (ret == -EOPNOTSUPP)
+			return 0;
+		if (ret == -EINVAL) {
+			fprintf(stdout, "openat2 not supported, skipping\n");
+			return 0;
+		}
+		fprintf(stderr, "openat2 failed: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	memset(buf, 0, sizeof(buf));
+	ret = read(ret, buf, sizeof(buf));
+	if (ret < 0) {
+		perror("read");
+		return 1;
+	}
+
+	if (strncmp(buf, "self", 4)) {
+		fprintf(stderr, "got comm=<%s>, wanted <self>\n", buf);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/test/send_recv.c b/test/send_recv.c
new file mode 100644
index 0000000..38ae27f
--- /dev/null
+++ b/test/send_recv.c
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Simple test case demonstrating send and recv over io_uring
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <pthread.h>
+
+#include "liburing.h"
+#include "helpers.h"
+
+static char str[] = "This is a test of send and recv over io_uring!";
+
+#define MAX_MSG	128
+
+#define PORT	10200
+#define HOST	"127.0.0.1"
+
+#if 0
+#	define io_uring_prep_send io_uring_prep_write
+#	define io_uring_prep_recv io_uring_prep_read
+#endif
+
+static int recv_prep(struct io_uring *ring, struct iovec *iov, int *sock,
+		     int registerfiles)
+{
+	struct sockaddr_in saddr;
+	struct io_uring_sqe *sqe;
+	int sockfd, ret, val, use_fd;
+
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sin_family = AF_INET;
+	saddr.sin_addr.s_addr = htonl(INADDR_ANY);
+	saddr.sin_port = htons(PORT);
+
+	sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+	if (sockfd < 0) {
+		perror("socket");
+		return 1;
+	}
+
+	val = 1;
+	setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+
+	ret = bind(sockfd, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret < 0) {
+		perror("bind");
+		goto err;
+	}
+
+	if (registerfiles) {
+		ret = io_uring_register_files(ring, &sockfd, 1);
+		if (ret) {
+			fprintf(stderr, "file reg failed\n");
+			goto err;
+		}
+		use_fd = 0;
+	} else {
+		use_fd = sockfd;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_recv(sqe, use_fd, iov->iov_base, iov->iov_len, 0);
+	if (registerfiles)
+		sqe->flags |= IOSQE_FIXED_FILE;
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "submit failed: %d\n", ret);
+		goto err;
+	}
+
+	*sock = sockfd;
+	return 0;
+err:
+	close(sockfd);
+	return 1;
+}
+
+static int do_recv(struct io_uring *ring, struct iovec *iov)
+{
+	struct io_uring_cqe *cqe;
+	int ret;
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret) {
+		fprintf(stdout, "wait_cqe: %d\n", ret);
+		goto err;
+	}
+	if (cqe->res == -EINVAL) {
+		fprintf(stdout, "recv not supported, skipping\n");
+		return 0;
+	}
+	if (cqe->res < 0) {
+		fprintf(stderr, "failed cqe: %d\n", cqe->res);
+		goto err;
+	}
+
+	if (cqe->res -1 != strlen(str)) {
+		fprintf(stderr, "got wrong length: %d/%d\n", cqe->res,
+							(int) strlen(str) + 1);
+		goto err;
+	}
+
+	if (strcmp(str, iov->iov_base)) {
+		fprintf(stderr, "string mismatch\n");
+		goto err;
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+struct recv_data {
+	pthread_mutex_t mutex;
+	int use_sqthread;
+	int registerfiles;
+};
+
+static void *recv_fn(void *data)
+{
+	struct recv_data *rd = data;
+	char buf[MAX_MSG + 1];
+	struct iovec iov = {
+		.iov_base = buf,
+		.iov_len = sizeof(buf) - 1,
+	};
+	struct io_uring_params p = { };
+	struct io_uring ring;
+	int ret, sock;
+
+	if (rd->use_sqthread)
+		p.flags = IORING_SETUP_SQPOLL;
+	ret = t_create_ring_params(1, &ring, &p);
+	if (ret == T_SETUP_SKIP) {
+		pthread_mutex_unlock(&rd->mutex);
+		ret = 0;
+		goto err;
+	} else if (ret < 0) {
+		pthread_mutex_unlock(&rd->mutex);
+		goto err;
+	}
+
+	if (rd->use_sqthread && !rd->registerfiles) {
+		if (!(p.features & IORING_FEAT_SQPOLL_NONFIXED)) {
+			fprintf(stdout, "Non-registered SQPOLL not available, skipping\n");
+			pthread_mutex_unlock(&rd->mutex);
+			goto err;
+		}
+	}
+
+	ret = recv_prep(&ring, &iov, &sock, rd->registerfiles);
+	if (ret) {
+		fprintf(stderr, "recv_prep failed: %d\n", ret);
+		goto err;
+	}
+	pthread_mutex_unlock(&rd->mutex);
+	ret = do_recv(&ring, &iov);
+
+	close(sock);
+	io_uring_queue_exit(&ring);
+err:
+	return (void *)(intptr_t)ret;
+}
+
+static int do_send(void)
+{
+	struct sockaddr_in saddr;
+	struct iovec iov = {
+		.iov_base = str,
+		.iov_len = sizeof(str),
+	};
+	struct io_uring ring;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int sockfd, ret;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "queue init failed: %d\n", ret);
+		return 1;
+	}
+
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sin_family = AF_INET;
+	saddr.sin_port = htons(PORT);
+	inet_pton(AF_INET, HOST, &saddr.sin_addr);
+
+	sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+	if (sockfd < 0) {
+		perror("socket");
+		return 1;
+	}
+
+	ret = connect(sockfd, &saddr, sizeof(saddr));
+	if (ret < 0) {
+		perror("connect");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_send(sqe, sockfd, iov.iov_base, iov.iov_len, 0);
+	sqe->user_data = 1;
+
+	ret = io_uring_submit(&ring);
+	if (ret <= 0) {
+		fprintf(stderr, "submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (cqe->res == -EINVAL) {
+		fprintf(stdout, "send not supported, skipping\n");
+		close(sockfd);
+		return 0;
+	}
+	if (cqe->res != iov.iov_len) {
+		fprintf(stderr, "failed cqe: %d\n", cqe->res);
+		goto err;
+	}
+
+	close(sockfd);
+	return 0;
+err:
+	close(sockfd);
+	return 1;
+}
+
+static int test(int use_sqthread, int regfiles)
+{
+	pthread_mutexattr_t attr;
+	pthread_t recv_thread;
+	struct recv_data rd;
+	int ret;
+	void *retval;
+
+	pthread_mutexattr_init(&attr);
+	pthread_mutexattr_setpshared(&attr, 1);
+	pthread_mutex_init(&rd.mutex, &attr);
+	pthread_mutex_lock(&rd.mutex);
+	rd.use_sqthread = use_sqthread;
+	rd.registerfiles = regfiles;
+
+	ret = pthread_create(&recv_thread, NULL, recv_fn, &rd);
+	if (ret) {
+		fprintf(stderr, "Thread create failed: %d\n", ret);
+		pthread_mutex_unlock(&rd.mutex);
+		return 1;
+	}
+
+	pthread_mutex_lock(&rd.mutex);
+	do_send();
+	pthread_join(recv_thread, &retval);
+	return (int)(intptr_t)retval;
+}
+
+int main(int argc, char *argv[])
+{
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = test(0, 0);
+	if (ret) {
+		fprintf(stderr, "test sqthread=0 failed\n");
+		return ret;
+	}
+
+	ret = test(1, 1);
+	if (ret) {
+		fprintf(stderr, "test sqthread=1 reg=1 failed\n");
+		return ret;
+	}
+
+	ret = test(1, 0);
+	if (ret) {
+		fprintf(stderr, "test sqthread=1 reg=0 failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/send_recvmsg.c b/test/send_recvmsg.c
new file mode 100644
index 0000000..2ff8d9d
--- /dev/null
+++ b/test/send_recvmsg.c
@@ -0,0 +1,345 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Simple test case demonstrating sendmsg and recvmsg over io_uring
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <arpa/inet.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <pthread.h>
+#include <assert.h>
+
+#include "liburing.h"
+
+static char str[] = "This is a test of sendmsg and recvmsg over io_uring!";
+
+#define MAX_MSG	128
+
+#define PORT	10200
+#define HOST	"127.0.0.1"
+
+#define BUF_BGID	10
+#define BUF_BID		89
+
+#define MAX_IOV_COUNT	10
+
+static int recv_prep(struct io_uring *ring, struct iovec iov[], int iov_count,
+		     int bgid)
+{
+	struct sockaddr_in saddr;
+	struct msghdr msg;
+	struct io_uring_sqe *sqe;
+	int sockfd, ret;
+	int val = 1;
+
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sin_family = AF_INET;
+	saddr.sin_addr.s_addr = htonl(INADDR_ANY);
+	saddr.sin_port = htons(PORT);
+
+	sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+	if (sockfd < 0) {
+		perror("socket");
+		return 1;
+	}
+
+	val = 1;
+	setsockopt(sockfd, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
+	setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+
+	ret = bind(sockfd, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret < 0) {
+		perror("bind");
+		goto err;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "io_uring_get_sqe failed\n");
+		return 1;
+	}
+
+	io_uring_prep_recvmsg(sqe, sockfd, &msg, 0);
+	if (bgid) {
+		iov->iov_base = NULL;
+		sqe->flags |= IOSQE_BUFFER_SELECT;
+		sqe->buf_group = bgid;
+		iov_count = 1;
+	}
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_namelen = sizeof(struct sockaddr_in);
+	msg.msg_iov = iov;
+	msg.msg_iovlen = iov_count;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "submit failed: %d\n", ret);
+		goto err;
+	}
+
+	close(sockfd);
+	return 0;
+err:
+	close(sockfd);
+	return 1;
+}
+
+struct recv_data {
+	pthread_mutex_t *mutex;
+	int buf_select;
+	int no_buf_add;
+	int iov_count;
+};
+
+static int do_recvmsg(struct io_uring *ring, char buf[MAX_MSG + 1],
+		      struct recv_data *rd)
+{
+	struct io_uring_cqe *cqe;
+	int ret;
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret) {
+		fprintf(stdout, "wait_cqe: %d\n", ret);
+		goto err;
+	}
+	if (cqe->res < 0) {
+		if (rd->no_buf_add && rd->buf_select)
+			return 0;
+		fprintf(stderr, "%s: failed cqe: %d\n", __FUNCTION__, cqe->res);
+		goto err;
+	}
+	if (cqe->flags) {
+		int bid = cqe->flags >> 16;
+		if (bid != BUF_BID)
+			fprintf(stderr, "Buffer ID mismatch %d\n", bid);
+	}
+
+	if (rd->no_buf_add && rd->buf_select) {
+		fprintf(stderr, "Expected -ENOBUFS: %d\n", cqe->res);
+		goto err;
+	}
+
+	if (cqe->res -1 != strlen(str)) {
+		fprintf(stderr, "got wrong length: %d/%d\n", cqe->res,
+							(int) strlen(str) + 1);
+		goto err;
+	}
+
+	if (strncmp(str, buf, MAX_MSG + 1)) {
+		fprintf(stderr, "string mismatch\n");
+		goto err;
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+static void init_iov(struct iovec iov[MAX_IOV_COUNT], int iov_to_use,
+		     char buf[MAX_MSG + 1])
+{
+	int i, last_idx = iov_to_use - 1;
+
+	assert(0 < iov_to_use && iov_to_use <= MAX_IOV_COUNT);
+	for (i = 0; i < last_idx; ++i) {
+		iov[i].iov_base = buf + i;
+		iov[i].iov_len = 1;
+	}
+
+	iov[last_idx].iov_base = buf + last_idx;
+	iov[last_idx].iov_len = MAX_MSG - last_idx;
+}
+
+static void *recv_fn(void *data)
+{
+	struct recv_data *rd = data;
+	pthread_mutex_t *mutex = rd->mutex;
+	char buf[MAX_MSG + 1];
+	struct iovec iov[MAX_IOV_COUNT];
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int ret;
+
+	init_iov(iov, rd->iov_count, buf);
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "queue init failed: %d\n", ret);
+		goto err;
+	}
+
+	if (rd->buf_select && !rd->no_buf_add) {
+		sqe = io_uring_get_sqe(&ring);
+		io_uring_prep_provide_buffers(sqe, buf, sizeof(buf) -1, 1,
+						BUF_BGID, BUF_BID);
+		ret = io_uring_submit(&ring);
+		if (ret != 1) {
+			fprintf(stderr, "submit ret=%d\n", ret);
+			goto err;
+		}
+
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe=%d\n", ret);
+			goto err;
+		}
+		ret = cqe->res;
+		io_uring_cqe_seen(&ring, cqe);
+		if (ret == -EINVAL) {
+			fprintf(stdout, "PROVIDE_BUFFERS not supported, skip\n");
+			goto out;
+			goto err;
+		} else if (ret < 0) {
+			fprintf(stderr, "PROVIDER_BUFFERS %d\n", ret);
+			goto err;
+		}
+	}
+
+	ret = recv_prep(&ring, iov, rd->iov_count, rd->buf_select ? BUF_BGID : 0);
+	if (ret) {
+		fprintf(stderr, "recv_prep failed: %d\n", ret);
+		goto err;
+	}
+
+	pthread_mutex_unlock(mutex);
+	ret = do_recvmsg(&ring, buf, rd);
+
+	io_uring_queue_exit(&ring);
+
+err:
+	return (void *)(intptr_t)ret;
+out:
+	pthread_mutex_unlock(mutex);
+	io_uring_queue_exit(&ring);
+	return NULL;
+}
+
+static int do_sendmsg(void)
+{
+	struct sockaddr_in saddr;
+	struct iovec iov = {
+		.iov_base = str,
+		.iov_len = sizeof(str),
+	};
+	struct msghdr msg;
+	struct io_uring ring;
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int sockfd, ret;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "queue init failed: %d\n", ret);
+		return 1;
+	}
+
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sin_family = AF_INET;
+	saddr.sin_port = htons(PORT);
+	inet_pton(AF_INET, HOST, &saddr.sin_addr);
+
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_name = &saddr;
+	msg.msg_namelen = sizeof(struct sockaddr_in);
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+
+	sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+	if (sockfd < 0) {
+		perror("socket");
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_sendmsg(sqe, sockfd, &msg, 0);
+
+	ret = io_uring_submit(&ring);
+	if (ret <= 0) {
+		fprintf(stderr, "submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (cqe->res < 0) {
+		fprintf(stderr, "%s: failed cqe: %d\n", __FUNCTION__, cqe->res);
+		goto err;
+	}
+
+	close(sockfd);
+	return 0;
+err:
+	close(sockfd);
+	return 1;
+}
+
+static int test(int buf_select, int no_buf_add, int iov_count)
+{
+	struct recv_data rd;
+	pthread_mutexattr_t attr;
+	pthread_t recv_thread;
+	pthread_mutex_t mutex;
+	int ret;
+	void *retval;
+
+	pthread_mutexattr_init(&attr);
+	pthread_mutexattr_setpshared(&attr, 1);
+	pthread_mutex_init(&mutex, &attr);
+	pthread_mutex_lock(&mutex);
+
+	rd.mutex = &mutex;
+	rd.buf_select = buf_select;
+	rd.no_buf_add = no_buf_add;
+	rd.iov_count = iov_count;
+	ret = pthread_create(&recv_thread, NULL, recv_fn, &rd);
+	if (ret) {
+		pthread_mutex_unlock(&mutex);
+		fprintf(stderr, "Thread create failed\n");
+		return 1;
+	}
+
+	pthread_mutex_lock(&mutex);
+	do_sendmsg();
+	pthread_join(recv_thread, &retval);
+	ret = (int)(intptr_t)retval;
+
+	return ret;
+}
+
+int main(int argc, char *argv[])
+{
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = test(0, 0, 1);
+	if (ret) {
+		fprintf(stderr, "send_recvmsg 0 failed\n");
+		return 1;
+	}
+
+	ret = test(0, 0, 10);
+	if (ret) {
+		fprintf(stderr, "send_recvmsg multi iov failed\n");
+		return 1;
+	}
+
+	ret = test(1, 0, 1);
+	if (ret) {
+		fprintf(stderr, "send_recvmsg 1 0 failed\n");
+		return 1;
+	}
+
+	ret = test(1, 1, 1);
+	if (ret) {
+		fprintf(stderr, "send_recvmsg 1 1 failed\n");
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/test/sendmsg_fs_cve.c b/test/sendmsg_fs_cve.c
new file mode 100644
index 0000000..8de220a
--- /dev/null
+++ b/test/sendmsg_fs_cve.c
@@ -0,0 +1,192 @@
+/*
+ * repro-CVE-2020-29373 -- Reproducer for CVE-2020-29373.
+ *
+ * Copyright (c) 2021 SUSE
+ * Author: Nicolai Stange <nstange@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include "liburing.h"
+
+/*
+ * This attempts to make the kernel issue a sendmsg() to
+ * path from io_uring's async io_sq_wq_submit_work().
+ *
+ * Unfortunately, IOSQE_ASYNC is available only from kernel version
+ * 5.6 onwards. To still force io_uring to process the request
+ * asynchronously from io_sq_wq_submit_work(), queue a couple of
+ * auxiliary requests all failing with EAGAIN before. This is
+ * implemented by writing repeatedly to an auxiliary O_NONBLOCK
+ * AF_UNIX socketpair with a small SO_SNDBUF.
+ */
+static int try_sendmsg_async(const char * const path)
+{
+	int snd_sock, r;
+	struct io_uring ring;
+	char sbuf[16] = {};
+	struct iovec siov = { .iov_base = &sbuf, .iov_len = sizeof(sbuf) };
+	struct sockaddr_un addr = {};
+	struct msghdr msg = {
+		.msg_name = &addr,
+		.msg_namelen = sizeof(addr),
+		.msg_iov = &siov,
+		.msg_iovlen = 1,
+	};
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+
+	snd_sock = socket(AF_UNIX, SOCK_DGRAM, 0);
+	if (snd_sock < 0) {
+		perror("socket(AF_UNIX)");
+		return -1;
+	}
+
+	addr.sun_family = AF_UNIX;
+	strcpy(addr.sun_path, path);
+
+	r = io_uring_queue_init(512, &ring, 0);
+	if (r < 0) {
+		fprintf(stderr, "ring setup failed: %d\n", r);
+		goto close_snd;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		r = -EFAULT;
+		goto close_iour;
+	}
+
+	/* the actual one supposed to fail with -ENOENT. */
+	io_uring_prep_sendmsg(sqe, snd_sock, &msg, 0);
+	sqe->flags = IOSQE_ASYNC;
+	sqe->user_data = 255;
+
+	r = io_uring_submit(&ring);
+	if (r != 1) {
+		fprintf(stderr, "sqe submit failed: %d\n", r);
+		r = -EFAULT;
+		goto close_iour;
+	}
+
+	r = io_uring_wait_cqe(&ring, &cqe);
+	if (r < 0) {
+		fprintf(stderr, "wait completion %d\n", r);
+		r = -EFAULT;
+		goto close_iour;
+	}
+	if (cqe->user_data != 255) {
+		fprintf(stderr, "user data %d\n", (int)cqe->user_data);
+		r = -EFAULT;
+		goto close_iour;
+	}
+	if (cqe->res != -ENOENT) {
+		r = 3;
+		fprintf(stderr,
+			"error: cqe %i: res=%i, but expected -ENOENT\n",
+			(int)cqe->user_data, (int)cqe->res);
+	}
+	io_uring_cqe_seen(&ring, cqe);
+close_iour:
+	io_uring_queue_exit(&ring);
+close_snd:
+	close(snd_sock);
+	return r;
+}
+
+int main(int argc, char *argv[])
+{
+	int r;
+	char tmpdir[] = "/tmp/tmp.XXXXXX";
+	int rcv_sock;
+	struct sockaddr_un addr = {};
+	pid_t c;
+	int wstatus;
+
+	if (!mkdtemp(tmpdir)) {
+		perror("mkdtemp()");
+		return 1;
+	}
+
+	rcv_sock = socket(AF_UNIX, SOCK_DGRAM, 0);
+	if (rcv_sock < 0) {
+		perror("socket(AF_UNIX)");
+		r = 1;
+		goto rmtmpdir;
+	}
+
+	addr.sun_family = AF_UNIX;
+	snprintf(addr.sun_path, sizeof(addr.sun_path), "%s/sock", tmpdir);
+
+	r = bind(rcv_sock, (struct sockaddr *)&addr,
+		 sizeof(addr));
+	if (r < 0) {
+		perror("bind()");
+		close(rcv_sock);
+		r = 1;
+		goto rmtmpdir;
+	}
+
+	c = fork();
+	if (!c) {
+		close(rcv_sock);
+
+		if (chroot(tmpdir)) {
+			perror("chroot()");
+			return 1;
+		}
+
+		r = try_sendmsg_async(addr.sun_path);
+		if (r < 0) {
+			/* system call failure */
+			r = 1;
+		} else if (r) {
+			/* test case failure */
+			r += 1;
+		}
+		return r;
+	}
+
+	if (waitpid(c, &wstatus, 0) == (pid_t)-1) {
+		perror("waitpid()");
+		r = 1;
+		goto rmsock;
+	}
+	if (!WIFEXITED(wstatus)) {
+		fprintf(stderr, "child got terminated\n");
+		r = 1;
+		goto rmsock;
+	}
+	r = WEXITSTATUS(wstatus);
+	if (r)
+		fprintf(stderr, "error: Test failed\n");
+rmsock:
+	close(rcv_sock);
+	unlink(addr.sun_path);
+rmtmpdir:
+	rmdir(tmpdir);
+	return r;
+}
diff --git a/test/shared-wq.c b/test/shared-wq.c
new file mode 100644
index 0000000..c0571e6
--- /dev/null
+++ b/test/shared-wq.c
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test wq sharing
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+static int test_attach_invalid(int ringfd)
+{
+	struct io_uring_params p;
+	struct io_uring ring;
+	int ret;
+
+	memset(&p, 0, sizeof(p));
+	p.flags = IORING_SETUP_ATTACH_WQ;
+	p.wq_fd = ringfd;
+	ret = io_uring_queue_init_params(1, &ring, &p);
+	if (ret != -EINVAL) {
+		fprintf(stderr, "Attach to zero: %d\n", ret);
+		goto err;
+	}
+	return 0;
+err:
+	return 1;
+}
+
+static int test_attach(int ringfd)
+{
+	struct io_uring_params p;
+	struct io_uring ring2;
+	int ret;
+
+	memset(&p, 0, sizeof(p));
+	p.flags = IORING_SETUP_ATTACH_WQ;
+	p.wq_fd = ringfd;
+	ret = io_uring_queue_init_params(1, &ring2, &p);
+	if (ret == -EINVAL) {
+		fprintf(stdout, "Sharing not supported, skipping\n");
+		return 0;
+	} else if (ret) {
+		fprintf(stderr, "Attach to id: %d\n", ret);
+		goto err;
+	}
+	io_uring_queue_exit(&ring2);
+	return 0;
+err:
+	return 1;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	/* fd 2 (stderr) is definitely not an io_uring descriptor */
+	ret = test_attach_invalid(2);
+	if (ret) {
+		fprintf(stderr, "test_attach_invalid failed\n");
+		return ret;
+	}
+
+	ret = test_attach(ring.ring_fd);
+	if (ret) {
+		fprintf(stderr, "test_attach failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/short-read.c b/test/short-read.c
new file mode 100644
index 0000000..02eee04
--- /dev/null
+++ b/test/short-read.c
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/poll.h>
+
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define BUF_SIZE 4096
+#define FILE_SIZE 1024
+
+int main(int argc, char *argv[])
+{
+	int ret, fd, save_errno;
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct iovec vec;
+
+	if (argc > 1)
+		return 0;
+
+	vec.iov_base = t_malloc(BUF_SIZE);
+	vec.iov_len = BUF_SIZE;
+
+	t_create_file(".short-read", FILE_SIZE);
+
+	fd = open(".short-read", O_RDONLY);
+	save_errno = errno;
+	unlink(".short-read");
+	errno = save_errno;
+	if (fd < 0) {
+		perror("file open");
+		return 1;
+	}
+
+	ret = io_uring_queue_init(32, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "queue init failed: %d\n", ret);
+		return ret;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "sqe get failed\n");
+		return 1;
+	}
+	io_uring_prep_readv(sqe, fd, &vec, 1, 0);
+
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit failed: %d\n", ret);
+		return 1;
+	}
+
+	ret = io_uring_wait_cqes(&ring, &cqe, 1, 0, 0);
+	if (ret) {
+		fprintf(stderr, "wait_cqe failed: %d\n", ret);
+		return 1;
+	}
+
+	if (cqe->res != FILE_SIZE) {
+		fprintf(stderr, "Read failed: %d\n", cqe->res);
+		return 1;
+	}
+
+	io_uring_cqe_seen(&ring, cqe);
+	return 0;
+}
diff --git a/test/shutdown.c b/test/shutdown.c
new file mode 100644
index 0000000..5aa1371
--- /dev/null
+++ b/test/shutdown.c
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Check that writev on a socket that has been shutdown(2) fails
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <assert.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+#include "liburing.h"
+
+static void sig_pipe(int sig)
+{
+}
+
+int main(int argc, char *argv[])
+{
+	int p_fd[2], ret;
+	int32_t recv_s0;
+	int32_t val = 1;
+	struct sockaddr_in addr;
+
+	if (argc > 1)
+		return 0;
+
+	srand(getpid());
+
+	recv_s0 = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);
+
+	ret = setsockopt(recv_s0, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
+	assert(ret != -1);
+	ret = setsockopt(recv_s0, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+	assert(ret != -1);
+
+	addr.sin_family = AF_INET;
+	addr.sin_port = (rand() % 61440) + 4096;
+	addr.sin_addr.s_addr = 0x0100007fU;
+
+	ret = bind(recv_s0, (struct sockaddr*)&addr, sizeof(addr));
+	assert(ret != -1);
+	ret = listen(recv_s0, 128);
+	assert(ret != -1);
+
+	p_fd[1] = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);
+
+	val = 1;
+	ret = setsockopt(p_fd[1], IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val));
+	assert(ret != -1);
+
+	int32_t flags = fcntl(p_fd[1], F_GETFL, 0);
+	assert(flags != -1);
+
+	flags |= O_NONBLOCK;
+	ret = fcntl(p_fd[1], F_SETFL, flags);
+	assert(ret != -1);
+
+	ret = connect(p_fd[1], (struct sockaddr*)&addr, sizeof(addr));
+	assert(ret == -1);
+
+	flags = fcntl(p_fd[1], F_GETFL, 0);
+	assert(flags != -1);
+
+	flags &= ~O_NONBLOCK;
+	ret = fcntl(p_fd[1], F_SETFL, flags);
+	assert(ret != -1);
+
+	p_fd[0] = accept(recv_s0, NULL, NULL);
+	assert(p_fd[0] != -1);
+
+	signal(SIGPIPE, sig_pipe);
+
+	while (1) {
+		int32_t code;
+		socklen_t code_len = sizeof(code);
+
+		ret = getsockopt(p_fd[1], SOL_SOCKET, SO_ERROR, &code, &code_len);
+		assert(ret != -1);
+
+		if (!code)
+			break;
+	}
+
+	struct io_uring m_io_uring;
+
+	ret = io_uring_queue_init(32, &m_io_uring, 0);
+	assert(ret >= 0);
+
+	{
+		struct io_uring_cqe *cqe;
+		struct io_uring_sqe *sqe;
+		int res;
+
+		sqe = io_uring_get_sqe(&m_io_uring);
+		io_uring_prep_shutdown(sqe, p_fd[1], SHUT_WR);
+		sqe->user_data = 1;
+
+		res = io_uring_submit_and_wait(&m_io_uring, 1);
+		assert(res != -1);
+
+		res = io_uring_wait_cqe(&m_io_uring, &cqe);
+		if (res < 0) {
+			fprintf(stderr, "wait: %s\n", strerror(-res));
+			goto err;
+		}
+
+		if (cqe->res) {
+			if (cqe->res == -EINVAL) {
+				fprintf(stdout, "Shutdown not supported, skipping\n");
+				goto done;
+			}
+			fprintf(stderr, "shutdown: %d\n", cqe->res);
+			goto err;
+		}
+
+		io_uring_cqe_seen(&m_io_uring, cqe);
+	}
+
+	{
+		struct io_uring_cqe *cqe;
+		struct io_uring_sqe *sqe;
+		struct iovec iov[1];
+		char send_buff[128];
+		int res;
+
+		iov[0].iov_base = send_buff;
+		iov[0].iov_len = sizeof(send_buff);
+
+		sqe = io_uring_get_sqe(&m_io_uring);
+		assert(sqe != NULL);
+
+		io_uring_prep_writev(sqe, p_fd[1], iov, 1, 0);
+		res = io_uring_submit_and_wait(&m_io_uring, 1);
+		assert(res != -1);
+
+		res = io_uring_wait_cqe(&m_io_uring, &cqe);
+		if (res < 0) {
+			fprintf(stderr, "wait: %s\n", strerror(-res));
+			goto err;
+		}
+
+		if (cqe->res != -EPIPE) {
+			fprintf(stderr, "writev: %d\n", cqe->res);
+			goto err;
+		}
+		io_uring_cqe_seen(&m_io_uring, cqe);
+	}
+
+done:
+	io_uring_queue_exit(&m_io_uring);
+	return 0;
+err:
+	io_uring_queue_exit(&m_io_uring);
+	return 1;
+}
diff --git a/test/sigfd-deadlock.c b/test/sigfd-deadlock.c
new file mode 100644
index 0000000..038b094
--- /dev/null
+++ b/test/sigfd-deadlock.c
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test that sigfd reading/polling works. A regression test for
+ * the upstream commit:
+ *
+ * fd7d6de22414 ("io_uring: don't recurse on tsk->sighand->siglock with signalfd")
+ */
+#include <unistd.h>
+#include <sys/signalfd.h>
+#include <sys/epoll.h>
+#include <sys/poll.h>
+#include <stdio.h>
+#include "liburing.h"
+
+static int setup_signal(void)
+{
+	sigset_t mask;
+	int sfd;
+
+	sigemptyset(&mask);
+	sigaddset(&mask, SIGINT);
+
+	sigprocmask(SIG_BLOCK, &mask, NULL);
+	sfd = signalfd(-1, &mask, SFD_NONBLOCK);
+	if (sfd < 0)
+		perror("signalfd");
+	return sfd;
+}
+
+static int test_uring(int sfd)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int ret;
+
+	io_uring_queue_init(32, &ring, 0);
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_poll_add(sqe, sfd, POLLIN);
+	io_uring_submit(&ring);
+
+	kill(getpid(), SIGINT);
+
+	io_uring_wait_cqe(&ring, &cqe);
+	if (cqe->res & POLLIN) {
+		ret = 0;
+	} else {
+		fprintf(stderr, "Unexpected poll mask %x\n", cqe->res);
+		ret = 1;
+	}
+	io_uring_cqe_seen(&ring, cqe);
+	io_uring_queue_exit(&ring);
+	return ret;
+}
+
+int main(int argc, char *argv[])
+{
+	int sfd, ret;
+
+	if (argc > 1)
+		return 0;
+
+	sfd = setup_signal();
+	if (sfd < 0)
+		return 1;
+
+	ret = test_uring(sfd);
+	if (ret)
+		fprintf(stderr, "test_uring signalfd failed\n");
+
+	close(sfd);
+	return ret;
+}
diff --git a/test/socket-rw-eagain.c b/test/socket-rw-eagain.c
new file mode 100644
index 0000000..f15c0c1
--- /dev/null
+++ b/test/socket-rw-eagain.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Check that a readv on a nonblocking socket queued before a writev doesn't
+ * wait for data to arrive.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <assert.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+#include "liburing.h"
+
+int main(int argc, char *argv[])
+{
+	int p_fd[2], ret;
+	int32_t recv_s0;
+	int32_t val = 1;
+	struct sockaddr_in addr;
+
+	if (argc > 1)
+		return 0;
+
+	srand(getpid());
+
+	recv_s0 = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);
+
+	ret = setsockopt(recv_s0, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
+	assert(ret != -1);
+	ret = setsockopt(recv_s0, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+	assert(ret != -1);
+
+	addr.sin_family = AF_INET;
+	addr.sin_addr.s_addr = 0x0100007fU;
+
+	do {
+		addr.sin_port = (rand() % 61440) + 4096;
+		ret = bind(recv_s0, (struct sockaddr*)&addr, sizeof(addr));
+		if (!ret)
+			break;
+		if (errno != EADDRINUSE) {
+			perror("bind");
+			exit(1);
+		}
+	} while (1);
+
+	ret = listen(recv_s0, 128);
+	assert(ret != -1);
+
+	p_fd[1] = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);
+
+	val = 1;
+	ret = setsockopt(p_fd[1], IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val));
+	assert(ret != -1);
+
+	int32_t flags = fcntl(p_fd[1], F_GETFL, 0);
+	assert(flags != -1);
+
+	flags |= O_NONBLOCK;
+	ret = fcntl(p_fd[1], F_SETFL, flags);
+	assert(ret != -1);
+
+	ret = connect(p_fd[1], (struct sockaddr*)&addr, sizeof(addr));
+	assert(ret == -1);
+
+	p_fd[0] = accept(recv_s0, NULL, NULL);
+	assert(p_fd[0] != -1);
+
+	flags = fcntl(p_fd[0], F_GETFL, 0);
+	assert(flags != -1);
+
+	flags |= O_NONBLOCK;
+	ret = fcntl(p_fd[0], F_SETFL, flags);
+	assert(ret != -1);
+
+	while (1) {
+		int32_t code;
+		socklen_t code_len = sizeof(code);
+
+		ret = getsockopt(p_fd[1], SOL_SOCKET, SO_ERROR, &code, &code_len);
+		assert(ret != -1);
+
+		if (!code)
+			break;
+	}
+
+	struct io_uring m_io_uring;
+
+	ret = io_uring_queue_init(32, &m_io_uring, 0);
+	assert(ret >= 0);
+
+	char recv_buff[128];
+	char send_buff[128];
+
+	{
+		struct iovec iov[1];
+
+		iov[0].iov_base = recv_buff;
+		iov[0].iov_len = sizeof(recv_buff);
+
+		struct io_uring_sqe* sqe = io_uring_get_sqe(&m_io_uring);
+		assert(sqe != NULL);
+
+		io_uring_prep_readv(sqe, p_fd[0], iov, 1, 0);
+		sqe->user_data = 1;
+	}
+
+	{
+		struct iovec iov[1];
+
+		iov[0].iov_base = send_buff;
+		iov[0].iov_len = sizeof(send_buff);
+
+		struct io_uring_sqe* sqe = io_uring_get_sqe(&m_io_uring);
+		assert(sqe != NULL);
+
+		io_uring_prep_writev(sqe, p_fd[1], iov, 1, 0);
+		sqe->user_data = 2;
+	}
+
+	ret = io_uring_submit_and_wait(&m_io_uring, 2);
+	assert(ret != -1);
+
+	struct io_uring_cqe* cqe;
+	uint32_t head;
+	uint32_t count = 0;
+
+	while (count != 2) {
+		io_uring_for_each_cqe(&m_io_uring, head, cqe) {
+			if (cqe->user_data == 2 && cqe->res != 128) {
+				fprintf(stderr, "write=%d\n", cqe->res);
+				goto err;
+			} else if (cqe->user_data == 1 && cqe->res != -EAGAIN) {
+				fprintf(stderr, "read=%d\n", cqe->res);
+				goto err;
+			}
+			count++;
+		}
+
+		assert(count <= 2);
+		io_uring_cq_advance(&m_io_uring, count);
+	}
+
+	io_uring_queue_exit(&m_io_uring);
+	return 0;
+err:
+	io_uring_queue_exit(&m_io_uring);
+	return 1;
+}
diff --git a/test/socket-rw.c b/test/socket-rw.c
new file mode 100644
index 0000000..1b731b2
--- /dev/null
+++ b/test/socket-rw.c
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Check that a readv on a socket queued before a writev doesn't hang
+ * the processing.
+ *
+ * From Hrvoje Zeba <zeba.hrvoje@gmail.com>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <assert.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+#include "liburing.h"
+
+int main(int argc, char *argv[])
+{
+	int p_fd[2], ret;
+	int32_t recv_s0;
+	int32_t val = 1;
+	struct sockaddr_in addr;
+
+	if (argc > 1)
+		return 0;
+
+	srand(getpid());
+
+	recv_s0 = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);
+
+	ret = setsockopt(recv_s0, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
+	assert(ret != -1);
+	ret = setsockopt(recv_s0, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
+	assert(ret != -1);
+
+	addr.sin_family = AF_INET;
+	addr.sin_addr.s_addr = 0x0100007fU;
+
+	do {
+		addr.sin_port = (rand() % 61440) + 4096;
+		ret = bind(recv_s0, (struct sockaddr*)&addr, sizeof(addr));
+		if (!ret)
+			break;
+		if (errno != EADDRINUSE) {
+			perror("bind");
+			exit(1);
+		}
+	} while (1);
+	ret = listen(recv_s0, 128);
+	assert(ret != -1);
+
+
+	p_fd[1] = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);
+
+	val = 1;
+	ret = setsockopt(p_fd[1], IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val));
+	assert(ret != -1);
+
+	int32_t flags = fcntl(p_fd[1], F_GETFL, 0);
+	assert(flags != -1);
+
+	flags |= O_NONBLOCK;
+	ret = fcntl(p_fd[1], F_SETFL, flags);
+	assert(ret != -1);
+
+	ret = connect(p_fd[1], (struct sockaddr*)&addr, sizeof(addr));
+	assert(ret == -1);
+
+	flags = fcntl(p_fd[1], F_GETFL, 0);
+	assert(flags != -1);
+
+	flags &= ~O_NONBLOCK;
+	ret = fcntl(p_fd[1], F_SETFL, flags);
+	assert(ret != -1);
+
+	p_fd[0] = accept(recv_s0, NULL, NULL);
+	assert(p_fd[0] != -1);
+
+	while (1) {
+		int32_t code;
+		socklen_t code_len = sizeof(code);
+
+		ret = getsockopt(p_fd[1], SOL_SOCKET, SO_ERROR, &code, &code_len);
+		assert(ret != -1);
+
+		if (!code)
+			break;
+	}
+
+	struct io_uring m_io_uring;
+
+	ret = io_uring_queue_init(32, &m_io_uring, 0);
+	assert(ret >= 0);
+
+	char recv_buff[128];
+	char send_buff[128];
+
+	{
+		struct iovec iov[1];
+
+		iov[0].iov_base = recv_buff;
+		iov[0].iov_len = sizeof(recv_buff);
+
+		struct io_uring_sqe* sqe = io_uring_get_sqe(&m_io_uring);
+		assert(sqe != NULL);
+
+		io_uring_prep_readv(sqe, p_fd[0], iov, 1, 0);
+	}
+
+	{
+		struct iovec iov[1];
+
+		iov[0].iov_base = send_buff;
+		iov[0].iov_len = sizeof(send_buff);
+
+		struct io_uring_sqe* sqe = io_uring_get_sqe(&m_io_uring);
+		assert(sqe != NULL);
+
+		io_uring_prep_writev(sqe, p_fd[1], iov, 1, 0);
+	}
+
+	ret = io_uring_submit_and_wait(&m_io_uring, 2);
+	assert(ret != -1);
+
+	struct io_uring_cqe* cqe;
+	uint32_t head;
+	uint32_t count = 0;
+
+	while (count != 2) {
+		io_uring_for_each_cqe(&m_io_uring, head, cqe) {
+			assert(cqe->res == 128);
+			count++;
+		}
+
+		assert(count <= 2);
+		io_uring_cq_advance(&m_io_uring, count);
+	}
+
+	io_uring_queue_exit(&m_io_uring);
+	return 0;
+}
diff --git a/test/splice.c b/test/splice.c
new file mode 100644
index 0000000..f4f0c9c
--- /dev/null
+++ b/test/splice.c
@@ -0,0 +1,511 @@
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define BUF_SIZE (16 * 4096)
+
+struct test_ctx {
+	int real_pipe1[2];
+	int real_pipe2[2];
+	int real_fd_in;
+	int real_fd_out;
+
+	/* fds or for registered files */
+	int pipe1[2];
+	int pipe2[2];
+	int fd_in;
+	int fd_out;
+
+	void *buf_in;
+	void *buf_out;
+};
+
+static unsigned int splice_flags = 0;
+static unsigned int sqe_flags = 0;
+static int has_splice = 0;
+static int has_tee = 0;
+
+static int read_buf(int fd, void *buf, int len)
+{
+	int ret;
+
+	while (len) {
+		ret = read(fd, buf, len);
+		if (ret < 0)
+			return ret;
+		len -= ret;
+		buf += ret;
+	}
+	return 0;
+}
+
+static int write_buf(int fd, const void *buf, int len)
+{
+	int ret;
+
+	while (len) {
+		ret = write(fd, buf, len);
+		if (ret < 0)
+			return ret;
+		len -= ret;
+		buf += ret;
+	}
+	return 0;
+}
+
+static int check_content(int fd, void *buf, int len, const void *src)
+{
+	int ret;
+
+	ret = read_buf(fd, buf, len);
+	if (ret)
+		return ret;
+
+	ret = memcmp(buf, src, len);
+	return (ret != 0) ? -1 : 0;
+}
+
+static int create_file(const char *filename)
+{
+	int fd, save_errno;
+
+	fd = open(filename, O_RDWR | O_CREAT, 0644);
+	save_errno = errno;
+	unlink(filename);
+	errno = save_errno;
+	return fd;
+}
+
+static int init_splice_ctx(struct test_ctx *ctx)
+{
+	int ret, rnd_fd;
+
+	ctx->buf_in = t_calloc(BUF_SIZE, 1);
+	ctx->buf_out = t_calloc(BUF_SIZE, 1);
+
+	ctx->fd_in = create_file(".splice-test-in");
+	if (ctx->fd_in < 0) {
+		perror("file open");
+		return 1;
+	}
+
+	ctx->fd_out = create_file(".splice-test-out");
+	if (ctx->fd_out < 0) {
+		perror("file open");
+		return 1;
+	}
+
+	/* get random data */
+	rnd_fd = open("/dev/urandom", O_RDONLY);
+	if (rnd_fd < 0)
+		return 1;
+
+	ret = read_buf(rnd_fd, ctx->buf_in, BUF_SIZE);
+	if (ret != 0)
+		return 1;
+	close(rnd_fd);
+
+	/* populate file */
+	ret = write_buf(ctx->fd_in, ctx->buf_in, BUF_SIZE);
+	if (ret)
+		return ret;
+
+	if (pipe(ctx->pipe1) < 0)
+		return 1;
+	if (pipe(ctx->pipe2) < 0)
+		return 1;
+
+	ctx->real_pipe1[0] = ctx->pipe1[0];
+	ctx->real_pipe1[1] = ctx->pipe1[1];
+	ctx->real_pipe2[0] = ctx->pipe2[0];
+	ctx->real_pipe2[1] = ctx->pipe2[1];
+	ctx->real_fd_in = ctx->fd_in;
+	ctx->real_fd_out = ctx->fd_out;
+	return 0;
+}
+
+static int do_splice_op(struct io_uring *ring,
+			int fd_in, loff_t off_in,
+			int fd_out, loff_t off_out,
+			unsigned int len,
+			__u8 opcode)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret = -1;
+
+	do {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			fprintf(stderr, "get sqe failed\n");
+			return -1;
+		}
+		io_uring_prep_splice(sqe, fd_in, off_in, fd_out, off_out,
+				     len, splice_flags);
+		sqe->flags |= sqe_flags;
+		sqe->user_data = 42;
+		sqe->opcode = opcode;
+
+		ret = io_uring_submit(ring);
+		if (ret != 1) {
+			fprintf(stderr, "sqe submit failed: %d\n", ret);
+			return ret;
+		}
+
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "wait completion %d\n", ret);
+			return ret;
+		}
+
+		if (cqe->res <= 0) {
+			io_uring_cqe_seen(ring, cqe);
+			return cqe->res;
+		}
+
+		len -= cqe->res;
+		if (off_in != -1)
+			off_in += cqe->res;
+		if (off_out != -1)
+			off_out += cqe->res;
+		io_uring_cqe_seen(ring, cqe);
+	} while (len);
+
+	return 0;
+}
+
+static int do_splice(struct io_uring *ring,
+			int fd_in, loff_t off_in,
+			int fd_out, loff_t off_out,
+			unsigned int len)
+{
+	return do_splice_op(ring, fd_in, off_in, fd_out, off_out, len,
+			    IORING_OP_SPLICE);
+}
+
+static int do_tee(struct io_uring *ring, int fd_in, int fd_out, 
+		  unsigned int len)
+{
+	return do_splice_op(ring, fd_in, 0, fd_out, 0, len, IORING_OP_TEE);
+}
+
+static void check_splice_support(struct io_uring *ring, struct test_ctx *ctx)
+{
+	int ret;
+
+	ret = do_splice(ring, -1, 0, -1, 0, BUF_SIZE);
+	has_splice = (ret == -EBADF);
+}
+
+static void check_tee_support(struct io_uring *ring, struct test_ctx *ctx)
+{
+	int ret;
+
+	ret = do_tee(ring, -1, -1, BUF_SIZE);
+	has_tee = (ret == -EBADF);
+}
+
+static int check_zero_splice(struct io_uring *ring, struct test_ctx *ctx)
+{
+	int ret;
+
+	ret = do_splice(ring, ctx->fd_in, -1, ctx->pipe1[1], -1, 0);
+	if (ret)
+		return ret;
+
+	ret = do_splice(ring, ctx->pipe2[0], -1, ctx->pipe1[1], -1, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int splice_to_pipe(struct io_uring *ring, struct test_ctx *ctx)
+{
+	int ret;
+
+	ret = lseek(ctx->real_fd_in, 0, SEEK_SET);
+	if (ret)
+		return ret;
+
+	/* implicit file offset */
+	ret = do_splice(ring, ctx->fd_in, -1, ctx->pipe1[1], -1, BUF_SIZE);
+	if (ret)
+		return ret;
+
+	ret = check_content(ctx->real_pipe1[0], ctx->buf_out, BUF_SIZE,
+			     ctx->buf_in);
+	if (ret)
+		return ret;
+
+	/* explicit file offset */
+	ret = do_splice(ring, ctx->fd_in, 0, ctx->pipe1[1], -1, BUF_SIZE);
+	if (ret)
+		return ret;
+
+	return check_content(ctx->real_pipe1[0], ctx->buf_out, BUF_SIZE,
+			     ctx->buf_in);
+}
+
+static int splice_from_pipe(struct io_uring *ring, struct test_ctx *ctx)
+{
+	int ret;
+
+	ret = write_buf(ctx->real_pipe1[1], ctx->buf_in, BUF_SIZE);
+	if (ret)
+		return ret;
+	ret = do_splice(ring, ctx->pipe1[0], -1, ctx->fd_out, 0, BUF_SIZE);
+	if (ret)
+		return ret;
+	ret = check_content(ctx->real_fd_out, ctx->buf_out, BUF_SIZE,
+			     ctx->buf_in);
+	if (ret)
+		return ret;
+
+	ret = ftruncate(ctx->real_fd_out, 0);
+	if (ret)
+		return ret;
+	return lseek(ctx->real_fd_out, 0, SEEK_SET);
+}
+
+static int splice_pipe_to_pipe(struct io_uring *ring, struct test_ctx *ctx)
+{
+	int ret;
+
+	ret = do_splice(ring, ctx->fd_in, 0, ctx->pipe1[1], -1, BUF_SIZE);
+	if (ret)
+		return ret;
+	ret = do_splice(ring, ctx->pipe1[0], -1, ctx->pipe2[1], -1, BUF_SIZE);
+	if (ret)
+		return ret;
+
+	return check_content(ctx->real_pipe2[0], ctx->buf_out, BUF_SIZE,
+				ctx->buf_in);
+}
+
+static int fail_splice_pipe_offset(struct io_uring *ring, struct test_ctx *ctx)
+{
+	int ret;
+
+	ret = do_splice(ring, ctx->fd_in, 0, ctx->pipe1[1], 0, BUF_SIZE);
+	if (ret != -ESPIPE && ret != -EINVAL)
+		return ret;
+
+	ret = do_splice(ring, ctx->pipe1[0], 0, ctx->fd_out, 0, BUF_SIZE);
+	if (ret != -ESPIPE && ret != -EINVAL)
+		return ret;
+
+	return 0;
+}
+
+static int fail_tee_nonpipe(struct io_uring *ring, struct test_ctx *ctx)
+{
+	int ret;
+
+	ret = do_tee(ring, ctx->fd_in, ctx->pipe1[1], BUF_SIZE);
+	if (ret != -ESPIPE && ret != -EINVAL)
+		return ret;
+
+	return 0;
+}
+
+static int fail_tee_offset(struct io_uring *ring, struct test_ctx *ctx)
+{
+	int ret;
+
+	ret = do_splice_op(ring, ctx->pipe2[0], -1, ctx->pipe1[1], 0,
+			   BUF_SIZE, IORING_OP_TEE);
+	if (ret != -ESPIPE && ret != -EINVAL)
+		return ret;
+
+	ret = do_splice_op(ring, ctx->pipe2[0], 0, ctx->pipe1[1], -1,
+			   BUF_SIZE, IORING_OP_TEE);
+	if (ret != -ESPIPE && ret != -EINVAL)
+		return ret;
+
+	return 0;
+}
+
+static int check_tee(struct io_uring *ring, struct test_ctx *ctx)
+{
+	int ret;
+
+	ret = write_buf(ctx->real_pipe1[1], ctx->buf_in, BUF_SIZE);
+	if (ret)
+		return ret;
+	ret = do_tee(ring, ctx->pipe1[0], ctx->pipe2[1], BUF_SIZE);
+	if (ret)
+		return ret;
+
+	ret = check_content(ctx->real_pipe1[0], ctx->buf_out, BUF_SIZE,
+				ctx->buf_in);
+	if (ret) {
+		fprintf(stderr, "tee(), invalid src data\n");
+		return ret;
+	}
+
+	ret = check_content(ctx->real_pipe2[0], ctx->buf_out, BUF_SIZE,
+				ctx->buf_in);
+	if (ret) {
+		fprintf(stderr, "tee(), invalid dst data\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int check_zero_tee(struct io_uring *ring, struct test_ctx *ctx)
+{
+	return do_tee(ring, ctx->pipe2[0], ctx->pipe1[1], 0);
+}
+
+static int test_splice(struct io_uring *ring, struct test_ctx *ctx)
+{
+	int ret;
+
+	if (has_splice) {
+		ret = check_zero_splice(ring, ctx);
+		if (ret) {
+			fprintf(stderr, "check_zero_splice failed %i %i\n",
+				ret, errno);
+			return ret;
+		}
+
+		ret = splice_to_pipe(ring, ctx);
+		if (ret) {
+			fprintf(stderr, "splice_to_pipe failed %i %i\n",
+				ret, errno);
+			return ret;
+		}
+
+		ret = splice_from_pipe(ring, ctx);
+		if (ret) {
+			fprintf(stderr, "splice_from_pipe failed %i %i\n",
+				ret, errno);
+			return ret;
+		}
+
+		ret = splice_pipe_to_pipe(ring, ctx);
+		if (ret) {
+			fprintf(stderr, "splice_pipe_to_pipe failed %i %i\n",
+				ret, errno);
+			return ret;
+		}
+
+		ret = fail_splice_pipe_offset(ring, ctx);
+		if (ret) {
+			fprintf(stderr, "fail_splice_pipe_offset failed %i %i\n",
+				ret, errno);
+			return ret;
+		}
+	}
+
+	if (has_tee) {
+		ret = check_zero_tee(ring, ctx);
+		if (ret) {
+			fprintf(stderr, "check_zero_tee() failed %i %i\n",
+				ret, errno);
+			return ret;
+		}
+
+		ret = fail_tee_nonpipe(ring, ctx);
+		if (ret) {
+			fprintf(stderr, "fail_tee_nonpipe() failed %i %i\n",
+				ret, errno);
+			return ret;
+		}
+
+		ret = fail_tee_offset(ring, ctx);
+		if (ret) {
+			fprintf(stderr, "fail_tee_offset failed %i %i\n",
+				ret, errno);
+			return ret;
+		}
+
+		ret = check_tee(ring, ctx);
+		if (ret) {
+			fprintf(stderr, "check_tee() failed %i %i\n",
+				ret, errno);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	struct io_uring_params p = { };
+	struct test_ctx ctx;
+	int ret;
+	int reg_fds[6];
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init_params(8, &ring, &p);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+	if (!(p.features & IORING_FEAT_FAST_POLL)) {
+		fprintf(stdout, "No splice support, skipping\n");
+		return 0;
+	}
+
+	ret = init_splice_ctx(&ctx);
+	if (ret) {
+		fprintf(stderr, "init failed %i %i\n", ret, errno);
+		return 1;
+	}
+
+	check_splice_support(&ring, &ctx);
+	if (!has_splice)
+		fprintf(stdout, "skip, doesn't support splice()\n");
+	check_tee_support(&ring, &ctx);
+	if (!has_tee)
+		fprintf(stdout, "skip, doesn't support tee()\n");
+
+	ret = test_splice(&ring, &ctx);
+	if (ret) {
+		fprintf(stderr, "basic splice tests failed\n");
+		return ret;
+	}
+
+	reg_fds[0] = ctx.real_pipe1[0];
+	reg_fds[1] = ctx.real_pipe1[1];
+	reg_fds[2] = ctx.real_pipe2[0];
+	reg_fds[3] = ctx.real_pipe2[1];
+	reg_fds[4] = ctx.real_fd_in;
+	reg_fds[5] = ctx.real_fd_out;
+	ret = io_uring_register_files(&ring, reg_fds, 6);
+	if (ret) {
+		fprintf(stderr, "%s: register ret=%d\n", __FUNCTION__, ret);
+		return 1;
+	}
+
+	/* remap fds to registered */
+	ctx.pipe1[0] = 0;
+	ctx.pipe1[1] = 1;
+	ctx.pipe2[0] = 2;
+	ctx.pipe2[1] = 3;
+	ctx.fd_in = 4;
+	ctx.fd_out = 5;
+
+	splice_flags = SPLICE_F_FD_IN_FIXED;
+	sqe_flags = IOSQE_FIXED_FILE;
+	ret = test_splice(&ring, &ctx);
+	if (ret) {
+		fprintf(stderr, "registered fds splice tests failed\n");
+		return ret;
+	}
+	return 0;
+}
diff --git a/test/sq-full-cpp.cc b/test/sq-full-cpp.cc
new file mode 100644
index 0000000..ba40099
--- /dev/null
+++ b/test/sq-full-cpp.cc
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test SQ queue full condition
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+int main(int argc, char *argv[])
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring ring;
+	int ret, i;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+
+	}
+
+	i = 0;
+	while ((sqe = io_uring_get_sqe(&ring)) != NULL)
+		i++;
+
+	if (i != 8) {
+		fprintf(stderr, "Got %d SQEs, wanted 8\n", i);
+		goto err;
+	}
+
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
diff --git a/test/sq-full.c b/test/sq-full.c
new file mode 100644
index 0000000..ba40099
--- /dev/null
+++ b/test/sq-full.c
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test SQ queue full condition
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+/*
+ * Grab SQEs until the ring reports full, and verify we got exactly as
+ * many as the ring was sized for (8). Any CLI argument skips the test.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring ring;
+	int ret, i;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+
+	}
+
+	/* pull SQEs until the SQ ring is exhausted */
+	i = 0;
+	while ((sqe = io_uring_get_sqe(&ring)) != NULL)
+		i++;
+
+	/* ring was created with 8 entries, so exactly 8 must be handed out */
+	if (i != 8) {
+		fprintf(stderr, "Got %d SQEs, wanted 8\n", i);
+		goto err;
+	}
+
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
diff --git a/test/sq-poll-dup.c b/test/sq-poll-dup.c
new file mode 100644
index 0000000..eeb619c
--- /dev/null
+++ b/test/sq-poll-dup.c
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test SQPOLL with IORING_SETUP_ATTACH_WQ and closing of
+ * the original ring descriptor.
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+#include <sys/resource.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define FILE_SIZE	(128 * 1024 * 1024)
+#define BS		4096
+#define BUFFERS		64
+
+#define NR_RINGS	4
+
+static struct iovec *vecs;
+static struct io_uring rings[NR_RINGS];
+
+/*
+ * Reap nr_ios completions from the ring; each must be a read that
+ * completed with exactly BS bytes. Returns 0 on success, 1 on failure.
+ */
+static int wait_io(struct io_uring *ring, int nr_ios)
+{
+	struct io_uring_cqe *cqe;
+	int ret;
+
+	while (nr_ios) {
+		/*
+		 * Check the wait itself: on failure 'cqe' is left
+		 * uninitialized and must not be dereferenced (the
+		 * sq-poll-share variant of this helper already does this).
+		 */
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "io_uring_wait_cqe failed %d\n", ret);
+			return 1;
+		}
+		if (cqe->res != BS) {
+			fprintf(stderr, "Unexpected ret %d\n", cqe->res);
+			return 1;
+		}
+		io_uring_cqe_seen(ring, cqe);
+		nr_ios--;
+	}
+
+	return 0;
+}
+
+/*
+ * Queue up to nr_ios BS-sized reads from 'fd' at consecutive offsets and
+ * submit them. Returns the number of requests queued, or a negative
+ * error code if submission failed (all callers treat < 0 as failure).
+ */
+static int queue_io(struct io_uring *ring, int fd, int nr_ios)
+{
+	unsigned long off;
+	int i, ret;
+
+	i = 0;
+	off = 0;
+	while (nr_ios) {
+		struct io_uring_sqe *sqe;
+
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe)
+			break;
+		io_uring_prep_read(sqe, fd, vecs[i].iov_base, vecs[i].iov_len, off);
+		nr_ios--;
+		i++;
+		off += BS;
+	}
+
+	/*
+	 * Propagate submit errors instead of dropping them: the original
+	 * ignored the return, which made wait_io() block on completions
+	 * that were never submitted.
+	 */
+	ret = io_uring_submit(ring);
+	if (ret < 0)
+		return ret;
+	return i;
+}
+
+/*
+ * Pump reads through every ring in [ring_start, ring_end): queue BUFFERS
+ * requests per ring, then reap exactly what each ring actually queued.
+ * Loops until ~32 I/Os have gone through. Returns 0 on success, 1 on error.
+ */
+static int do_io(int fd, int ring_start, int ring_end)
+{
+	int i, rets[NR_RINGS];
+	unsigned ios = 0;
+
+	while (ios < 32) {
+		for (i = ring_start; i < ring_end; i++) {
+			int ret = queue_io(&rings[i], fd, BUFFERS);
+			if (ret < 0)
+				goto err;
+			/* remember how many were actually queued per ring */
+			rets[i] = ret;
+		}
+		for (i = ring_start; i < ring_end; i++) {
+			if (wait_io(&rings[i], rets[i]))
+				goto err;
+		}
+		ios += BUFFERS;
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Create NR_RINGS SQPOLL rings sharing one poll thread via
+ * IORING_SETUP_ATTACH_WQ, then exercise I/O across them while dup()'ing
+ * (and optionally closing) the original ring fd, to verify the shared
+ * poll thread keeps servicing requests. Returns 0 on success or skip,
+ * 1 on failure.
+ */
+static int test(int fd, int do_dup_and_close, int close_ring)
+{
+	int i, ret, ring_fd;
+
+	for (i = 0; i < NR_RINGS; i++) {
+		struct io_uring_params p = { };
+
+		p.flags = IORING_SETUP_SQPOLL;
+		p.sq_thread_idle = 100;
+		if (i) {
+			/* rings 1..N attach to ring 0's poll thread */
+			p.wq_fd = rings[0].ring_fd;
+			p.flags |= IORING_SETUP_ATTACH_WQ;
+		}
+		ret = io_uring_queue_init_params(BUFFERS, &rings[i], &p);
+		if (ret) {
+			fprintf(stderr, "queue_init: %d/%d\n", ret, i);
+			goto err;
+		}
+		/* no sharing for non-fixed either */
+		if (!(p.features & IORING_FEAT_SQPOLL_NONFIXED)) {
+			fprintf(stdout, "No SQPOLL sharing, skipping\n");
+			return 0;
+		}
+	}
+
+	/* test all rings */
+	if (do_io(fd, 0, NR_RINGS))
+		goto err;
+
+	/* dup and close original ring fd */
+	ring_fd = dup(rings[0].ring_fd);
+	if (ring_fd < 0) {
+		/* was unchecked: a failed dup() would poison rings[0] */
+		perror("dup");
+		goto err;
+	}
+	if (close_ring)
+		close(rings[0].ring_fd);
+	rings[0].ring_fd = ring_fd;
+	if (do_dup_and_close)
+		goto done;
+
+	/* test all but closed one */
+	if (do_io(fd, 1, NR_RINGS))
+		goto err;
+
+	/* test closed one */
+	if (do_io(fd, 0, 1))
+		goto err;
+
+	/* make sure thread is idle so we enter the kernel */
+	usleep(200000);
+
+	/* test closed one */
+	if (do_io(fd, 0, 1))
+		goto err;
+
+
+done:
+	for (i = 0; i < NR_RINGS; i++)
+		io_uring_queue_exit(&rings[i]);
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Entry point: create (or reuse from argv[1]) a test file, allocate the
+ * read buffers, and run the shared-SQPOLL dup/close scenarios.
+ */
+int main(int argc, char *argv[])
+{
+	char *fname;
+	int ret, fd;
+
+	if (argc > 1) {
+		fname = argv[1];
+	} else {
+		fname = ".basic-rw";
+		t_create_file(fname, FILE_SIZE);
+	}
+
+	vecs = t_create_buffers(BUFFERS, BS);
+
+	/* O_DIRECT so reads actually reach the file, not the page cache */
+	fd = open(fname, O_RDONLY | O_DIRECT);
+	if (fd < 0) {
+		perror("open");
+		return -1;
+	}
+
+	ret = test(fd, 0, 0);
+	if (ret) {
+		fprintf(stderr, "test 0 0 failed\n");
+		goto err;
+	}
+
+	ret = test(fd, 0, 1);
+	if (ret) {
+		fprintf(stderr, "test 0 1 failed\n");
+		goto err;
+	}
+
+
+	ret = test(fd, 1, 0);
+	if (ret) {
+		fprintf(stderr, "test 1 0 failed\n");
+		goto err;
+	}
+
+	/* only remove the file if we created it ourselves */
+	if (fname != argv[1])
+		unlink(fname);
+	return 0;
+err:
+	if (fname != argv[1])
+		unlink(fname);
+	return 1;
+}
diff --git a/test/sq-poll-kthread.c b/test/sq-poll-kthread.c
new file mode 100644
index 0000000..ed7d0bf
--- /dev/null
+++ b/test/sq-poll-kthread.c
@@ -0,0 +1,170 @@
+/*
+ * Description: test if io_uring SQ poll kthread is stopped when the userspace
+ *              process ended with or without closing the io_uring fd
+ *
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <sys/poll.h>
+#include <sys/wait.h>
+#include <sys/epoll.h>
+
+#include "liburing.h"
+
+#define SQ_THREAD_IDLE  2000
+#define BUF_SIZE        128
+#define KTHREAD_NAME    "io_uring-sq"
+
+enum {
+	TEST_OK = 0,
+	TEST_SKIPPED = 1,
+	TEST_FAILED = 2,
+};
+
+/*
+ * Child-side body of the kthread-stop test: set up an SQPOLL ring,
+ * register the pipe write end as fixed file 0, push one fixed-file
+ * writev through the poll thread, and verify it wrote BUF_SIZE bytes.
+ * When do_exit is false the ring is deliberately NOT torn down, so the
+ * parent can check that process exit alone reaps the poll kthread.
+ * Returns TEST_OK / TEST_SKIPPED / TEST_FAILED.
+ */
+static int do_test_sq_poll_kthread_stopped(bool do_exit)
+{
+	int ret = 0, pipe1[2];
+	struct io_uring_params param;
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	uint8_t buf[BUF_SIZE];
+	struct iovec iov;
+
+	/* SQPOLL historically needs privileges; skip rather than fail */
+	if (geteuid()) {
+		fprintf(stderr, "sqpoll requires root!\n");
+		return TEST_SKIPPED;
+	}
+
+	if (pipe(pipe1) != 0) {
+		perror("pipe");
+		return TEST_FAILED;
+	}
+
+	memset(&param, 0, sizeof(param));
+
+	param.flags |= IORING_SETUP_SQPOLL;
+	param.sq_thread_idle = SQ_THREAD_IDLE;
+
+	ret = io_uring_queue_init_params(16, &ring, &param);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		ret = TEST_FAILED;
+		goto err_pipe;
+	}
+
+	ret = io_uring_register_files(&ring, &pipe1[1], 1);
+	if (ret) {
+		fprintf(stderr, "file reg failed: %d\n", ret);
+		ret = TEST_FAILED;
+		goto err_uring;
+	}
+
+	iov.iov_base = buf;
+	iov.iov_len = BUF_SIZE;
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "io_uring_get_sqe failed\n");
+		ret = TEST_FAILED;
+		goto err_uring;
+	}
+
+	/* fd 0 here is the registered-file index (the pipe write end) */
+	io_uring_prep_writev(sqe, 0, &iov, 1, 0);
+	sqe->flags |= IOSQE_FIXED_FILE;
+
+	ret = io_uring_submit(&ring);
+	if (ret < 0) {
+		fprintf(stderr, "io_uring_submit failed - ret: %d\n",
+			ret);
+		ret = TEST_FAILED;
+		goto err_uring;
+	}
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "io_uring_wait_cqe - ret: %d\n",
+			ret);
+		ret = TEST_FAILED;
+		goto err_uring;
+	}
+
+	if (cqe->res != BUF_SIZE) {
+		fprintf(stderr, "unexpected cqe->res %d [expected %d]\n",
+			cqe->res, BUF_SIZE);
+		ret = TEST_FAILED;
+		goto err_uring;
+
+	}
+
+	io_uring_cqe_seen(&ring, cqe);
+
+	ret = TEST_OK;
+
+err_uring:
+	/* intentional leak when !do_exit — that's what the parent tests */
+	if (do_exit)
+		io_uring_queue_exit(&ring);
+err_pipe:
+	close(pipe1[0]);
+	close(pipe1[1]);
+
+	return ret;
+}
+
+/*
+ * Fork a child that runs the SQPOLL test (optionally exiting without
+ * tearing the ring down), then verify the io_uring-sq kernel thread has
+ * gone away afterwards. Returns TEST_OK / TEST_SKIPPED / TEST_FAILED.
+ */
+int test_sq_poll_kthread_stopped(bool do_exit)
+{
+	pid_t pid;
+	int status = 0;
+
+	pid = fork();
+	/* was unchecked: a failed fork fell through to wait()/ps below */
+	if (pid < 0) {
+		perror("fork");
+		return TEST_FAILED;
+	}
+
+	if (pid == 0) {
+		int ret = do_test_sq_poll_kthread_stopped(do_exit);
+		exit(ret);
+	}
+
+	pid = wait(&status);
+	if (status != 0)
+		return WEXITSTATUS(status);
+
+	/* give the kernel a moment to reap the poll thread */
+	sleep(1);
+	/* kthreads are children of pid 2 (kthreadd); grep for ours */
+	if (system("ps --ppid 2 | grep " KTHREAD_NAME) == 0) {
+		fprintf(stderr, "%s kthread still running!\n", KTHREAD_NAME);
+		return TEST_FAILED;
+	}
+
+	return 0;
+}
+
+/*
+ * Run the kthread-stop check twice: once where the child exits the ring
+ * cleanly, once where it leaks the ring and relies on process teardown.
+ * Any CLI argument skips the test.
+ */
+int main(int argc, char *argv[])
+{
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = test_sq_poll_kthread_stopped(true);
+	if (ret == TEST_SKIPPED) {
+		printf("test_sq_poll_kthread_stopped_exit: skipped\n");
+	} else if (ret == TEST_FAILED) {
+		fprintf(stderr, "test_sq_poll_kthread_stopped_exit failed\n");
+		return ret;
+	}
+
+	ret = test_sq_poll_kthread_stopped(false);
+	if (ret == TEST_SKIPPED) {
+		printf("test_sq_poll_kthread_stopped_noexit: skipped\n");
+	} else if (ret == TEST_FAILED) {
+		fprintf(stderr, "test_sq_poll_kthread_stopped_noexit failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/sq-poll-share.c b/test/sq-poll-share.c
new file mode 100644
index 0000000..a46b94f
--- /dev/null
+++ b/test/sq-poll-share.c
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test SQPOLL with IORING_SETUP_ATTACH_WQ
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+#include <sys/resource.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define FILE_SIZE	(128 * 1024 * 1024)
+#define BS		4096
+#define BUFFERS		64
+
+#define NR_RINGS	4
+
+static struct iovec *vecs;
+
+/*
+ * Reap 'nr_ios' completions from the ring; each must be a read that
+ * completed with exactly BS bytes. -EAGAIN from the wait is retried.
+ * Returns 0 on success, 1 on any error or short completion.
+ */
+static int wait_io(struct io_uring *ring, int nr_ios)
+{
+	struct io_uring_cqe *cqe;
+	int err;
+
+	for (; nr_ios > 0; nr_ios--) {
+		do {
+			err = io_uring_wait_cqe(ring, &cqe);
+		} while (err == -EAGAIN);
+		if (err) {
+			fprintf(stderr, "io_uring_wait_cqe failed %i\n", err);
+			return 1;
+		}
+		if (cqe->res != BS) {
+			fprintf(stderr, "Unexpected ret %d\n", cqe->res);
+			return 1;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+}
+
+/*
+ * Queue up to nr_ios BS-sized reads from 'fd' at consecutive offsets and
+ * submit them. Returns the number of requests queued, or a negative
+ * error code if submission failed (the caller treats < 0 as failure).
+ */
+static int queue_io(struct io_uring *ring, int fd, int nr_ios)
+{
+	unsigned long off;
+	int i, ret;
+
+	i = 0;
+	off = 0;
+	while (nr_ios) {
+		struct io_uring_sqe *sqe;
+
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe)
+			break;
+		io_uring_prep_read(sqe, fd, vecs[i].iov_base, vecs[i].iov_len, off);
+		nr_ios--;
+		i++;
+		off += BS;
+	}
+
+	/*
+	 * Propagate submit errors instead of dropping them: the original
+	 * ignored the return, which made wait_io() block on completions
+	 * that were never submitted.
+	 */
+	ret = io_uring_submit(ring);
+	if (ret < 0)
+		return ret;
+	return i;
+}
+
+/*
+ * Entry point: create NR_RINGS SQPOLL rings that share one poll thread
+ * via IORING_SETUP_ATTACH_WQ and read the entire test file through them
+ * in round-robin fashion.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring rings[NR_RINGS];
+	int rets[NR_RINGS];
+	unsigned long ios;
+	int i, ret, fd;
+	char *fname;
+
+	if (argc > 1) {
+		fname = argv[1];
+	} else {
+		fname = ".basic-rw";
+		t_create_file(fname, FILE_SIZE);
+	}
+
+	vecs = t_create_buffers(BUFFERS, BS);
+
+	/* O_DIRECT so reads actually reach the file, not the page cache */
+	fd = open(fname, O_RDONLY | O_DIRECT);
+	if (fd < 0) {
+		perror("open");
+		return -1;
+	}
+
+	for (i = 0; i < NR_RINGS; i++) {
+		struct io_uring_params p = { };
+
+		p.flags = IORING_SETUP_SQPOLL;
+		if (i) {
+			/* rings 1..N attach to ring 0's poll thread */
+			p.wq_fd = rings[0].ring_fd;
+			p.flags |= IORING_SETUP_ATTACH_WQ;
+		}
+		ret = io_uring_queue_init_params(BUFFERS, &rings[i], &p);
+		if (ret) {
+			fprintf(stderr, "queue_init: %d/%d\n", ret, i);
+			goto err;
+		}
+		/* no sharing for non-fixed either */
+		if (!(p.features & IORING_FEAT_SQPOLL_NONFIXED)) {
+			fprintf(stdout, "No SQPOLL sharing, skipping\n");
+			return 0;
+		}
+	}
+
+	/* read FILE_SIZE worth of data, BUFFERS blocks per ring per pass */
+	ios = 0;
+	while (ios < (FILE_SIZE / BS)) {
+		for (i = 0; i < NR_RINGS; i++) {
+			ret = queue_io(&rings[i], fd, BUFFERS);
+			if (ret < 0)
+				goto err;
+			rets[i] = ret;
+		}
+		for (i = 0; i < NR_RINGS; i++) {
+			if (wait_io(&rings[i], rets[i]))
+				goto err;
+		}
+		ios += BUFFERS;
+	}
+
+	/* only remove the file if we created it ourselves */
+	if (fname != argv[1])
+		unlink(fname);
+	return 0;
+err:
+	if (fname != argv[1])
+		unlink(fname);
+	return 1;
+}
diff --git a/test/sq-space_left.c b/test/sq-space_left.c
new file mode 100644
index 0000000..69f554c
--- /dev/null
+++ b/test/sq-space_left.c
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test SQ queue space left
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "liburing.h"
+
+/*
+ * Verify io_uring_sq_space_left() starts at the full ring size (8) and
+ * decrements by exactly one for each SQE handed out until the ring is
+ * exhausted. Returns 0 on success, 1 on failure.
+ */
+static int test_left(void)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring ring;
+	int ret, i = 0, s;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+
+	}
+
+	if ((s = io_uring_sq_space_left(&ring)) != 8) {
+		fprintf(stderr, "Got %d SQEs left, expected %d\n", s, 8);
+		goto err;
+	}
+
+	i = 0;
+	while ((sqe = io_uring_get_sqe(&ring)) != NULL) {
+		i++;
+		/* each get_sqe must shrink the reported space by one */
+		if ((s = io_uring_sq_space_left(&ring)) != 8 - i) {
+			fprintf(stderr, "Got %d SQEs left, expected %d\n", s, 8 - i);
+			goto err;
+		}
+	}
+
+	if (i != 8) {
+		fprintf(stderr, "Got %d SQEs, expected %d\n", i, 8);
+		goto err;
+	}
+
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
+
+/*
+ * Verify io_uring_sq_ready() accounting across a submit that fails
+ * mid-batch: an invalid opcode (0xfe) must stop submission at entry #9,
+ * leaving the remaining 8 SQEs pending for the next submit call.
+ * Returns 0 on success, 1 on failure.
+ */
+static int test_sync(void)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring ring;
+	int ret, i;
+
+	ret = io_uring_queue_init(32, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+
+	}
+
+	/* prep 8 NOPS */
+	for (i = 0; i < 8; i++) {
+		sqe = io_uring_get_sqe(&ring);
+		if (!sqe) {
+			fprintf(stderr, "get sqe failed\n");
+			goto err;
+		}
+		io_uring_prep_nop(sqe);
+	}
+
+	/* prep known bad command, this should terminate submission */
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_nop(sqe);
+	sqe->opcode = 0xfe;
+
+	/* prep 8 NOPS */
+	for (i = 0; i < 8; i++) {
+		sqe = io_uring_get_sqe(&ring);
+		if (!sqe) {
+			fprintf(stderr, "get sqe failed\n");
+			goto err;
+		}
+		io_uring_prep_nop(sqe);
+	}
+
+	/* we should have 8 + 1 + 8 pending now */
+	ret = io_uring_sq_ready(&ring);
+	if (ret != 17) {
+		fprintf(stderr, "%d ready, wanted 17\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_submit(&ring);
+
+	/* should submit 8 successfully, then error #9 and stop */
+	if (ret != 9) {
+		fprintf(stderr, "submitted %d, wanted 9\n", ret);
+		goto err;
+	}
+
+	/* should now have 8 ready, with 9 gone */
+	ret = io_uring_sq_ready(&ring);
+	if (ret != 8) {
+		fprintf(stderr, "%d ready, wanted 8\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_submit(&ring);
+
+	/* the last 8 should submit fine */
+	if (ret != 8) {
+		fprintf(stderr, "submitted %d, wanted 8\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_sq_ready(&ring);
+	if (ret) {
+		fprintf(stderr, "%d ready, wanted 0\n", ret);
+		goto err;
+	}
+
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
+
+/*
+ * Entry point: run the space-left accounting test, then the submit
+ * synchronization test. Any CLI argument skips the test entirely.
+ */
+int main(int argc, char *argv[])
+{
+	int rc;
+
+	if (argc > 1)
+		return 0;
+
+	rc = test_left();
+	if (rc != 0) {
+		fprintf(stderr, "test_left failed\n");
+		return rc;
+	}
+
+	rc = test_sync();
+	if (rc != 0) {
+		fprintf(stderr, "test_sync failed\n");
+		return rc;
+	}
+
+	return 0;
+}
diff --git a/test/sqpoll-disable-exit.c b/test/sqpoll-disable-exit.c
new file mode 100644
index 0000000..93bcf42
--- /dev/null
+++ b/test/sqpoll-disable-exit.c
@@ -0,0 +1,195 @@
+// https://syzkaller.appspot.com/bug?id=99f4ea77bb9b9ef24cefb66469be319f4aa9f162
+// autogenerated by syzkaller (https://github.com/google/syzkaller)
+
+#include <dirent.h>
+#include <endian.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "liburing.h"
+#include "../src/syscall.h"
+
+/* Sleep for 'ms' milliseconds. */
+static void sleep_ms(uint64_t ms)
+{
+  uint64_t us = ms * 1000;
+
+  usleep(us);
+}
+
+/* Monotonic clock reading in milliseconds; exits the process on failure. */
+static uint64_t current_time_ms(void)
+{
+  struct timespec now;
+
+  if (clock_gettime(CLOCK_MONOTONIC, &now) != 0)
+    exit(1);
+  return (uint64_t)now.tv_sec * 1000 + (uint64_t)now.tv_nsec / 1000000;
+}
+
+/*
+ * printf-style write of a formatted string to 'file'. Returns true on
+ * success; on failure returns false with errno preserved from the
+ * failing open()/write(). Output longer than 1023 bytes is truncated.
+ */
+static bool write_file(const char* file, const char* what, ...)
+{
+  char buf[1024];
+  va_list args;
+  va_start(args, what);
+  vsnprintf(buf, sizeof(buf), what, args);
+  va_end(args);
+  buf[sizeof(buf) - 1] = 0;
+  int len = strlen(buf);
+  int fd = open(file, O_WRONLY | O_CLOEXEC);
+  if (fd == -1)
+    return false;
+  if (write(fd, buf, len) != len) {
+    /* save errno across close() so the caller sees the write error */
+    int err = errno;
+    close(fd);
+    errno = err;
+    return false;
+  }
+  close(fd);
+  return true;
+}
+
+#define SIZEOF_IO_URING_SQE 64
+#define SIZEOF_IO_URING_CQE 16
+#define SQ_HEAD_OFFSET 0
+#define SQ_TAIL_OFFSET 64
+#define SQ_RING_MASK_OFFSET 256
+#define SQ_RING_ENTRIES_OFFSET 264
+#define SQ_FLAGS_OFFSET 276
+#define SQ_DROPPED_OFFSET 272
+#define CQ_HEAD_OFFSET 128
+#define CQ_TAIL_OFFSET 192
+#define CQ_RING_MASK_OFFSET 260
+#define CQ_RING_ENTRIES_OFFSET 268
+#define CQ_RING_OVERFLOW_OFFSET 284
+#define CQ_FLAGS_OFFSET 280
+#define CQ_CQES_OFFSET 320
+
+/*
+ * Syzkaller helper: call io_uring_setup and mmap the SQ/CQ ring and the
+ * SQE array at the caller-chosen fixed addresses (vma1/vma2), returning
+ * the ring fd and the mappings through ring_ptr_out/sqes_ptr_out.
+ * NOTE(review): autogenerated reproducer code — setup and mmap results
+ * are deliberately unchecked; do not reuse outside this reproducer.
+ */
+static long syz_io_uring_setup(volatile long a0, volatile long a1,
+                               volatile long a2, volatile long a3,
+                               volatile long a4, volatile long a5)
+{
+  uint32_t entries = (uint32_t)a0;
+  struct io_uring_params* setup_params = (struct io_uring_params*)a1;
+  void* vma1 = (void*)a2;
+  void* vma2 = (void*)a3;
+  void** ring_ptr_out = (void**)a4;
+  void** sqes_ptr_out = (void**)a5;
+  uint32_t fd_io_uring = __sys_io_uring_setup(entries, setup_params);
+  uint32_t sq_ring_sz =
+      setup_params->sq_off.array + setup_params->sq_entries * sizeof(uint32_t);
+  uint32_t cq_ring_sz = setup_params->cq_off.cqes +
+                        setup_params->cq_entries * SIZEOF_IO_URING_CQE;
+  /* SQ and CQ rings share one mapping; size it for the larger of the two */
+  uint32_t ring_sz = sq_ring_sz > cq_ring_sz ? sq_ring_sz : cq_ring_sz;
+  *ring_ptr_out = mmap(vma1, ring_sz, PROT_READ | PROT_WRITE,
+                       MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd_io_uring,
+                       IORING_OFF_SQ_RING);
+  uint32_t sqes_sz = setup_params->sq_entries * SIZEOF_IO_URING_SQE;
+  *sqes_ptr_out =
+      mmap(vma2, sqes_sz, PROT_READ | PROT_WRITE,
+           MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd_io_uring, IORING_OFF_SQES);
+  return fd_io_uring;
+}
+
+/*
+ * Kill the child's whole process group and reap it. If it doesn't die
+ * within ~100ms, poke every FUSE connection's abort file (a stuck FUSE
+ * mount is the usual reason a SIGKILL'd child can't exit), then block
+ * until the child is finally reaped.
+ */
+static void kill_and_wait(int pid, int* status)
+{
+  kill(-pid, SIGKILL);
+  kill(pid, SIGKILL);
+  for (int i = 0; i < 100; i++) {
+    if (waitpid(-1, status, WNOHANG | __WALL) == pid)
+      return;
+    usleep(1000);
+  }
+  DIR* dir = opendir("/sys/fs/fuse/connections");
+  if (dir) {
+    for (;;) {
+      struct dirent* ent = readdir(dir);
+      if (!ent)
+        break;
+      if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
+        continue;
+      char abort[300];
+      snprintf(abort, sizeof(abort), "/sys/fs/fuse/connections/%s/abort",
+               ent->d_name);
+      int fd = open(abort, O_WRONLY);
+      if (fd == -1) {
+        continue;
+      }
+      /* any single byte written to the abort file triggers the abort */
+      if (write(fd, abort, 1) < 0) {
+      }
+      close(fd);
+    }
+    closedir(dir);
+  } else {
+  }
+  while (waitpid(-1, status, __WALL) != pid) {
+  }
+}
+
+/*
+ * Per-child setup: die with the parent, lead our own process group so
+ * kill(-pid) reaps everything, and make this process the preferred OOM
+ * killer victim.
+ */
+static void setup_test()
+{
+  prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
+  setpgrp();
+  write_file("/proc/self/oom_score_adj", "1000");
+}
+
+static void execute_one(void);
+
+#define WAIT_FLAGS __WALL
+
+/*
+ * Run the reproducer 100 times, each iteration in a fresh child with a
+ * 5-second watchdog: a child that doesn't exit in time is killed and
+ * reaped via kill_and_wait().
+ */
+static void loop(void)
+{
+  int iter = 0;
+  for (; iter < 100; iter++) {
+    int pid = fork();
+    if (pid < 0)
+      exit(1);
+    if (pid == 0) {
+      setup_test();
+      execute_one();
+      exit(0);
+    }
+    int status = 0;
+    uint64_t start = current_time_ms();
+    for (;;) {
+      if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid)
+        break;
+      sleep_ms(1);
+      if (current_time_ms() - start < 5000) {
+        continue;
+      }
+      /* 5s watchdog expired: force-kill the child */
+      kill_and_wait(pid, &status);
+      break;
+    }
+  }
+}
+
+/*
+ * Reproducer body: fill an io_uring_params struct at guest address
+ * 0x20000040 (offset +8 is flags = 0x42 — presumably IORING_SETUP_SQPOLL
+ * | IORING_SETUP_R_DISABLED given this test's name; wq_fd = -1 — confirm
+ * against io_uring ABI) and call io_uring_setup via the mmap wrapper.
+ * The ring fd is deliberately leaked: process exit must tear down the
+ * disabled SQPOLL ring without hanging.
+ */
+void execute_one(void)
+{
+  *(uint32_t*)0x20000044 = 0;
+  *(uint32_t*)0x20000048 = 0x42;
+  *(uint32_t*)0x2000004c = 0;
+  *(uint32_t*)0x20000050 = 0;
+  *(uint32_t*)0x20000058 = -1;
+  *(uint32_t*)0x2000005c = 0;
+  *(uint32_t*)0x20000060 = 0;
+  *(uint32_t*)0x20000064 = 0;
+  syz_io_uring_setup(0x74bc, 0x20000040, 0x20ffb000, 0x20ffc000, 0, 0);
+}
+/*
+ * Map the fixed guest memory regions the reproducer expects (mmap flags
+ * 0x32 — on Linux MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS), then run the
+ * fork/watchdog loop.
+ */
+int main(void)
+{
+  mmap((void *)0x1ffff000ul, 0x1000ul, 0ul, 0x32ul, -1, 0ul);
+  mmap((void *)0x20000000ul, 0x1000000ul, 7ul, 0x32ul, -1, 0ul);
+  mmap((void *)0x21000000ul, 0x1000ul, 0ul, 0x32ul, -1, 0ul);
+  loop();
+  return 0;
+}
diff --git a/test/sqpoll-exit-hang.c b/test/sqpoll-exit-hang.c
new file mode 100644
index 0000000..43385ce
--- /dev/null
+++ b/test/sqpoll-exit-hang.c
@@ -0,0 +1,77 @@
+/*
+ * Test that we exit properly with SQPOLL and having a request that
+ * adds a circular reference to the ring itself.
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/time.h>
+#include <sys/poll.h>
+#include "liburing.h"
+
+/*
+ * Milliseconds elapsed between start 's' and end 'e'.
+ */
+static unsigned long long mtime_since(const struct timeval *s,
+				      const struct timeval *e)
+{
+	long long secs = e->tv_sec - s->tv_sec;
+	long long usecs = e->tv_usec - s->tv_usec;
+
+	/* borrow a second when the microsecond delta goes negative */
+	if (secs > 0 && usecs < 0) {
+		secs -= 1;
+		usecs += 1000000;
+	}
+
+	return secs * 1000 + usecs / 1000;
+}
+
+/* Milliseconds elapsed since *tv, measured against the current time. */
+static unsigned long long mtime_since_now(struct timeval *tv)
+{
+	struct timeval now;
+
+	gettimeofday(&now, NULL);
+	return mtime_since(tv, &now);
+}
+
+/*
+ * Set up a tiny SQPOLL ring, arm a poll request on the ring's own fd
+ * (creating the circular reference the exit path must break), then just
+ * run out the clock and return. The test "passes" if process exit does
+ * not hang. Skipped without root or without SQPOLL_NONFIXED support.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring_params p = {};
+	struct timeval tv;
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	p.flags = IORING_SETUP_SQPOLL;
+	p.sq_thread_idle = 100;
+
+	ret = io_uring_queue_init_params(1, &ring, &p);
+	if (ret) {
+		if (geteuid()) {
+			printf("%s: skipped, not root\n", argv[0]);
+			return 0;
+		}
+		fprintf(stderr, "queue_init=%d\n", ret);
+		return 1;
+	}
+
+	if (!(p.features & IORING_FEAT_SQPOLL_NONFIXED)) {
+		fprintf(stdout, "Skipping\n");
+		return 0;
+	}
+
+	/* poll the ring itself: the request pins the ring's own file */
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
+	io_uring_submit(&ring);
+
+	gettimeofday(&tv, NULL);
+	do {
+		usleep(1000);
+	} while (mtime_since_now(&tv) < 1000);
+
+	return 0;
+}
diff --git a/test/sqpoll-sleep.c b/test/sqpoll-sleep.c
new file mode 100644
index 0000000..7ffd0e5
--- /dev/null
+++ b/test/sqpoll-sleep.c
@@ -0,0 +1,68 @@
+/*
+ * Test that the sqthread goes to sleep around the specified time, and that
+ * the NEED_WAKEUP flag is then set.
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/time.h>
+#include "liburing.h"
+
+/*
+ * Milliseconds elapsed between start 's' and end 'e'.
+ */
+static unsigned long long mtime_since(const struct timeval *s,
+				      const struct timeval *e)
+{
+	long long sec, usec;
+
+	sec = e->tv_sec - s->tv_sec;
+	usec = (e->tv_usec - s->tv_usec);
+	/* borrow a second when the microsecond delta goes negative */
+	if (sec > 0 && usec < 0) {
+		sec--;
+		usec += 1000000;
+	}
+
+	sec *= 1000;
+	usec /= 1000;
+	return sec + usec;
+}
+
+/* Milliseconds elapsed since *tv. */
+static unsigned long long mtime_since_now(struct timeval *tv)
+{
+	struct timeval end;
+
+	gettimeofday(&end, NULL);
+	return mtime_since(tv, &end);
+}
+
+/*
+ * Start an SQPOLL ring with a 100ms idle timeout, then poll the shared
+ * SQ flags: exit 0 as soon as IORING_SQ_NEED_WAKEUP appears (the poll
+ * thread went to sleep), exit 1 if it never appears within ~1 second.
+ * Skipped without root.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring_params p = {};
+	struct timeval tv;
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	p.flags = IORING_SETUP_SQPOLL;
+	p.sq_thread_idle = 100;
+
+	ret = io_uring_queue_init_params(1, &ring, &p);
+	if (ret) {
+		if (geteuid()) {
+			printf("%s: skipped, not root\n", argv[0]);
+			return 0;
+		}
+		fprintf(stderr, "queue_init=%d\n", ret);
+		return 1;
+	}
+
+	gettimeofday(&tv, NULL);
+	do {
+		usleep(1000);
+		/* kernel sets NEED_WAKEUP once the poll thread idles out */
+		if ((*ring.sq.kflags) & IORING_SQ_NEED_WAKEUP)
+			return 0;
+	} while (mtime_since_now(&tv) < 1000);
+
+	return 1;
+}
diff --git a/test/statx.c b/test/statx.c
new file mode 100644
index 0000000..c0f9e9c
--- /dev/null
+++ b/test/statx.c
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various statx(2) tests
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+#include <linux/stat.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+/*
+ * Thin statx(2) wrapper: the raw syscall when __NR_statx exists,
+ * otherwise a stub failing with ENOSYS so callers can detect "not
+ * supported" at runtime.
+ */
+#ifdef __NR_statx
+static int do_statx(int dfd, const char *path, int flags, unsigned mask,
+		    struct statx *statxbuf)
+{
+	return syscall(__NR_statx, dfd, path, flags, mask, statxbuf);
+}
+#else
+static int do_statx(int dfd, const char *path, int flags, unsigned mask,
+		    struct statx *statxbuf)
+{
+	errno = ENOSYS;
+	return -1;
+}
+#endif
+
+/*
+ * Must be called immediately after a failing do_statx(): returns 0
+ * (treat as skip) if the failure was ENOSYS, -1 for any other errno.
+ * Relies on errno still holding the value from that failed call.
+ */
+static int statx_syscall_supported(void)
+{
+	return errno == ENOSYS ? 0 : -1;
+}
+
+/*
+ * statx a path through io_uring and through the raw syscall, and require
+ * the two result buffers to match bit-for-bit. Returns 0 on success, the
+ * (negative) CQE result if the io_uring op failed, -1 on miscompare.
+ * NOTE(review): dfd is -1 rather than AT_FDCWD, so this only works for
+ * absolute paths; both the uring op and the syscall use the same dfd, so
+ * they at least agree — confirm intent.
+ */
+static int test_statx(struct io_uring *ring, const char *path)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct statx x1, x2;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_statx(sqe, -1, path, 0, STATX_ALL, &x1);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	if (ret)
+		return ret;
+	ret = do_statx(-1, path, 0, STATX_ALL, &x2);
+	if (ret < 0)
+		return statx_syscall_supported();
+	if (memcmp(&x1, &x2, sizeof(x1))) {
+		fprintf(stderr, "Miscompare between io_uring and statx\n");
+		goto err;
+	}
+	return 0;
+err:
+	return -1;
+}
+
+/*
+ * Same comparison as test_statx(), but by fd: open the path and statx it
+ * with AT_EMPTY_PATH ("" path means "the fd itself") through both
+ * io_uring and the raw syscall, requiring identical results. Both
+ * buffers are zeroed first so the memcmp covers any padding.
+ */
+static int test_statx_fd(struct io_uring *ring, const char *path)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct statx x1, x2;
+	int ret, fd;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	memset(&x1, 0, sizeof(x1));
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_statx(sqe, fd, "", AT_EMPTY_PATH, STATX_ALL, &x1);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	if (ret)
+		return ret;
+	memset(&x2, 0, sizeof(x2));
+	ret = do_statx(fd, "", AT_EMPTY_PATH, STATX_ALL, &x2);
+	if (ret < 0)
+		return statx_syscall_supported();
+	if (memcmp(&x1, &x2, sizeof(x1))) {
+		fprintf(stderr, "Miscompare between io_uring and statx\n");
+		goto err;
+	}
+	return 0;
+err:
+	return -1;
+}
+
+/*
+ * Entry point: run the by-path and by-fd statx comparisons against either
+ * argv[1] or a freshly created /tmp file. An -EINVAL from the first test
+ * means the kernel lacks IORING_OP_STATX and the test is skipped.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	const char *fname;
+	int ret;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	if (argc > 1) {
+		fname = argv[1];
+	} else {
+		fname = "/tmp/.statx";
+		t_create_file(fname, 4096);
+	}
+
+	ret = test_statx(&ring, fname);
+	if (ret) {
+		if (ret == -EINVAL) {
+			fprintf(stdout, "statx not supported, skipping\n");
+			goto done;
+		}
+		fprintf(stderr, "test_statx failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = test_statx_fd(&ring, fname);
+	if (ret) {
+		fprintf(stderr, "test_statx_fd failed: %d\n", ret);
+		goto err;
+	}
+done:
+	/* only remove the file if we created it ourselves */
+	if (fname != argv[1])
+		unlink(fname);
+	return 0;
+err:
+	if (fname != argv[1])
+		unlink(fname);
+	return 1;
+}
diff --git a/test/stdout.c b/test/stdout.c
new file mode 100644
index 0000000..ade100a
--- /dev/null
+++ b/test/stdout.c
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: check that STDOUT write works
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+/*
+ * Write a registered (fixed) buffer into a pipe and read it back with a
+ * plain readv, then verify the data round-tripped intact. Returns 0 on
+ * success, 1 on failure.
+ * NOTE(review): the pipe fds and the aligned buffer are not released on
+ * the error path — presumably fine since the process exits right after;
+ * confirm.
+ */
+static int test_pipe_io_fixed(struct io_uring *ring)
+{
+	const char str[] = "This is a fixed pipe test\n";
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct iovec vecs[2];
+	char buffer[128];
+	int i, ret, fds[2];
+
+	/* fixed buffers must be suitably aligned; use a 4k-aligned page */
+	t_posix_memalign(&vecs[0].iov_base, 4096, 4096);
+	memcpy(vecs[0].iov_base, str, strlen(str));
+	vecs[0].iov_len = strlen(str);
+
+	if (pipe(fds) < 0) {
+		perror("pipe");
+		return 1;
+	}
+
+	ret = io_uring_register_buffers(ring, vecs, 1);
+	if (ret) {
+		fprintf(stderr, "Failed to register buffers: %d\n", ret);
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_write_fixed(sqe, fds[1], vecs[0].iov_base,
+					vecs[0].iov_len, 0, 0);
+	sqe->user_data = 1;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	vecs[1].iov_base = buffer;
+	vecs[1].iov_len = sizeof(buffer);
+	io_uring_prep_readv(sqe, fds[0], &vecs[1], 1, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	} else if (ret != 2) {
+		fprintf(stderr, "Submitted only %d\n", ret);
+		goto err;
+	}
+
+	/* reap both completions: user_data 1 = write, 2 = read */
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "wait completion %d\n", ret);
+			goto err;
+		}
+		if (cqe->res < 0) {
+			fprintf(stderr, "I/O write error on %lu: %s\n",
+					(unsigned long) cqe->user_data,
+					 strerror(-cqe->res));
+			goto err;
+		}
+		if (cqe->res != strlen(str)) {
+			fprintf(stderr, "Got %d bytes, wanted %d on %lu\n",
+					cqe->res, (int)strlen(str),
+					(unsigned long) cqe->user_data);
+			goto err;
+		}
+		if (cqe->user_data == 2 && memcmp(str, buffer, strlen(str))) {
+			fprintf(stderr, "read data mismatch\n");
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+	io_uring_unregister_buffers(ring);
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Write a registered (fixed) buffer to STDOUT and verify the completion
+ * reports the full length. The test string lands on the test's own
+ * stdout by design. Returns 0 on success, 1 on failure.
+ */
+static int test_stdout_io_fixed(struct io_uring *ring)
+{
+	const char str[] = "This is a fixed pipe test\n";
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct iovec vecs;
+	int ret;
+
+	/* fixed buffers must be suitably aligned; use a 4k-aligned page */
+	t_posix_memalign(&vecs.iov_base, 4096, 4096);
+	memcpy(vecs.iov_base, str, strlen(str));
+	vecs.iov_len = strlen(str);
+
+	ret = io_uring_register_buffers(ring, &vecs, 1);
+	if (ret) {
+		fprintf(stderr, "Failed to register buffers: %d\n", ret);
+		return 1;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_write_fixed(sqe, STDOUT_FILENO, vecs.iov_base, vecs.iov_len, 0, 0);
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	} else if (ret < 1) {
+		fprintf(stderr, "Submitted only %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+	if (cqe->res < 0) {
+		fprintf(stderr, "STDOUT write error: %s\n", strerror(-cqe->res));
+		goto err;
+	}
+	if (cqe->res != vecs.iov_len) {
+		fprintf(stderr, "Got %d write, wanted %d\n", cqe->res, (int)vecs.iov_len);
+		goto err;
+	}
+	io_uring_cqe_seen(ring, cqe);
+	io_uring_unregister_buffers(ring);
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Write a short string to STDOUT via a plain writev request and verify
+ * the completion reports the full length. Returns 0 on success, 1 on
+ * failure.
+ */
+static int test_stdout_io(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct iovec vecs;
+	int ret;
+
+	vecs.iov_base = "This is a pipe test\n";
+	vecs.iov_len = strlen(vecs.iov_base);
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_writev(sqe, STDOUT_FILENO, &vecs, 1, 0);
+
+	ret = io_uring_submit(ring);
+	if (ret < 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	} else if (ret < 1) {
+		fprintf(stderr, "Submitted only %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+	if (cqe->res < 0) {
+		fprintf(stderr, "STDOUT write error: %s\n",
+				strerror(-cqe->res));
+		goto err;
+	}
+	if (cqe->res != vecs.iov_len) {
+		fprintf(stderr, "Got %d write, wanted %d\n", cqe->res,
+				(int)vecs.iov_len);
+		goto err;
+	}
+	io_uring_cqe_seen(ring, cqe);
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Entry point: run the plain stdout write test, the fixed-buffer stdout
+ * test, and the fixed-buffer pipe round-trip test in order. Any CLI
+ * argument skips the test entirely.
+ */
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	ret = test_stdout_io(&ring);
+	if (ret) {
+		/* message previously mislabeled this as test_pipe_io */
+		fprintf(stderr, "test_stdout_io failed\n");
+		return ret;
+	}
+
+	ret = test_stdout_io_fixed(&ring);
+	if (ret) {
+		/* message previously mislabeled this as test_pipe_io_fixed */
+		fprintf(stderr, "test_stdout_io_fixed failed\n");
+		return ret;
+	}
+
+	ret = test_pipe_io_fixed(&ring);
+	if (ret) {
+		fprintf(stderr, "test_pipe_io_fixed failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/submit-reuse.c b/test/submit-reuse.c
new file mode 100644
index 0000000..74ba769
--- /dev/null
+++ b/test/submit-reuse.c
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Test reads that will punt to blocking context, with immediate overwrite
+ * of iovec->iov_base to NULL. If the kernel doesn't properly handle
+ * reuse of the iovec, we should get -EFAULT.
+ */
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <sys/time.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define STR_SIZE	32768
+#define FILE_SIZE	65536
+
+struct thread_data {
+	int fd1, fd2;
+	volatile int do_exit;
+};
+
+static void *flusher(void *__data)
+{
+	struct thread_data *data = __data;
+	int i = 0;
+
+	while (!data->do_exit) {
+		posix_fadvise(data->fd1, 0, FILE_SIZE, POSIX_FADV_DONTNEED);
+		posix_fadvise(data->fd2, 0, FILE_SIZE, POSIX_FADV_DONTNEED);
+		usleep(10);
+		i++;
+	}
+
+	return NULL;
+}
+
+static char str1[STR_SIZE];
+static char str2[STR_SIZE];
+
+static struct io_uring ring;
+
+static int no_stable;
+
+static int prep(int fd, char *str, int split, int async)
+{
+	struct io_uring_sqe *sqe;
+	struct iovec iovs[16];
+	int ret, i;
+
+	if (split) {
+		int vsize = STR_SIZE / 16;
+		void *ptr = str;
+
+		for (i = 0; i < 16; i++) {
+			iovs[i].iov_base = ptr;
+			iovs[i].iov_len = vsize;
+			ptr += vsize;
+		}
+	} else {
+		iovs[0].iov_base = str;
+		iovs[0].iov_len = STR_SIZE;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_readv(sqe, fd, iovs, split ? 16 : 1, 0);
+	sqe->user_data = fd;
+	if (async)
+		sqe->flags = IOSQE_ASYNC;
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit got %d\n", ret);
+		return 1;
+	}
+	if (split) {
+		for (i = 0; i < 16; i++)
+			iovs[i].iov_base = NULL;
+	} else {
+		iovs[0].iov_base = NULL;
+	}
+	return 0;
+}
+
+static int wait_nr(int nr)
+{
+	int i, ret;
+
+	for (i = 0; i < nr; i++) {
+		struct io_uring_cqe *cqe;
+
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret)
+			return ret;
+		if (cqe->res < 0) {
+			fprintf(stderr, "cqe->res=%d\n", cqe->res);
+			return 1;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	return 0;
+}
+
+static unsigned long long mtime_since(const struct timeval *s,
+				      const struct timeval *e)
+{
+	long long sec, usec;
+
+	sec = e->tv_sec - s->tv_sec;
+	usec = (e->tv_usec - s->tv_usec);
+	if (sec > 0 && usec < 0) {
+		sec--;
+		usec += 1000000;
+	}
+
+	sec *= 1000;
+	usec /= 1000;
+	return sec + usec;
+}
+
+static unsigned long long mtime_since_now(struct timeval *tv)
+{
+	struct timeval end;
+
+	gettimeofday(&end, NULL);
+	return mtime_since(tv, &end);
+}
+
+static int test_reuse(int argc, char *argv[], int split, int async)
+{
+	struct thread_data data;
+	struct io_uring_params p = { };
+	int fd1, fd2, ret, i;
+	struct timeval tv;
+	pthread_t thread;
+	char *fname1 = ".reuse.1";
+	int do_unlink = 1;
+	void *tret;
+
+	if (argc > 1) {
+		fname1 = argv[1];
+		do_unlink = 0;
+	}
+
+	ret = io_uring_queue_init_params(32, &ring, &p);
+	if (ret) {
+		fprintf(stderr, "io_uring_queue_init: %d\n", ret);
+		return 1;
+	}
+
+	if (!(p.features & IORING_FEAT_SUBMIT_STABLE)) {
+		fprintf(stdout, "FEAT_SUBMIT_STABLE not there, skipping\n");
+		no_stable = 1;
+		return 0;
+	}
+
+	if (do_unlink)
+		t_create_file(fname1, FILE_SIZE);
+
+	t_create_file(".reuse.2", FILE_SIZE);
+
+	fd1 = open(fname1, O_RDONLY);
+	if (fd1 < 0) {
+		perror("open fname1");
+		goto err;
+	}
+	fd2 = open(".reuse.2", O_RDONLY);
+	if (fd2 < 0) {
+		perror("open .reuse.2");
+		goto err;
+	}
+
+	data.fd1 = fd1;
+	data.fd2 = fd2;
+	data.do_exit = 0;
+	pthread_create(&thread, NULL, flusher, &data);
+	usleep(10000);
+
+	gettimeofday(&tv, NULL);
+	for (i = 0; i < 1000; i++) {
+		ret = prep(fd1, str1, split, async);
+		if (ret) {
+			fprintf(stderr, "prep1 failed: %d\n", ret);
+			goto err;
+		}
+		ret = prep(fd2, str2, split, async);
+		if (ret) {
+			fprintf(stderr, "prep1 failed: %d\n", ret);
+			goto err;
+		}
+		ret = wait_nr(2);
+		if (ret) {
+			fprintf(stderr, "wait_nr: %d\n", ret);
+			goto err;
+		}
+		if (mtime_since_now(&tv) > 5000)
+			break;
+	}
+
+	data.do_exit = 1;
+	pthread_join(thread, &tret);
+
+	close(fd2);
+	close(fd1);
+	io_uring_queue_exit(&ring);
+	if (do_unlink)
+		unlink(fname1);
+	unlink(".reuse.2");
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	if (do_unlink)
+		unlink(fname1);
+	unlink(".reuse.2");
+	return 1;
+
+}
+
+int main(int argc, char *argv[])
+{
+	int ret, i;
+
+	for (i = 0; i < 4; i++) {
+		int split, async;
+
+		split = (i & 1) != 0;
+		async = (i & 2) != 0;
+
+		ret = test_reuse(argc, argv, split, async);
+		if (ret) {
+			fprintf(stderr, "test_reuse %d %d failed\n", split, async);
+			return ret;
+		}
+		if (no_stable)
+			break;
+	}
+
+	return 0;
+}
diff --git a/test/symlink.c b/test/symlink.c
new file mode 100644
index 0000000..8b5e04a
--- /dev/null
+++ b/test/symlink.c
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test io_uring symlinkat handling
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "liburing.h"
+
+
+static int do_symlinkat(struct io_uring *ring, const char *oldname, const char *newname)
+{
+	int ret;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "sqe get failed\n");
+		goto err;
+	}
+	io_uring_prep_symlinkat(sqe, oldname, AT_FDCWD, newname);
+
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		fprintf(stderr, "submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqes(ring, &cqe, 1, 0, 0);
+	if (ret) {
+		fprintf(stderr, "wait_cqe failed: %d\n", ret);
+		goto err;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+err:
+	return 1;
+}
+
+int test_link_contents(const char* linkname, const char *expected_contents)
+{
+	char buf[128];
+	int ret = readlink(linkname, buf, 127);
+	if (ret < 0) {
+		perror("readlink");
+		return ret;
+	}
+	buf[ret] = 0;
+	if (strncmp(buf, expected_contents, 128)) {
+		fprintf(stderr, "link contents differs from expected: '%s' vs '%s'",
+			buf, expected_contents);
+		return -1;
+	}
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	static const char target[] = "io_uring-symlinkat-test-target";
+	static const char linkname[] = "io_uring-symlinkat-test-link";
+	int ret;
+	struct io_uring ring;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "queue init failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = do_symlinkat(&ring, target, linkname);
+	if (ret < 0) {
+		if (ret == -EBADF || ret == -EINVAL) {
+			fprintf(stdout, "symlinkat not supported, skipping\n");
+			goto out;
+		}
+		fprintf(stderr, "symlinkat: %s\n", strerror(-ret));
+		goto err;
+	} else if (ret) {
+		goto err;
+	}
+
+	ret = test_link_contents(linkname, target);
+	if (ret < 0)
+		goto err1;
+
+	ret = do_symlinkat(&ring, target, linkname);
+	if (ret != -EEXIST) {
+		fprintf(stderr, "test_symlinkat linkname already exists failed: %d\n", ret);
+		goto err1;
+	}
+
+	ret = do_symlinkat(&ring, target, "surely/this/does/not/exist");
+	if (ret != -ENOENT) {
+		fprintf(stderr, "test_symlinkat no parent failed: %d\n", ret);
+		goto err1;
+	}
+
+out:
+	unlinkat(AT_FDCWD, linkname, 0);
+	io_uring_queue_exit(&ring);
+	return 0;
+err1:
+	unlinkat(AT_FDCWD, linkname, 0);
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
diff --git a/test/teardowns.c b/test/teardowns.c
new file mode 100644
index 0000000..8bd3022
--- /dev/null
+++ b/test/teardowns.c
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include "liburing.h"
+
+static void loop(void)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < 100; i++) {
+		struct io_uring ring;
+		int fd;
+
+		memset(&ring, 0, sizeof(ring));
+		fd = io_uring_queue_init(0xa4, &ring, 0);
+		if (fd >= 0) {
+			close(fd);
+			continue;
+		}
+		if (fd != -ENOMEM)
+			ret++;
+	}
+	exit(ret);
+}
+
+int main(int argc, char *argv[])
+{
+	int i, ret, status;
+
+	if (argc > 1)
+		return 0;
+
+	for (i = 0; i < 12; i++) {
+		if (!fork()) {
+			loop();
+			break;
+		}
+	}
+
+	ret = 0;
+	for (i = 0; i < 12; i++) {
+		if (waitpid(-1, &status, 0) < 0) {
+			perror("waitpid");
+			return 1;
+		}
+		if (WEXITSTATUS(status))
+			ret++;
+	}
+
+	return ret;
+}
diff --git a/test/thread-exit.c b/test/thread-exit.c
new file mode 100644
index 0000000..c2f2148
--- /dev/null
+++ b/test/thread-exit.c
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test that thread pool issued requests don't cancel on thread
+ *		exit, but do get canceled once the parent exits. Do both
+ *		writes that finish and a poll request that sticks around.
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/poll.h>
+#include <pthread.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+#define NR_IOS	8
+#define WSIZE	512
+
+struct d {
+	int fd;
+	struct io_uring *ring;
+	unsigned long off;
+	int pipe_fd;
+	int err;
+};
+
+static void *do_io(void *data)
+{
+	struct d *d = data;
+	struct io_uring_sqe *sqe;
+	char *buffer;
+	int ret;
+
+	buffer = t_malloc(WSIZE);
+	memset(buffer, 0x5a, WSIZE);
+	sqe = io_uring_get_sqe(d->ring);
+	if (!sqe) {
+		d->err++;
+		return NULL;
+	}
+	io_uring_prep_write(sqe, d->fd, buffer, WSIZE, d->off);
+	sqe->user_data = d->off;
+
+	sqe = io_uring_get_sqe(d->ring);
+	if (!sqe) {
+		d->err++;
+		return NULL;
+	}
+	io_uring_prep_poll_add(sqe, d->pipe_fd, POLLIN);
+
+	ret = io_uring_submit(d->ring);
+	if (ret != 2)
+		d->err++;
+
+	free(buffer);
+	return NULL;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	const char *fname;
+	pthread_t thread;
+	int ret, do_unlink, i, fd;
+	struct d d;
+	int fds[2];
+
+	if (pipe(fds) < 0) {
+		perror("pipe");
+		return 1;
+	}
+
+	ret = io_uring_queue_init(32, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed\n");
+		return 1;
+	}
+
+	if (argc > 1) {
+		fname = argv[1];
+		do_unlink = 0;
+	} else {
+		fname = ".thread.exit";
+		do_unlink = 1;
+	}
+
+	if (do_unlink)
+		t_create_file(fname, 4096);
+
+	fd = open(fname, O_WRONLY);
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	d.fd = fd;
+	d.ring = &ring;
+	d.off = 0;
+	d.pipe_fd = fds[0];
+	d.err = 0;
+	for (i = 0; i < NR_IOS; i++) {
+		memset(&thread, 0, sizeof(thread));
+		pthread_create(&thread, NULL, do_io, &d);
+		pthread_join(thread, NULL);
+		d.off += WSIZE;
+	}
+
+	for (i = 0; i < NR_IOS; i++) {
+		struct io_uring_cqe *cqe;
+
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "io_uring_wait_cqe=%d\n", ret);
+			goto err;
+		}
+		if (cqe->res != WSIZE) {
+			fprintf(stderr, "cqe->res=%d, Expected %d\n", cqe->res,
+								WSIZE);
+			goto err;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	if (do_unlink)
+		unlink(fname);
+	return d.err;
+err:
+	if (do_unlink)
+		unlink(fname);
+	return 1;
+}
diff --git a/test/timeout-new.c b/test/timeout-new.c
new file mode 100644
index 0000000..b0bb5ee
--- /dev/null
+++ b/test/timeout-new.c
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: tests for getevents timeout
+ *
+ */
+#include <stdio.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <pthread.h>
+#include "liburing.h"
+
+#define TIMEOUT_MSEC	200
+#define TIMEOUT_SEC	10
+
+int thread_ret0, thread_ret1;
+int cnt = 0;
+pthread_mutex_t mutex;
+
+static void msec_to_ts(struct __kernel_timespec *ts, unsigned int msec)
+{
+	ts->tv_sec = msec / 1000;
+	ts->tv_nsec = (msec % 1000) * 1000000;
+}
+
+static unsigned long long mtime_since(const struct timeval *s,
+				      const struct timeval *e)
+{
+	long long sec, usec;
+
+	sec = e->tv_sec - s->tv_sec;
+	usec = (e->tv_usec - s->tv_usec);
+	if (sec > 0 && usec < 0) {
+		sec--;
+		usec += 1000000;
+	}
+
+	sec *= 1000;
+	usec /= 1000;
+	return sec + usec;
+}
+
+static unsigned long long mtime_since_now(struct timeval *tv)
+{
+	struct timeval end;
+
+	gettimeofday(&end, NULL);
+	return mtime_since(tv, &end);
+}
+
+
+static int test_return_before_timeout(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret;
+	struct __kernel_timespec ts;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		return 1;
+	}
+
+	io_uring_prep_nop(sqe);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		return 1;
+	}
+
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+	ret = io_uring_wait_cqe_timeout(ring, &cqe, &ts);
+	if (ret < 0) {
+		fprintf(stderr, "%s: timeout error: %d\n", __FUNCTION__, ret);
+		return 1;
+	}
+
+	io_uring_cqe_seen(ring, cqe);
+	return 0;
+}
+
+static int test_return_after_timeout(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	int ret;
+	struct __kernel_timespec ts;
+	struct timeval tv;
+	unsigned long long exp;
+
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+	gettimeofday(&tv, NULL);
+	ret = io_uring_wait_cqe_timeout(ring, &cqe, &ts);
+	exp = mtime_since_now(&tv);
+	if (ret != -ETIME) {
+		fprintf(stderr, "%s: timeout error: %d\n", __FUNCTION__, ret);
+		return 1;
+	}
+
+	if (exp < TIMEOUT_MSEC / 2 || exp > (TIMEOUT_MSEC  * 3) / 2) {
+		fprintf(stderr, "%s: Timeout seems wonky (got %llu)\n", __FUNCTION__, exp);
+		return 1;
+	}
+
+	return 0;
+}
+
+int __reap_thread_fn(void *data) {
+	struct io_uring *ring = (struct io_uring *)data;
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts;
+
+	msec_to_ts(&ts, TIMEOUT_SEC);
+	pthread_mutex_lock(&mutex);
+	cnt++;
+	pthread_mutex_unlock(&mutex);
+	return io_uring_wait_cqe_timeout(ring, &cqe, &ts);
+}
+
+void *reap_thread_fn0(void *data) {
+	thread_ret0 = __reap_thread_fn(data);
+	return NULL;
+}
+
+void *reap_thread_fn1(void *data) {
+	thread_ret1 = __reap_thread_fn(data);
+	return NULL;
+}
+
+/*
+ * This is to test issuing an sqe in the main thread and reaping it in two
+ * child threads at the same time, to see if the timeout feature works.
+ */
+int test_multi_threads_timeout() {
+	struct io_uring ring;
+	int ret;
+	bool both_wait = false;
+	pthread_t reap_thread0, reap_thread1;
+	struct io_uring_sqe *sqe;
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "%s: ring setup failed: %d\n", __FUNCTION__, ret);
+		return 1;
+	}
+
+	pthread_create(&reap_thread0, NULL, reap_thread_fn0, &ring);
+	pthread_create(&reap_thread1, NULL, reap_thread_fn1, &ring);
+
+	/*
+	 * Try to make both threads enter io_uring_wait_cqe_timeout() before the
+	 * sqe is issued, so that there are two threads in the ctx->wait queue.
+	 * In this way, we can test if a cqe wakes up two threads at the same time.
+	 */
+	while(!both_wait) {
+		pthread_mutex_lock(&mutex);
+		if (cnt == 2)
+			both_wait = true;
+		pthread_mutex_unlock(&mutex);
+		sleep(1);
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+
+	io_uring_prep_nop(sqe);
+
+	ret = io_uring_submit(&ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	pthread_join(reap_thread0, NULL);
+	pthread_join(reap_thread1, NULL);
+
+	if ((thread_ret0 && thread_ret0 != -ETIME) || (thread_ret1 && thread_ret1 != -ETIME)) {
+		fprintf(stderr, "%s: thread wait cqe timeout failed: %d %d\n",
+				__FUNCTION__, thread_ret0, thread_ret1);
+		goto err;
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring_normal, ring_sq;
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(8, &ring_normal, 0);
+	if (ret) {
+		fprintf(stderr, "ring_normal setup failed: %d\n", ret);
+		return 1;
+	}
+	if (!(ring_normal.features & IORING_FEAT_EXT_ARG)) {
+		fprintf(stderr, "feature IORING_FEAT_EXT_ARG not supported.\n");
+		return 0;
+	}
+
+	ret = test_return_before_timeout(&ring_normal);
+	if (ret) {
+		fprintf(stderr, "ring_normal: test_return_before_timeout failed\n");
+		return ret;
+	}
+
+	ret = test_return_after_timeout(&ring_normal);
+	if (ret) {
+		fprintf(stderr, "ring_normal: test_return_after_timeout failed\n");
+		return ret;
+	}
+
+	ret = io_uring_queue_init(8, &ring_sq, IORING_SETUP_SQPOLL);
+	if (ret) {
+		fprintf(stderr, "ring_sq setup failed: %d\n", ret);
+		return 1;
+	}
+
+	ret = test_return_before_timeout(&ring_sq);
+	if (ret) {
+		fprintf(stderr, "ring_sq: test_return_before_timeout failed\n");
+		return ret;
+	}
+
+	ret = test_return_after_timeout(&ring_sq);
+	if (ret) {
+		fprintf(stderr, "ring_sq: test_return_after_timeout failed\n");
+		return ret;
+	}
+
+	ret = test_multi_threads_timeout();
+	if (ret) {
+		fprintf(stderr, "test_multi_threads_timeout failed\n");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/test/timeout-overflow.c b/test/timeout-overflow.c
new file mode 100644
index 0000000..f952f80
--- /dev/null
+++ b/test/timeout-overflow.c
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run timeout overflow test
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <limits.h>
+#include <string.h>
+#include <sys/time.h>
+
+#include "liburing.h"
+
+#define TIMEOUT_MSEC	200
+static int not_supported;
+
+static void msec_to_ts(struct __kernel_timespec *ts, unsigned int msec)
+{
+	ts->tv_sec = msec / 1000;
+	ts->tv_nsec = (msec % 1000) * 1000000;
+}
+
+static int check_timeout_support(void)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts;
+	struct io_uring_params p;
+	struct io_uring ring;
+	int ret;
+
+	memset(&p, 0, sizeof(p));
+	ret = io_uring_queue_init_params(1, &ring, &p);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+	}
+
+	/* not really a match, but same kernel added batched completions */
+	if (p.features & IORING_FEAT_POLL_32BITS) {
+		fprintf(stdout, "Skipping\n");
+		not_supported = 1;
+		return 0;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+	io_uring_prep_timeout(sqe, &ts, 1, 0);
+
+	ret = io_uring_submit(&ring);
+	if (ret < 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+
+	if (cqe->res == -EINVAL) {
+		not_supported = 1;
+		fprintf(stdout, "Timeout not supported, ignored\n");
+		return 0;
+	}
+
+	io_uring_cqe_seen(&ring, cqe);
+	io_uring_queue_exit(&ring);
+	return 0;
+err:
+	io_uring_queue_exit(&ring);
+	return 1;
+}
+
+/*
+ * We first set up 4 timeout requests, which require count values of 1, 1, 2,
+ * UINT_MAX, so the sequence is 1, 2, 4, 2. Before they actually time out,
+ * these 4 requests will not change cq_cached_tail, nor sq_dropped.
+ *
+ * Before the patch, the order of these four requests would be req1->req2->
+ * req4->req3. Actually, it should be req1->req2->req3->req4.
+ *
+ * Then, if there are 2 nop reqs, all timeout requests except req4 complete
+ * successfully after the patch. Without the patch, req1/req2 complete
+ * successfully while req3/req4 return -ETIME!
+ */
+static int test_timeout_overflow(void)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts;
+	struct io_uring ring;
+	int i, ret;
+
+	ret = io_uring_queue_init(16, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+	}
+
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+	for (i = 0; i < 4; i++) {
+		unsigned num;
+		sqe = io_uring_get_sqe(&ring);
+		switch (i) {
+		case 0:
+		case 1:
+			num = 1;
+			break;
+		case 2:
+			num = 2;
+			break;
+		case 3:
+			num = UINT_MAX;
+			break;
+		}
+		io_uring_prep_timeout(sqe, &ts, num, 0);
+	}
+
+	for (i = 0; i < 2; i++) {
+		sqe = io_uring_get_sqe(&ring);
+		io_uring_prep_nop(sqe);
+		io_uring_sqe_set_data(sqe, (void *) 1);
+	}
+	ret = io_uring_submit(&ring);
+	if (ret < 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	i = 0;
+	while (i < 6) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "wait completion %d\n", ret);
+			goto err;
+		}
+
+		/*
+		 * cqe1: first nop req
+		 * cqe2: first timeout req, because of cqe1
+		 * cqe3: second timeout req because of cqe1 + cqe2
+		 * cqe4: second nop req
+		 * cqe5~cqe6: the remaining timeout reqs
+		 */
+		switch (i) {
+		case 0:
+		case 3:
+			if (io_uring_cqe_get_data(cqe) != (void *) 1) {
+				fprintf(stderr, "nop not seen as 1 or 2\n");
+				goto err;
+			}
+			break;
+		case 1:
+		case 2:
+		case 4:
+			if (cqe->res == -ETIME) {
+				fprintf(stderr, "expected not return -ETIME "
+					"for the #%d timeout req\n", i - 1);
+				goto err;
+			}
+			break;
+		case 5:
+			if (cqe->res != -ETIME) {
+				fprintf(stderr, "expected return -ETIME for "
+					"the #%d timeout req\n", i - 1);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+		i++;
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+int main(int argc, char *argv[])
+{
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = check_timeout_support();
+	if (ret) {
+		fprintf(stderr, "check_timeout_support failed: %d\n", ret);
+		return 1;
+	}
+
+	if (not_supported)
+		return 0;
+
+	ret = test_timeout_overflow();
+	if (ret) {
+		fprintf(stderr, "test_timeout_overflow failed\n");
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/test/timeout.c b/test/timeout.c
new file mode 100644
index 0000000..a28d599
--- /dev/null
+++ b/test/timeout.c
@@ -0,0 +1,1343 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various timeout tests
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/time.h>
+
+#include "liburing.h"
+#include "../src/syscall.h"
+
+#define TIMEOUT_MSEC	200
+static int not_supported;
+static int no_modify;
+
+static void msec_to_ts(struct __kernel_timespec *ts, unsigned int msec)
+{
+	ts->tv_sec = msec / 1000;
+	ts->tv_nsec = (msec % 1000) * 1000000;
+}
+
+static unsigned long long mtime_since(const struct timeval *s,
+				      const struct timeval *e)
+{
+	long long sec, usec;
+
+	sec = e->tv_sec - s->tv_sec;
+	usec = (e->tv_usec - s->tv_usec);
+	if (sec > 0 && usec < 0) {
+		sec--;
+		usec += 1000000;
+	}
+
+	sec *= 1000;
+	usec /= 1000;
+	return sec + usec;
+}
+
+static unsigned long long mtime_since_now(struct timeval *tv)
+{
+	struct timeval end;
+
+	gettimeofday(&end, NULL);
+	return mtime_since(tv, &end);
+}
+
+/*
+ * Test that we return to userspace if a timeout triggers, even if we
+ * don't satisfy the number of events asked for.
+ */
+static int test_single_timeout_many(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	unsigned long long exp;
+	struct __kernel_timespec ts;
+	struct timeval tv;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	gettimeofday(&tv, NULL);
+	ret = __sys_io_uring_enter(ring->ring_fd, 0, 4, IORING_ENTER_GETEVENTS,
+					NULL);
+	if (ret < 0) {
+		fprintf(stderr, "%s: io_uring_enter %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	if (ret == -EINVAL) {
+		fprintf(stdout, "Timeout not supported, ignored\n");
+		not_supported = 1;
+		return 0;
+	} else if (ret != -ETIME) {
+		fprintf(stderr, "Timeout: %s\n", strerror(-ret));
+		goto err;
+	}
+
+	exp = mtime_since_now(&tv);
+	if (exp >= TIMEOUT_MSEC / 2 && exp <= (TIMEOUT_MSEC * 3) / 2)
+		return 0;
+	fprintf(stderr, "%s: Timeout seems wonky (got %llu)\n", __FUNCTION__, exp);
+err:
+	return 1;
+}
+
+/*
+ * Test numbered trigger of timeout
+ */
+static int test_single_timeout_nr(struct io_uring *ring, int nr)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct __kernel_timespec ts;
+	int i, ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+	io_uring_prep_timeout(sqe, &ts, nr, 0);
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_nop(sqe);
+	io_uring_sqe_set_data(sqe, (void *) 1);
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_nop(sqe);
+	io_uring_sqe_set_data(sqe, (void *) 1);
+
+	ret = io_uring_submit_and_wait(ring, 3);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	i = 0;
+	while (i < 3) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+			goto err;
+		}
+
+		ret = cqe->res;
+
+		/*
+		 * NOP commands have user_data as 1. Check that we get at
+		 * least 'nr' NOPs first, then the successfully removed timeout.
+		 */
+		if (io_uring_cqe_get_data(cqe) == NULL) {
+			if (i < nr) {
+				fprintf(stderr, "%s: timeout received too early\n", __FUNCTION__);
+				goto err;
+			}
+			if (ret) {
+				fprintf(stderr, "%s: timeout triggered by passage of"
+					" time, not by events completed\n", __FUNCTION__);
+				goto err;
+			}
+		}
+
+		io_uring_cqe_seen(ring, cqe);
+		if (ret) {
+			fprintf(stderr, "res: %d\n", ret);
+			goto err;
+		}
+		i++;
+	};
+
+	return 0;
+err:
+	return 1;
+}
+
+static int test_single_timeout_wait(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct __kernel_timespec ts;
+	int i, ret;
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_nop(sqe);
+	io_uring_sqe_set_data(sqe, (void *) 1);
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_nop(sqe);
+	io_uring_sqe_set_data(sqe, (void *) 1);
+
+	msec_to_ts(&ts, 1000);
+
+	i = 0;
+	do {
+		ret = io_uring_wait_cqes(ring, &cqe, 2, &ts, NULL);
+		if (ret == -ETIME)
+			break;
+		if (ret < 0) {
+			fprintf(stderr, "%s: wait timeout failed: %d\n", __FUNCTION__, ret);
+			goto err;
+		}
+
+		ret = cqe->res;
+		io_uring_cqe_seen(ring, cqe);
+		if (ret < 0) {
+			fprintf(stderr, "res: %d\n", ret);
+			goto err;
+		}
+		i++;
+	} while (1);
+
+	if (i != 2) {
+		fprintf(stderr, "got %d completions\n", i);
+		goto err;
+	}
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test single timeout waking us up
+ */
+static int test_single_timeout(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	unsigned long long exp;
+	struct __kernel_timespec ts;
+	struct timeval tv;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	gettimeofday(&tv, NULL);
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	if (ret == -EINVAL) {
+		fprintf(stdout, "%s: Timeout not supported, ignored\n", __FUNCTION__);
+		not_supported = 1;
+		return 0;
+	} else if (ret != -ETIME) {
+		fprintf(stderr, "%s: Timeout: %s\n", __FUNCTION__, strerror(-ret));
+		goto err;
+	}
+
+	exp = mtime_since_now(&tv);
+	if (exp >= TIMEOUT_MSEC / 2 && exp <= (TIMEOUT_MSEC * 3) / 2)
+		return 0;
+	fprintf(stderr, "%s: Timeout seems wonky (got %llu)\n", __FUNCTION__, exp);
+err:
+	return 1;
+}
+
+static int test_single_timeout_remove_notfound(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct __kernel_timespec ts;
+	int ret, i;
+
+	if (no_modify)
+		return 0;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+	io_uring_prep_timeout(sqe, &ts, 2, 0);
+	sqe->user_data = 1;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+
+	io_uring_prep_timeout_remove(sqe, 2, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	/*
+	 * We should get two completions. One is our modify request, which should
+	 * complete with -ENOENT. The other is the timeout that will trigger after
+	 * TIMEOUT_MSEC.
+	 */
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+			goto err;
+		}
+		if (cqe->user_data == 2) {
+			if (cqe->res != -ENOENT) {
+				fprintf(stderr, "%s: modify ret %d, wanted ENOENT\n", __FUNCTION__, cqe->res);
+				break;
+			}
+		} else if (cqe->user_data == 1) {
+			if (cqe->res != -ETIME) {
+				fprintf(stderr, "%s: timeout ret %d, wanted -ETIME\n", __FUNCTION__, cqe->res);
+				break;
+			}
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+	return 0;
+err:
+	return 1;
+}
+
+static int test_single_timeout_remove(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	struct __kernel_timespec ts;
+	int ret, i;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->user_data = 1;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+
+	io_uring_prep_timeout_remove(sqe, 1, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	/*
+	 * We should have two completions ready. One is for the original timeout
+	 * request, user_data == 1, that should have a ret of -ECANCELED. The other
+	 * is for our modify request, user_data == 2, that should have a ret of 0.
+	 */
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+			goto err;
+		}
+		if (no_modify)
+			goto seen;
+		if (cqe->res == -EINVAL && cqe->user_data == 2) {
+			fprintf(stdout, "Timeout modify not supported, ignoring\n");
+			no_modify = 1;
+			goto seen;
+		}
+		if (cqe->user_data == 1) {
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "%s: timeout ret %d, wanted canceled\n", __FUNCTION__, cqe->res);
+				break;
+			}
+		} else if (cqe->user_data == 2) {
+			if (cqe->res) {
+				fprintf(stderr, "%s: modify ret %d, wanted 0\n", __FUNCTION__, cqe->res);
+				break;
+			}
+		}
+seen:
+		io_uring_cqe_seen(ring, cqe);
+	}
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test single absolute timeout waking us up
+ */
+static int test_single_timeout_abs(struct io_uring *ring)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	unsigned long long exp;
+	struct __kernel_timespec ts;
+	struct timespec abs_ts;
+	struct timeval tv;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+
+	clock_gettime(CLOCK_MONOTONIC, &abs_ts);
+	ts.tv_sec = abs_ts.tv_sec + 1;
+	ts.tv_nsec = abs_ts.tv_nsec;
+	io_uring_prep_timeout(sqe, &ts, 0, IORING_TIMEOUT_ABS);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	gettimeofday(&tv, NULL);
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	if (ret == -EINVAL) {
+		fprintf(stdout, "Absolute timeouts not supported, ignored\n");
+		return 0;
+	} else if (ret != -ETIME) {
+		fprintf(stderr, "Timeout: %s\n", strerror(-ret));
+		goto err;
+	}
+
+	exp = mtime_since_now(&tv);
+	if (exp >= 1000 / 2 && exp <= (1000 * 3) / 2)
+		return 0;
+	fprintf(stderr, "%s: Timeout seems wonky (got %llu)\n", __FUNCTION__, exp);
+err:
+	return 1;
+}
+
+/*
+ * Test that timeout is canceled on exit
+ */
+static int test_single_timeout_exit(struct io_uring *ring)
+{
+	struct io_uring_sqe *sqe;
+	struct __kernel_timespec ts;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+
+	msec_to_ts(&ts, 30000);
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	io_uring_queue_exit(ring);
+	return 0;
+err:
+	io_uring_queue_exit(ring);
+	return 1;
+}
+
+/*
+ * Test multi timeouts waking us up
+ */
+static int test_multi_timeout(struct io_uring *ring)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts[2];
+	unsigned int timeout[2];
+	unsigned long long exp;
+	struct timeval tv;
+	int ret, i;
+
+	/* req_1: timeout req, count = 1, time = (TIMEOUT_MSEC * 2) */
+	timeout[0] = TIMEOUT_MSEC * 2;
+	msec_to_ts(&ts[0], timeout[0]);
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts[0], 1, 0);
+	sqe->user_data = 1;
+
+	/* req_2: timeout req, count = 1, time = TIMEOUT_MSEC */
+	timeout[1] = TIMEOUT_MSEC;
+	msec_to_ts(&ts[1], timeout[1]);
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts[1], 1, 0);
+	sqe->user_data = 2;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	gettimeofday(&tv, NULL);
+	for (i = 0; i < 2; i++) {
+		unsigned int time;
+		__u64 user_data;
+
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+			goto err;
+		}
+
+		/*
+		 * Both of these two reqs should timeout, but req_2 should
+		 * return before req_1.
+		 */
+		switch (i) {
+		case 0:
+			user_data = 2;
+			time = timeout[1];
+			break;
+		case 1:
+			user_data = 1;
+			time = timeout[0];
+			break;
+		}
+
+		if (cqe->user_data != user_data) {
+			fprintf(stderr, "%s: unexpected timeout req %d sequece\n",
+				__FUNCTION__, i+1);
+			goto err;
+		}
+		if (cqe->res != -ETIME) {
+			fprintf(stderr, "%s: Req %d timeout: %s\n",
+				__FUNCTION__, i+1, strerror(cqe->res));
+			goto err;
+		}
+		exp = mtime_since_now(&tv);
+		if (exp < time / 2 || exp > (time * 3) / 2) {
+			fprintf(stderr, "%s: Req %d timeout seems wonky (got %llu)\n",
+				__FUNCTION__, i+1, exp);
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test multi timeout req with different count
+ */
+static int test_multi_timeout_nr(struct io_uring *ring)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts;
+	int ret, i;
+
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+
+	/* req_1: timeout req, count = 2 */
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts, 2, 0);
+	sqe->user_data = 1;
+
+	/* req_2: timeout req, count = 1 */
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts, 1, 0);
+	sqe->user_data = 2;
+
+	/* req_3: nop req */
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_nop(sqe);
+	io_uring_sqe_set_data(sqe, (void *) 1);
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	/*
+	 * req_2 (count=1) should return without error and req_1 (count=2)
+	 * should timeout.
+	 */
+	for (i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+			goto err;
+		}
+
+		switch (i) {
+		case 0:
+			/* Should be nop req */
+			if (io_uring_cqe_get_data(cqe) != (void *) 1) {
+				fprintf(stderr, "%s: nop not seen as 1 or 2\n", __FUNCTION__);
+				goto err;
+			}
+			break;
+		case 1:
+			/* Should be timeout req_2 */
+			if (cqe->user_data != 2) {
+				fprintf(stderr, "%s: unexpected timeout req %d sequece\n",
+					__FUNCTION__, i+1);
+				goto err;
+			}
+			if (cqe->res < 0) {
+				fprintf(stderr, "%s: Req %d res %d\n",
+					__FUNCTION__, i+1, cqe->res);
+				goto err;
+			}
+			break;
+		case 2:
+			/* Should be timeout req_1 */
+			if (cqe->user_data != 1) {
+				fprintf(stderr, "%s: unexpected timeout req %d sequece\n",
+					__FUNCTION__, i+1);
+				goto err;
+			}
+			if (cqe->res != -ETIME) {
+				fprintf(stderr, "%s: Req %d timeout: %s\n",
+					__FUNCTION__, i+1, strerror(cqe->res));
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test timeout <link> timeout <drain> timeout
+ */
+static int test_timeout_flags1(struct io_uring *ring)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts;
+	int ret, i;
+
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->user_data = 1;
+	sqe->flags |= IOSQE_IO_LINK;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->user_data = 2;
+	sqe->flags |= IOSQE_IO_DRAIN;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+			goto err;
+		}
+
+		if (cqe->res == -EINVAL) {
+			if (!i)
+				fprintf(stdout, "%s: timeout flags not supported\n",
+						__FUNCTION__);
+			io_uring_cqe_seen(ring, cqe);
+			continue;
+		}
+
+		switch (cqe->user_data) {
+		case 1:
+			if (cqe->res != -ETIME) {
+				fprintf(stderr, "%s: got %d, wanted %d\n",
+						__FUNCTION__, cqe->res, -ETIME);
+				goto err;
+			}
+			break;
+		case 2:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "%s: got %d, wanted %d\n",
+						__FUNCTION__, cqe->res,
+						-ECANCELED);
+				goto err;
+			}
+			break;
+		case 3:
+			if (cqe->res != -ETIME) {
+				fprintf(stderr, "%s: got %d, wanted %d\n",
+						__FUNCTION__, cqe->res, -ETIME);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test timeout <link> timeout <link> timeout
+ */
+static int test_timeout_flags2(struct io_uring *ring)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts;
+	int ret, i;
+
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->user_data = 1;
+	sqe->flags |= IOSQE_IO_LINK;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->user_data = 2;
+	sqe->flags |= IOSQE_IO_LINK;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+			goto err;
+		}
+
+		if (cqe->res == -EINVAL) {
+			if (!i)
+				fprintf(stdout, "%s: timeout flags not supported\n",
+						__FUNCTION__);
+			io_uring_cqe_seen(ring, cqe);
+			continue;
+		}
+
+		switch (cqe->user_data) {
+		case 1:
+			if (cqe->res != -ETIME) {
+				fprintf(stderr, "%s: got %d, wanted %d\n",
+						__FUNCTION__, cqe->res, -ETIME);
+				goto err;
+			}
+			break;
+		case 2:
+		case 3:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "%s: got %d, wanted %d\n",
+						__FUNCTION__, cqe->res,
+						-ECANCELED);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+/*
+ * Test timeout <drain> timeout <link> timeout
+ */
+static int test_timeout_flags3(struct io_uring *ring)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts;
+	int ret, i;
+
+	msec_to_ts(&ts, TIMEOUT_MSEC);
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->user_data = 1;
+	sqe->flags |= IOSQE_IO_DRAIN;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->user_data = 2;
+	sqe->flags |= IOSQE_IO_LINK;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->user_data = 3;
+
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+			goto err;
+		}
+
+		if (cqe->res == -EINVAL) {
+			if (!i)
+				fprintf(stdout, "%s: timeout flags not supported\n",
+						__FUNCTION__);
+			io_uring_cqe_seen(ring, cqe);
+			continue;
+		}
+
+		switch (cqe->user_data) {
+		case 1:
+		case 2:
+			if (cqe->res != -ETIME) {
+				fprintf(stderr, "%s: got %d, wanted %d\n",
+						__FUNCTION__, cqe->res, -ETIME);
+				goto err;
+			}
+			break;
+		case 3:
+			if (cqe->res != -ECANCELED) {
+				fprintf(stderr, "%s: got %d, wanted %d\n",
+						__FUNCTION__, cqe->res,
+						-ECANCELED);
+				goto err;
+			}
+			break;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return 0;
+err:
+	return 1;
+}
+
+static int test_update_timeout(struct io_uring *ring, unsigned long ms,
+				bool abs, bool async, bool linked)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts, ts_upd;
+	unsigned long long exp_ms, base_ms = 10000;
+	struct timeval tv;
+	int ret, i, nr = 2;
+	__u32 mode = abs ? IORING_TIMEOUT_ABS : 0;
+
+	msec_to_ts(&ts_upd, ms);
+	gettimeofday(&tv, NULL);
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	msec_to_ts(&ts, base_ms);
+	io_uring_prep_timeout(sqe, &ts, 0, 0);
+	sqe->user_data = 1;
+
+	if (linked) {
+		sqe = io_uring_get_sqe(ring);
+		if (!sqe) {
+			fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+			goto err;
+		}
+		io_uring_prep_nop(sqe);
+		sqe->user_data = 3;
+		sqe->flags = IOSQE_IO_LINK;
+		if (async)
+			sqe->flags |= IOSQE_ASYNC;
+		nr++;
+	}
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout_update(sqe, &ts_upd, 1, mode);
+	sqe->user_data = 2;
+	if (async)
+		sqe->flags |= IOSQE_ASYNC;
+
+	ret = io_uring_submit(ring);
+	if (ret != nr) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	for (i = 0; i < nr; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret < 0) {
+			fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+			goto err;
+		}
+
+		switch (cqe->user_data) {
+		case 1:
+			if (cqe->res != -ETIME) {
+				fprintf(stderr, "%s: got %d, wanted %d\n",
+						__FUNCTION__, cqe->res, -ETIME);
+				goto err;
+			}
+			break;
+		case 2:
+			if (cqe->res != 0) {
+				fprintf(stderr, "%s: got %d, wanted %d\n",
+						__FUNCTION__, cqe->res,
+						0);
+				goto err;
+			}
+			break;
+		case 3:
+			if (cqe->res != 0) {
+				fprintf(stderr, "nop failed\n");
+				goto err;
+			}
+			break;
+		default:
+			goto err;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	exp_ms = mtime_since_now(&tv);
+	if (exp_ms >= base_ms / 2) {
+		fprintf(stderr, "too long, timeout wasn't updated\n");
+		goto err;
+	}
+	if (ms >= 1000 && !abs && exp_ms < ms / 2) {
+		fprintf(stderr, "fired too early, potentially updated to 0 ms"
+					"instead of %lu\n", ms);
+		goto err;
+	}
+	return 0;
+err:
+	return 1;
+}
+
+static int test_update_nonexistent_timeout(struct io_uring *ring)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	msec_to_ts(&ts, 0);
+	io_uring_prep_timeout_update(sqe, &ts, 42, 0);
+
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	ret = cqe->res;
+	if (ret == -ENOENT)
+		ret = 0;
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+err:
+	return 1;
+}
+
+static int test_update_invalid_flags(struct io_uring *ring)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct __kernel_timespec ts;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	io_uring_prep_timeout_remove(sqe, 0, IORING_TIMEOUT_ABS);
+
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+	if (cqe->res != -EINVAL) {
+		fprintf(stderr, "%s: got %d, wanted %d\n",
+				__FUNCTION__, cqe->res, -EINVAL);
+		goto err;
+	}
+	io_uring_cqe_seen(ring, cqe);
+
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "%s: get sqe failed\n", __FUNCTION__);
+		goto err;
+	}
+	msec_to_ts(&ts, 0);
+	io_uring_prep_timeout_update(sqe, &ts, 0, -1);
+
+	ret = io_uring_submit(ring);
+	if (ret != 1) {
+		fprintf(stderr, "%s: sqe submit failed: %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "%s: wait completion %d\n", __FUNCTION__, ret);
+		goto err;
+	}
+	if (cqe->res != -EINVAL) {
+		fprintf(stderr, "%s: got %d, wanted %d\n",
+				__FUNCTION__, cqe->res, -EINVAL);
+		goto err;
+	}
+	io_uring_cqe_seen(ring, cqe);
+
+	return 0;
+err:
+	return 1;
+}
+
/*
 * Test driver: runs every timeout test in sequence against a shared ring.
 * Returns 0 on success or skip, non-zero on the first failure.
 */
int main(int argc, char *argv[])
{
	struct io_uring ring, sqpoll_ring;
	bool has_timeout_update, sqpoll;
	int ret;

	/* test harness convention: skip when given a file argument */
	if (argc > 1)
		return 0;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring setup failed\n");
		return 1;
	}

	/* SQPOLL may need privileges; treat failure as "not available" */
	ret = io_uring_queue_init(8, &sqpoll_ring, IORING_SETUP_SQPOLL);
	sqpoll = !ret;

	ret = test_single_timeout(&ring);
	if (ret) {
		fprintf(stderr, "test_single_timeout failed\n");
		return ret;
	}
	/* set by test_single_timeout when the kernel lacks IORING_OP_TIMEOUT */
	if (not_supported)
		return 0;

	ret = test_multi_timeout(&ring);
	if (ret) {
		fprintf(stderr, "test_multi_timeout failed\n");
		return ret;
	}

	ret = test_single_timeout_abs(&ring);
	if (ret) {
		fprintf(stderr, "test_single_timeout_abs failed\n");
		return ret;
	}

	ret = test_single_timeout_remove(&ring);
	if (ret) {
		fprintf(stderr, "test_single_timeout_remove failed\n");
		return ret;
	}

	ret = test_single_timeout_remove_notfound(&ring);
	if (ret) {
		fprintf(stderr, "test_single_timeout_remove_notfound failed\n");
		return ret;
	}

	ret = test_single_timeout_many(&ring);
	if (ret) {
		fprintf(stderr, "test_single_timeout_many failed\n");
		return ret;
	}

	ret = test_single_timeout_nr(&ring, 1);
	if (ret) {
		fprintf(stderr, "test_single_timeout_nr(1) failed\n");
		return ret;
	}
	ret = test_single_timeout_nr(&ring, 2);
	if (ret) {
		fprintf(stderr, "test_single_timeout_nr(2) failed\n");
		return ret;
	}

	ret = test_multi_timeout_nr(&ring);
	if (ret) {
		fprintf(stderr, "test_multi_timeout_nr failed\n");
		return ret;
	}

	ret = test_timeout_flags1(&ring);
	if (ret) {
		fprintf(stderr, "test_timeout_flags1 failed\n");
		return ret;
	}

	ret = test_timeout_flags2(&ring);
	if (ret) {
		fprintf(stderr, "test_timeout_flags2 failed\n");
		return ret;
	}

	ret = test_timeout_flags3(&ring);
	if (ret) {
		fprintf(stderr, "test_timeout_flags3 failed\n");
		return ret;
	}

	ret = test_single_timeout_wait(&ring);
	if (ret) {
		fprintf(stderr, "test_single_timeout_wait failed\n");
		return ret;
	}

	/* io_uring_wait_cqes() may have left a timeout, reinit ring */
	io_uring_queue_exit(&ring);
	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring setup failed\n");
		return 1;
	}

	/* -EINVAL here means the kernel predates timeout update support */
	ret = test_update_nonexistent_timeout(&ring);
	has_timeout_update = (ret != -EINVAL);
	if (has_timeout_update) {
		if (ret) {
			fprintf(stderr, "test_update_nonexistent_timeout failed\n");
			return ret;
		}

		ret = test_update_invalid_flags(&ring);
		if (ret) {
			fprintf(stderr, "test_update_invalid_flags failed\n");
			return ret;
		}

		ret = test_update_timeout(&ring, 0, false, false, false);
		if (ret) {
			fprintf(stderr, "test_update_timeout failed\n");
			return ret;
		}

		ret = test_update_timeout(&ring, 1, false, false, false);
		if (ret) {
			fprintf(stderr, "test_update_timeout 1ms failed\n");
			return ret;
		}

		ret = test_update_timeout(&ring, 1000, false, false, false);
		if (ret) {
			fprintf(stderr, "test_update_timeout 1s failed\n");
			return ret;
		}

		ret = test_update_timeout(&ring, 0, true, true, false);
		if (ret) {
			fprintf(stderr, "test_update_timeout abs failed\n");
			return ret;
		}


		ret = test_update_timeout(&ring, 0, false, true, false);
		if (ret) {
			fprintf(stderr, "test_update_timeout async failed\n");
			return ret;
		}

		ret = test_update_timeout(&ring, 0, false, false, true);
		if (ret) {
			fprintf(stderr, "test_update_timeout linked failed\n");
			return ret;
		}

		if (sqpoll) {
			ret = test_update_timeout(&sqpoll_ring, 0, false, false,
						  false);
			if (ret) {
				fprintf(stderr, "test_update_timeout sqpoll"
						"failed\n");
				return ret;
			}
		}
	}

	/*
	 * this test must go last, it kills the ring
	 */
	ret = test_single_timeout_exit(&ring);
	if (ret) {
		fprintf(stderr, "test_single_timeout_exit failed\n");
		return ret;
	}

	if (sqpoll)
		io_uring_queue_exit(&sqpoll_ring);
	return 0;
}
diff --git a/test/unlink.c b/test/unlink.c
new file mode 100644
index 0000000..f8c7639
--- /dev/null
+++ b/test/unlink.c
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: run various unlink tests
+ *
+ */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
+
+#include "liburing.h"
+
+static int test_unlink(struct io_uring *ring, const char *old)
+{
+	struct io_uring_cqe *cqe;
+	struct io_uring_sqe *sqe;
+	int ret;
+
+	sqe = io_uring_get_sqe(ring);
+	if (!sqe) {
+		fprintf(stderr, "get sqe failed\n");
+		goto err;
+	}
+	io_uring_prep_unlinkat(sqe, AT_FDCWD, old, 0);
+	
+	ret = io_uring_submit(ring);
+	if (ret <= 0) {
+		fprintf(stderr, "sqe submit failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = io_uring_wait_cqe(ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait completion %d\n", ret);
+		goto err;
+	}
+	ret = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return ret;
+err:
+	return 1;
+}
+
/*
 * Return 0 if the path exists, otherwise the errno reported by stat(2).
 */
static int stat_file(const char *buf)
{
	struct stat sb;

	if (stat(buf, &sb) == 0)
		return 0;
	return errno;
}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	char buf[32] = "./XXXXXX";
+	int ret;
+
+	if (argc > 1)
+		return 0;
+
+	ret = io_uring_queue_init(1, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return 1;
+	}
+
+	ret = mkstemp(buf);
+	if (ret < 0) {
+		perror("mkstemp");
+		return 1;
+	}
+	close(ret);
+
+	if (stat_file(buf) != 0) {
+		perror("stat");
+		return 1;
+	}
+
+	ret = test_unlink(&ring, buf);
+	if (ret < 0) {
+		if (ret == -EBADF || ret == -EINVAL) {
+			fprintf(stdout, "Unlink not supported, skipping\n");
+			unlink(buf);
+			return 0;
+		}
+		fprintf(stderr, "rename: %s\n", strerror(-ret));
+		goto err;
+	} else if (ret)
+		goto err;
+
+	ret = stat_file(buf);
+	if (ret != ENOENT) {
+		fprintf(stderr, "stat got %s\n", strerror(ret));
+		return 1;
+	}
+
+	ret = test_unlink(&ring, "/3/2/3/1/z/y");
+	if (ret != -ENOENT) {
+		fprintf(stderr, "invalid unlink got %s\n", strerror(-ret));
+		return 1;
+	}
+
+	return 0;
+err:
+	unlink(buf);
+	return 1;
+}
diff --git a/test/wakeup-hang.c b/test/wakeup-hang.c
new file mode 100644
index 0000000..e43cb34
--- /dev/null
+++ b/test/wakeup-hang.c
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: MIT */
+#include <sys/eventfd.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include <liburing.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/time.h>
+
/*
 * State shared between the waiter and wakeup threads: the ring being
 * waited on, and the fd that is written to make the poll request complete.
 */
struct thread_data {
	struct io_uring *ring;
	int write_fd;
};
+
/*
 * Print a perror-style diagnostic for 'message' and terminate the process.
 * Parameter is const-qualified: callers only ever pass string literals.
 */
static void error_exit(const char *message)
{
	perror(message);
	exit(1);
}
+
+static void *listener_thread(void *data)
+{
+	struct thread_data *td = data;
+	struct io_uring_cqe *cqe;
+	int ret;
+
+        ret = io_uring_wait_cqe(td->ring, &cqe);
+        if (ret < 0) {
+        	fprintf(stderr, "Error waiting for completion: %s\n",
+                	strerror(-ret));
+		goto err;
+        }
+	if (cqe->res < 0) {
+		fprintf(stderr, "Error in async operation: %s\n", strerror(-cqe->res));
+		goto err;
+        }
+	io_uring_cqe_seen(td->ring, cqe);
+	return NULL;
+err:
+	return (void *) 1;
+}
+
+static void *wakeup_io_uring(void *data)
+{
+	struct thread_data *td = data;
+	int res;
+
+	res = eventfd_write(td->write_fd, (eventfd_t) 1L);
+	if (res < 0) {
+		perror("eventfd_write");
+		return (void *) 1;
+	}
+	return NULL;
+}
+
+static int test_pipes(void)
+{
+	struct io_uring_sqe *sqe;
+	struct thread_data td;
+	struct io_uring ring;
+	pthread_t t1, t2;
+	int ret, fds[2];
+	void *pret;
+
+	if (pipe(fds) < 0)
+		error_exit("eventfd");
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "Unable to setup io_uring: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	td.write_fd = fds[1];
+	td.ring = &ring;
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
+	sqe->user_data = 2;
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "ring_submit=%d\n", ret);
+		return 1;
+	}
+
+	pthread_create(&t1, NULL, listener_thread, &td);
+
+	sleep(1);
+
+	pthread_create(&t2, NULL, wakeup_io_uring, &td);
+	pthread_join(t1, &pret);
+
+	io_uring_queue_exit(&ring);
+	return pret != NULL;
+}
+
+static int test_eventfd(void)
+{
+	struct io_uring_sqe *sqe;
+	struct thread_data td;
+	struct io_uring ring;
+	pthread_t t1, t2;
+	int efd, ret;
+	void *pret;
+
+	efd = eventfd(0, 0);
+	if (efd < 0)
+		error_exit("eventfd");
+
+	ret = io_uring_queue_init(8, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "Unable to setup io_uring: %s\n", strerror(-ret));
+		return 1;
+	}
+
+	td.write_fd = efd;
+	td.ring = &ring;
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_poll_add(sqe, efd, POLLIN);
+	sqe->user_data = 2;
+	ret = io_uring_submit(&ring);
+	if (ret != 1) {
+		fprintf(stderr, "ring_submit=%d\n", ret);
+		return 1;
+	}
+
+	pthread_create(&t1, NULL, listener_thread, &td);
+
+	sleep(1);
+
+	pthread_create(&t2, NULL, wakeup_io_uring, &td);
+	pthread_join(t1, &pret);
+
+	io_uring_queue_exit(&ring);
+	return pret != NULL;
+}
+
/*
 * Driver: run the pipe-backed and eventfd-backed wakeup tests.
 */
int main(int argc, char *argv[])
{
	int ret;

	/* harness convention: skip when invoked with a file argument */
	if (argc > 1)
		return 0;

	ret = test_pipes();
	if (ret) {
		fprintf(stderr, "test_pipe failed\n");
		return ret;
	}

	ret = test_eventfd();
	if (ret) {
		fprintf(stderr, "test_eventfd failed\n");
		return ret;
	}
	return 0;
}