aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/_posts
diff options
context:
space:
mode:
Diffstat (limited to '_posts')
-rw-r--r--_posts/2015-07-03-std-call-once-bug.md252
-rw-r--r--_posts/2017-01-07-building-boost.md265
-rw-r--r--_posts/2017-06-24-static-vs-inline-vs-unnamed-namespaces.md273
-rw-r--r--_posts/2018-02-18-peculiar-indentation.md101
-rw-r--r--_posts/2020-02-24-ssh-tunnel-windows.md153
-rw-r--r--_posts/2020-05-06-docker-bind-mounts.md227
-rw-r--r--_posts/2020-05-20-makefile-escaping.md439
-rw-r--r--_posts/2021-03-10-ubuntu-packaging.md320
-rw-r--r--_posts/2022-09-07-gdb-sleep-all.md33
-rw-r--r--_posts/2022-11-07-ptrace-sigtraps.md72
-rw-r--r--_posts/2022-11-07-ptrace-waitpid.md51
-rwxr-xr-x_posts/snippets/gdb_sleep_all/gdb_sleep_all.sh17
-rw-r--r--_posts/snippets/gdb_sleep_all/sleep.gdb2
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/shared.hpp6
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/weird/another.cpp11
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/weird/another.hpp3
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/weird/main.cpp13
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/another.cpp6
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/another.hpp3
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/main.cpp8
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/shared.hpp12
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/main.cpp10
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/proxy.cpp6
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/proxy.hpp3
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/shared.hpp6
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/main.cpp10
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/proxy.cpp6
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/proxy.hpp3
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/shared.hpp6
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/main.cpp10
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/proxy.cpp6
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/proxy.hpp3
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/shared.hpp10
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/ok/another.cpp19
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/ok/another.hpp3
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/ok/main.cpp22
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/weird/another.cpp15
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/weird/another.hpp3
-rw-r--r--_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/weird/main.cpp18
-rw-r--r--_posts/snippets/ubuntu_packaging/basic/changelog5
-rw-r--r--_posts/snippets/ubuntu_packaging/basic/control13
-rw-r--r--_posts/snippets/ubuntu_packaging/basic/copyright31
-rw-r--r--_posts/snippets/ubuntu_packaging/basic/rules3
-rw-r--r--_posts/snippets/ubuntu_packaging/gbp/gbp.conf5
-rw-r--r--_posts/snippets/ubuntu_packaging/install/test.install1
45 files changed, 2484 insertions, 0 deletions
diff --git a/_posts/2015-07-03-std-call-once-bug.md b/_posts/2015-07-03-std-call-once-bug.md
new file mode 100644
index 0000000..8191ad3
--- /dev/null
+++ b/_posts/2015-07-03-std-call-once-bug.md
@@ -0,0 +1,252 @@
+---
+title: std::call_once bug in Visual C++ 2012/2013
+category: C++
+---
+I've recently come across a nasty standard library bug in the implementation
+shipped with Microsoft Visual Studio 2012/2013.
+[StackOverflow was of no help], so I had to somehow report the bug to the
+maintainers.
+Oddly enough, Visual Studio's [Connect page] wouldn't let me report one,
+complaining about the lack of permissions, even though I was logged in from my
+work account, associated with my Visual Studio 2013 installation.
+
+Fortunately, I've come across the personal website of this amazing guy,
+[Stephan T. Lavavej], who appears to be the chief maintainer of Microsoft's
+standard library implementation.
+He seems to be your go-to guy when it comes to obvious standard library
+misbehaviours.
+
+[StackOverflow was of no help]: https://stackoverflow.com/questions/26477070/concurrent-stdcall-once-calls
+[Connect page]: https://connect.microsoft.com/VisualStudio
+[Stephan T. Lavavej]: http://nuwen.net/stl.html
+
+C++11 and singletons
+--------------------
+
+Anyway, the story begins with me trying to implement the singleton pattern
+using C++11 facilities like this:
+
+```c++
+#include <mutex>
+
+template <typename Derived>
+class Singleton {
+public:
+ static Derived& get_instance() {
+ std::call_once(initialized_flag, &initialize_instance);
+ return Derived::get_instance_unsafe();
+ }
+
+protected:
+ Singleton() = default;
+ ~Singleton() = default;
+
+ static Derived& get_instance_unsafe() {
+ static Derived instance;
+ return instance;
+ }
+
+private:
+ static void initialize_instance() {
+ Derived::get_instance_unsafe();
+ }
+
+ static std::once_flag initialized_flag;
+
+ Singleton(const Singleton&) = delete;
+ Singleton& operator=(const Singleton&) = delete;
+};
+
+template <typename Derived>
+std::once_flag Singleton<Derived>::initialized_flag;
+```
+
+Neat, huh?
+Now other classes can inherit from `Singleton`, implementing the singleton
+pattern effortlessly:
+
+```c++
+class Logger : public Singleton<Logger> {
+private:
+ Logger() = default;
+ ~Logger() = default;
+
+ friend class Singleton<Logger>;
+};
+```
+
+Note that the [N2660] standard proposal isn't/wasn't implemented in the
+compilers shipped with Visual Studio 2012/2013.
+If it was, I wouldn't, of course, need to employ this `std::call_once`
+trickery, and the implementation would be much simpler, i.e. something like
+this:
+
+```c++
+class Logger {
+public:
+ static Logger& get_instance() {
+ static Logger instance;
+ return instance;
+ }
+
+private:
+ Logger() = default;
+ ~Logger() = default;
+};
+```
+
+<div class="alert alert-info" markdown="1">
+
+The point is that the `Logger::get_instance` routine above wasn't thread-safe
+until C++11.
+Imagine what might happen if `Logger`'s constructor takes some time to
+initialize the instance.
+If a couple of threads then call `get_instance`, the first thread might begin
+the initialization process, making the other thread believe that the instance
+had already been initialized.
+This other thread might then return a reference to the instance which hasn't
+yet completed its initialization and is most likely unsafe to use.
+
+Since C++11 includes the proposal mentioned above, this routine would indeed be
+thread-safe in C++11.
+Unfortunately, the compilers shipped with Visual Studio 2012/2013 don't/didn't
+implement this particular proposal, which caused me to look at
+`std::call_once`, which seemed to implement exactly what I needed.
+
+</div>
+
+[N2660]: http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2660.htm
+
+Problem
+-------
+
+Unfortunately, matters became a bit more complicated when I tried to introduce
+two singletons, one having a dependency on the other.
+I had `Logger`, like in the example above, and some kind of a "master"
+singleton (let's call it `Duke`).
+`Duke`'s constructor was complicated and time-consuming, and definitely
+required some logging to be done.
+I thought that I could simply call `Logger::get_instance` inside `Duke`'s
+constructor, and everything looked fine at first glance.
+
+```c++
+#include <chrono>
+#include <thread>
+
+class Logger : public Singleton<Logger> {
+public:
+ Logger& operator<<(const char* msg) {
+ // Actual logging is stripped for brevity.
+ return *this;
+ }
+
+private:
+ Logger() {
+ // Opening log files, etc.
+ std::this_thread::sleep_for(std::chrono::seconds{3});
+ }
+
+ ~Logger() = default;
+
+ friend class Singleton<Logger>;
+};
+
+class Duke : public Singleton<Duke> {
+private:
+ Duke() {
+ Logger::get_instance() << "started Duke's initialization";
+ // It's a lot of work to be done.
+ std::this_thread::sleep_for(std::chrono::seconds{10});
+ Logger::get_instance() << "finishing Duke's initialization";
+ }
+
+ ~Duke() = default;
+
+ friend class Singleton<Duke>;
+};
+```
+
+Now, what happens if I have two threads, one using the `Duke` instance, and the
+other logging something?
+Like in this example:
+
+```c++
+#include <thread>
+
+namespace {
+
+void get_logger() {
+ entered(__FUNCTION__);
+ Logger::get_instance();
+ exiting(__FUNCTION__);
+}
+
+void get_duke() {
+ entered(__FUNCTION__);
+ Duke::get_instance();
+ exiting(__FUNCTION__);
+}
+
+}
+
+int main() {
+ std::thread t1{&get_duke};
+ std::thread t2{&get_logger};
+ t1.join();
+ t2.join();
+ return 0;
+}
+```
+
+`entered` and `exiting` are utility functions to print timestamps.
+The implementation is included in the [complete code sample].
+{: .alert .alert-info }
+
+The first thread is supposed to have the total running time of about 13
+seconds, right?
+Three seconds to initialize the `Logger` instance and ten to initialize the
+`Duke` instance.
+The second thread, similarly, is supposed to be done in about 3 seconds
+required for the initialization of `Logger`.
+
+Weirdly, this program produces the following output when compiled using Visual
+Studio 2013's compiler:
+
+ Entered `anonymous-namespace'::get_duke at Fri Jul 03 02:26:16 2015
+ Entered `anonymous-namespace'::get_logger at Fri Jul 03 02:26:16 2015
+ Exiting `anonymous-namespace'::get_duke at Fri Jul 03 02:26:29 2015
+ Exiting `anonymous-namespace'::get_logger at Fri Jul 03 02:26:29 2015
+
+Isn't it wrong that the second thread actually took the same 13 seconds as the
+first thread?
+Better check with some other compiler in case it was me who made a mistake.
+Unfortunately, the program behaves as expected when compiled using GCC:
+
+ Entered get_logger at Fri Jul 3 02:27:12 2015
+ Entered get_duke at Fri Jul 3 02:27:12 2015
+ Exiting get_logger at Fri Jul 3 02:27:15 2015
+ Exiting get_duke at Fri Jul 3 02:27:25 2015
+
+So it appears that the implementation of `std::call_once` shipped with Visual
+Studio 2012/2013 relies on some kind of a global lock, which causes even the
+simple example above to misbehave.
+
+The [complete code sample] to demonstrate the misbehaviour described above can
+be found in this blog's repository.
+
+[complete code sample]: {{ site.github.repository_url }}/tree/master/std_call_once_bug
+
+Conclusion
+----------
+
+So, since I couldn't submit the bug via Visual Studio's [Connect page], I wrote
+to Mr. Lavavej directly, not hoping for an answer.
+Amazingly, it took him less than a day to reply.
+He told me he was planning to overhaul `std::call_once` for Visual Studio 2015.
+Meanwhile, I had to stick to something else; I think I either dropped logging
+from `Duke`'s constructor or initialized all the singleton instances manually
+before actually using any of them.
+In a few months, Mr. Lavavej replied to me that the bug had been fixed in
+Visual Studio 2015 RTM.
+I would like to thank him for the professionalism and responsibility he's
+shown.
diff --git a/_posts/2017-01-07-building-boost.md b/_posts/2017-01-07-building-boost.md
new file mode 100644
index 0000000..cddba13
--- /dev/null
+++ b/_posts/2017-01-07-building-boost.md
@@ -0,0 +1,265 @@
+---
+title: Building Boost on Windows
+category: C++
+---
+Below you can find the steps required to build Boost libraries on Windows.
+These steps tightly fit my typical workflow, which is to use Boost libraries in
+CMake builds using either Visual Studio or the combination of Cygwin +
+MinGW-w64.
+I would expect, however, that the procedure for the latter toolset can easily
+be adjusted for generic GCC distributions (including vanilla GCCs found in
+popular Linux distributions).
+
+One of the features of this workflow is that I build throwaway, "run
+everywhere, record the results, and scrap it" executables more often than not,
+so I prefer to link everything statically, including, for instance, C/C++
+runtimes.
+This is implemented by passing `runtime-link=static` to Boost's build utility
+`b2`; change this to `runtime-link=dynamic` to link the runtime dynamically.
+
+Excerpts from shell sessions in this post feature a few different commands
+besides Boost's `b2` and `cmake`, like `cd` and `cat`.
+They are used to hint at my personal directory layout, display various
+auxiliary files, etc.
+Windows' `cd`, for example, simply prints the current working directory;
+Cygwin's `pwd` serves the same purpose.
+`cat` is used to display files.
+
+Visual Studio
+-------------
+
+Statically-linked Boost libraries are built, both the debug and the release
+versions of them (these are default settings).
+While it is required to keep x86 and x64 libraries in different directories (to
+avoid file name clashes), it's not necessary to separate debug libraries from
+their release counterparts, because that information is actually encoded in
+file names (the "gd" suffix).
+
+### x86
+
+{% capture out1 %}
+D:\workspace\third-party\boost_1_61_0\msvc
+{% endcapture %}
+
+{% capture cmd3 %}
+b2 --stagedir=stage\x86 ^
+ runtime-link=static ^
+ --with-filesystem ^
+ --with-program_options ^
+ ...
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='cd' out=out1 %}
+{% include jekyll-theme/shell.html cmd='bootstrap' %}
+{% include jekyll-theme/shell.html cmd=cmd3 %}
+
+### x64
+
+The only important difference is that you have to pass `address-model=64` to
+`b2` (notice also the different "staging" directory).
+
+{% capture out1 %}
+D:\workspace\third-party\boost_1_61_0\msvc
+{% endcapture %}
+
+{% capture cmd3 %}
+b2 --stagedir=stage\x64 ^
+ runtime-link=static ^
+ address-model=64 ^
+ --with-filesystem ^
+ --with-program_options ^
+ ...
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='cd' out=out1 %}
+{% include jekyll-theme/shell.html cmd='bootstrap' %}
+{% include jekyll-theme/shell.html cmd=cmd3 %}
+
+Cygwin + MinGW-w64
+------------------
+
+Contrary to the Visual Studio example above, it is required to store debug and
+release libraries *as well as* x86 and x64 libraries in different directories.
+It is required to avoid file name clashes; unlike the Visual Studio "toolset"
+(in Boost's terms), GCC-derived toolsets don't encode any information (like
+whether the debug or the release version of a library was built) in file names.
+
+Also, linking the runtime statically doesn't really make sense for MinGW, as it
+always links to msvcrt.dll, which is [simply the Visual Studio 6.0 runtime].
+
+[simply the Visual Studio 6.0 runtime]: https://sourceforge.net/p/mingw-w64/wiki2/The%20case%20against%20msvcrt.dll/
+
+In the examples below, only the debug versions of the libraries are built.
+Build the release versions by executing the same command, and substituting
+`variant=release` instead of `variant=debug` and either
+`--stagedir=stage/x86/release` or `--stagedir=stage/x64/release`, depending
+on the target architecture.
+
+### x86
+
+{% capture out1 %}
+/cygdrive/d/workspace/third-party/boost_1_61_0/mingw
+{% endcapture %}
+
+{% capture out3 %}
+using gcc : : i686-w64-mingw32-g++ ;
+{% endcapture %}
+
+{% capture cmd4 %}
+./b2 toolset=gcc-mingw \
+ target-os=windows \
+ link=static \
+ variant=debug \
+ --stagedir=stage/x86/debug \
+ --user-config=user-config-x86.jam \
+ --with-filesystem \
+ --with-program_options \
+ ...
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='pwd' out=out1 %}
+{% include jekyll-theme/shell.html cmd='./bootstrap.sh' %}
+{% include jekyll-theme/shell.html cmd='cat user-config-x86.jam' out=out3 %}
+{% include jekyll-theme/shell.html cmd=cmd4 %}
+
+The "user" configuration file above stopped working at some point; not sure as
+to who's to blame, Cygwin or Boost.
+If you see something like "`error: provided command 'i686-w64-mingw32-g++' not
+found`", add ".exe" to the binary name above, so that the whole file reads
+"`using gcc : : i686-w64-mingw32-g++.exe ;`".
+{: .alert .alert-info }
+
+### x64
+
+Notice the two major differences from the x86 example:
+
+* the addition of `address-model=64` (as in the example for Visual Studio),
+* the different "user" configuration file, pointing to `x86_64-w64-mingw32-g++`
+instead of `i686-w64-mingw32-g++`.
+
+Again, as in the example for Visual Studio, a different "staging" directory
+needs to be specified using the `--stagedir` parameter.
+
+{% capture out1 %}
+/cygdrive/d/workspace/third-party/boost_1_61_0/mingw
+{% endcapture %}
+
+{% capture out3 %}
+using gcc : : x86_64-w64-mingw32-g++ ;
+{% endcapture %}
+
+{% capture cmd4 %}
+./b2 toolset=gcc-mingw \
+ address-model=64 \
+ target-os=windows \
+ link=static \
+ variant=debug \
+ --stagedir=stage/x64/debug \
+ --user-config=user-config-x64.jam \
+ --with-filesystem \
+ --with-program_options \
+ ...
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='pwd' out=out1 %}
+{% include jekyll-theme/shell.html cmd='./bootstrap.sh' %}
+{% include jekyll-theme/shell.html cmd='cat user-config-x64.jam' out=out3 %}
+{% include jekyll-theme/shell.html cmd=cmd4 %}
+
+The "user" configuration file above stopped working at some point; not sure as
+to who's to blame, Cygwin or Boost.
+If you see something like "`error: provided command 'x86_64-w64-mingw32-g++'
+not found`", add ".exe" to the binary name above, so that the whole file reads
+"`using gcc : : x86_64-w64-mingw32-g++.exe ;`".
+{: .alert .alert-info }
+
+Usage in CMake
+--------------
+
+### Visual Studio
+
+Examples below apply to Visual Studio 2015.
+You may want to adjust the paths.
+
+#### x86
+
+{% capture out1 %}
+D:\workspace\build\test_project\msvc\x86
+{% endcapture %}
+
+{% capture cmd2 %}
+cmake -G "Visual Studio 14 2015" ^
+ -D BOOST_ROOT=D:\workspace\third-party\boost_1_61_0\msvc ^
+ -D BOOST_LIBRARYDIR=D:\workspace\third-party\boost_1_61_0\msvc\stage\x86\lib ^
+ -D Boost_USE_STATIC_LIBS=ON ^
+ -D Boost_USE_STATIC_RUNTIME=ON ^
+ ...
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='cd' out=out1 %}
+{% include jekyll-theme/shell.html cmd=cmd2 %}
+
+#### x64
+
+{% capture out1 %}
+D:\workspace\build\test_project\msvc\x64
+{% endcapture %}
+
+{% capture cmd2 %}
+cmake -G "Visual Studio 14 2015 Win64" ^
+ -D BOOST_ROOT=D:\workspace\third-party\boost_1_61_0\msvc ^
+ -D BOOST_LIBRARYDIR=D:\workspace\third-party\boost_1_61_0\msvc\stage\x64\lib ^
+ -D Boost_USE_STATIC_LIBS=ON ^
+ -D Boost_USE_STATIC_RUNTIME=ON ^
+ ...
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='cd' out=out1 %}
+{% include jekyll-theme/shell.html cmd=cmd2 %}
+
+### Cygwin & MinGW-w64
+
+Examples below only apply to debug CMake builds.
+Notice that, contrary to the Visual Studio examples above, debug and release
+builds must be kept in separate directories.
+You may also want to adjust the paths.
+
+#### x86
+
+{% capture out1 %}
+/cygdrive/d/workspace/build/test_project/mingw/x86/debug
+{% endcapture %}
+
+{% capture cmd2 %}
+cmake -G "Unix Makefiles" \
+ -D CMAKE_BUILD_TYPE=Debug \
+ -D CMAKE_C_COMPILER=i686-w64-mingw32-gcc \
+ -D CMAKE_CXX_COMPILER=i686-w64-mingw32-g++ \
+ -D BOOST_ROOT=/cygdrive/d/workspace/third-party/boost_1_61_0/mingw \
+ -D BOOST_LIBRARYDIR=/cygdrive/d/workspace/third-party/boost_1_61_0/mingw/stage/x86/debug/lib \
+ -D Boost_USE_STATIC_LIBS=ON \
+ ...
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='pwd' out=out1 %}
+{% include jekyll-theme/shell.html cmd=cmd2 %}
+
+#### x64
+
+{% capture out1 %}
+/cygdrive/d/workspace/build/test_project/mingw/x64/debug
+{% endcapture %}
+
+{% capture cmd2 %}
+cmake -G "Unix Makefiles" \
+ -D CMAKE_BUILD_TYPE=Debug \
+ -D CMAKE_C_COMPILER=x86_64-w64-mingw32-gcc \
+ -D CMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ \
+ -D BOOST_ROOT=/cygdrive/d/workspace/third-party/boost_1_61_0/mingw \
+ -D BOOST_LIBRARYDIR=/cygdrive/d/workspace/third-party/boost_1_61_0/mingw/stage/x64/debug/lib \
+ -D Boost_USE_STATIC_LIBS=ON \
+ ...
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='pwd' out=out1 %}
+{% include jekyll-theme/shell.html cmd=cmd2 %}
diff --git a/_posts/2017-06-24-static-vs-inline-vs-unnamed-namespaces.md b/_posts/2017-06-24-static-vs-inline-vs-unnamed-namespaces.md
new file mode 100644
index 0000000..cb12867
--- /dev/null
+++ b/_posts/2017-06-24-static-vs-inline-vs-unnamed-namespaces.md
@@ -0,0 +1,273 @@
+---
+title: static vs. inline vs. namespace&nbsp;{
+category: C++
+snippets_root_directory: snippets/static_vs_inline_vs_unnamed_namespaces
+snippets_language: c++
+snippets:
+ static:
+ - static/main.cpp
+ - static/proxy.cpp
+ - static/proxy.hpp
+ - static/shared.hpp
+ inline:
+ - inline/shared.hpp
+ inline_weird:
+ - inline/weird/main.cpp
+ - inline/weird/another.cpp
+ - inline/weird/another.hpp
+ unnamed_namespaces_weird:
+ - unnamed_namespaces/weird/main.cpp
+ - unnamed_namespaces/weird/another.cpp
+ - unnamed_namespaces/weird/another.hpp
+ unnamed_namespaces_ok:
+ - unnamed_namespaces/ok/main.cpp
+ - unnamed_namespaces/ok/another.cpp
+ - unnamed_namespaces/ok/another.hpp
+ static_and_inline:
+ - static_and_inline/main.cpp
+ - static_and_inline/proxy.cpp
+ - static_and_inline/proxy.hpp
+ - static_and_inline/shared.hpp
+ unnamed_namespace_and_inline:
+ - unnamed_namespace_and_inline/main.cpp
+ - unnamed_namespace_and_inline/proxy.cpp
+ - unnamed_namespace_and_inline/proxy.hpp
+ - unnamed_namespace_and_inline/shared.hpp
+ separate_method_definitions:
+ - separate_method_definitions/main.cpp
+ - separate_method_definitions/another.cpp
+ - separate_method_definitions/another.hpp
+ - separate_method_definitions/shared.hpp
+---
+In this post I'll try to figure out whether I should use `static`, `inline` or
+unnamed namespaces for function definitions.
+
+TL;DR
+-----
+
+Here's my attempt to build an algorithm to decide whether a class/function
+should be defined with either of the `static`/`inline` specifiers or put into
+an unnamed namespace.
+The first question I answer is: is the entity defined in a header file or in a
+.cpp file?
+
+* **In a header** &mdash; Is it a class or a function?
+ * **Class** &mdash; There's no need to do anything.
+ * **Function** &mdash; Do you want it to behave differently for each
+translation unit (may be useful, for example, for logging)?
+ * **Yes** &mdash; Use `static`.
+ * **No** &mdash; Use `inline`.
+* **In a .cpp file** &mdash; Put it into an unnamed namespace.
+
+`static`
+--------
+
+It's an old C-style method of defining functions in header files.
+This way, every translation unit gets its own copy of a function.
+What does that mean?
+The most obvious implication that pops into my head is that every local static
+variable defined inside that function gets an independent replica in every
+translation unit.
+For example, the program below would print
+
+```
+1
+1
+```
+
+due to the fact that both main.cpp and proxy.cpp get their own versions of `n`
+from `shared()`.
+
+{% include jekyll-theme/snippets/section.html section_id='static' %}
+
+In C, this is the only way to share function definitions between translation
+units (apart from the usual way of declaring a function in a header file and
+putting its definition to a .c file).
+
+### Properties
+
+* Using `static`, you can share function definitions between multiple
+translation units.
+* Each unit gets its own replica of the function: they have different
+addresses, their local static variables are independent, etc.
+* If different translation units define different functions with the same
+name using the `static` specifier, each unit can use its function without any
+issues.
+This might seem like a trivial claim, but other approaches sometimes disallow
+this, which is discussed below.
+
+`inline`
+--------
+
+It's well-known that this keyword has pretty much nothing to do with whether a
+function will actually be inlined or not.
+It's used much more often to define functions in header files, since every
+function defined this way will be the same (as in "will have the same address")
+in every translation unit.
+Let's try and adjust the definition of `shared()` accordingly:
+
+{% include jekyll-theme/snippets/section.html section_id='inline' %}
+
+The same program would then print
+
+```
+1
+2
+```
+
+since both `main()` and `proxy()` would call the same `shared()`, incrementing
+the same `n`.
+
+Weird things happen when different translation units define different `inline`
+functions with the same name.
+
+{% include jekyll-theme/snippets/section.html section_id='inline_weird' %}
+
+According to my simple experiments, this program produces different output
+based on which .cpp file was specified first on the command line during
+compilation.
+For example, this is the output of test.exe produced with either `cl /W4 /EHsc
+main.cpp another.cpp /Fe:test.exe` or `g++ -Wall -Wextra -std=c++11 main.cpp
+another.cpp -o test.exe`.
+
+```
+main.cpp: shared()
+main.cpp: shared()
+```
+
+If we swap the order of .cpp files (`another.cpp main.cpp` instead of `main.cpp
+another.cpp`), the output becomes
+
+```
+another.cpp: shared()
+another.cpp: shared()
+```
+
+No warnings/errors are emitted, making the situation truly disturbing.
+I tested this with GNU compiler version 5.4.0 and Microsoft compiler version
+19.00.24210.
+
+This behavior can be easily fixed either by making these functions `static` or
+by using unnamed namespaces (see below).
+
+### Properties
+
+* Using `inline`, you can share function definitions between multiple
+translation units.
+* Each translation unit will use the same function: it will have the same
+address in every translation unit, its local static variables will be shared,
+etc.
+* Defining different `inline` functions with the same name in different
+translation units is undefined behavior.
+
+Two inline functions might be different even if they are the same textually.
+For example, they might reference two global variables which have the same
+name, but are defined in different translation units.
+{: .alert .alert-info }
+
+`namespace {`
+-------------
+
+With respect to function definitions, unnamed namespaces are, according to my
+understanding, quite similar to the `static` keyword.
+The additional value they provide is a way to apply `static`
+not only to functions, but also to classes.
+Remember the weirdness that happens when multiple translation units define
+different `inline` functions with the same name?
+Arguably, it gets even worse if we add classes to the equation.
+
+{% include jekyll-theme/snippets/section.html section_id='unnamed_namespaces_weird' %}
+
+Compiling this program the same way we did in the `inline` example (`cl /W4
+/EHsc main.cpp another.cpp /Fe:test.exe`/`g++ -Wall -Wextra -std=c++11 main.cpp
+another.cpp -o test.exe`) yields different outputs depending on which .cpp file
+was specified first.
+
+```
+main.cpp: Test::Test()
+1
+main.cpp: Test::Test()
+```
+
+```
+another.cpp: Test::Test()
+1065353216
+another.cpp: Test::Test()
+```
+
+I'm not sure why anybody would want that.
+This can be easily fixed by putting both `Test` classes into unnamed
+namespaces.
+The program then reads
+
+{% include jekyll-theme/snippets/section.html section_id='unnamed_namespaces_ok' %}
+
+After the adjustment, it produces the same output regardless of compilation
+options.
+
+```
+main.cpp: Test::Test()
+1
+another.cpp: Test::Test()
+```
+
+Notice how sharing classes defined in header files isn't discussed here.
+The standard actually guarantees that if a class is defined in a header file,
+all translation units that use it share the definition.
+
+### Properties
+
+* Essentially, unnamed namespaces allow the `static` keyword to be applied to
+classes.
+* Similar to the `static` approach, each translation unit gets its own replica
+of a function/class, including their own local static variables, etc.
+* Defining different classes with the same name in different translation units
+(without utilizing unnamed namespaces) is undefined behavior.
+
+Tricky cases
+------------
+
+### `static` + `inline`
+
+In case a function is defined as `static inline`, `static` wins, and `inline`
+is ignored.
+The program below outputs
+
+```
+1
+1
+```
+
+{% include jekyll-theme/snippets/section.html section_id='static_and_inline' %}
+
+In general, I can't think of a reason to define a `static inline` function.
+
+### `namespace {` + `inline`
+
+If an `inline` function is defined in an unnamed namespace, the unnamed
+namespace wins.
+The program below outputs
+
+```
+1
+1
+```
+
+{% include jekyll-theme/snippets/section.html section_id='unnamed_namespace_and_inline' %}
+
+In general, I can't think of a reason to define an `inline` function in an
+unnamed namespace.
+
+### Separate method definitions
+
+If you want to separate your class declaration from its method definitions
+while keeping them in the same header file, each method must be explicitly
+defined `inline`.
+The program below outputs
+
+```
+1
+2
+```
+
+{% include jekyll-theme/snippets/section.html section_id='separate_method_definitions' %}
diff --git a/_posts/2018-02-18-peculiar-indentation.md b/_posts/2018-02-18-peculiar-indentation.md
new file mode 100644
index 0000000..c477382
--- /dev/null
+++ b/_posts/2018-02-18-peculiar-indentation.md
@@ -0,0 +1,101 @@
+---
+title: Peculiar Haskell indentation
+category: Haskell
+---
+I've fallen into a Haskell indentation pitfall.
+I think it must be common, so I'm describing it here.
+
+The problem is that indentation rules in `do` blocks are not intuitive to me.
+For example, the following function is valid Haskell syntax:
+
+```haskell
+foo1 :: IO ()
+foo1 =
+ alloca $ \a ->
+ alloca $ \b ->
+ alloca $ \c -> do
+ poke a (1 :: Int)
+ poke b (1 :: Int)
+ poke c (1 :: Int)
+ return ()
+```
+
+In fact, this funnier version is also OK:
+
+```haskell
+foo2 :: IO ()
+foo2 = alloca $ \a ->
+ alloca $ \b ->
+ alloca $ \c -> do
+ poke a (1 :: Int)
+ poke b (1 :: Int)
+ poke c (1 :: Int)
+ return ()
+```
+
+If you add an outer `do` however, things become a little more complicated.
+For example, this is the valid version of the functions above with an outer
+`do`:
+
+```haskell
+foo3 :: IO ()
+foo3 = do
+ alloca $ \a ->
+ alloca $ \b ->
+ alloca $ \c -> do
+ poke a (1 :: Int)
+ poke b (1 :: Int)
+ poke c (1 :: Int)
+ return ()
+```
+
+Notice the extra indentation for each of the `alloca`s.
+When I tried to remove these seemingly excessive indents, GHC complained with
+the usual `parse error (possibly incorrect indentation or mismatched
+brackets)`.
+
+```haskell
+foo4 :: IO ()
+foo4 = do
+ alloca $ \a ->
+ alloca $ \b ->
+ alloca $ \c -> do
+ poke a (1 :: Int)
+ poke b (1 :: Int)
+ poke c (1 :: Int)
+ return ()
+```
+
+The truth is, the rules for desugaring `do` blocks are surprisingly simple and
+literal.
+GHC inserts semicolons according to the rules [found in the Wikibook].
+So it inserts semicolons between the `alloca`s on the same level, so `foo4`
+becomes:
+
+```haskell
+foo4 :: IO ()
+foo4 = do
+ { alloca $ \a ->
+ ; alloca $ \b ->
+ ; alloca $ \c -> do
+ { poke a (1 :: Int)
+ ; poke b (1 :: Int)
+ ; poke c (1 :: Int)
+ ; return ()
+ }
+ }
+```
+
+[found in the Wikibook]: https://en.wikibooks.org/wiki/Haskell/Indentation#Explicit_characters_in_place_of_indentation
+
+The semicolons after `->` are clearly invalid Haskell syntax, hence the error.
+
+P.S. To compile the functions above, you need to include them in a module and
+add proper imports, e.g.
+
+```haskell
+module PeculiarIndentation where
+
+import Foreign.Marshal.Alloc (alloca)
+import Foreign.Storable (poke)
+```
diff --git a/_posts/2020-02-24-ssh-tunnel-windows.md b/_posts/2020-02-24-ssh-tunnel-windows.md
new file mode 100644
index 0000000..fdbc134
--- /dev/null
+++ b/_posts/2020-02-24-ssh-tunnel-windows.md
@@ -0,0 +1,153 @@
+---
+title: Persistent SSH tunnel
+---
+SSH tunneling is awesome.
+For some reason, I've only recently learned about this feature, but I've been
+immediately blown away by how useful it can be.
+
+Basically, to use SSH tunneling (a.k.a. port forwarding) you need to have a SSH
+client (`ssh`) with access to a SSH server.
+You can then access any host your SSH server has access to.
+It works like this:
+
+* the client establishes a connection to the SSH server,
+* the client asks the server to forward incoming requests to the destination
+host,
+* the client listens on a proxy port on the local machine, and forwards
+requests to the SSH server.
+
+Say, you have access to SSH server `gateway` on port 22, and you want to gain
+access to HTTPS server `dest` on port 443, which is only accessible from the
+SSH server.
+You can then run something like
+
+{% include jekyll-theme/shell.html cmd='ssh -L 4433:dest:443 gateway -p 22' %}
+
+And now you can access `dest` at `https://localhost:4433/`.
+That's brilliant, really.
+
+But there's more.
+You can make a _reverse_ tunnel, allowing you to give access to any host your
+client computer has access to, via a remote SSH server.
+It works like this:
+
+* your SSH client establishes a connection to the SSH server,
+* the client asks the server to listen on a port of your choosing and forward
+incoming requests to the client,
+* the client forwards incoming requests to the destination host.
+
+This, as I've recently learned, is a common pattern to subvert corporate
+firewalls, which frequently forbid incoming connections.
+Say, you want to access your work computer from home via RDP.
+Both your home and your work computers have access to a SSH server `gateway` on
+port 22 (you might want to change it to port 80 or 443 if your outside
+connections are filtered).
+
+You can then run something like (notice the `-R`)
+
+{% include jekyll-theme/shell.html cmd='ssh -R 13389:127.0.0.1:3389 gateway -p 22' %}
+
+and now you can connect to `gateway:13389` from your home computer using an RDP
+client.
+Even more brilliant!
+
+You might need to set the `GatewayPorts` setting to `yes` or `clientspecified`
+on your SSH server (typically in "/etc/ssh/sshd_config").
+
+Batch mode
+----------
+
+If you want to establish a reverse SSH tunnel automatically, some tweaking is
+required.
+First, set some SSH client options:
+
+* `-F /dev/null` to disregard the user config,
+* `-oBatchMode=yes` to run non-interactively,
+* `-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null` to disable server
+verification (optional),
+* `-oExitOnForwardFailure=yes` to exit if port forwarding fails,
+* `-oServerAliveCountMax=3 -oServerAliveInterval=15` to break the connection if
+the server or the network is down,
+* `-N -n -T` to only forward the ports and not execute the shell or any
+additional commands.
+
+Thus, the full command would be something like
+
+{% capture cmd1 %}
+ssh \
+ -F /dev/null \
+ -oBatchMode=yes \
+ -oStrictHostKeyChecking=no \
+ -oUserKnownHostsFile=/dev/null \
+ -oExitOnForwardFailure=yes \
+ -oServerAliveCountMax=3 \
+ -oServerAliveInterval=15 \
+ -N -n -T \
+ -R 13389:127.0.0.1:3389 \
+ user@gateway -p 22 \
+ -i ~/.ssh/tunnel
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd=cmd1 %}
+
+Adjust the `user@gateway -p 22` part accordingly.
+
+Notice also `-i ~/.ssh/tunnel`.
+It's the path to the SSH key used to authenticate with the server.
+It can't have a passphrase, since the command will be run non-interactively,
+and the public key must be in the server's authorized_keys file.
+
+For best results, you should also adjust some settings on the SSH server.
+Namely, you should enable client keep-alives on the server using something like
+
+```
+ClientAliveCountMax 3
+ClientAliveInterval 15
+```
+
+Unless you do that, even if the client breaks the connection, you won't be able
+to re-establish it for a long-ish time, since the server wouldn't know that the
+original connection is no longer valid.
+
+As a service
+------------
+
+Cygwin is awesome.
+I've been using it for 10+ years, and it has never failed me.
+It comes with a SSH server, a client (you need to install the `openssh` package
+for both of these), and a service manager, `cygrunsrv`.
+`cygrunsrv` is similar to [NSSM], as it allows you to wrap any executable into
+a native Windows service.
+
+[NSSM]: https://nssm.cc/
+
+Using `cygrunsrv`, you can create a Windows service to establish a reverse SSH
+tunnel automatically.
+
+{% capture cmd1 %}
+cygrunsrv \
+ -I ssh_tunnel \
+ -p /usr/bin/ssh \
+ --args '-F /dev/null -oBatchMode=yes -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oExitOnForwardFailure=yes -oServerAliveCountMax=3 -oServerAliveInterval=15 -N -n -T -R 13389:127.0.0.1:3389 user@gateway -p 22 -i ~/.ssh/tunnel' \
+ --disp 'Reverse SSH tunnels' \
+ --user user \
+ --neverexits \
+ --preshutdown
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd=cmd1 %}
+
+Adjust the `--user` and the `--args` values accordingly.
+
+You can then run `services.msc` and adjust the recovery settings for the
+service to restart if `ssh` fails:
+
+<div class="row">
+ <div class="col-xs-12 col-sm-8 col-md-6">
+ <a href="{{ '/assets/img/ssh_tunnel_services.png' | relative_url }}" class="thumbnail">
+ <img class="img-responsive" alt="services.msc" src="{{ '/assets/img/ssh_tunnel_services.png' | relative_url }}">
+ </a>
+ </div>
+</div>
+
+And voilà, you have an automatic reverse SSH tunnel on Windows for you!
diff --git a/_posts/2020-05-06-docker-bind-mounts.md b/_posts/2020-05-06-docker-bind-mounts.md
new file mode 100644
index 0000000..832c132
--- /dev/null
+++ b/_posts/2020-05-06-docker-bind-mounts.md
@@ -0,0 +1,227 @@
+---
+title: 'Docker: bind mounts & file ownership'
+---
+If you want to:
+
+1. run your Docker service as a user other than root,
+2. share a writable directory between your host and the container,
+
+you're in for a treat!
+The thing is, files stored in the shared directory retain their ownership (and
+by that I mean their UIDs and GIDs, as they're the only thing that matters)
+after being mounted in the container.
+
+Case in point:
+
+{% capture cmd1 %}
+docker run -it --rm -v "$( pwd ):/data" alpine touch /data/test.txt
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd=cmd1 %}
+
+would create file ./test.txt owned by root:root.
+
+You can fix that by using the `--user` parameter:
+
+{% capture cmd1 %}
+docker run -it --rm -v "$( pwd ):/data" --user "$( id -u ):$( id -g )" alpine touch /data/test.txt
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd=cmd1 %}
+
+That would create file ./test.txt owned by the current user (if the current
+working directory is writable by the current user, of course).
+
+More often though, instead of a simple `touch` call, you have a 24/7 service,
+which absolutely mustn't run as root, regardless of whether `--user` was
+specified or not.
+In such cases, the logical solution would be to create a regular user in the
+container, and use it to run the service.
+In fact, that's what many popular images do, i.e. [Redis][Redis Dockerfile] and
+[MongoDB][MongoDB Dockerfile].
+
+[Redis Dockerfile]: https://github.com/docker-library/redis/blob/cc1b618d51eb5f6bf6e3a03c7842317b38dbd7f9/6.0/Dockerfile#L4
+[MongoDB Dockerfile]: https://github.com/docker-library/mongo/blob/5cbf7be9a486932b7e472a39e432c9a444628465/4.2/Dockerfile#L4
+
+How do you run the service as regular user though?
+It's tempting to use the `USER` directive in the Dockerfile, but that can be
+overridden by `--user`:
+
+{% capture cmd1 %}
+cat Dockerfile
+{% endcapture %}
+{% capture out1 %}
+FROM alpine
+
+RUN addgroup --gid 9099 test-group && \
+ adduser \
+ --disabled-password \
+ --gecos '' \
+ --home /home/test-user \
+ --ingroup test-group \
+ --uid 9099 \
+ test-user
+
+RUN touch /root.txt
+USER test-user:test-group
+RUN touch /home/test-user/test-user.txt
+
+CMD id && stat -c '%U %G' /root.txt && stat -c '%U %G' /home/test-user/test-user.txt
+{% endcapture %}
+
+{% capture cmd2 %}
+docker build -t id .
+{% endcapture %}
+
+{% capture cmd3 %}
+docker run -it --rm id
+{% endcapture %}
+{% capture out3 %}
+uid=9099(test-user) gid=9099(test-group)
+root root
+test-user test-group
+{% endcapture %}
+
+{% capture cmd4 %}
+docker run -it --rm --user root id
+{% endcapture %}
+{% capture out4 %}
+uid=0(root) gid=0(root) groups=0(root),1(bin),2(daemon),3(sys),4(adm),6(disk),10(wheel),11(floppy),20(dialout),26(tape),27(video)
+root root
+test-user test-group
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd=cmd1 out=out1 %}
+{% include jekyll-theme/shell.html cmd=cmd2 %}
+{% include jekyll-theme/shell.html cmd=cmd3 out=out3 %}
+{% include jekyll-theme/shell.html cmd=cmd4 out=out4 %}
+
+I suppose that's the reason why many popular images override ENTRYPOINT, using
+a custom script (and `gosu`, which is basically `sudo`, I think) to forcefully
+drop privileges (for example, see [Redis][Redis entrypoint],
+[MongoDB][MongoDB entrypoint]).
+
+[Redis entrypoint]: https://github.com/docker-library/redis/blob/cc1b618d51eb5f6bf6e3a03c7842317b38dbd7f9/6.0/docker-entrypoint.sh#L11
+[MongoDB entrypoint]: https://github.com/docker-library/mongo/blob/5cbf7be9a486932b7e472a39e432c9a444628465/4.2/docker-entrypoint.sh#L12
+
+Now, what if such service needs persistent storage?
+A good solution would be to use Docker volumes.
+For development though, you often need to just share a directory between your
+host and the container, and it has to be writable by both the host and the
+container process.
+This can be accomplished using _bind mounts_.
+For example, let's try to map ./data to /data inside a Redis container (this
+assumes ./data doesn't exist and you're running as regular user with UID 1000;
+press Ctrl+C to stop Redis):
+
+{% capture cmd1 %}
+mkdir data
+{% endcapture %}
+
+{% capture cmd2 %}
+stat -c '%u' data
+{% endcapture %}
+{% capture out2 %}
+1000
+{% endcapture %}
+
+{% capture cmd3 %}
+docker run -it --rm --name redis -v "$( pwd )/data:/data" redis:6.0
+{% endcapture %}
+
+{% capture cmd4 %}
+stat -c '%u' data
+{% endcapture %}
+{% capture out4 %}
+999
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd=cmd1 %}
+{% include jekyll-theme/shell.html cmd=cmd2 out=out2 %}
+{% include jekyll-theme/shell.html cmd=cmd3 %}
+{% include jekyll-theme/shell.html cmd=cmd4 out=out4 %}
+
+As you can see, ./data changed its owner from user with UID 1000 (the host
+user) to user with UID 999 (the `redis` user inside the container).
+This is done in Redis' ENTRYPOINT script, just before dropping root privileges
+so that the `redis-server` process owns the /data directory and thus can write
+to it.
+
+If you want to preserve ./data ownership, Redis' image (and many others)
+explicitly accounts for it by _not_ changing its owner if the container is
+run as anybody other than root.
+For example:
+
+{% capture cmd1 %}
+mkdir data
+{% endcapture %}
+
+{% capture cmd2 %}
+stat -c '%u' data
+{% endcapture %}
+{% capture out2 %}
+1000
+{% endcapture %}
+
+{% capture cmd3 %}
+docker run -it --rm --name redis -v "$( pwd )/data:/data" --user "$( id -u ):$( id -g )" redis:6.0
+{% endcapture %}
+
+{% capture cmd4 %}
+stat -c '%u' data
+{% endcapture %}
+{% capture out4 %}
+1000
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd=cmd1 %}
+{% include jekyll-theme/shell.html cmd=cmd2 out=out2 %}
+{% include jekyll-theme/shell.html cmd=cmd3 %}
+{% include jekyll-theme/shell.html cmd=cmd4 out=out4 %}
+
+Going hardcore
+--------------
+
+Sometimes `--user` is not enough though.
+The specified user is almost certainly missing from the container's
+/etc/passwd, it doesn't have a $HOME directory, etc.
+All of that could cause problems with some applications.
+
+The solution often suggested is to create a container user with a fixed UID
+(that would match the host user UID).
+That way, the app won't be run as root, the user will have a proper entry in
+/etc/passwd, it will be able to write to the bind mount owned by the host user,
+and it won't have to change the directory's permissions.
+
+We can create a user with a fixed UID when
+
+1. building the image (using build `ARG`uments),
+2. first starting the container by passing the required UID using environment
+variables.
+
+The advantage of creating the user when building the image is that we can also
+do additional work in the Dockerfile (like if you need to install dependencies
+as that user).
+The disadvantage is that the image would need to be rebuilt for every user on
+every machine.
+
+Creating the user when first starting the container switches the pros and cons.
+You don't need to rebuild the image every time, but you'll have to waste time
+and resources by doing the additional work that could've been done in the
+Dockerfile every time you create a container.
+
+For my project [jekyll-docker] I opted for the former approach, making sure the
+`jekyll` process runs with the same UID as the user who built the image (unless
+it was built by root, in which case it falls back to a custom UID of 999).
+Seems to work quite nicely in practice.
+
+[jekyll-docker]: https://github.com/egor-tensin/jekyll-docker/tree/7d1824a5fac0ed483bc49209bbd89f564a7bcefe
+
+Useful links
+------------
+
+* [Docker and \-\-userns-remap, how to manage volume permissions to share data between host and container?](https://stackoverflow.com/q/35291520/514684)
+* [What is the (best) way to manage permissions for Docker shared volumes?](https://stackoverflow.com/q/23544282/514684)
+* [Handling Permissions with Docker Volumes](https://denibertovic.com/posts/handling-permissions-with-docker-volumes/)
+* [File Permissions: the painful side of Docker](https://blog.gougousis.net/file-permissions-the-painful-side-of-docker/)
+* [Avoiding Permission Issues With Docker-Created Files](https://vsupalov.com/docker-shared-permissions/)
diff --git a/_posts/2020-05-20-makefile-escaping.md b/_posts/2020-05-20-makefile-escaping.md
new file mode 100644
index 0000000..d468cc3
--- /dev/null
+++ b/_posts/2020-05-20-makefile-escaping.md
@@ -0,0 +1,439 @@
+---
+title: Escaping characters in Makefile
+---
+TL;DR: visit [this page] for a short and concise version of this article.
+{: .alert .alert-success }
+
+[this page]: {% link _notes/makefile.md %}
+
+I'm a big sucker for irrelevant nitpicks like properly quoting arguments in
+shell scripts.
+I've also recently started using GNU make as a substitute for one-line shell
+scripts (so instead of a bunch of scripts like build.sh, deploy.sh, test.sh I
+get to have a single Makefile and can just run `make build`, `make deploy`,
+`make test`).
+
+As a side note, there's an excellent [Makefile style guide] available on the
+web.
+I'm going to be using a slightly modified prologue suggested in the guide in
+all Makefiles in this post:
+
+[Makefile style guide]: https://clarkgrubb.com/makefile-style-guide
+
+```
+MAKEFLAGS += --no-builtin-rules --no-builtin-variables --warn-undefined-variables
+unexport MAKEFLAGS
+.DEFAULT_GOAL := all
+.DELETE_ON_ERROR:
+.SUFFIXES:
+SHELL := bash
+.SHELLFLAGS := -eu -o pipefail -c
+```
+
+`make` invokes a shell program to execute recipes.
+As issues of properly escaping "special" characters are going to be discussed,
+the choice of shell is very relevant.
+The Makefiles in this post specify `bash` explicitly using the `SHELL`
+variable, but the same rules should apply for all similar `sh`-like shells.
+
+Quoting arguments
+-----------------
+
+You should quote command arguments in `make` rule recipes, just like in shell
+scripts.
+This is to prevent a single argument from being expanded into multiple
+arguments by the shell.
+
+{% capture out1 %}
+# Prologue goes here...
+
+test_var := Same line?
+export test_var
+
+test:
+ @printf '%s\n' $(test_var)
+ @printf '%s\n' '$(test_var)'
+ @printf '%s\n' $$test_var
+ @printf '%s\n' "$$test_var"
+{% endcapture %}
+
+{% capture out2 %}
+Same
+line?
+Same line?
+Same
+line?
+Same line?
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='cat Makefile' out=out1 %}
+{% include jekyll-theme/shell.html cmd='make test' out=out2 %}
+
+This is quite often sufficient to write valid recipes.
+
+One thing to note is that you shouldn't use double quotes `"` for quoting
+arguments, as they might contain literal dollar signs `$`, interpreted by the
+shell as variable references, which is not something you always want.
+
+Escaping quotes
+---------------
+
+What if `test_var` included a single quote `'`?
+In that case, even the quoted `printf` invocation would break because of the
+mismatch.
+
+{% capture out1 %}
+# Prologue goes here...
+
+test_var := Includes ' quote
+
+test:
+ printf '%s\n' '$(test_var)'
+{% endcapture %}
+
+{% capture out2 %}
+printf '%s\n' 'Includes ' quote'
+bash: -c: line 0: unexpected EOF while looking for matching `''
+make: *** [Makefile:11: test] Error 2
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='cat Makefile' out=out1 %}
+{% include jekyll-theme/shell.html cmd='make test' out=out2 %}
+
+One solution is to take advantage of how `bash` parses command arguments, and
+replace every quote `'` by `'\''`.
+This works because `bash` merges a string like `'Includes '\'' quote'` into
+`Includes ' quote`.
+
+{% capture out1 %}
+# Prologue goes here...
+
+escape = $(subst ','\'',$(1))
+
+test_var := Includes ' quote
+
+test:
+ printf '%s\n' '$(call escape,$(test_var))'
+{% endcapture %}
+
+{% capture out2 %}
+printf '%s\n' 'Includes '\'' quote'
+Includes ' quote
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='cat Makefile' out=out1 %}
+{% include jekyll-theme/shell.html cmd='make test' out=out2 %}
+
+Surprisingly, this works even in much more complicated cases.
+You can have a recipe that executes a command that takes a whole other command
+(with its own separate arguments) as an argument.
+I guess the most common use case is doing something like `ssh 'rm -rf
+$(junk_dir)'`, but I'll use nested `bash` calls instead for simplicity.
+
+{% capture out1 %}
+# Prologue goes here...
+
+escape = $(subst ','\'',$(1))
+
+test_var := Includes ' quote
+
+echo_test_var := printf '%s\n' '$(call escape,$(test_var))'
+bash_test_var := bash -c '$(call escape,$(echo_test_var))'
+
+test:
+ printf '%s\n' '$(call escape,$(test_var))'
+ bash -c '$(call escape,$(echo_test_var))'
+ bash -c '$(call escape,$(bash_test_var))'
+{% endcapture %}
+
+{% capture out2 %}
+printf '%s\n' 'Includes '\'' quote'
+Includes ' quote
+bash -c 'printf '\''%s\n'\'' '\''Includes '\''\'\'''\'' quote'\'''
+Includes ' quote
+bash -c 'bash -c '\''printf '\''\'\'''\''%s\n'\''\'\'''\'' '\''\'\'''\''Includes '\''\'\'''\''\'\''\'\'''\'''\''\'\'''\'' quote'\''\'\'''\'''\'''
+Includes ' quote
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='cat Makefile' out=out1 %}
+{% include jekyll-theme/shell.html cmd='make test' out=out2 %}
+
+That's somewhat insane, but it works.
+
+Shell output
+------------
+
+The `shell` function is one of the two most common ways to communicate with the
+outside world in a Makefile (the other being environment variables).
+This little `escape` function we've defined is actually sufficient to deal with
+the output of the `shell` function safely.
+
+{% capture out1 %}
+# Prologue goes here...
+
+escape = $(subst ','\'',$(1))
+
+cwd := $(shell basename -- "$$( pwd )")
+
+simple_var := Simple value
+composite_var := Composite value - $(simple_var) - $(cwd)
+
+.PHONY: test
+test:
+ @printf '%s\n' '$(call escape,$(cwd))'
+ @printf '%s\n' '$(call escape,$(composite_var))'
+{% endcapture %}
+
+{% capture cmd2 %}
+mkdir "Includes ' quote" && \
+ cd "Includes ' quote" && \
+ make -f ../Makefile test
+{% endcapture %}
+{% capture out2 %}
+Includes ' quote
+Composite value - Simple value - Includes ' quote
+{% endcapture %}
+
+{% capture cmd3 %}
+mkdir 'Maybe a comment #' && \
+ cd 'Maybe a comment #' && \
+ make -f ../Makefile test
+{% endcapture %}
+{% capture out3 %}
+Maybe a comment #
+Composite value - Simple value - Maybe a comment #
+{% endcapture %}
+
+{% capture cmd4 %}
+mkdir 'Variable ${reference}' && \
+ cd 'Variable ${reference}' && \
+ make -f ../Makefile test
+{% endcapture %}
+{% capture out4 %}
+Variable ${reference}
+Composite value - Simple value - Variable ${reference}
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='cat Makefile' out=out1 %}
+{% include jekyll-theme/shell.html cmd=cmd2 out=out2 %}
+{% include jekyll-theme/shell.html cmd=cmd3 out=out3 %}
+{% include jekyll-theme/shell.html cmd=cmd4 out=out4 %}
+
+Environment variables
+---------------------
+
+Makefiles often have parameters that modify their behaviour.
+The most common example is doing something like `make install
+PREFIX=/somewhere/else`, where the `PREFIX` argument overrides the default
+value "/usr/local".
+These parameters are often defined in a Makefile like this:
+
+```
+param_name ?= Default value
+```
+
+They should be `escape`d and quoted when passed to external commands, of
+course.
+However, things get complicated when they contain dollar signs `$`.
+`make` variables may contain references to other variables, and they're
+expanded recursively either when defined (for `:=` assignments) or when used
+(in all other cases, including `?=`).
+
+{% capture out1 %}
+# Prologue goes here...
+
+escape = $(subst ','\'',$(1))
+
+test_var ?= This is safe.
+export test_var
+
+.PHONY: test
+test:
+ @printf '%s\n' '$(call escape,$(test_var))'
+ @printf '%s\n' "$$test_var"
+{% endcapture %}
+
+{% capture cmd2 %}
+test_var='Variable ${reference}' make test
+{% endcapture %}
+{% capture out2 %}
+Makefile:15: warning: undefined variable 'reference'
+Variable
+Variable ${reference}
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='cat Makefile' out=out1 %}
+{% include jekyll-theme/shell.html cmd=cmd2 out=out2 %}
+
+Here, `$(test_var)` is expanded recursively, substituting an empty string for
+the `${reference}` part.
+One attempt to solve this is to escape the dollar sign in the variable value,
+but that breaks the `"$$test_var"` case:
+
+{% capture cmd1 %}
+test_var='Variable $${reference}' make test
+{% endcapture %}
+{% capture out1 %}
+Variable ${reference}
+Variable $${reference}
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd=cmd1 out=out1 %}
+
+A working solution would be to use the `escape` function on the unexpanded
+variable value.
+Turns out, you can do just that using the `value` function in `make`.
+
+{% capture out1 %}
+# Prologue goes here...
+
+escape = $(subst ','\'',$(1))
+
+test_var ?= This is safe.
+test_var := $(value test_var)
+export test_var
+
+.PHONY: test
+test:
+ @printf '%s\n' '$(call escape,$(test_var))'
+ @printf '%s\n' "$$test_var"
+{% endcapture %}
+
+{% capture cmd2 %}
+test_var="Quote '"' and variable ${reference}' make test
+{% endcapture %}
+{% capture out2 %}
+Quote ' and variable ${reference}
+Quote ' and variable ${reference}
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='cat Makefile' out=out1 %}
+{% include jekyll-theme/shell.html cmd=cmd2 out=out2 %}
+
+This doesn't quite work though when [overriding variables] on the command line.
+For example, this doesn't work:
+
+[overriding variables]: https://www.gnu.org/software/make/manual/html_node/Overriding.html#Overriding
+
+{% capture cmd1 %}
+make test test_var='Variable ${reference}'
+{% endcapture %}
+{% capture out1 %}
+Makefile:16: warning: undefined variable 'reference'
+make: warning: undefined variable 'reference'
+Variable
+Variable
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd=cmd1 out=out1 %}
+
+This is because `make` ignores all assignments to `test_var` if it's overridden
+on the command line (including `test_var := $(value test_var)`).
+
+This can be fixed using the `override` directive for these cases only.
+A complete solution that works for seemingly all cases looks like something
+along these lines:
+
+```
+ifeq ($(origin test_var),environment)
+ test_var := $(value test_var)
+endif
+ifeq ($(origin test_var),environment override)
+ test_var := $(value test_var)
+endif
+ifeq ($(origin test_var),command line)
+ override test_var := $(value test_var)
+endif
+```
+
+Here, we check where the value of `test_var` comes from using the `origin`
+function.
+If it was defined in the environment (the `environment` and `environment
+override` cases), its value is prevented from being expanded using the `value`
+function.
+If it was overridden on the command line (the `command line` case), the
+`override` directive is used so that the unexpanded value actually gets
+assigned.
+
+The snippet above can be generalized by defining a custom function that
+produces the required `make` code, and then calling `eval`.
+
+```
+define noexpand
+ifeq ($$(origin $(1)),environment)
+ $(1) := $$(value $(1))
+endif
+ifeq ($$(origin $(1)),environment override)
+ $(1) := $$(value $(1))
+endif
+ifeq ($$(origin $(1)),command line)
+ override $(1) := $$(value $(1))
+endif
+endef
+
+test_var ?= This is safe.
+
+$(eval $(call noexpand,test_var))
+```
+
+I couldn't find a case where the combination of `escape` and `noexpand`
+wouldn't work.
+You can even safely use another variable as the default value of `test_var`,
+and it works:
+
+{% capture out1 %}
+# Prologue goes here...
+
+escape = $(subst ','\'',$(1))
+
+define noexpand
+ifeq ($$(origin $(1)),environment)
+ $(1) := $$(value $(1))
+endif
+ifeq ($$(origin $(1)),environment override)
+ $(1) := $$(value $(1))
+endif
+ifeq ($$(origin $(1)),command line)
+ override $(1) := $$(value $(1))
+endif
+endef
+
+simple_var := Simple value
+
+test_var ?= $(simple_var) in test_var
+$(eval $(call noexpand,test_var))
+
+simple_var := New simple value
+composite_var := Composite value - $(simple_var) - $(test_var)
+
+.PHONY: test
+test:
+ @printf '%s\n' '$(call escape,$(test_var))'
+ @printf '%s\n' '$(call escape,$(composite_var))'
+{% endcapture %}
+
+{% capture out2 %}
+New simple value in test_var
+Composite value - New simple value - New simple value in test_var
+{% endcapture %}
+
+{% capture cmd3 %}
+make test test_var='Variable ${reference}'
+{% endcapture %}
+{% capture out3 %}
+Variable ${reference}
+Composite value - New simple value - Variable ${reference}
+{% endcapture %}
+
+{% capture cmd4 %}
+test_var='Variable ${reference}' make test
+{% endcapture %}
+{% capture out4 %}
+Variable ${reference}
+Composite value - New simple value - Variable ${reference}
+{% endcapture %}
+
+{% include jekyll-theme/shell.html cmd='cat Makefile' out=out1 %}
+{% include jekyll-theme/shell.html cmd='make test' out=out2 %}
+{% include jekyll-theme/shell.html cmd=cmd3 out=out3 %}
+{% include jekyll-theme/shell.html cmd=cmd4 out=out4 %}
diff --git a/_posts/2021-03-10-ubuntu-packaging.md b/_posts/2021-03-10-ubuntu-packaging.md
new file mode 100644
index 0000000..ad94f2a
--- /dev/null
+++ b/_posts/2021-03-10-ubuntu-packaging.md
@@ -0,0 +1,320 @@
+---
+title: Basic Ubuntu packaging
+snippets_root_directory: snippets/ubuntu_packaging
+snippets_language: plain
+snippets:
+ basic:
+ - basic/changelog
+ - basic/control
+ - basic/copyright
+ - basic/rules
+ install:
+ - install/test.install
+ gbp:
+ - gbp/gbp.conf
+---
+It took me about an hour to make a PKGBUILD for my simple, non-compiled
+piece of software to be published on [AUR].
+In contrast, it took me a few days to figure out how to build suitable .deb
+packages for publishing in a PPA on [Launchpad].
+In this post, I'll try to describe some of the initial pain points of mine.
+
+[AUR]: https://aur.archlinux.org/
+[Launchpad]: https://launchpad.net/
+
+Basics
+------
+
+The Debian package format is really old, and it shows.
+There's a billion of metadata files to take care of, and barely any suitable
+tutorials for beginners.
+At best, you'll learn how to build _binary_ packages, not suitable for
+publishing in a PPA (which only accepts _source_ packages and builds the
+binaries itself).
+
+First, you need to realize that there are source packages and binary packages.
+Binary packages are the .deb files that actually contain the software.
+A source package is, confusingly, multiple files, and you need to submit them
+all to Launchpad.
+You can distribute binary packages directly to your users, but they would have
+to fetch & install the new version manually every time there's an upgrade.
+If you could set up a repository and just point the users to it, they would get
+the new versions naturally via the package manager (`apt`).
+
+Canonical's Launchpad provides a very handy PPA (Personal Package Archive)
+service so that anyone can set up a repository.
+Users could then use `add-apt-repository ppa:...` and get the packages in a
+standard and convenient way.
+
+Tools
+-----
+
+There's a myriad of tools to build and maintain Debian packages.
+The [Debian New Maintainers' Guide] provides a [short summary] of how these
+tools interact.
+This tutorial assumes that your software lives in a Git repository and you'd
+like to use Git to maintain the packaging metadata in the same repository.
+This process is greatly aided by the [git-buildpackage] tool.
+We still need to install a bunch of other stuff though; the complete command
+line to install the required tools would be something like
+
+{% include jekyll-theme/shell.html cmd='sudo apt install -y build-essential devscripts dh-make git-buildpackage' %}
+
+Many of the tools pick up particular metadata (like the maintainer name and
+email address) from environment variables.
+You can put something like
+
+ export DEBFULLNAME='John Doe'
+ export DEBEMAIL='John.Doe@example.com'
+
+in your .bashrc to set them globally.
+
+[Debian New Maintainers' Guide]: https://www.debian.org/doc/manuals/maint-guide
+[short summary]: https://www.debian.org/doc/manuals/maint-guide/build.en.html#hierarchy
+[git-buildpackage]: http://honk.sigxcpu.org/projects/git-buildpackage/manual-html/gbp.html
+
+Getting started
+---------------
+
+Let's create a repository to try things out.
+It'll contain a single executable shell script test.sh, which only outputs the
+string "test".
+
+{% include jekyll-theme/shell.html cmd='mkdir test' %}
+{% include jekyll-theme/shell.html cmd='cd test' %}
+{% include jekyll-theme/shell.html cmd='git init' %}
+
+{% capture cmd1 %}
+cat <<'EOF' > test.sh
+#!/usr/bin/env bash
+echo test
+EOF
+{% endcapture %}
+{% include jekyll-theme/shell.html cmd=cmd1 %}
+
+{% include jekyll-theme/shell.html cmd='chmod +x test.sh' %}
+{% include jekyll-theme/shell.html cmd='git add .' %}
+{% include jekyll-theme/shell.html cmd='git commit -m \'initial commit\'' %}
+
+This is going to be version 1.0 of our project, let's tag it as such.
+
+{% include jekyll-theme/shell.html cmd='git tag -a -m \'Release 1.0\' v1.0' %}
+
+All of the Debian packaging tools are tailored to the following use-case.
+
+1. There's an upstream distribution, which releases the software in tarballs.
+2. There's a maintainer (who's not the software author), who takes care of
+packaging and is disconnected from the development.
+
+This disconnect means that maintaining the Debian packaging files in the
+`master` branch is inconvenient using the existing tools.
+At the very least, you should create a separate branch for doing packaging
+work.
+
+In addition, Debian (and hence, Ubuntu) is not a rolling-release distribution.
+That means that there're regular releases, and the software version shouldn't
+change too much during a lifetime of a single release.
+Once Debian makes a release, the software version is more or less fixed, and
+security fixes from future versions should be backported separately for each of
+the supported Debian/Ubuntu releases.
+
+Except there _is_ a rolling-release distribution of Debian, and it's called
+"unstable" or "sid".
+The bleeding-edge packaging work should target the "unstable" distribution.
+
+So, let's create a new branch `debian` for our packaging work:
+
+{% include jekyll-theme/shell.html cmd='git checkout -b debian' %}
+
+All the packaging tools assume there's a separate folder "debian" that contains
+the package metadata files.
+There's a handy tool `dh_make` that creates the directory and populates it with
+a number of template metadata files.
+Using it is not so simple though.
+First of all, it assumes that there's a properly named tarball with the project
+sources available in the parent directory.
+Why?
+Who knows.
+Let's create said tarball:
+
+{% include jekyll-theme/shell.html cmd='git archive --format=tar --prefix=test_1.0/ v1.0 | gzip -c > ../test_1.0.orig.tar.gz' %}
+
+The tarball name should follow the NAME_VERSION.orig.tar.gz pattern exactly!
+Anyway, now is the time to run `dh_make`:
+
+{% include jekyll-theme/shell.html cmd='dh_make --indep --copyright mit --packagename test_1.0 --yes' %}
+
+I'm using the MIT License for our little script, hence the `--copyright mit`
+argument.
+In addition, every package in Debian is either "single", "arch-independent",
+"library" or "python".
+I'm not sure what the exact differences between those are, but a shell script
+is clearly CPU architecture-independent, hence the `--indep` argument.
+If it was a compiled executable, it would be a "single" (`--single`) package.
+
+`dh_make` created the "debian" directory for us, filled with all kinds of
+files.
+The only required ones are "changelog", "control", "source", "rules" and the
+"source" directory.
+Let's remove every other file for now:
+
+{% include jekyll-theme/shell.html cmd='rm -f -- debian/*.ex debian/*.EX debian/README.* debian/*.docs' %}
+
+You can study the exact format of the metadata files in the [Debian New
+Maintainers' Guide], but for now let's keep it simple:
+
+{% include jekyll-theme/snippets/section.html section_id='basic' %}
+
+The "control" and "copyright" files are fairly straightforward.
+The "changelog" file has a strict format and is supposed to be maintained using
+the `dch` tool (luckily, git-buildpackage helps with that; more on that later).
+
+The "rules" file is an _executable_ Makefile, and actually controls how the
+software is built.
+Building a package involves invoking many predefined targets in this Makefile;
+for now, we'll resort to delegating everything to the `dh` tool.
+It's the Debhelper tool; it's a magic set of scripts that contain an
+unbelievable amount of hidden logic that's supposed to aid package maintainers
+in building the software.
+For example, if the package is supposed to be built using the standard
+`./configure && make && make install` sequence, it'll do this automatically.
+If it's a Python package with setup.py, it'll use the Python package-building
+utilities, etc.
+We don't want any of that, we just want to copy test.sh to /usr/bin.
+It can be taken care of using the `dh_install` script.
+While building the package, it'll get executed by `dh`, read the
+"debian/test.install" file and copy the files listed there to the specified
+directories.
+Our test.install should look like this:
+
+{% include jekyll-theme/snippets/section.html section_id='install' %}
+
+At this point, we can actually build a proper Debian package!
+
+{% include jekyll-theme/shell.html cmd='dpkg-buildpackage -uc -us' %}
+
+This command will generate a bunch of files in the parent directory.
+The one of interest to us is "test_1.0-1_all.deb".
+We can install it using `dpkg`:
+
+{% include jekyll-theme/shell.html cmd='sudo dpkg -i ../test_1.0-1_all.deb' %}
+
+We can now execute `test.sh`, and it'll hopefully print the string "test".
+
+This .deb file can be distributed to other users, but is no good for uploading
+to Launchpad.
+For one, it's a binary package, and we need source packages for Launchpad to
+build itself.
+Second, it's unsigned, which is also a no-no.
+
+I'm not going to describe how to set up a GnuPG key and upload it to the Ubuntu
+keyserver (keyserver.ubuntu.com), but it's pretty straightforward once you know
+the basics of GnuPG key handling.
+
+One disadvantage of the `dpkg-buildpackage` tool is that it creates a lot of
+files in the "debian" directory; their purpose is unclear to me.
+For now, you can delete them, leaving only the original "changelog", "control",
+"copyright", "rules", "test.install" and the "source" directory.
+
+git-buildpackage
+----------------
+
+git-buildpackage is a wonderful tool that helps with managing the packaging
+work in the upstream repository.
+Please refer to its manual to learn how to use it properly.
+We need to configure it so that it knows what the release tags look like
+(`vVERSION`), how the packaging branch is called (`debian`) and where to put
+the generated files.
+Create "debian/gbp.conf" with the following contents:
+
+{% include jekyll-theme/snippets/section.html section_id='gbp' %}
+
+One unclear line here is `pristine-tar = False`.
+It turns out, a lot of Debian package maintainers use the `pristine-tar` tool
+to create "pristine", byte-for-byte reproducible tarballs of the upstream
+software.
+This is just more headache for us, so we're not going to use that;
+git-buildpackage will just use the normal `git archive` to create tarballs.
+
+First, commit the packaging work we just made:
+
+{% include jekyll-theme/shell.html cmd='git add debian/' %}
+{% include jekyll-theme/shell.html cmd='git commit -m \'initial Debian release\'' %}
+
+We can now build the package using git-buildpackage:
+
+{% include jekyll-theme/shell.html cmd='gbp buildpackage' %}
+
+The tool will try to sign the packages, so this assumes that you have your
+GnuPG key set up!
+
+If all went right, it just built the packages in the ../build-area directory.
+And it hasn't crapped all over the working directory too!
+Similar to `dpkg-buildpackage`, it builds binary packages by default.
+To build _source_ packages, it needs to be invoked with the `-S` argument:
+
+{% include jekyll-theme/shell.html cmd='gbp buildpackage -S' %}
+
+It'll build the source package in the same directory (you'll notice a lot of
+files having the "_source" suffix).
+If all is well, we can tag the packaging work we've just completed:
+
+{% include jekyll-theme/shell.html cmd='gbp buildpackage --git-tag-only' %}
+
+This will create the `debian/1.0-1` tag in the repository.
+
+We are now ready to upload the source package to Launchpad.
+It's done using the `dput` tool.
+The naive way would fail:
+
+{% include jekyll-theme/shell.html cmd='dput ppa:john-doe/test ../build-area/test_1.0-1_source.changes' %}
+
+This is due to the fact that we've specified that we're targeting the
+"unstable" distribution in debian/changelog.
+There's no "unstable" distribution of Ubuntu though; we need to manually
+specify the minimal-supported version (e.g. "bionic"):
+
+{% include jekyll-theme/shell.html cmd='dput ppa:john-doe/test/ubuntu/bionic ../build-area/test_1.0-1_source.changes' %}
+
+What about other distributions?
+Well, if the binary package doesn't need recompiling, we can use Launchpad's
+"Copy packages" feature; this is well-described in this [Ask Ubuntu question].
+
+[Ask Ubuntu question]: https://askubuntu.com/q/23227/844205
+
+New versions
+------------
+
+When a new version is released, git-buildpackage helps to integrate it to the
+packaging branch.
+Let's say the new version is tagged `v1.1`:
+
+{% include jekyll-theme/shell.html cmd='git checkout debian' %}
+{% include jekyll-theme/shell.html cmd='git merge v1.1' %}
+{% include jekyll-theme/shell.html cmd='gbp dch' %}
+
+The above command will update debian/changelog; modify it manually to target
+the usual "unstable" distribution instead of "UNRELEASED" and update the
+version to something like "1.1-1".
+
+{% include jekyll-theme/shell.html cmd='git add debian/' %}
+{% include jekyll-theme/shell.html cmd='git commit -m \'Debian release 1.1\'' %}
+{% include jekyll-theme/shell.html cmd='gbp buildpackage -S' %}
+
+This will build the source package for the new version in the ../build-area
+directory; you can then upload it to Launchpad and copy the built binary
+packages.
+
+Aftermath
+---------
+
+This fucking sucks.
+What's the way to sanely manage the repository if the build/runtime
+dependencies are different for different Ubuntu versions?
+I have no idea.
+Some pointers to help you understand what's going on in this tutorial more
+deeply:
+
+* [When upstream uses Git: Building Debian Packages with git-buildpackage](https://honk.sigxcpu.org/projects/git-buildpackage/manual-html/gbp.import.upstream-git.html)
+* [Using Git for Debian packaging](https://www.eyrie.org/~eagle/notes/debian/git.html)
+
+Good luck with this, because I'm definitely overwhelmed.
diff --git a/_posts/2022-09-07-gdb-sleep-all.md b/_posts/2022-09-07-gdb-sleep-all.md
new file mode 100644
index 0000000..a575afd
--- /dev/null
+++ b/_posts/2022-09-07-gdb-sleep-all.md
@@ -0,0 +1,33 @@
+---
+title: Pause all userspace processes
+snippets_root_directory: snippets/gdb_sleep_all
+snippets_language: bash
+snippets:
+ main:
+ - gdb_sleep_all.sh
+ gdb:
+ - sleep.gdb
+---
+If you need to debug some kind of monitoring system (or just have some fun),
+you might want to pause all userspace processes for a certain number of seconds
+(to measure delays, etc.).
+
+You can easily do this using GDB like this:
+
+{% include jekyll-theme/snippets/section.html section_id='main' %}
+
+sleep.gdb is a very simple GDB script; it basically sleeps for a fixed
+number of seconds:
+
+{% include jekyll-theme/snippets/section.html section_id='gdb' %}
+
+You can simply run
+
+ sudo ./gdb_sleep_all.sh
+
+and all of your userspace processes should be frozen for 10 seconds.
+
+On a couple of servers, this worked quite well; not so well on my laptop with
+Xfce installed.
+Obviously, this would require a bit of work to adapt for containers as well.
+Otherwise, pretty neat, huh?
diff --git a/_posts/2022-11-07-ptrace-sigtraps.md b/_posts/2022-11-07-ptrace-sigtraps.md
new file mode 100644
index 0000000..2f42a81
--- /dev/null
+++ b/_posts/2022-11-07-ptrace-sigtraps.md
@@ -0,0 +1,72 @@
+---
+title: 'Fun with ptrace: SIGTRAPs galore'
+date: 2022-11-07 13:00 +0100
+---
+When using `PTRACE_ATTACH` the `ptrace` mechanism reuses SIGTRAP for a number
+of things by default.
+This makes it unnecessarily hard to distinguish regular traps (possibly caused
+by breakpoints we might place) from other events.
+
+1. After `ptrace(PTRACE_SYSCALL)`, syscall-stops will be reported as SIGTRAPs.
+
+ ```c
+ int status;
+
+ ptrace(PTRACE_SYSCALL, pid, 0, 0);
+ waitpid(pid, &status, 0);
+
+ if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) {
+ /* We don't know if the tracee has just entered/exited a syscall or
+ * received a regular SIGTRAP (could be caused by a breakpoint we
+ * placed). */
+ }
+ ```
+
+ This is fixed by using the `PTRACE_O_TRACESYSGOOD` option.
+
+ ```c
+ int status;
+
+ ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACESYSGOOD);
+ ptrace(PTRACE_SYSCALL, pid, 0, 0);
+ waitpid(pid, &status, 0);
+
+ if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80)) {
+ /* We know for sure that the tracee has just entered/exited a
+ * syscall. */
+ }
+ ```
+
+2. Every `execve` call will be reported as a SIGTRAP.
+
+ ```c
+ int status;
+
+ ptrace(PTRACE_CONT, pid, 0, 0);
+ waitpid(pid, &status, 0);
+
+ if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) {
+ /* We don't know if the tracee just called execve() or received a
+ * regular SIGTRAP (could be caused by a breakpoint we placed). */
+ }
+ ```
+
+ This is fixed by using the `PTRACE_O_TRACEEXEC` option.
+
+ ```c
+ int status;
+
+ ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACEEXEC);
+ ptrace(PTRACE_CONT, pid, 0, 0);
+ waitpid(pid, &status, 0);
+
+ if (WIFSTOPPED(status) && status >> 8 == (SIGTRAP | PTRACE_EVENT_EXEC << 8)) {
+ /* We know for sure that the tracee has just called execve(). */
+ }
+ ```
+
+ This point doesn't apply to tracees attached using `PTRACE_SEIZE`.
+ {: .alert .alert-info }
+
+As you can see, you should always use at least the `PTRACE_O_TRACESYSGOOD` and
+`PTRACE_O_TRACEEXEC` options to be able to distinguish between SIGTRAPs.
diff --git a/_posts/2022-11-07-ptrace-waitpid.md b/_posts/2022-11-07-ptrace-waitpid.md
new file mode 100644
index 0000000..b5f34fa
--- /dev/null
+++ b/_posts/2022-11-07-ptrace-waitpid.md
@@ -0,0 +1,51 @@
+---
+title: 'Fun with ptrace: a waitpid pitfall'
+date: 2022-11-07 12:00 +0100
+---
+When tracing a process using `ptrace`, one often uses the `waitpid` system call
+to wait until something happens to the tracee.
+It often goes like this (error handling is omitted for brevity):
+
+```c
+/* We have previously attached to tracee `pid`. */
+
+int status;
+
+waitpid(pid, &status, 0);
+
+if (WIFEXITED(status)) {
+ /* Tracee has exited. */
+}
+if (WIFSIGNALED(status)) {
+ /* Tracee was killed by a signal. */
+}
+/* Tracee was stopped by a signal WSTOPSIG(status). */
+```
+
+What if a single thread is attached to multiple tracees?
+Then we can use `-1` as the first argument to `waitpid`, and it will wait for
+any child to change state.
+
+```c
+int status;
+pid_t pid = waitpid(-1, &status, __WALL);
+```
+
+What's little known, however, is that `waitpid(-1)` will by default consume
+status changes from other threads' children.
+So if you have two tracer threads A and B, and each of them is attached to a
+tracee, then thread A might consume thread B's tracee status change by calling
+`waitpid(-1)`.
+Therefore, thread A would have thread B's tracee status.
+A typical application could be completely unprepared for this scenario.
+
+To avoid that, use the `__WNOTHREAD` flag.
+That way, thread A will consume status changes from its own children only.
+
+```c
+int status;
+pid_t pid = waitpid(-1, &status, __WALL | __WNOTHREAD);
+```
+
+In my opinion, `__WNOTHREAD` should often be a default in well-structured
+applications.
diff --git a/_posts/snippets/gdb_sleep_all/gdb_sleep_all.sh b/_posts/snippets/gdb_sleep_all/gdb_sleep_all.sh
new file mode 100755
index 0000000..e923740
--- /dev/null
+++ b/_posts/snippets/gdb_sleep_all/gdb_sleep_all.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+set -o errexit -o nounset -o pipefail
+
+# Select all process IDs that are _not_ children of PID 2, [kthreadd].
+pids="$( ps -o pid --no-headers --ppid 2 -p 2 --deselect )"
+
+for pid in $pids; do
+ cmdline="$( cat "/proc/$pid/cmdline" | tr '\0' ' ' )" || continue
+ echo ------------------------------------------------------------------
+ echo "PID: $pid"
+ echo "Command line: $cmdline"
+ echo ------------------------------------------------------------------
+ gdb -p "$pid" -x sleep.gdb -batch &
+done
+
+wait
diff --git a/_posts/snippets/gdb_sleep_all/sleep.gdb b/_posts/snippets/gdb_sleep_all/sleep.gdb
new file mode 100644
index 0000000..6b8f268
--- /dev/null
+++ b/_posts/snippets/gdb_sleep_all/sleep.gdb
@@ -0,0 +1,2 @@
+shell sleep 10
+quit
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/shared.hpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/shared.hpp
new file mode 100644
index 0000000..796ea85
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/shared.hpp
@@ -0,0 +1,6 @@
+#pragma once
+
+inline int shared() {
+ static int n = 0;
+ return ++n;
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/weird/another.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/weird/another.cpp
new file mode 100644
index 0000000..330ba80
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/weird/another.cpp
@@ -0,0 +1,11 @@
+#include "another.hpp"
+
+#include <iostream>
+
+inline void shared() {
+ std::cout << "another.cpp: shared()\n";
+}
+
+void another() {
+ shared();
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/weird/another.hpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/weird/another.hpp
new file mode 100644
index 0000000..9c26d3f
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/weird/another.hpp
@@ -0,0 +1,3 @@
+#pragma once
+
+void another();
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/weird/main.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/weird/main.cpp
new file mode 100644
index 0000000..e278b9f
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/inline/weird/main.cpp
@@ -0,0 +1,13 @@
+#include "another.hpp"
+
+#include <iostream>
+
+inline void shared() {
+ std::cout << "main.cpp: shared()\n";
+}
+
+int main() {
+ shared();
+ another();
+ return 0;
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/another.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/another.cpp
new file mode 100644
index 0000000..f13b3a1
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/another.cpp
@@ -0,0 +1,6 @@
+#include "another.hpp"
+#include "shared.hpp"
+
+void another() {
+ Test test;
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/another.hpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/another.hpp
new file mode 100644
index 0000000..9c26d3f
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/another.hpp
@@ -0,0 +1,3 @@
+#pragma once
+
+void another();
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/main.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/main.cpp
new file mode 100644
index 0000000..b3118c1
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/main.cpp
@@ -0,0 +1,8 @@
+#include "another.hpp"
+#include "shared.hpp"
+
+int main() {
+ Test test;
+ another();
+ return 0;
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/shared.hpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/shared.hpp
new file mode 100644
index 0000000..ef4da34
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/separate_method_definitions/shared.hpp
@@ -0,0 +1,12 @@
+#pragma once
+
+#include <iostream>
+
+struct Test {
+ Test();
+};
+
+inline Test::Test() {
+ static int x = 0;
+ std::cout << ++x << '\n';
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/main.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/main.cpp
new file mode 100644
index 0000000..fde1a43
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/main.cpp
@@ -0,0 +1,10 @@
+#include "proxy.hpp"
+#include "shared.hpp"
+
+#include <iostream>
+
+int main() {
+ std::cout << shared() << '\n';
+ std::cout << proxy() << '\n';
+ return 0;
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/proxy.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/proxy.cpp
new file mode 100644
index 0000000..78e4611
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/proxy.cpp
@@ -0,0 +1,6 @@
+#include "proxy.hpp"
+#include "shared.hpp"
+
+int proxy() {
+ return shared();
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/proxy.hpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/proxy.hpp
new file mode 100644
index 0000000..7dfc52a
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/proxy.hpp
@@ -0,0 +1,3 @@
+#pragma once
+
+int proxy();
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/shared.hpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/shared.hpp
new file mode 100644
index 0000000..647f49e
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static/shared.hpp
@@ -0,0 +1,6 @@
+#pragma once
+
+static int shared() {
+ static int n = 0;
+ return ++n;
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/main.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/main.cpp
new file mode 100644
index 0000000..fde1a43
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/main.cpp
@@ -0,0 +1,10 @@
+#include "proxy.hpp"
+#include "shared.hpp"
+
+#include <iostream>
+
+int main() {
+ std::cout << shared() << '\n';
+ std::cout << proxy() << '\n';
+ return 0;
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/proxy.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/proxy.cpp
new file mode 100644
index 0000000..78e4611
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/proxy.cpp
@@ -0,0 +1,6 @@
+#include "proxy.hpp"
+#include "shared.hpp"
+
+int proxy() {
+ return shared();
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/proxy.hpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/proxy.hpp
new file mode 100644
index 0000000..7dfc52a
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/proxy.hpp
@@ -0,0 +1,3 @@
+#pragma once
+
+int proxy();
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/shared.hpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/shared.hpp
new file mode 100644
index 0000000..28de441
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/static_and_inline/shared.hpp
@@ -0,0 +1,6 @@
+#pragma once
+
+static inline int shared() {
+ static int x = 0;
+ return ++x;
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/main.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/main.cpp
new file mode 100644
index 0000000..fde1a43
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/main.cpp
@@ -0,0 +1,10 @@
+#include "proxy.hpp"
+#include "shared.hpp"
+
+#include <iostream>
+
+int main() {
+ std::cout << shared() << '\n';
+ std::cout << proxy() << '\n';
+ return 0;
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/proxy.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/proxy.cpp
new file mode 100644
index 0000000..78e4611
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/proxy.cpp
@@ -0,0 +1,6 @@
+#include "proxy.hpp"
+#include "shared.hpp"
+
+int proxy() {
+ return shared();
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/proxy.hpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/proxy.hpp
new file mode 100644
index 0000000..7dfc52a
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/proxy.hpp
@@ -0,0 +1,3 @@
+#pragma once
+
+int proxy();
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/shared.hpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/shared.hpp
new file mode 100644
index 0000000..e21a00c
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespace_and_inline/shared.hpp
@@ -0,0 +1,10 @@
+#pragma once
+
+namespace {
+
+inline int shared() {
+ static int x = 0;
+ return ++x;
+}
+
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/ok/another.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/ok/another.cpp
new file mode 100644
index 0000000..cc7556d
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/ok/another.cpp
@@ -0,0 +1,19 @@
+#include "another.hpp"
+
+#include <iostream>
+
+namespace {
+
+struct Test {
+ Test() {
+ std::cout << "another.cpp: Test::Test()\n";
+ }
+
+ float y = 1.;
+};
+
+}
+
+void another() {
+ Test test;
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/ok/another.hpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/ok/another.hpp
new file mode 100644
index 0000000..9c26d3f
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/ok/another.hpp
@@ -0,0 +1,3 @@
+#pragma once
+
+void another();
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/ok/main.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/ok/main.cpp
new file mode 100644
index 0000000..e383ded
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/ok/main.cpp
@@ -0,0 +1,22 @@
+#include "another.hpp"
+
+#include <iostream>
+
+namespace {
+
+struct Test {
+ Test() {
+ std::cout << "main.cpp: Test::Test()\n";
+ }
+
+ int x = 1;
+};
+
+}
+
+int main() {
+ Test test;
+ std::cout << test.x << '\n';
+ another();
+ return 0;
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/weird/another.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/weird/another.cpp
new file mode 100644
index 0000000..0e0bff9
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/weird/another.cpp
@@ -0,0 +1,15 @@
+#include "another.hpp"
+
+#include <iostream>
+
+struct Test {
+ Test() {
+ std::cout << "another.cpp: Test::Test()\n";
+ }
+
+ float y = 1.;
+};
+
+void another() {
+ Test test;
+}
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/weird/another.hpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/weird/another.hpp
new file mode 100644
index 0000000..9c26d3f
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/weird/another.hpp
@@ -0,0 +1,3 @@
+#pragma once
+
+void another();
diff --git a/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/weird/main.cpp b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/weird/main.cpp
new file mode 100644
index 0000000..abd42b7
--- /dev/null
+++ b/_posts/snippets/static_vs_inline_vs_unnamed_namespaces/unnamed_namespaces/weird/main.cpp
@@ -0,0 +1,18 @@
+#include "another.hpp"
+
+#include <iostream>
+
+struct Test {
+ Test() {
+ std::cout << "main.cpp: Test::Test()\n";
+ }
+
+ int x = 1;
+};
+
+int main() {
+ Test test;
+ std::cout << test.x << '\n';
+ another();
+ return 0;
+}
diff --git a/_posts/snippets/ubuntu_packaging/basic/changelog b/_posts/snippets/ubuntu_packaging/basic/changelog
new file mode 100644
index 0000000..6b7131e
--- /dev/null
+++ b/_posts/snippets/ubuntu_packaging/basic/changelog
@@ -0,0 +1,5 @@
+test (1.0-1) unstable; urgency=medium
+
+ * Initial release.
+
+ -- John Doe <John.Doe@example.com> Wed, 10 Mar 2021 16:15:19 +0000
diff --git a/_posts/snippets/ubuntu_packaging/basic/control b/_posts/snippets/ubuntu_packaging/basic/control
new file mode 100644
index 0000000..55f8252
--- /dev/null
+++ b/_posts/snippets/ubuntu_packaging/basic/control
@@ -0,0 +1,13 @@
+Source: test
+Section: utils
+Priority: optional
+Maintainer: John Doe <John.Doe@example.com>
+Build-Depends: debhelper-compat (= 12)
+Standards-Version: 4.4.1
+Homepage: https://example.com/test
+
+Package: test
+Architecture: all
+Depends: ${misc:Depends}
+Description: This is a test package.
+ This is a test package, just trying out Debian packaging.
diff --git a/_posts/snippets/ubuntu_packaging/basic/copyright b/_posts/snippets/ubuntu_packaging/basic/copyright
new file mode 100644
index 0000000..8a67e52
--- /dev/null
+++ b/_posts/snippets/ubuntu_packaging/basic/copyright
@@ -0,0 +1,31 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: test
+Upstream-Contact: John Doe <John.Doe@example.com>
+Source: https://example.com/test
+
+Files: *
+Copyright: 2021 John Doe <John.Doe@example.com>
+License: MIT
+
+Files: debian/*
+Copyright: 2021 John Doe <John.Doe@example.com>
+License: MIT
+
+License: MIT
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+ .
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+ .
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/_posts/snippets/ubuntu_packaging/basic/rules b/_posts/snippets/ubuntu_packaging/basic/rules
new file mode 100644
index 0000000..cbe925d
--- /dev/null
+++ b/_posts/snippets/ubuntu_packaging/basic/rules
@@ -0,0 +1,3 @@
+#!/usr/bin/make -f
+%:
+ dh $@
diff --git a/_posts/snippets/ubuntu_packaging/gbp/gbp.conf b/_posts/snippets/ubuntu_packaging/gbp/gbp.conf
new file mode 100644
index 0000000..d9f7e5f
--- /dev/null
+++ b/_posts/snippets/ubuntu_packaging/gbp/gbp.conf
@@ -0,0 +1,5 @@
+[DEFAULT]
+upstream-tag = v%(version)s
+debian-branch = debian
+pristine-tar = False
+export-dir = ../build-area/
diff --git a/_posts/snippets/ubuntu_packaging/install/test.install b/_posts/snippets/ubuntu_packaging/install/test.install
new file mode 100644
index 0000000..6222235
--- /dev/null
+++ b/_posts/snippets/ubuntu_packaging/install/test.install
@@ -0,0 +1 @@
+test.sh usr/bin