A nice property of Common Lisp is its macro system, which made things like cl-who possible: with it, HTML can be embedded, in Lisp syntax, directly into the code.

With the new features of C++11, I wrote this little code snippet, which does something similar:

/* we assume everything utf-8 */

#include <iostream>
#include <string>
#include <sstream>
#include <deque>
#include <cassert>

class Attr {
public:
  std::string _name;
  std::string _value;
  Attr (std::string const& n, const char* v)
    : _name(n), _value(v) {};

  void text_(std::stringstream& ss) const {
    ss << _name << "=\"";
    // NOTICE: this works only with UTF-8
    for (char c : _value) {
      switch (c) {
      case '&':
        ss << "&amp;";
        break;
      case '"':
        ss << "&quot;";
        break;
      case '<':
        ss << "&lt;";
        break;
      case '>':
        ss << "&gt;";
        break;
      default:
        ss << c;
      }
    }
    ss << '"';
  }

  std::string text() const {
    std::stringstream ss;
    text_(ss);
    ss.flush();
    return ss.str();
  }
};

class AttrTrunk {
public:
  std::string _name;
  AttrTrunk(std::string & n) : _name(n) {};
  Attr operator= (const char* a) {
    Attr ret(_name, a);
    return ret;
  }
};

class AttrList {
public:
  std::deque<Attr> attributes;
  AttrList () {};
  void text_ (std::stringstream& ss) const {
    for (Attr const& c : attributes) {
      c.text_(ss);
      ss << " ";
    }
  }
  std::string text () const {
    std::stringstream ss;
    text_(ss);
    ss.flush();
    return ss.str();
  }
};

AttrList operator , (Attr const& a, Attr const& b) {
  AttrList ret;
  ret.attributes.push_back(a);
  ret.attributes.push_back(b);
  return ret;
}

AttrList& operator , (AttrList& a, Attr const& b) {
  a.attributes.push_back(b);
  return a;
}

AttrTrunk operator "" _att (const char* a, size_t len) {
  std::string b("");
  for (size_t i = 0; i < len; ++i) b.push_back(a[i]);
  AttrTrunk ret(b);
  return ret;
}

class Tag;

std::deque<Tag> empty;
AttrList empty_attr;

class Tag {
public:
  std::string _name; // or cleartext
  AttrList _attrs;
  std::deque<Tag> _tags;
  bool _empty;
  bool _text;
  bool _as_is;
  // _empty means that the tag should be of the form <bla />. If it is
  // not set, it will be <bla></bla>. We distinguish this, because
  // <script ... /> does not work everywhere.
  Tag (std::string& n, AttrList& attrs, std::deque<Tag>& subtags, bool empty) :
    _name(n), _attrs(attrs), _tags(subtags), _empty(empty), _text(false), _as_is(false) {
    assert (!empty || subtags.size() == 0);
  }

  Tag (const char* cleartext)
    : _name(cleartext), _attrs(empty_attr), _tags(empty),
      _empty(false), _text(true), _as_is(false) {}

  Tag (std::string& cleartext)
    : _name(cleartext), _attrs(empty_attr), _tags(empty),
      _empty(false), _text(true), _as_is(false) {}

  static Tag as_is (const char* text) {
    Tag ret(text);
    ret._as_is = true;
    return ret;
  }

  void text_ (std::stringstream& ss) const {
    if (_as_is) {
      ss << _name;
    } else if (_text) {
      for (char c : _name) {
        switch (c) {
        case '<':
          ss << "&lt;";
          break;
        case '>':
          ss << "&gt;";
          break;
        case '&':
          ss << "&amp;";
          break;
        default:
          ss << c;
        }
      }
    } else {
      ss << "<" << _name << " ";
      _attrs.text_(ss);
      if (_empty) {
        ss << " />";
      } else {
        ss << ">";
        for (Tag const& tg : _tags) {
          tg.text_(ss);
        }
        ss << "</" << _name << ">";
      }
    }
  }
  std::string text() const {
    std::stringstream ss;
    text_(ss);
    ss.flush();
    return ss.str();
  }
};

// Tag with (possibly empty) attlist but no content yet
class TagTrunk2 {
public:
  std::string _name;
  AttrList _attrs;
  TagTrunk2 (std::string n, AttrList al)
    : _name(n), _attrs(al) {}

  // for the difference between [] with an empty list and (), see
  // class Tag

  Tag operator [] (std::deque<Tag> subtags) {
    Tag t (_name, _attrs, subtags, false);
    return t;
  }

  Tag operator [] (Tag subtag) {
    std::deque<Tag> subtags;
    subtags.push_back(subtag);
    Tag t (_name, _attrs, subtags, false);
    return t;
  }
  Tag operator () () {
    std::deque<Tag> trash;
    Tag t (_name, _attrs, trash, true);
    return t;
  }
};

std::deque<Tag> operator , (Tag a, Tag b) {
  std::deque<Tag> ret;
  ret.push_back(a);
  ret.push_back(b);
  return ret;
}

std::deque<Tag>& operator , (std::deque<Tag>& a, Tag b) {
  a.push_back(b);
  return a;
}

// Tag without attlist and content yet
class TagTrunk1 {
public:
  std::string _name;
  TagTrunk1 (std::string n) : _name(n) {}
  TagTrunk2 operator [] (AttrList a) {
    TagTrunk2 ret (_name, a);
    return ret;
  }
  TagTrunk2 operator [] (Attr a) {
    AttrList b;
    b.attributes.push_back(a);
    TagTrunk2 ret (_name, b);
    return ret;
  }
  TagTrunk2 operator () () {
    // use this when no attributes are present
    AttrList b;
    TagTrunk2 ret (_name, b);
    return ret;
  }
};

TagTrunk1 operator "" _tag (const char* a, size_t length) {
  std::string b("");
  for (size_t i = 0; i < length; ++i) b.push_back(a[i]);
  TagTrunk1 ret(b);
  return ret;
}

Tag operator "" _txt (const char* a, size_t length) {
  std::string b("");
  for (size_t i = 0; i < length; ++i) b.push_back(a[i]);
  Tag ret(b);
  return ret;
}

Tag operator "" _as_is (const char* a, size_t length) {
  return Tag::as_is(a);
}

int main (void) {

  std::cout <<
    ( "html"_tag ()
      ["head"_tag ()
       ["title"_tag () [ "Meine Seite"_txt ],
        "script"_tag ["type"_att = "text/javascript",
                      "src"_att = "hallo.js"][empty]],
       "body"_tag () ["Hallo Welt!"_txt, "<![CDATA[ bla ]]>"_as_is]] ).text() << std::endl;

}
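
If I read the code correctly, the example in main() should print roughly the following single line (the stray blank after each tag name appears because a space is always written before the, possibly empty, attribute list):

<html ><head ><title >Meine Seite</title><script type="text/javascript" src="hallo.js" ></script></head><body >Hallo Welt!<![CDATA[ bla ]]></body></html>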

Have fun. Regard the code as public domain. You C++-Nerds, please make some nice library out of it that does encoding and collation and shit correctly and release it freely so I can use it.


After a long ride on German trails, I would kindly like to ask the programmers of the messenger.com web app to please die in a chemical fire.

Then, Facebook could maybe pay some student to hack together a simple interface that just does - say - autorefresh every few seconds.

Well, admittedly, ping times of 21686ms, which are common in Germany, are a problem when programming certain kinds of low-latency network software. I wouldn't blame Minecraft, for example, for not working under these conditions.

But a simple text chat – and yes, that's what your "web app" is – should work perfectly well under those conditions.

And yes, I tried your Android app, and it is even worse. It drains my battery, has the same issues, and on top of that, I had situations where messages were not synced correctly (or at all) between my devices.

And don't tell me that this is too hard to implement. Skype has a similar messaging website, and it works perfectly. Even WhatsApp Web works better, though it is annoying that it requires an active connection to the phone.


As I always forget them, here is a short list of functions for the various sum and product types in Coq. If I missed something, feel free to send me a comment.

Product types:

notation    | name     | left proj. | right proj. |
------------+----------+------------+-------------+
A /\ B      | and      | proj1      | proj2       |
A * B       | pair     | fst        | snd         |
{x | A}     | exist    | proj1_sig  | proj2_sig   |
{x : B & A} | existT   | projT1     | projT2      |

Sum Types:

notation    | name     | intro left | intro right |
------------+----------+------------+-------------+
A \/ B      | or       | or_introl  | or_intror   |
A + B       | sum      | inl        | inr         |
{A} + {B}   | sumbool  | left       | right       |

Hi. There are currently some problems with my markdown setup. I am also refactoring some parts of this website. Behold! #technikeristinformiert

Update: Ok. Most of the problems should be solved now. One problem is that cl-markdown and the original Markdown have slightly different opinions about when a list begins. So some older link lists are still scrambled.

The good news is that most MathJax is gone in favor of svg-latex, which loads a lot faster (though it does not look quite so good).

If you see otherwise scrambled pages, feel free to drop a comment or mail about it. At the moment, I do not have the time to go through every single post I made.


Arrays in Haskell can be a pain. Monads are a nice thing, but for some things, I would prefer linear types. There are so-called DiffArrays in Haskell, which are, for no apparent reason, ridiculously slow.

As a quick reminder: a DiffArray maintains a history of changes, which should mean only linear overhead, and when the array is used linearly, most of that memory overhead should be quite short-lived. But it isn't.

My idea is different: that an array is used only linearly should be trivial to check when you have an AST (and I wonder why nobody has written such a thing yet, something that just replaces linearly used DiffArrays with unsafe operations). But instead of overwriting the old array with a difference, I just mark it as invalid:

import System.IO.Unsafe
import Data.IORef
import Data.Array.IO

data LinearArray_ = Invalid | Valid (IOUArray Int Int)
type LinearArray = IORef LinearArray_

Creating new Arrays is straightforward:

newArray :: (Int, Int) -> Int -> LinearArray
newArray (start, end) init =
    unsafePerformIO $
    do
      t <- Data.Array.IO.newArray (start, end) init
      r <- newIORef (Valid t)
      return $ r

Overwriting an array does the following: it takes the array from the old ref and updates it accordingly. Then the old ref is marked Invalid, and a new ref to the array is returned. Of course, if the old ref was already Invalid, an error is raised:

overwriteArray :: LinearArray -> Int -> Int -> LinearArray
overwriteArray old index value =
    unsafePerformIO $
    do ior <- readIORef old
       case ior of
         Invalid -> error "nonlinear use of linear array!"
         Valid a -> do
                  writeArray a index value
                  writeIORef old Invalid
                  new <- newIORef (Valid a)
                  return new

Trying to read from an invalid array will do the same:

readArray :: LinearArray -> Int -> Int
readArray arr index =
    unsafePerformIO $
    do ior <- readIORef arr
       case ior of
         Invalid -> error "nonlinear use of linear array!"
         Valid a -> do
                  r <- Data.Array.IO.readArray a index
                  return r

This approach has several caveats. For example, in most functional languages one can rebind a name so that the new definition refers to the old one. In Haskell, code like

let {a = []} in let {a = 1 : a} in a

produces an infinite list of ones: all let-definitions are recursive, so the a on the right-hand side refers to the a being defined, not to the outer one. But exactly this kind of rebinding comes in handy when using linear arrays. Another caveat is lazy evaluation. The following code "surprisingly" works:

a :: Int
a = let arr1 = Main.newArray (0, 10) 0
        arr2 = overwriteArray arr1 9 1
    in Main.readArray arr1 9

That is because arr2 is never evaluated. However, activating {-# LANGUAGE BangPatterns #-} and using

a :: Int
a = let arr1 = Main.newArray (0, 10) 0
        !arr2 = overwriteArray arr1 9 1
    in Main.readArray arr1 9

will throw an error. Notice that both versions are wrong, in the sense that the specification of a linear array allows linear occurrences only, but arr1 occurs twice. It is just that in one case no error is raised.
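
For contrast, here is a small sketch of the intended, linear usage, in which every intermediate array name is consumed exactly once, so no Invalid ref is ever touched (in real code, the three operations should additionally be marked {-# NOINLINE #-} so the optimizer does not duplicate the unsafePerformIO calls):

b :: Int
b = let arr1 = Main.newArray (0, 10) 0
        arr2 = overwriteArray arr1 9 1
        arr3 = overwriteArray arr2 5 2
    in Main.readArray arr3 9   -- evaluates to 1, without a "nonlinear use" error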

My benchmarks show that this kind of array is pretty fast. Now, while I feel a lynch mob of Haskell fanboys forming, let me add that I would not recommend actually using this. I just wish that Haskell had support for uniqueness types – or at least optimized linearly used immutable arrays or DiffArrays automatically in that manner.

Because it should be quite easy to find out whether an array occurs only once in a term and whether all its descendants are also used only once. And the benefits would be huge, in my opinion.


I've tried BTRFS before, and it was very unstable. Now that it is in the mainline kernel, I decided to give it another shot.

The main reason was online compression. My personal chat log files are now larger than 4G uncompressed, but they are very repetitive and therefore compress well. I could have written some logrotate(8) script; this is the common suggestion, especially on the Raspberry Pi, on which "every processor cycle is precious". But the Raspberry Pi is 21 times faster than my first computer and has 262144 times as much RAM, and even that old machine had online compression. Honestly, this is 2016, not 1992.

The only other filesystems from the standard packages of Raspbian that support compression are NTFS and Reiser4, but the snapshot facility of BTRFS convinced me. (Unfortunately, ZFS is not supported.)

On my raspi it has worked great since then. On my PC, I tried to convert an ext4 partition into BTRFS. Don't do that, at least not before making a backup. However, it works now, and it runs faster than ext4, or at least it feels that way.

Before we start

This was again a presentation for the Best mathematics club in the multiverse. It is meant for both younger and older students, as well as working mathematicians. Its objective therefore lay in breadth rather than depth. I removed lots of topics I wanted to talk about, as the talk got too long. It is not a scientific talk. Comments and corrections are welcome.

Why well-orderings?

Why are well-orderings the backbone of mathematics? A similar claim for Zermelo-Fraenkel set theory (ZFC) was made by Oliver Deiser in his book Einführung in die Mengenlehre, regarding the constructible cardinal numbers. In ZFC, cardinal numbers are specific ordinal numbers, and ordinal numbers are sets which are well-ordered by the \in-relation (roughly speaking). And in fact, the study of well-orderings is an important part of set theory. In recursion theory, well-orderings and well-quasi-orderings can be used for termination proofs, and they are also important for newer foundational systems that are based on type theory. In proof theory, well-orderings can measure the consistency strength of a theory. The axiom of choice is closely related to well-orderings, and it is an important principle in classical mathematics, specifically in analysis, probability theory and topology.

A short definition

Definition. In short, a well-ordering is a linear ordering relation with no infinite descending chains.

That is, if we have some collection of objects ("set") A and a binary relation <_A on this collection, then the pair (A,<_A) is a well-ordering if it satisfies:

  • linearity: \forall_{a,b\in A}. a <_A b \vee a = b \vee b <_A a
  • irreflexivity: \forall_{a\in A}.\neg a <_A a
  • transitivity: \forall_{a,b,c\in A}. a <_A b \rightarrow b <_A c \rightarrow a <_A c
  • well-foundedness: every B\subseteq A with B\neq\emptyset has a <_A-minimal element.

We write a >_A b for b <_A a, a \le_A b for a <_A b \vee a = b, and a \ge_A b for b \le_A a. In most cases it is clear from the context which relation we mean, so we shall leave the index out and write < instead of <_A, etc. We often just write A instead of (A, <_A) if the ordering we mean is clear (or no specific order is given).

Canonical examples of well-orderings are the default ordering on \mathbb{N}_0 and on all of its subsets – especially also all of its finite subsets, including the empty set (no axiom says that there has to be an element at all).

Exercise: Show that if < is a well-ordering, then \le is reflexive, transitive, antisymmetric (\forall_{a,b}.a\le b\rightarrow b\le a\rightarrow a=b) and every decreasing sequence is stationary.

Definition: We call orderings which are reflexive, transitive, antisymmetric, and in which every decreasing sequence is stationary, weak well-orderings.

Most people have probably heard of the concepts of homomorphism and isomorphism. The idea is that, given two structures, there is a "renaming" between these structures that preserves the structure fully or partially.

Definition: Let A and B be well-orderings, and f:A\rightarrow B such that \forall_{a_1,a_2\in A}. a_1 <_A a_2 \rightarrow f(a_1) <_B f(a_2). Then f is called a homomorphism. A bijective homomorphism is called an isomorphism.

(Notice that every homomorphism is injective: for a_1\neq a_2 we have a_1 <_A a_2 or a_2 <_A a_1 by linearity, and hence f(a_1)\neq f(a_2).)

Let us define [n]:=\{m\in\mathbb{N}_0\mid m<n\}, with the canonical ordering on it.

Lemma: For all m,n\in\mathbb{N}_0, m\le n if and only if there exists a homomorphism f : [m]\rightarrow[n].

Proof. "\Rightarrow": Just set f x =
x. "\Leftarrow": This is an instance of the well-known Pigeonhole principle.

Lemma: Every finite subset X\subset\mathbb{N}_0 with n elements is isomorphic to [n].

Proof. By induction on n. For n=0 this follows from extensionality. For the step, notice that X is non-empty, and therefore has a minimal element, say x. By induction, there is an isomorphism f : [n-1] \rightarrow X\backslash\{x\}. Then g z:=\left\{\begin{array}{cl} x & \mbox{ for } z = 0 \\ f(z-1) & \mbox{ otherwise}\end{array}\right. defines an isomorphism g : [n]\rightarrow X, since x is the minimum of X.

Lemma: Every infinite subset X\subseteq\mathbb{N}_0 is isomorphic to \mathbb{N}.

Proof. Let us define


\left<n\right>:=\left\{\begin{array}{rl}\emptyset & \mbox{ for } n = 0 \\ \left<n-1\right>\cup\{\min(X\backslash\left<n-1\right>)\} & \mbox{ otherwise }\end{array}\right.

From our previous lemma, we get isomorphisms f_n:\left<n\right>\rightarrow[n]. Notice that the graphs G_n=\{(x,y)\mid f_n x=y\} of these isomorphisms are strictly increasing, that is, G_0\subsetneq G_1\subsetneq G_2\subsetneq\ldots. Therefore, we can define G_\omega:=\bigcup_i G_i. Since every element of X has only finitely many elements of X below it, we have \bigcup_n\left<n\right>=X, so G_\omega is the graph of a bijective, order-preserving function f_\omega:X\rightarrow\mathbb{N}.

Lemma: Induction is equivalent to well-ordering on \mathbb{N}.

Proof. "\Rightarrow": Let X\subseteq\mathbb{N}, and X\neq\emptyset. Then there is some n\in X. We do induction on n. For n = 0, n is trivially minimal. Now in the step, if we already know that all sets containing a number < n have a minimum, and n\in X, either n is already the minimum of X, or X contains a smaller element. "\Leftarrow": Assume there was some set X such that 0\in X, X+1\subseteq X but X\neq\mathbb{N}. Then there is some minimal element in n\in\mathbb{N}\backslash X. Trivially, n\neq 0 and n-1\not\in X. Contradiction.

Side Note: This lemma is related to Markov's Principle, a principle from intuitionistic logic. It is an example of a statement which holds for every instance, but is not provable in general.

Exercise: If this is new to you: Before you read on, can you think of infinite well-orderings which are not isomorphic to \mathbb{N}?

Solution. Of course, there is no unique solution to this. One simple example is the canonical ordering on \{1-\frac{1}{n}\mid n\in\mathbb{N}\}\cup\{1\}. Essentially, it is almost the same as \mathbb{N}, with one exception: 1 is larger than all other elements.

Comparing and combining well-orderings

Something very common in mathematics is to view structures "up to isomorphism". For example, one talks about "the field F_2", even though in the common mathematical theories it is not unique. We will do the same with well-orderings now: we identify isomorphic well-orderings. Therefore, we can say that all finite well-orderings are given by [0],[1],[2],\ldots. We already began comparing finite well-orderings by their homomorphisms, and we will just continue doing so by saying that a well-ordering a is less than or equal to a well-ordering b, written a\le b, if there is a homomorphism a\rightarrow b. Then we go even further and leave out the square brackets, that is, we write 0,1,2,\ldots for the finite well-orderings.

We have already seen that the smallest infinite well-ordering is the canonical ordering on \mathbb{N}, which we will call \omega.

Lemma: Let (x, <) be a well-ordering, and y\in x. Then (\{z\in x\mid z<y\}, <) is also a well-ordering, which is not isomorphic to (x, <).

Proof. Trivially, (\{z\in x\mid z<y\},<) is also a well-ordering. Assume there was an isomorphism f:\{z\in x\mid z<y\}\rightarrow x. Then \{z\in x\mid z<y\wedge f(z)\neq z\} is non-empty (otherwise f could not be surjective, since y would not be in its image) and contains a smallest element m with f(m)\neq m. By surjectivity there must be some n<y with f(n)=m, and as m was minimal and n\neq m, we know that n>m, therefore f(n)>f(m), so m>f(m). Since m was minimal, f(f(m))=f(m), but applying f to m>f(m) gives f(m)>f(f(m))=f(m). Contradiction.

Lemma and Definition: For every two well-orderings a,b, the sum a+b is defined as the disjoint union of a and b, keeping the given orderings inside a and b and making every element of a smaller than every element of b. a+b is a well-ordering.

Proof. Exercise.

Notice: The addition of well-orderings is not commutative. 1+\omega=\omega, while \omega+1 is the well-ordering in our above example.
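
As a small illustration: an explicit isomorphism f : 1+\omega\rightarrow\omega maps the single element of 1 to 0 and the n-th element of the copy of \omega to n+1. On the other hand, \omega+1 has a greatest element while \omega does not, so the two cannot be isomorphic.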

Lemma and Definition: For every two well-orderings a,b, the product a\cdot b is the ordering on a\times b that compares the second components first: (a_1, b_1) < (a_2, b_2) :\Leftrightarrow b_1<b_2\vee(b_1=b_2\wedge a_1<a_2). In other words, a\cdot b consists of b copies of a. a\cdot b is a well-ordering.

Notice: The product of well-orderings is not commutative. 2\cdot\omega=\omega, but \omega\cdot 2=\omega + \omega.
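
As a small illustration: under this ordering, 2\cdot\omega enumerates as (0,0)<(1,0)<(0,1)<(1,1)<(0,2)<\ldots, which has order type \omega, while \omega\cdot 2 enumerates as (0,0)<(1,0)<(2,0)<\ldots<(0,1)<(1,1)<\ldots, which is \omega+\omega.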

Notice: The finite well-orderings with addition and multiplication are essentially \mathbb{N}.

Well-orderings in set theory

While in the realm of logic new tools are currently being built to cope with this problem of working with structures only up to isomorphism, for well-orderings the problem has been solved in an elegant way inside set theory, which is currently the most well-developed foundation.

Ordinal Numbers

Definition: A set x is called an ordinal number if for all y\in x we have y\subseteq x and x is well-ordered by \in. The class of all ordinals is called On.
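
For example, \emptyset, \{\emptyset\} and \{\emptyset,\{\emptyset\}\} are ordinals; they are the ordinals 0, 1 and 2, and in general the finite ordinals are exactly the [n] from above, with each natural number encoded as the set of its predecessors. The set of all finite ordinals is itself an ordinal, namely \omega.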

Lemma: \emptyset\in x for every non-empty x\in On.

Proof. x contains an \in-minimal element y. Assume there was some z\in y; then also z\in x (as y\subseteq x), and y would not be minimal anymore. Contradiction. Thus, y=\emptyset.

Lemma: If y\in x and x\in On, then y\in On.

Proof. As y\in x, also y\subseteq x, so trivially, y is well-ordered by \in. Assuming z\in y and t\in z, by transitivity we have t\in y, so every element of y is a subset of y. Hence y\in On.

Theorem: If x and y are ordinals whose well-orderings regarding \in are isomorphic, then x=y.

Proof. Call the isomorphism f. Assuming f\neq id, there must be a minimal z\in x such that f(z)\neq z. Since f is an isomorphism with respect to \in, it maps the elements of z onto the elements of f(z), so f(z)=\{f(t)\mid t\in z\}=\{t\mid t\in z\}=z, where the second equality holds by the minimality of z. Contradiction.

Lemma: If x\in On, y\subseteq x, y\neq\emptyset, then \exists_{a\in y}\forall_{b\in y}. a\in b \vee a = b.

Proof. By the foundation axiom, we get an a\in y such that a\cap y=\emptyset. Take any b\in y. b\not\in a, because a\cap y = \emptyset. By linearity of x therefore a\in b\vee a = b.

Lemma: If a is a non-empty set of ordinals, then \bigcap a is an ordinal.

Proof. We first prove that every element of \bigcap a is a subset of \bigcap a: if x\in\bigcap a, then x\in b for all b\in a, therefore x\subseteq b for all b\in a, therefore x\subseteq\bigcap a. We still have to prove that \in is a well-ordering on \bigcap a. Irreflexivity follows from the foundation axiom: no set may contain itself. For linearity, consider b,c\in\bigcap a, that is, b\in d and c\in d for all d\in a; since such a d is an ordinal, linearity holds for b,c. Well-foundedness of \in follows from the foundation axiom. Transitivity is left as an exercise.

Chaining a few more technical proofs, we could prove:

Theorem: For any two a,b\in On, we have a\in b\vee a=b\vee b\in a.

Corollary: Every set of ordinals is well-ordered by the \in relation.

We can generalize induction on \mathbb{N} to induction on On:

Theorem (Transfinite Induction): If for some proposition P we have P(\emptyset), and \forall y\in On.(\forall x\in y. P(x))\rightarrow P(y), then \forall x\in On. P(x).

Proof. Assume not. Then there is some x\in On with \neg P(x), and therefore \{y\le x\mid\neg P(y)\} is a non-empty well-ordered set and has a minimum \alpha, which is then the smallest ordinal at all for which \neg P(\alpha) holds. Trivially, \alpha\neq\emptyset, and by its minimality, \forall x\in\alpha. P(x), but this would imply P(\alpha). Contradiction.

Lemma: On is not a set.

Proof. If On were a set, it would itself be an ordinal, so On\in On. Contradiction.

Lemma: Every well-ordering (A,<) is isomorphic to some ordinal number.

Proof Sketch. Define f(\emptyset) = \min A. Now let f be defined for all \alpha\in\beta. If there are still elements of A remaining, set f(\beta) to the minimal remaining element; otherwise, we are done. Since we assumed that A is a set, this procedure must terminate at some point.

The nice thing is that in this theory we have concrete objects to talk about, namely the ordinal numbers.

Zorn's Lemma

Zorn's Lemma is one of the three classically equivalent axioms independent of ZF: The well-ordering theorem, which says that every set has some well-ordering; the axiom of choice; and Zorn's Lemma. It is probably the one that is used most often explicitly outside of set theory.

Zorn's Lemma: If (P, <) is transitive, reflexive and every non-empty chain has an upper bound, then P contains a maximal element.

Notice: A maximal element might not be comparable with many other elements.

Definition: A well-quasi-ordering is defined like a well-ordering, except that it may be nonlinear.

In some sense this generalizes the notion of trees. Zorn's Lemma can be reduced to the following stronger statement about well-quasi-orderings:

Theorem: If (P, <) is transitive, reflexive and every non-empty well-ordered subset has an upper bound, then for all x\in P, P contains a maximal element which is comparable to x.

This essentially says that if every branch of the tree is bounded, then there are leaves.

(Linear) Algebra

A vector space (V, +, \cdot) over a field F is defined by a few axioms that can easily be looked up on Wikipedia.

Theorem: Every vector space has a basis.

Proof 1. Take an arbitrary element. If the linear hull of this element is the whole space, you are done. Otherwise, take an element which is not in the hull and proceed. As the vector space is a set, this process must "end" after possibly infinitely many steps: after "set many" steps.

Proof 2. In the partial order of linearly independent sets, every chain is bounded by its union. Therefore, by Zorn's Lemma, there is a maximal linearly independent set, which is a basis.

Proof 1 is normally only done in the finite case, but it also works in the infinite case because of transfinite induction. Proof 2 uses Zorn's Lemma. Similarly, the existence of algebraic closures and many other theorems use Zorn's Lemma.

Fun Fact: The groups (\mathbb{R}, +) and (\mathbb{C}, +) are isomorphic.

Proof. It is easy to show that both are vector spaces over \mathbb{Q}, and as such they have bases of the same (uncountable) cardinality, namely that of the continuum. Hence they are isomorphic as \mathbb{Q}-vector spaces, and in particular as groups.

Recursion Theory

Consider a simple programming language: You have an arbitrary number of registers a[i] which are initially 0 and can store natural numbers. You have a command a[i]++ which increments the register a[i], and a[i]-- which decrements it if it is not 0. (We do not allow constructs like a[a[i]]; the i must be constants.) You have an exit command. Furthermore, you have a command if a[i] == 0 then goto l1, where l1 is the line number to jump to. It is easy to show that this language is Turing-complete. In particular, it is generally undecidable whether a program terminates.

However, we can change the semantics a bit: attach to every program line an additional function f from ordinal numbers to ordinal numbers (or, if you want to implement it on a real computer, to some other well-ordering), which must be strictly decreasing in the sense that f(\alpha) < \alpha for all \alpha>0. You start with some ordinal number, which is decreased at every step through the function attached to the current line. The program terminates on exit as before, or as soon as the ordinal reaches 0.

All programs in such a language terminate, because the ordinal strictly decreases at every step and there are no infinite descending chains. Which functions can be computed depends on the starting ordinals we allow. By the way, this is not pure theory.
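
To make this concrete, here is a small Haskell sketch of such a machine. It is only a toy model of the idea: the ordinals are restricted to those below \omega\cdot\omega, represented as lexicographically ordered pairs, and a single example clock function is used for every line.

import qualified Data.Map as M

type Ord2  = (Integer, Integer)      -- (a, b) stands for the ordinal omega*a + b
type Regs  = M.Map Int Integer
type Clock = Ord2 -> Ord2            -- must satisfy: clock o < o for all o /= (0,0)

data Instr = Inc Int | Dec Int | JmpZ Int Int | Exit

-- example clock: eat one from the finite part, or trade one omega for 1000 steps
tick :: Clock
tick (a, b) | b > 0     = (a, b - 1)
            | a > 0     = (a - 1, 1000)
            | otherwise = (0, 0)

run :: [(Instr, Clock)] -> Ord2 -> Regs -> Regs
run prog = go 0
  where
    go pc o regs
      | o == (0, 0)       = regs              -- ordinal exhausted: forced stop
      | pc >= length prog = regs
      | otherwise =
          let (instr, clock) = prog !! pc
              o'             = clock o        -- strictly smaller ordinal
          in case instr of
               Exit     -> regs
               Inc i    -> go (pc + 1) o' (M.insertWith (+) i 1 regs)
               Dec i    -> go (pc + 1) o' (M.adjust (\v -> max 0 (v - 1)) i regs)
               JmpZ i l -> go (if M.findWithDefault 0 i regs == 0 then l else pc + 1) o' regs

-- usage: move register 0 into register 1; the run is guaranteed to halt,
-- either on Exit or when the ordinal budget reaches (0, 0)
example :: Regs
example = run [ (JmpZ 0 4, tick), (Dec 0, tick), (Inc 1, tick), (JmpZ 2 0, tick), (Exit, tick) ]
              (1, 0)
              (M.fromList [(0, 7)])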


I am currently cleaning up my feed list. Omg, so many pages do not maintain their feeds. Many of them abandoned them entirely. Why would an admin do this?

Ah, of course. It's because feeds were simple, worked well, were not centralized and controlled by a big company. Sorry for asking.

I have a lot of stuff to do, so do not expect too many new posts during February. Furthermore, all my bookmarks are on another computer, which should have been synced using Firefox Sync but somehow wasn't. So I cannot finish this week's link list now.


When it comes to web browsing, I think I would call myself a "power user". My current feed list has 585 entries (some of them are comment feeds, but still a large number), and my usual way of reading them is to let newsbeuter manage them, "pre-filter" them (most feeds do not have content embedded, and newsbeuter is a console UI, which is nice but not really suitable for webcomics, etc.), and put them on a list that I then select in Firefox. Then I use a feature of Tab Mix Plus, namely being able to open all selected links in new tabs. Furthermore, I configured Tab Mix Plus to show these tabs in multiple lines. For a long time I had it show the tab bar at the bottom rather than the top, but at some point this feature stopped working properly, and I did not have time to investigate further.

At present, Firefox seems to be the only browser that can really handle many tabs properly. Chromium has some extensions that sort of work, but it is that kind of software that "knows best". Opera had this feature in the past, too, but they dropped it. Vivaldi seems nice in that respect. I also used tab groups, a feature that will soon be dropped; by now I have gotten used to bookmarks for the same purpose.

Firefox has a good crash recovery mechanism that usually works, but it has happened to me several times now that my profile got broken and I lost many of my bookmarks, or at least had a lot of work restoring them and all the settings. In the past I used Weave, which is now called Sync. However, I wanted to run my own server, because the official Mozilla server had a 5 MiB limit, which is not enough for me (and besides that, I have a server, so I want to use it). But at some point they changed the protocol, dropped support for the old one, and there was no easy way of setting up a new server.

These are several reasons why I am trying to keep my local Firefox profile working. So here is what I am doing:

Firstly, my Linux runs on LVM, so I can create an additional logical volume "firefox", which I formatted as a 512 MiB ext3 filesystem. This volume is mounted (via fstab) to ~/.mozilla. And then, I use a straightforward backup procedure:

#!/usr/bin/env bash

PATH=/bin:/sbin:/usr/bin:/usr/sbin
SECPATH=/home/user/fbackups
BACKUPNUM=100

i=0;
while test -e "${SECPATH}/backup.$i"; do ((i++)); done

lvcreate -l 100%FREE --snapshot /dev/lvm1/firefox -n firefox-b || { wall "could not create backup snapshot!"; exit 1; }
TMPFL=$(mktemp -d)
mount -o ro /dev/lvm1/firefox-b ${TMPFL} || { wall "created backup snapshot but could not mount it. tmp directory $TMPFL was not deleted."; exit 1; }

rsync -alH --delete "${TMPFL}/" "${SECPATH}/backup.$i" --link-dest "${SECPATH}/backup.$((i-1))"

umount /dev/lvm1/firefox-b || { wall "could not unmount backup /dev/lvm1/firefox-b (on $TMPFL)!"; exit 1; }
lvremove -f /dev/lvm1/firefox-b || { wall "could not remove /dev/lvm1/firefox-b!"; exit 1; }
rm -rf ${TMPFL} || { wall "could not delete $TMPFL!"; exit 1; }

if [ $i -ge "$BACKUPNUM" ]; then
  rm -rf "${SECPATH}/backup.0"
  j=0
  while [ $j -lt $i ]; do
    mv "${SECPATH}/backup.$((j+1))" "${SECPATH}/backup.$j"
    ((j++))
  done
fi

exit 0

Of course, I could just rsync the original directory, but that already broke some of my configurations (probably because it is not "atomic"). Creating an LVM snapshot leaves a directory that looks like the system has crashed – a situation Firefox can usually handle. I am running this script every 10 minutes in a loop. It is not perfect, but it serves its purpose.

Update: I now use BTRFS, which has a built-in snapshot mechanism. The code now looks like this:

while true;
do btrfs subvolume snapshot -r .mozilla/ snap/dot_mozilla_`date -u +%s`;
   sleep 600;
done