Thu, 05 May 2016 13:36:58 GMT

A nice property of Common Lisp is its macro system, which made it possible to have things like cl-who, which allows embedding HTML code in Lisp syntax directly in the code.

With the new features of C++11, I wrote this little code snippet, which does something similar:

```
/* we assume everything utf-8 */
#include <iostream>
#include <string>
#include <sstream>
#include <deque>
#include <cassert>
/* A single HTML attribute (name/value pair).
 *
 * NOTE: the published listing emitted the raw characters & " < > in the
 * escape branches (and `ss << """;` did not even compile) -- the HTML
 * entities were evidently decoded when the post was rendered.  Restored
 * to &amp; &quot; &lt; &gt; here. */
class Attr {
public:
    std::string _name;
    std::string _value;

    Attr (std::string const& n, const char* v)
        : _name(n), _value(v) {};

    /* Append `name="escaped-value"` to ss.
     * NOTICE: this works only with UTF-8 -- escaping is byte-wise, which
     * is safe for UTF-8 because all continuation bytes have the high bit
     * set and can never collide with the ASCII metacharacters below. */
    void text_(std::stringstream& ss) const {
        ss << _name << "=\"";
        for (char c : _value) {
            switch (c) {
            case '&':
                ss << "&amp;";
                break;
            case '"':
                ss << "&quot;";
                break;
            case '<':
                ss << "&lt;";
                break;
            case '>':
                ss << "&gt;";
                break;
            default:
                ss << c;
            }
        }
        ss << '"';
    }

    /* Convenience wrapper: serialize into a fresh string. */
    std::string text() const {
        std::stringstream ss;
        text_(ss);
        return ss.str();
    }
};
/* Intermediate object produced by the _att literal: assigning a value
 * to it ("name"_att = "value") yields a finished Attr. */
class AttrTrunk {
public:
    std::string _name;

    AttrTrunk(std::string & n) : _name(n) {};

    /* Completing the attribute: bind a value to the stored name. */
    Attr operator= (const char* a) {
        return Attr(_name, a);
    }
};
class AttrList {
public:
std::deque<Attr> attributes;
AttrList () {};
void text_ (std::stringstream& ss) const {
for (Attr const& c : attributes) {
c.text_(ss);
ss << " ";
}
}
std::string text () const {
std::stringstream ss;
text_(ss);
ss.flush();
return ss.str();
}
};
/* `"a"_att = "1", "b"_att = "2"` -- the comma operator collects two
 * attributes into a fresh AttrList... */
AttrList operator , (Attr const& a, Attr const& b) {
    AttrList result;
    result.attributes.push_back(a);
    result.attributes.push_back(b);
    return result;
}
/* ...and every further comma appends to the existing list. */
AttrList& operator , (AttrList& list, Attr const& extra) {
    list.attributes.push_back(extra);
    return list;
}
/* User-defined literal: "name"_att produces an AttrTrunk waiting for
 * its value via operator=. */
AttrTrunk operator "" _att (const char* a, size_t len) {
    std::string name(a, len);  // length-aware copy (embedded NULs survive)
    return AttrTrunk(name);
}
class Tag;
// Shared empty defaults, referenced by the cleartext Tag constructors
// and (for `empty`) by main to force the non-self-closing tag form.
// NOTE(review): declaring std::deque<Tag> with Tag still incomplete is
// technically not guaranteed by C++11, though it works on the major
// standard libraries.
std::deque<Tag> empty;
AttrList empty_attr;
/* A node of the document tree: either a real element (with attributes
 * and children), an escaped cleartext node, or a verbatim ("as is")
 * node.  Fixes relative to the published listing: the cleartext escape
 * branches emitted raw < > & (entities were decoded when the post was
 * rendered) and the cleartext constructors left _empty uninitialized
 * and listed their initializers out of declaration order. */
class Tag {
public:
    std::string _name; // tag name, or the cleartext itself
    AttrList _attrs;
    std::deque<Tag> _tags;
    bool _empty;  // render as <bla /> rather than <bla></bla>
    bool _text;   // _name is cleartext, escaped on output
    bool _as_is;  // _name is emitted verbatim, no escaping
    // _empty means that the tag should be of the form <bla />. If it is
    // not set, it will be <bla></bla>. We distinguish this, because
    // <script ... /> does not work everywhere.
    Tag (std::string& n, AttrList& attrs, std::deque<Tag>& subtags, bool empty) :
        _name(n), _attrs(attrs), _tags(subtags),
        _empty(empty), _text(false), _as_is(false) {
        assert (!empty || subtags.size() == 0);
    }
    // Cleartext node: the content is HTML-escaped when serialized.
    Tag (const char* cleartext)
        : _name(cleartext), _attrs(empty_attr), _tags(empty),
          _empty(false), _text(true), _as_is(false) {}
    Tag (std::string& cleartext)
        : _name(cleartext), _attrs(empty_attr), _tags(empty),
          _empty(false), _text(true), _as_is(false) {}
    // Verbatim node: emitted without any escaping (e.g. CDATA sections).
    static Tag as_is (const char* text) {
        Tag ret(text);
        ret._as_is = true;
        return ret;
    }
    /* Recursively serialize this node into ss. */
    void text_ (std::stringstream& ss) const {
        if (_as_is) {
            ss << _name;
        } else if (_text) {
            // NOTICE: byte-wise escaping; works only with UTF-8.
            for (char c : _name) {
                switch (c) {
                case '<':
                    ss << "&lt;";
                    break;
                case '>':
                    ss << "&gt;";
                    break;
                case '&':
                    ss << "&amp;";
                    break;
                default:
                    ss << c;
                }
            }
        } else {
            ss << "<" << _name << " ";
            _attrs.text_(ss);
            if (_empty) {
                ss << " />";
            } else {
                ss << ">";
                for (Tag const& tg : _tags) {
                    tg.text_(ss);
                }
                ss << "</" << _name << ">";
            }
        }
    }
    /* Convenience wrapper: serialize into a fresh string. */
    std::string text() const {
        std::stringstream ss;
        text_(ss);
        return ss.str();
    }
};
// Tag with (possibly empty) attlist but no content yet.
class TagTrunk2 {
public:
    std::string _name;
    AttrList _attrs;

    TagTrunk2 (std::string n, AttrList al)
        : _name(n), _attrs(al) {}

    // for the difference between [] with an empty list and (), see
    // class Tag
    Tag operator [] (std::deque<Tag> subtags) {
        return Tag(_name, _attrs, subtags, false);
    }
    // Single child: wrap it into a one-element deque first.
    Tag operator [] (Tag subtag) {
        std::deque<Tag> children;
        children.push_back(subtag);
        return Tag(_name, _attrs, children, false);
    }
    // No content at all: produce the self-closing <bla /> form.
    Tag operator () () {
        std::deque<Tag> none;
        return Tag(_name, _attrs, none, true);
    }
};
/* `"a"_txt, "b"_txt` -- the comma operator collects sibling tags into
 * a deque... */
std::deque<Tag> operator , (Tag first, Tag second) {
    std::deque<Tag> siblings;
    siblings.push_back(first);
    siblings.push_back(second);
    return siblings;
}
/* ...and every further comma appends to the existing deque. */
std::deque<Tag>& operator , (std::deque<Tag>& siblings, Tag next) {
    siblings.push_back(next);
    return siblings;
}
// Tag without attlist and content yet.
class TagTrunk1 {
public:
    std::string _name;

    TagTrunk1 (std::string n) : _name(n) {}

    // Attach an already-built attribute list.
    TagTrunk2 operator [] (AttrList a) {
        return TagTrunk2(_name, a);
    }
    // Attach a single attribute by wrapping it into a list.
    TagTrunk2 operator [] (Attr a) {
        AttrList wrapped;
        wrapped.attributes.push_back(a);
        return TagTrunk2(_name, wrapped);
    }
    // use this when no attributes are present
    TagTrunk2 operator () () {
        AttrList none;
        return TagTrunk2(_name, none);
    }
};
/* User-defined literal: "div"_tag starts an element. */
TagTrunk1 operator "" _tag (const char* a, size_t length) {
    std::string name(a, length);  // length-aware copy
    return TagTrunk1(name);
}
/* "some text"_txt makes an escaped cleartext node. */
Tag operator "" _txt (const char* a, size_t length) {
    std::string content(a, length);
    return Tag(content);
}
/* "..."_as_is makes a verbatim node (no escaping).
 * NOTE(review): `length` is unused -- relies on the literal being
 * NUL-terminated, so embedded NULs would truncate. */
Tag operator "" _as_is (const char* a, size_t length) {
    return Tag::as_is(a);
}
// Demo: build a small HTML document with the DSL and print it.
// `"x"_tag ()` attaches an (empty) attribute list, `[...]` attaches
// children, and `[empty]` forces the non-self-closing <x></x> form
// (needed for <script>; see the comment in class Tag).
int main (void) {
std::cout <<
( "html"_tag ()
["head"_tag ()
["title"_tag () [ "Meine Seite"_txt ],
"script"_tag ["type"_att = "text/javascript",
"src"_att = "hallo.js"][empty]],
"body"_tag () ["Hallo Welt!"_txt, "<![CDATA[ bla ]]>"_as_is]] ).text() << std::endl;
}
```

Have fun. Regard the code as public domain. You C++-Nerds, please make some nice library out of it that does encoding and collation and shit correctly and release it freely so I can use it.

Show comments (Requires JavaScript, loads external content from Disqus.com)

Mon, 04 Apr 2016 15:10:14 GMT

Dieser Inhalt wird in Deutschland nicht angezeigt. Weitere Informationen

After a long ride on German trails I kindly want to make the request to the programmers of the messenger.com web app to please die in a chemical fire.

Then, Facebook could maybe pay some student to hack together a simple interface that just does - say - autorefresh every few seconds.

Well, admittedly, ping times of 21686ms, which are common in Germany,
*are* a problem when programming certain kinds of low latency network
software. I wouldn't blame, for example, MineCraft to not work under
these conditions.

But a simple text chat – and yes, that's what your "web app" is – should work perfectly well under those conditions.

And yes, I tried your Android app, and it is even worse. It drains my battery, has the same issues, and above that, I had situations where the messages were not synced correctly (or at all) between my devices.

And don't tell me that this was too hard to implement. Skype has a similar messaging website. And it works perfectly. Even WhatsApp Web works better, though it is annoying that they want an active connection with the phone.

Sun, 28 Feb 2016 15:49:36 GMT

Dieser Inhalt wird in Deutschland nicht angezeigt. Weitere Informationen

As I always forget them, here a short list of functions regarding several sum- and product types in Coq. If I missed something, feel free to send me a comment.

Product types:

```
notation | name | left proj. | right proj. |
------------+----------+------------+-------------+
A /\ B | and | proj1 | proj2 |
A * B | pair | fst | snd |
{x | A} | exist | proj1_sig | proj2_sig |
{x : B & A} | existT | projT1 | projT2 |
```

Sum Types:

```
notation | name | intro left | intro right |
------------+----------+------------+-------------+
A \/ B | or | or_introl | or_intror |
A + B | sum | inl | inr |
{A} + {B} | sumbool | left | right |
```

Fri, 19 Feb 2016 20:15:07 GMT

Hi. There are currently some problems with my markdown setup. I am also refactoring some parts of this website. Behold! #technikeristinformiert

**Update**: Ok. Most of the problems should be solved now. One problem
is that cl-markdown and the original Markdown have slightly different
opinions about when a list begins. So some older link lists are still
scrambled.

The good news is that *most* MathJax is gone in favor of svg-latex,
which loads a lot faster (though it does not look quite so good).

If you see otherwise scrambled pages, feel free to drop a comment or mail about it. At the moment, I do not have the time to go through every single post I made.

Fri, 19 Feb 2016 17:59:58 GMT

Dieser Inhalt wird in Deutschland nicht angezeigt. Weitere Informationen

Arrays in Haskell can be a pain. Monads are a nice thing, but for some things, I would prefer linear types. There are so-called DiffArrays in Haskell, which are, for no apparent reason, ridiculously slow.

As a quick reminder: A DiffArray maintains a history of changes, which
*should* be only linear overhead and when using it linearly only, most
of the memory overhead *should* be quite short-lived. But it isn't.

My idea now is different: Using an array only linearly should be trivial to check when having an AST (and I wonder why nobody has written such a thing yet which just replaces linearily used DiffArrays with unsafe operations). But other than overwriting the old array with a difference, I just mark it as invalid:

```
import System.IO.Unsafe
import Data.IORef
import Data.Array.IO
-- A "linear" array is a mutable IOUArray behind an IORef; the ref is
-- flipped to Invalid once the array has been consumed, so a second use
-- of the same reference can be detected at runtime.
data LinearArray_ = Invalid | Valid (IOUArray Int Int)
type LinearArray = IORef LinearArray_
```

Creating new Arrays is straightforward:

```
-- Create a fresh linear array with the given bounds, every cell set to
-- `init`.  (Indentation restored: the published listing was flattened
-- to column 0, which does not parse in layout-sensitive Haskell.)
newArray :: (Int, Int) -> Int -> LinearArray
newArray (start, end) init =
  unsafePerformIO $
  do
    t <- Data.Array.IO.newArray (start, end) init
    r <- newIORef (Valid t)
    return $ r
```

Overwriting an array does the following: It takes the array from the
old ref and overwrites it accordingly. Then the old ref is overwritten
and marked `Invalid`

, and a new array is returned. Of course, if the
old ref was `Invalid`

already, an error is raised:

```
-- Write `value` at `index`, invalidate the old reference, and return a
-- fresh reference to the (mutated) array.  Errors out if the old
-- reference was already consumed.  (Indentation restored; the published
-- listing was flattened and did not parse.)
overwriteArray :: LinearArray -> Int -> Int -> LinearArray
overwriteArray old index value =
  unsafePerformIO $
  do ior <- readIORef old
     case ior of
       Invalid -> error "nonlinear use of linear array!"
       Valid a -> do
         writeArray a index value
         writeIORef old Invalid
         new <- newIORef (Valid a)
         return new
```

Trying to read from an invalid array will do the same:

```
-- Read the cell at `index`; reading does not consume the reference,
-- but reading an already-invalidated one is an error.  (Indentation
-- restored; the published listing was flattened and did not parse.)
readArray :: LinearArray -> Int -> Int
readArray arr index =
  unsafePerformIO $
  do ior <- readIORef arr
     case ior of
       Invalid -> error "nonlinear use of linear array!"
       Valid a -> do
         r <- Data.Array.IO.readArray a index
         return r
```

This approach has several caveats. For example, in most functional languages, one can just overload a name. In Haskell, code like

```
let {a = []} in let {a = 1 : a} in a
```

produces an infinite list of ones: By default, a refers to itself, and all definitions are recursive. But name overloading is something that comes in handy when using linear arrays. Another caveat is lazy evaluation. The following code "surprisingly" works:

```
-- Deliberately non-linear use of arr1; "works" only because arr2 is
-- never forced.  (let-binding alignment restored.)
a :: Int
a = let arr1 = Main.newArray (0, 10) 0
        arr2 = overwriteArray arr1 9 1
    in Main.readArray arr1 9
```

That is because arr2 is never evaluated. However, activating ```
{-#
LANGUAGE BangPatterns #-}
```

and using

```
-- Same non-linear use, but the bang pattern forces arr2, so the
-- invalidation fires and this throws.  (let-binding alignment restored.)
a :: Int
a = let arr1 = Main.newArray (0, 10) 0
        !arr2 = overwriteArray arr1 9 1
    in Main.readArray arr1 9
```

*will* throw an error. Notice that both are *wrong*, in the sense that
the specification of a linear array requires linear occurrences only,
but `arr1`

occurs twice. It is only that there is no error in the one
case.

My benchmarks show that this kind of array is pretty fast. Now, while
feeling a lynchmob of Haskell-fanboys forming, let me add that **I
would not recommend to actually use this**. I would just wish that
Haskell had support for uniqueness types – or at least automatic
optimization of linearly used immutable arrays or DiffArrays in that
manner.

Because it should be quite easy to find out whether an array occurs only once in a term, and all its descendants are also only used once. And the benefits would be really huge, in my opinion.

Tue, 16 Feb 2016 23:21:53 GMT

Dieser Inhalt wird in Deutschland nicht angezeigt. Weitere Informationen

I've tried BTRFS before, and it was very unstable. Now that it is in the mainline kernel, I decided to give it another shot.

The main reason was online compression. My personal chat log files are
larger than 4G now, when uncompressed, but they are very repetitive
and therefore compress well. I *could* have written some
`logrotate(8)`

script. This is the common suggestion, especially on
the Raspberry Pi, on which "every processor cycle is
precious". The Raspberry Pi is 21 times faster than my first computer,
and has 262144 times as much RAM, and had
online compression. Honestly, this is 2016, not 1992.

The only filesystem from the standard packages of Raspbian that supports compression is NTFS. Reiser4 is also available and supports compression, but the snapshot facility of BTRFS convinced me. (Unfortunately, ZFS is not supported.)

On my raspi, since then, it works great. On my PC, I tried to convert an ext4 partition into BTRFS. Don't do that, at least not before doing a backup. However, now it works. And it runs faster than ext4, at least it feels like that.

Mon, 08 Feb 2016 08:01:46 GMT

This was again a presentation for the
Best mathematics club in the multiverse. It is meant for both
younger and older students, as well as working mathematicians. Its
objective therefore lied in breadth rather than depth. I removed lots
of topics I wanted to talk about, as the talk I gave got too
long. **It is not a scientific talk.** Comments and corrections are
welcome.

Why are well-orderings the backbone of mathematics? A similar claim for Zermelo-Fraenkel set theory (ZFC) was made by Oliver Deiser in his book Einführung in die Mengenlehre, about the constructible cardinal numbers. In ZFC, cardinal numbers are specific ordinal numbers, and ordinal numbers are sets which are well-ordered by the ∈-relation (roughly speaking). And in fact, the study of well-orderings is an important part of set theory. In recursion theory, well-orderings and well-quasi-orderings can be used for termination proofs, and are also important for newer foundational systems based on type theory. In proof theory, well-orderings can measure the consistency strength of a theory. The axiom of choice is closely related to well-orderings, and it is an important principle in classical mathematics, specifically analysis, stochastics and topology.

**Definition.** *In short, a well-ordering is a linear ordering
relation with no infinite descending chains.*

That is, if we have some collection of objects ("Set") , and a binary relation on this collection, then the pair is a well-ordering, if it satisfies:

**linearity**: .**irreflexivity**:**transitivity**:**well-foundedness**: Let and . Then has a -minimal element.

We write for , and for and for . In most cases it is clear from the context which relation we mean, so we shall leave the index out and write instead of , etc. We often just write instead of if the ordering we mean is clear (or no specific order is given).

Canonical examples for well-orderings are the default ordering on ℕ, and on all its subsets – especially also all of its finite subsets, and the empty set (no axiom says that there has to be an element at all).

**Exercise:** Show that if is a well-ordering, then
is reflexive, transitive, antisymmetric
() and
every decreasing sequence is stationary.

**Definition:** We call orderings which are reflexive, transitive,
antisymmetric, and in which every decreasing sequence is stationary,
**weak well-orderings**.

Most people have probably heard of the concepts of homomorphism and isomorphism. The idea is that, given two structures, there is a "renaming" between these structures that preserves the structure fully or partially.

**Definition:** Let and be well-orderings, and
, such that . Then is called a
*homomorphism*. A bijective homomorphism is called *isomorphism*.

(Notice that by definition, every homomorphism is injective.)

Let us define , with the canonical ordering on it.

**Lemma:** For all , if and
only if there exists a homomorphism .

*Proof.* "": Just set . "": This is an instance of the well-known
Pigeonhole principle.

**Lemma:** Every finite subset with
elements is isomorphic to .

*Proof.* By induction. For this follows from
extensionality. As step, notice that is non-empty, and
therefore has a minimal element, say . By induction, there is
an isomorphism . Then
trivially, is an isomorphism.

**Lemma:** Every infinite subset is
isomorphic to .

*Proof.* Let us define

From our previous lemma, we get isomorphisms . Notice that the graphs of these isomorphisms are strictly increasing, that is, . Therefore, we can define , and this must be the graph of a bijective function .

**Lemma:** Induction is equivalent to well-ordering on
.

*Proof.* "": Let , and
. Then there is some . We do
induction on . For , is trivially
minimal. Now in the step, if we already know that all sets containing
a number have a minimum, and , either
is already the minimum of , or contains a
smaller element. "": Assume there was some set
such that , but
. Then there is some minimal element in
. Trivially, and
. Contradiction.

Side Note: This lemma is related to **Markov's Principle**, which is a
theorem about intuitionistic logic. It is an example for a theorem
which holds for every instance, but is not provable in general.

**Exercise:** If this is new to you: Before you read on, can you think
of infinite well-orderings which are **not** isomorphic to
?

*Solution.* Of course, there is no unique solution to this. One simple
example is the canonical ordering on . Essentially, it is almost the
same as , with one exception: is larger than
all other elements.

Something very common in mathematics is to view structures "up to
isomorphism". For example, one talks about the "Field ",
even though in the common mathematical theories it is not unique. We
will do the same with well-orderings now: We *identify* isomorphic
well-orderings. Therefore, we can say, all finite well-orderings are
given by . We already began comparing finite
well-orderings by their homomorphisms, and we will just continue doing
so by saying, a well-ordering is less-than-or-equal a
well-ordering , written , if there is a
homomorphism . Then, we go even further, and
leave out the squared brackets, that is, we write
for the finite well-orderings.

We have already seen that the smallest infinite well-ordering is the canonical ordering on , which we will call .

**Lemma:** Let be a well-ordering, and . Then is also a well-ordering
which is not isomorphic to .

*Proof.* Trivially, is also a
well-ordering. Assuming there was an isomorphism , then is
non-empty and contains a smallest element such that
. Then there must be some such that
, but as was minimal and , we
know that , therefore , so ,
which means that since was minimal, and
therefore . Contradiction.

**Lemma and Definition:** For every two well-orderings , the
sum is defined as the disjoint union of and
with the ordering that makes every element of
smaller than every element of . is a
well-ordering.

*Proof.* Exercise.

**Notice:** The addition of well-orderings is not
commutative. , while is the
well-ordering in our above example.

**Lemma and Definition:** For every two well-orderings , the
product is the lexicographical ordering on
: .

**Notice:** The product of well-orderings is not
commutative. , but .

**Notice:** The finite well-orderings with addition and multiplication
are essentially .

While currently, in the realm of logic, there are new tools being built to cope with this problem, for well-orderings the problem has been solved in an elegant way inside set theory, which is currently the one which is most well developed.

**Definition:** A set is called an **ordinal number** if for
all we have and is
well-ordered by . The class of all ordinals is called
.

**Lemma:** for .

*Proof.* contains a minimum y. Assume there was some , then also , and would not be minimal
anymore. Contradiction. Thus, .

**Lemma:** If and , then .

*Proof.* As , also , so trivially,
is well-ordered by . Assuming and
, by transitivity we have , so every
element of is a subset. .

**Theorem:** If and are ordinals whose
well-orderings regarding are isomorphic, then .

*Proof.* Call the isomorphism . Assuming ,
there must be a minimal such that . Trivially, , so
. Therefore, there is some . Trivially, we have . Contradiction.

**Lemma:** If , then
.

*Proof.* By the foundation axiom, we get an such that
. Take any . ,
because . By linearity of
therefore .

**Lemma:** If is a set of ordinals, then is
an ordinal.

*Proof.* We first prove that is an ordinal number. If
, then , therefore
, therefore . We still have to prove that is a well-ordering on
. Irreflexivity follows from the foundation axiom: No
set may contain itself. For linearity, consider , that is, for all . Since
is an ordinal, linearity holds for
. Well-foundedness of follows from the
foundation axiom. Transitivity is left as an exercise.

Chaining a few more technical proofs, we could prove:

**Theorem:** For any two , we have .

**Corollary:** Every set of ordinals is well-ordered by the relation.

We can generalize induction on to induction on :

**Theorem (Transfinite Induction):** If for some proposition
we have , and , then .

*Proof.* Assume not. Then there is some with , and therefore is a
well-ordered set and has a minimum , which is also the
absolute minimum value with . Trivially,
, and by its minimality, also , but this would imply
. Contradiction.

**Lemma:** is not a set.

*Proof.* If was a set, . Contradiction.

**Lemma:** Every well-ordering is isomorphic to some
ordinal number.

*Proof Sketch.* Define . Now let be
defined for all . If there is still an remaining, set . Otherwise, proceed. Since we
assumed that is a set, this procedure must terminate at some
point.

The nice thing is that in this theory we have concrete objects to talk about, namely the ordinal numbers.

Zorn's Lemma is one of the three classically equivalent axioms independent of ZF: The well-ordering theorem, which says that every set has some well-ordering; the axiom of choice; and Zorn's Lemma. It is probably the one that is used most often explicitly outside of set theory.

**Zorn's Lemma:** If is transitive, reflexive and every
non-empty chain has an upper bound, then contains a maximal
element.

**Notice:** A maximal element might not be comparable with many other
elements.

**Definition:** A **Well-quasi-ordering** is a **well-ordering**
except for it may be nonlinear.

In some sense this generalizes the notion of trees. Zorn's Lemma can be reduced to some stronger property about well-quasi-orders:

**Theorem:** If is transitive, reflexive and every
non-empty well-ordered subset has an upper bound, then for all
, contains a maximal element which is
comparable to .

This essentially says that if every branch of the tree is bounded, then there are leaves.

A vector space over a field is defined by a few axioms that can easily be looked up in wikipedia.

**Theorem:** Every vector space has a base.

*Proof 1.* Take an arbitrary element. If the hull of the element is
the whole space, you are done. Otherwise, take an element which is
not in the hull and proceed. As the vector space is a set, this
process must "end" after infinitely many steps: After "set" many
steps.

*Proof 2.* All chains of bases are bounded. Therefore, by ZL, there is
a basis.

Proof 1 is normally only done in the finite case, but also works in the infinite case, because of transfinite induction. Proof 2 uses Zorn's Lemma. Similarly, algebraic closure and many other theorems use Zorn's Lemma.

**Fun Fact:** The groups and are isomorphic.

*Proof.* It is easy to show that both are vector spaces over
, and as such, they have a countable base.

Consider a simple programming language: You have an arbitrary number
of registers `a[i]`

which are initially `0`

and can store natural
numbers. You have a command `a[i]++`

which increments the register
`a[i]`

, and `a[i]--`

which decrements it if it is not `0`

. (We do not
allow constructs like `a[a[i]]`

, the `i`

must be constants.) You have
an `exit`

command. Furthermore, you have a command ```
if a[i] == 0 then
goto l1
```

where `l1`

is the line number to jump to. It is easy to show
that this language is turing complete. So especially, it is generally
undecidable, whether a program terminates.

However, we can change the semantics a bit: Attach every program line
with an additional function from ordinal numbers into ordinal
numbers (or, if you want to implement it on a real computer, other
well-orderings), which must satisfy the condition that it is strictly
sublinear, that is, for all
. In the beginning, you start with *some* ordinal
number, that will, at every step, be decreased through the
corresponding function. The program terminates on `exit`

as before, or
as soon as the ordinal is .

All programs in such a language terminate. What programs can be expressed depends on the values of the original ordinal number that we allow. By the way, this is not pure theory.

Wed, 03 Feb 2016 20:45:47 GMT

Dieser Inhalt wird in Deutschland nicht angezeigt. Weitere Informationen

I am currently cleaning up my feed list. Omg, so many pages do not maintain their feeds. Many of them abandoned them entirely. Why would an admin do this?

Ah, of course. It's because feeds were simple, worked well, were not centralized and controlled by a big company. Sorry for asking.

Tue, 26 Jan 2016 21:05:55 GMT

I have a lot of stuff to do, so during February, don't expect too many
new posts. Furthermore, all my bookmarks are on another computer, that
*should* have been synced using Firefox Sync, but somehow wasn't. So I
cannot finish this week's link list now.

Sat, 16 Jan 2016 22:02:41 GMT

Dieser Inhalt wird in Deutschland nicht angezeigt. Weitere Informationen

When it comes to web-browsing, I think I would call myself a "power-user". My current feed-list has 585 entries (some of them are comment feeds, but still a large number), and the usual way of reading them is letting newsbeuter manage them, then "pre-filter" them (most feeds do not have content embedded, and newsbeuter is a console ui, which is nice but not really suitable for webcomics, etc.) and putting them on a list that I then select in Firefox. Then I will use a feature of Tab Mix Plus, namely being able to open all selected links in new tabs. Furthermore, I configured Tab Mix Plus such that it shows these tabs in multiple lines. For a long period of time, I configured it to show the tab bar at the bottom rather than the top, but this feature someday began not to work properly anymore, and I did not have time to investigate further.

At present, Firefox seems to be the only browser that can really handle many tabs properly. Chromium has some extensions that sort-of work, but it is that kind of software that "knows best". Opera had this feature in the past, too, but they dropped it. Vivaldi seems nice with respect to that. I also used tab groups, which is a feature that will soon be dropped. Now I got used to bookmarks for the same purpose.

Firefox has a good crash recovery mechanism that usually works, but it
occurred to me several times now that my profile got broken and I lost
many of my bookmarks or at least had a lot of work restoring them, and
all the settings. In the past I used Weave, which is now called
Sync. However, I wanted to run my own server, because the official
Mozilla server had a 5 MiB boundary, which is not enough for me (and
besides that, I *have* a server, so I want to use it), but someday
they changed the protocol and dropped support for the old protocol,
and there was no easy way of setting up a new server.

These are several reasons why I am trying to keep my local Firefox profile working. So here is what I am doing:

Firstly, my Linux runs on a LVM. So I can create an additional logical volume "firefox", which I made a 512 MiB ext3 partition. This partition will be mounted (via fstab) to ~/.mozilla. And then, I use a straightforward backup procedure:

```
#!/usr/bin/env bash
# Snapshot-based backup of the "firefox" LVM volume: takes an atomic
# read-only LVM snapshot, rsyncs it into a numbered, hard-linked backup
# directory, and rotates out the oldest once BACKUPNUM is reached.
#
# BUGFIX: the original used `cmd || (wall ...; exit 1)` -- the exit only
# terminated the ( ) subshell, so the script happily continued after a
# failed lvcreate/mount.  `{ ...; }` groups run in the current shell and
# really abort.
PATH=/bin:/sbin:/usr/bin:/usr/sbin
SECPATH=/home/user/fbackups
BACKUPNUM=100

# Find the first unused backup index.
i=0
while test -e "${SECPATH}/backup.$i"; do ((i++)); done

# Atomic snapshot of the live volume.
lvcreate -l 100%FREE --snapshot /dev/lvm1/firefox -n firefox-b || { wall "konnte kein backup erstellen!"; exit 1; }
TMPFL=$(mktemp -d)
mount -o ro /dev/lvm1/firefox-b "${TMPFL}" || { wall "konnte backup erstellen aber nicht mounten. tmp-directory $TMPFL wurde nicht geloescht."; exit 1; }

# Incremental copy; unchanged files are hard-linked against the
# previous backup (on the very first run backup.-1 does not exist and
# rsync just does a full copy).
rsync -alH --delete "${TMPFL}/" "${SECPATH}/backup.$i" --link-dest "${SECPATH}/backup.$((i-1))"

umount /dev/lvm1/firefox-b || { wall "Konnte backup /dev/lvm1/firefox-b (auf $TMPFL) nicht unmounten!"; exit 1; }
lvremove -f /dev/lvm1/firefox-b || { wall "konnte /dev/lvm1/firefox-b nicht loeschen!"; exit 1; }
rm -rf "${TMPFL}" || { wall "konnte $TMPFL nicht loeschen!"; exit 1; }

# Rotation: drop the oldest backup and shift the rest down by one.
if [ "$i" -ge "$BACKUPNUM" ]; then
    rm -rf "${SECPATH}/backup.0"
    j=0
    while [ "$j" -lt "$i" ]; do
        mv "${SECPATH}/backup.$((j+1))" "${SECPATH}/backup.$j"
        ((j++))
    done
fi
exit 0
```

Of course, I could just rsync the original directory, but that already broke some of my configurations (probably because it is not "atomic"). Creating an LVM snapshot leaves a directory that looks like the system has crashed – a situation Firefox can usually handle. I am running this script every 10 minutes in a loop. It is not perfect, but it serves its purpose.

**Update:** I now use BTRFS which has an internal snapshot
mechanism. The code now looks like

```
# Every 10 minutes, take a read-only BTRFS snapshot of ~/.mozilla into
# snap/dot_mozilla_<unix-timestamp>, forever.
while true;
do btrfs subvolume snapshot -r .mozilla/ snap/dot_mozilla_`date -u +%s`;
sleep 600;
done
```