Nick — 2025-11-21 15:54:00
The Rebol Forum was previously hosted on an inexpensive Lunarpages account. Since Lunarpages was purchased by Hostpapa, support for those old hosting packages has been incrementally deprecated. They recently required me to use Cloudflare to manage DNS, and their most recent update removed execute permission for rebol.exe.
So I've made a quick conversion of the rebolforum app to Python, and saved all the historical message posts to an SQLite database, which is downloadable via a link at the bottom of the home page.
The final bb.db and archive.db files used by the original application are still available at:
https://rebolforum.com/bb.db
https://rebolforum.com/archive
The offline reader application from 2010 is still available at:
http://www.rebol.org/view-script.r?script=forum-reader.r
Only a few lines are required:
REBOL [title: "Forum Reader"]
; Offline reader for locally saved Rebol Forum messages.
; The loaded file holds one block per topic: a title string followed by
; repeating message/author/time values (see the migration script's notes).
topics: copy [] database: copy []
; Reload the topic list from a user-selected messages file and refresh the UI.
update: does [
    topics: copy []
    database: copy load to-file request-file/title/file
        "Load Messages:" "" %rebolforum.txt
    ; Collect the first value of each topic block (the title) for the list.
    foreach topic database [
        append topics first topic
    ]
    t1/data: copy topics
    show t1
]
view layout [
    across
    ; Clicking a topic renders all of its messages into the text area a1.
    t1: text-list 200x400 data topics [
        messages: copy ""
        ; Skip the title (position 1) and walk message/author/time triples.
        foreach [message name time] (at pick database t1/cnt 2) [
            append messages rejoin [
                message newline newline name " " time newline newline
                "---------------------------------------" newline newline
            ]
        ]
        a1/text: copy messages
        show a1
    ]
    a1: area 400x400
    btn "Load Locally Saved Messages" [update]
]
Nick — 2025-11-21 15:55:20
Here's code used to migrate the old Rebol data files to sqlite (entirely generated by GPT):
#!/usr/bin/env python
"""
migrate_from_rebol_dbs_fix_encoding.py
Directly parse Rebol bb.db + archive.db into a SQLite forum.sqlite
compatible with the Flask app (topics + posts tables).
Each topic is stored as a Rebol block like:
[
"Topic title"
{Message 1 text}
"Author 1"
"19-Dec-2022/9:55:06-8:00"
{Message 2 text}
"Author 2"
"20-Dec-2022/22:48:42-8:00"
...
]
Strings may be delimited by "..." or {...}.
Key point here: we decode the Rebol files as cp1252 WITH errors="replace"
so that smart quotes/apostrophes survive, and only truly undefined bytes
(e.g., 0x9D) are turned into the replacement character.
"""
import re
import sqlite3
from pathlib import Path
from datetime import datetime
# Paths (adjust if needed)
BB_PATH = Path("bb.db")  # live Rebol forum data (block-per-topic format)
ARCHIVE_PATH = Path("archive.db")  # archived topics, same format
OUT_DB = Path("forum.sqlite")  # output database consumed by the Flask app
# Rebol data was likely saved in Windows ANSI (cp1252) on Windows
SOURCE_ENCODING = "cp1252"
def _skip_quoted(src: str, i: int) -> int:
    """Return the index just past the double-quoted string starting at src[i].

    ``^`` escapes the next character (Rebol escape syntax).  If the string
    is unterminated, returns len(src).
    """
    i += 1  # step over the opening quote
    n = len(src)
    while i < n:
        ch = src[i]
        if ch == "^" and i + 1 < n:
            i += 2
            continue
        if ch == '"':
            return i + 1
        i += 1
    return i


def _skip_braced(src: str, i: int) -> int:
    """Return the index just past the {...} string starting at src[i].

    Braces nest; ``^`` escapes the next character.  If the string is
    unterminated, returns len(src).
    """
    n = len(src)
    depth = 1
    i += 1  # step over the opening brace
    while i < n and depth > 0:
        ch = src[i]
        if ch == "^" and i + 1 < n:
            i += 2
        elif ch == "{":
            depth += 1
            i += 1
        elif ch == "}":
            depth -= 1
            i += 1
        else:
            i += 1
    return i


def iter_top_blocks(src: str):
    """
    Return a list of (start_index, end_index, block_text) tuples, one for
    each top-level [...] block, skipping [ and ] that occur inside "..." or
    {...} strings.

    Note: despite the historical name, this returns a list (callers rely on
    len()), not a generator.  end_index is exclusive, i.e. the index just
    past the closing bracket.
    """
    blocks = []
    i = 0
    n = len(src)
    while i < n:
        c = src[i]
        # Strings at the top level are opaque: brackets inside don't count.
        if c == '"':
            i = _skip_quoted(src, i)
            continue
        if c == "{":
            i = _skip_braced(src, i)
            continue
        if c == "[":
            start = i
            depth = 1
            i += 1
            while i < n and depth > 0:
                ch = src[i]
                # Strings inside the block are opaque too.
                if ch == '"':
                    i = _skip_quoted(src, i)
                elif ch == "{":
                    i = _skip_braced(src, i)
                elif ch == "[":
                    depth += 1
                    i += 1
                elif ch == "]":
                    depth -= 1
                    i += 1
                    if depth == 0:
                        blocks.append((start, i, src[start:i]))
                else:
                    i += 1
            # An unterminated block is silently dropped, matching the
            # original behavior.
            continue
        i += 1
    return blocks
def tokenize_rebol_block(block: str):
    """
    Given a block string like "[ ... ]", return a list of tokens as Python
    strings.

    We treat:
      - "..." as a single string token (with ^ escapes)
      - {...} as a single string token (with ^ escapes, and nested {})
      - everything else as bare tokens split on whitespace (stopping at [ or ]).

    Raises:
        ValueError: if *block* does not start with "[" and end with "]".
    """
    # Validate explicitly instead of with `assert`, which is silently
    # stripped when Python runs with -O.
    if not block or block[0] != "[" or block[-1] != "]":
        raise ValueError("expected a [...] block, got: %r" % (block[:40],))
    src = block[1:-1]
    tokens = []
    i = 0
    n = len(src)
    while i < n:
        c = src[i]
        # Skip whitespace between tokens.
        if c.isspace():
            i += 1
            continue
        # Double-quoted string token.
        if c == '"':
            i += 1
            buf = []
            while i < n:
                ch = src[i]
                if ch == "^" and i + 1 < n:
                    # ^X escape: keep the escaped character literally.
                    buf.append(src[i + 1])
                    i += 2
                    continue
                if ch == '"':
                    i += 1
                    break
                buf.append(ch)
                i += 1
            tokens.append("".join(buf))
            continue
        # Curly-braced string token (braces may nest).
        if c == "{":
            i += 1
            buf = []
            depth = 1
            while i < n and depth > 0:
                ch = src[i]
                if ch == "^" and i + 1 < n:
                    buf.append(src[i + 1])
                    i += 2
                    continue
                if ch == "{":
                    depth += 1
                    buf.append(ch)
                    i += 1
                    continue
                if ch == "}":
                    depth -= 1
                    if depth == 0:
                        i += 1
                        break
                    buf.append(ch)
                    i += 1
                    continue
                buf.append(ch)
                i += 1
            tokens.append("".join(buf))
            continue
        # Ignore stray [ or ] inside the block.
        if c in "[]":
            i += 1
            continue
        # Bare token: runs to the next whitespace or bracket.
        start = i
        while i < n and (not src[i].isspace()) and src[i] not in "[]":
            i += 1
        tokens.append(src[start:i])
        continue
    return tokens
# Date parsing for sanity checks.
# Matches Rebol timestamps like "19-Dec-2022/9:55:06-8:00"; the trailing
# UTC offset (if present) is intentionally not captured.
DT_RE = re.compile(
    r"(\d{1,2})-([A-Za-z]{3})-(\d{4})/(\d{1,2}):(\d{2})(?::(\d{2}))?"
)
# Rebol month abbreviations -> month numbers.
MONTHS = {
    "Jan": 1,
    "Feb": 2,
    "Mar": 3,
    "Apr": 4,
    "May": 5,
    "Jun": 6,
    "Jul": 7,
    "Aug": 8,
    "Sep": 9,
    "Oct": 10,
    "Nov": 11,
    "Dec": 12,
}


def parse_rebol_dt(s: str):
    """Parse a Rebol timestamp like ``19-Dec-2022/9:55:06-8:00``.

    Returns a naive :class:`datetime.datetime` (any trailing UTC offset in
    *s* is ignored), or ``None`` when *s* contains no parseable timestamp.
    """
    m = DT_RE.search(s)
    if not m:
        return None
    d, mon_s, y, h, mi, se = m.groups()
    # An unrecognized month abbreviation means the data is malformed.
    # Previously it silently defaulted to January, which could skew the
    # latest-timestamp sanity check; treat it as unparseable instead.
    # .title() additionally tolerates case variants like "DEC"/"dec".
    month = MONTHS.get(mon_s.title())
    if month is None:
        return None
    try:
        return datetime(
            int(y),
            month,
            int(d),
            int(h),
            int(mi),
            int(se) if se else 0,
        )
    except ValueError:
        # Out-of-range components (e.g. day 32, hour 25) -> unparseable.
        return None
def load_topics_from_file(path: Path, is_archived_flag: int):
    """
    Parse a Rebol db file into a list of topics:
        [(is_archived, title, [(msg, author, ts), ...]), ...]
    """
    print("Reading", path)
    # Decode as cp1252; bytes undefined in that codepage (like 0x9D) become
    # the Unicode replacement character rather than raising.
    text = path.read_bytes().decode(SOURCE_ENCODING, errors="replace")
    blocks = iter_top_blocks(text)
    print(" Found", len(blocks), "blocks")
    topics = []
    for _, _, block_text in blocks:
        fields = tokenize_rebol_block(block_text)
        # A usable topic needs a title plus at least one full triple.
        if len(fields) < 4:
            continue
        heading = fields[0].strip()
        body = fields[1:]
        # Group the remaining tokens into (msg, author, ts) triples;
        # any trailing partial group is dropped.
        triples = [
            (body[j], body[j + 1], body[j + 2])
            for j in range(0, len(body) - 2, 3)
        ]
        if triples:
            topics.append((is_archived_flag, heading, triples))
    print(" Parsed", len(topics), "topics from", path.name)
    total_posts = sum(len(entry[2]) for entry in topics)
    print(" Total posts from", path.name + ":", total_posts)
    return topics
def migrate():
    """Build forum.sqlite from the two Rebol data files.

    Reads ARCHIVE_PATH (archived topics) and BB_PATH (live topics), prints
    sanity-check statistics, and writes a fresh OUT_DB containing `topics`
    and `posts` tables for the Flask app.

    Raises:
        FileNotFoundError: if either input file is missing.
    """
    if not BB_PATH.exists():
        raise FileNotFoundError(str(BB_PATH) + " not found")
    if not ARCHIVE_PATH.exists():
        raise FileNotFoundError(str(ARCHIVE_PATH) + " not found")

    # Parse archive first, then live, so archived topics get the lower ids.
    topics = []
    topics += load_topics_from_file(ARCHIVE_PATH, is_archived_flag=1)
    topics += load_topics_from_file(BB_PATH, is_archived_flag=0)

    print()
    print("Combined topics:", len(topics))
    total_posts = sum(len(t[2]) for t in topics)
    print("Combined posts:", total_posts)

    _report_latest_timestamp(topics)
    _write_database(topics)


def _report_latest_timestamp(topics):
    """Print the newest parseable post timestamp, as a parsing sanity check."""
    latest_dt = None
    latest_raw = None
    latest_title = None
    for _is_arch, title, triples in topics:
        for _msg, _author, ts in triples:
            dt = parse_rebol_dt(ts)
            if dt and (latest_dt is None or dt > latest_dt):
                latest_dt = dt
                latest_raw = ts
                latest_title = title
    if latest_dt:
        print(
            "Latest timestamp in data:",
            latest_dt,
            "(raw",
            repr(latest_raw) + ")",
            "in topic",
            repr(latest_title),
        )
    else:
        print("WARNING: no timestamps parsed")


def _write_database(topics):
    """Create OUT_DB from scratch and insert every topic and post."""
    if OUT_DB.exists():
        print("Removing existing", OUT_DB)
        OUT_DB.unlink()
    conn = sqlite3.connect(str(OUT_DB))
    # try/finally so the connection is closed even if an insert fails
    # (the original leaked the connection on error).
    try:
        cur = conn.cursor()
        cur.executescript(
            """
            CREATE TABLE topics (
                id INTEGER PRIMARY KEY,
                is_archived INTEGER NOT NULL DEFAULT 0,
                title TEXT NOT NULL,
                author TEXT,
                created_at TEXT,
                is_sticky INTEGER NOT NULL DEFAULT 0,
                permalink TEXT
            );
            CREATE TABLE posts (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                topic_id INTEGER NOT NULL,
                body TEXT NOT NULL,
                author TEXT,
                created_at TEXT,
                post_index INTEGER,
                FOREIGN KEY (topic_id) REFERENCES topics(id)
            );
            """
        )
        for topic_id, (is_arch, title, triples) in enumerate(topics, start=1):
            # The topic row carries the first post's author/timestamp.
            first_author = triples[0][1]
            first_ts = triples[0][2]
            cur.execute(
                """
                INSERT INTO topics (id, is_archived, title, author, created_at, is_sticky, permalink)
                VALUES (?, ?, ?, ?, ?, 0, NULL)
                """,
                (topic_id, is_arch, title, first_author, first_ts),
            )
            for idx, (msg, author, ts) in enumerate(triples, start=1):
                cur.execute(
                    """
                    INSERT INTO posts (topic_id, body, author, created_at, post_index)
                    VALUES (?, ?, ?, ?, ?)
                    """,
                    (topic_id, msg, author, ts, idx),
                )
        conn.commit()
        c_topics = cur.execute("SELECT COUNT(*) FROM topics").fetchone()[0]
        c_posts = cur.execute("SELECT COUNT(*) FROM posts").fetchone()[0]
    finally:
        conn.close()
    print()
    print("Migration complete. Created", OUT_DB)
    print(" topics:", c_topics)
    print(" posts: ", c_posts)
if __name__ == "__main__":
migrate()
Nick — 2025-11-21 15:58:16
At some point I may take a few more moments to reconfigure DNS and run this again at rebolforum.com, but for the moment it serves the purpose well enough.
Please let me know if you discover any issue with the application, or if any messages have been omitted.