90fa787be803 — Peter Sanchez a month ago
Merge tedu upstream
M TODO +4 -0
@@ 11,3 11,7 @@ The gob encoding for backend rpc uses mo
 Several columns and indices have potentially long shared prefixes.
 	These could be stored rearranged, perhaps with last four bytes prepended.
 	https://here/there/9876 -> 9876https://here/there/9876
+
+Stop double fetching when doing a search.
+
+Fix click to expand images.

          
M activity.go +15 -15
@@ 37,8 37,9 @@ import (
 	"humungus.tedunangst.com/r/webs/templates"
 )
 
-var theonetruename = `application/ld+json; profile="https://www.w3.org/ns/activitystreams"`
-var thefakename = `application/activity+json`
+const theonetruename = `application/ld+json; profile="https://www.w3.org/ns/activitystreams"`
+const allowit = theonetruename + `,application/activity+json`
+
 var falsenames = []string{
 	`application/ld+json`,
 	`application/activity+json`,

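The two media types are now combined into a single constant, presumably sent as the Accept header by the webs/junk fetcher (see the GetJunkTimeout hunk below). A standalone sketch of the resulting header, using only net/http for illustration:

    package main

    import (
        "fmt"
        "net/http"
    )

    const theonetruename = `application/ld+json; profile="https://www.w3.org/ns/activitystreams"`
    const allowit = theonetruename + `,application/activity+json`

    func main() {
        // illustrative only: honk's real fetches go through the webs/junk helpers
        req, err := http.NewRequest("GET", "https://example.com/actor", nil)
        if err != nil {
            fmt.Println(err)
            return
        }
        req.Header.Set("Accept", allowit)
        fmt.Println(req.Header.Get("Accept"))
    }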
          
@@ 103,7 104,7 @@ func PostMsg(keyname string, key httpsig
 	case 201:
 	case 202:
 	default:
-		var buf [4096]byte
+		var buf [80]byte
 		n, _ := resp.Body.Read(buf[:])
 		dlog.Printf("post failure message: %s", buf[:n])
 		return fmt.Errorf("http post status: %d", resp.StatusCode)

          
@@ 172,7 173,7 @@ func GetJunkTimeout(userid UserID, url s
 		return nil
 	}
 	fn := func() (interface{}, error) {
-		at := theonetruename
+		at := allowit
 		if strings.Contains(url, ".well-known/webfinger?resource") {
 			at = "application/jrd+json"
 		}

          
@@ 711,8 712,8 @@ func xonksaver2(user *WhatAbout, item ju
 			return nil
 		case "Remove":
 			xid, _ = item.GetString("object")
-			targ, _ := obj.GetString("target")
-			ilog.Printf("remove %s from %s", obj, targ)
+			targ, _ := item.GetString("target")
+			ilog.Printf("remove %s from %s", xid, targ)
 			return nil
 		case "Tombstone":
 			xid, _ = item.GetString("id")

          
@@ 1240,17 1241,15 @@ func xonksaver2(user *WhatAbout, item ju
 			}
 			enc, _ := obj.GetString(chatKeyProp)
 			if enc != "" {
-				var dec string
 				if pubkey, ok := getchatkey(xonk.Honker); ok {
-					dec, err = decryptString(xonk.Noise, user.ChatSecKey, pubkey)
+					dec, err := decryptString(xonk.Noise, user.ChatSecKey, pubkey)
 					if err != nil {
 						ilog.Printf("failed to decrypt chonk")
+					} else {
+						dlog.Printf("successful decrypt from %s", xonk.Honker)
+						xonk.Noise = dec
 					}
 				}
-				if err == nil {
-					dlog.Printf("successful decrypt from %s", xonk.Honker)
-					xonk.Noise = dec
-				}
 			}
 			ch := Chonk{
 				UserID: xonk.UserID,

          
@@ 1657,12 1656,13 @@ func chonkifymsg(user *WhatAbout, rcpt s
 	content := string(ch.HTML)
 	if user.ChatSecKey.key != nil {
 		if pubkey, ok := getchatkey(rcpt); ok {
-			var err error
-			content, err = encryptString(content, user.ChatSecKey, pubkey)
+			enc, err := encryptString(content, user.ChatSecKey, pubkey)
 			if err != nil {
 				ilog.Printf("failure encrypting chonk: %s", err)
+			} else {
+				content = enc
+				jo[chatKeyProp] = user.Options.ChatPubKey
 			}
-			jo[chatKeyProp] = user.Options.ChatPubKey
 		}
 	}
 	jo["content"] = content

          
M backupdb.go +73 -31
@@ 45,7 45,12 @@ func svalbard(dirname string) {
 		elog.Fatalf("can't create directory: %s", dirname)
 	}
 	now := time.Now().Unix()
-	backupdbname := fmt.Sprintf("%s/honk-%d.db", dirname, now)
+	dirname = fmt.Sprintf("%s/honk-%d", dirname, now)
+	err = os.Mkdir(dirname, 0700)
+	if err != nil {
+		elog.Fatalf("can't create directory: %s", dirname)
+	}
+	backupdbname := fmt.Sprintf("%s/honk.db", dirname)
 	backup, err := sql.Open("sqlite3", backupdbname)
 	if err != nil {
 		elog.Fatalf("can't open backup database")

          
@@ 154,18 159,26 @@ func svalbard(dirname string) {
 	}
 	filexids := make(map[string]bool)
 	for f := range fileids {
-		rows = qordie(orig, "select fileid, xid, name, description, url, media, local from filemeta where fileid = ?", f)
+		rows = qordie(orig, "select fileid, xid, name, description, url, media, local, meta from filemeta where fileid = ?", f)
 		for rows.Next() {
 			var fileid int64
-			var xid, name, description, url, media string
+			var xid, name, description, url, media, meta string
 			var local int64
-			scanordie(rows, &fileid, &xid, &name, &description, &url, &media, &local)
+			scanordie(rows, &fileid, &xid, &name, &description, &url, &media, &local, &meta)
 			filexids[xid] = true
-			doordie(tx, "insert into filemeta (fileid, xid, name, description, url, media, local) values (?, ?, ?, ?, ?, ?, ?)", fileid, xid, name, description, url, media, local)
+			doordie(tx, "insert into filemeta (fileid, xid, name, description, url, media, local, meta) values (?, ?, ?, ?, ?, ?, ?, ?)", fileid, xid, name, description, url, media, local, meta)
 		}
 		rows.Close()
 	}
-
+	for xid := range filexids {
+		rows = qordie(orig, "select media, hash from filehashes where xid = ?", xid)
+		for rows.Next() {
+			var media, hash string
+			scanordie(rows, &media, &hash)
+			doordie(tx, "insert into filehashes (xid, media, hash) values (?, ?, ?)", xid, media, hash)
+		}
+		rows.Close()
+	}
 	rows = qordie(orig, "select key, value from config")
 	for rows.Next() {
 		var key string

          
@@ 178,36 191,65 @@ func svalbard(dirname string) {
 	if err != nil {
 		elog.Fatalf("can't commit backp: %s", err)
 	}
+	tx = nil
 	backup.Close()
 
-	backupblobname := fmt.Sprintf("%s/blob-%d.db", dirname, now)
-	blob, err := sql.Open("sqlite3", backupblobname)
-	if err != nil {
-		elog.Fatalf("can't open backup blob database")
+	var blob *sql.DB
+	var filesavepath string
+	if storeTheFilesInTheFileSystem {
+		filesavepath = fmt.Sprintf("%s/attachments", dirname)
+		os.Mkdir(filesavepath, 0700)
+		filesavepath += "/"
+	} else {
+		backupblobname := fmt.Sprintf("%s/blob.db", dirname)
+		blob, err = sql.Open("sqlite3", backupblobname)
+		if err != nil {
+			elog.Fatalf("can't open backup blob database")
+		}
+		_, err = blob.Exec("PRAGMA journal_mode=WAL")
+		doordie(blob, "create table filedata (xid text, content blob)")
+		doordie(blob, "create index idx_filexid on filedata(xid)")
+		tx, err = blob.Begin()
+		if err != nil {
+			elog.Fatalf("can't start transaction: %s", err)
+		}
+		stmtSaveBlobData, err = tx.Prepare("insert into filedata (xid, content) values (?, ?)")
+		checkErr(err)
 	}
-	_, err = blob.Exec("PRAGMA journal_mode=WAL")
-	doordie(blob, "create table filedata (xid text, media text, hash text, content blob)")
-	doordie(blob, "create index idx_filexid on filedata(xid)")
-	doordie(blob, "create index idx_filehash on filedata(hash)")
-	tx, err = blob.Begin()
-	if err != nil {
-		elog.Fatalf("can't start transaction: %s", err)
-	}
-	origblob := openblobdb()
-	for x := range filexids {
-		rows = qordie(origblob, "select xid, media, hash, content from filedata where xid = ?", x)
-		for rows.Next() {
-			var xid, media, hash string
-			var content sql.RawBytes
-			scanordie(rows, &xid, &media, &hash, &content)
-			doordie(tx, "insert into filedata (xid, media, hash, content) values (?, ?, ?, ?)", xid, media, hash, content)
+	for xid := range filexids {
+		if storeTheFilesInTheFileSystem {
+			oldname := filepath(xid)
+			newname := filesavepath + oldname[14:]
+			os.Mkdir(newname[:strings.LastIndexByte(newname, '/')], 0700)
+			err = os.Link(oldname, newname)
+			if err == nil {
+				continue
+			}
 		}
-		rows.Close()
+		data, closer, err := loaddata(xid)
+		if err != nil {
+			elog.Printf("lost a file: %s", xid)
+			continue
+		}
+		if storeTheFilesInTheFileSystem {
+			oldname := filepath(xid)
+			newname := filesavepath + oldname[14:]
+			err = os.WriteFile(newname, data, 0700)
+		} else {
+			_, err = stmtSaveBlobData.Exec(xid, data)
+		}
+		if err != nil {
+			elog.Printf("failed to save file %s: %s", xid, err)
+		}
+		closer()
 	}
 
-	err = tx.Commit()
-	if err != nil {
-		elog.Fatalf("can't commit blobs: %s", err)
+	if blob != nil {
+		err = tx.Commit()
+		if err != nil {
+			elog.Fatalf("can't commit blobs: %s", err)
+		}
+		blob.Close()
 	}
-	blob.Close()
+	fmt.Printf("backup saved to %s\n", dirname)
 }

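With the rewritten svalbard() above, a backup run now produces a single per-invocation directory rather than two timestamped database files. Roughly (directory name and timestamp are illustrative):

    backups/honk-1718000000/
        honk.db         copied main database, including the new filehashes rows
        attachments/    hard links or copies of the files, when usefilestore is enabled
        blob.db         otherwise, a slimmed filedata table holding only referenced xids

Only one of attachments/ or blob.db is created, depending on the usefilestore setting.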
          
M cli.go +40 -0
@@ 175,6 175,24 @@ var commands = map[string]cmd{
 			cleanupdb(arg)
 		},
 	},
+	"storefiles": {
+		help: "store attachments as files",
+		callback: func(args []string) {
+			setconfig("usefilestore", 1)
+		},
+	},
+	"storeblobs": {
+		help: "store attachments as blobs",
+		callback: func(args []string) {
+			setconfig("usefilestore", 0)
+		},
+	},
+	"extractblobs": {
+		help: "extract blobs to file store",
+		callback: func(args []string) {
+			extractblobs()
+		},
+	},
 	"unplug": {
 		help: "disconnect from a dead server",
 		callback: func(args []string) {

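A plausible migration sequence with the new commands (extractblobs refuses to run unless the file store is already enabled, per the guard in filestoragemanagerfactory.go below):

    honk storefiles      # new attachments are written to the attachments/ directory
    honk extractblobs    # copy existing blobs out of blob.db into the file store

After extraction, blob.db is reported as redundant and can be set aside.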
          
@@ 210,6 228,28 @@ var commands = map[string]cmd{
 			ping(user, targ)
 		},
 	},
+	"extractchatkey": {
+		help: "extract secret chat key from user",
+		callback: func(args []string) {
+			if len(args) < 3 || args[2] != "yesimsure" {
+				errx("usage: honk extractchatkey [username] yesimsure")
+			}
+			user, _ := butwhatabout(args[1])
+			if user == nil {
+				errx("user not found")
+			}
+			fmt.Printf("%s\n", user.Options.ChatSecKey)
+			user.Options.ChatSecKey = ""
+			j, err := jsonify(user.Options)
+			if err == nil {
+				db := opendatabase()
+				_, err = db.Exec("update users set options = ? where username = ?", j, user.Name)
+			}
+			if err != nil {
+				elog.Printf("error bouting what: %s", err)
+			}
+		},
+	},
 	"run": {
 		help: "run honk",
 		callback: func(args []string) {

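Given the usage string above, extracting the key for a hypothetical user alice looks like:

    honk extractchatkey alice yesimsure

The base64 secret key is printed once and ChatSecKey is blanked in the stored options, so the web interface can no longer decrypt that user's chats (see docs/encrypted-messages.txt below).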
          
M database.go +20 -148
@@ 17,7 17,6 @@ package main
 
 import (
 	"bytes"
-	"crypto/sha512"
 	"database/sql"
 	_ "embed"
 	"encoding/json"

          
@@ 58,7 57,9 @@ func userfromrow(row *sql.Row) (*WhatAbo
 			elog.Printf("error processing user options: %s", err)
 		}
 		user.ChatPubKey.key, _ = b64tokey(user.Options.ChatPubKey)
-		user.ChatSecKey.key, _ = b64tokey(user.Options.ChatSecKey)
+		if user.Options.ChatSecKey != "" {
+			user.ChatSecKey.key, _ = b64tokey(user.Options.ChatSecKey)
+		}
 	} else {
 		user.URL = serverURL("/%s", user.Name)
 	}

          
@@ 573,103 574,6 @@ func donksforchonks(chonks []*Chonk) {
 	}
 }
 
-func savefile(name string, desc string, url string, media string, local bool, data []byte, meta *DonkMeta) (int64, error) {
-	fileid, _, err := savefileandxid(name, desc, url, media, local, data, meta)
-	return fileid, err
-}
-
-func hashfiledata(data []byte) string {
-	h := sha512.New512_256()
-	h.Write(data)
-	return fmt.Sprintf("%x", h.Sum(nil))
-}
-
-func savefileandxid(name string, desc string, url string, media string, local bool, data []byte, meta *DonkMeta) (int64, string, error) {
-	var xid string
-	if local {
-		hash := hashfiledata(data)
-		row := stmtCheckFileData.QueryRow(hash)
-		err := row.Scan(&xid)
-		if err == sql.ErrNoRows {
-			xid = xfiltrate()
-			switch media {
-			case "image/png":
-				xid += ".png"
-			case "image/jpeg":
-				xid += ".jpg"
-			case "image/svg+xml":
-				xid += ".svg"
-			case "application/pdf":
-				xid += ".pdf"
-			case "text/plain":
-				xid += ".txt"
-			}
-			_, err = stmtSaveFileData.Exec(xid, media, hash, data)
-			if err != nil {
-				return 0, "", err
-			}
-		} else if err != nil {
-			elog.Printf("error checking file hash: %s", err)
-			return 0, "", err
-		}
-		if url == "" {
-			url = serverURL("/d/%s", xid)
-		}
-	}
-
-	j := "{}"
-	if meta != nil {
-		j, _ = jsonify(meta)
-	}
-	res, err := stmtSaveFile.Exec(xid, name, desc, url, media, local, j)
-	if err != nil {
-		return 0, "", err
-	}
-	fileid, _ := res.LastInsertId()
-	return fileid, xid, nil
-}
-
-func getfileinfo(xid string) *Donk {
-	donk := new(Donk)
-	row := stmtGetFileInfo.QueryRow(xid)
-	err := row.Scan(&donk.URL)
-	if err == nil {
-		donk.XID = xid
-		return donk
-	}
-	if err != sql.ErrNoRows {
-		elog.Printf("error finding file: %s", err)
-	}
-	return nil
-}
-
-func finddonkid(fileid int64, url string) *Donk {
-	donk := new(Donk)
-	row := stmtFindFileId.QueryRow(fileid, url)
-	err := row.Scan(&donk.XID, &donk.Local, &donk.Desc)
-	if err == nil {
-		donk.FileID = fileid
-		return donk
-	}
-	if err != sql.ErrNoRows {
-		elog.Printf("error finding file: %s", err)
-	}
-	return nil
-}
-
-func finddonk(url string) *Donk {
-	donk := new(Donk)
-	row := stmtFindFile.QueryRow(url)
-	err := row.Scan(&donk.FileID, &donk.XID)
-	if err == nil {
-		return donk
-	}
-	if err != sql.ErrNoRows {
-		elog.Printf("error finding file: %s", err)
-	}
-	return nil
-}
-
 func savechonk(ch *Chonk) error {
 	dt := ch.Date.UTC().Format(dbtimeformat)
 	db := opendatabase()

          
@@ 770,9 674,9 @@ func menewnone(userid UserID) {
 	somenumberedusers.Clear(user.ID)
 }
 
-func loadchatter(userid UserID) []*Chatter {
+func loadchatter(userid UserID, wanted int64) []*Chatter {
 	duedt := time.Now().Add(-3 * 24 * time.Hour).UTC().Format(dbtimeformat)
-	rows, err := stmtLoadChonks.Query(userid, duedt)
+	rows, err := stmtLoadChonks.Query(userid, duedt, wanted)
 	if err != nil {
 		elog.Printf("error loading chonks: %s", err)
 		return nil

          
@@ 1166,46 1070,7 @@ func cleanupdb(arg string) {
 		doordie(db, "delete from zonkers where userid = ? and wherefore = 'zonvoy' and zonkerid < (select zonkerid from zonkers where userid = ? and wherefore = 'zonvoy' order by zonkerid desc limit 1 offset 200)", u.UserID, u.UserID)
 	}
 
-	filexids := make(map[string]bool)
-	g_blobdb = openblobdb()
-	rows, err := g_blobdb.Query("select xid from filedata")
-	if err != nil {
-		elog.Fatal(err)
-	}
-	for rows.Next() {
-		var xid string
-		err = rows.Scan(&xid)
-		if err != nil {
-			elog.Fatal(err)
-		}
-		filexids[xid] = true
-	}
-	rows.Close()
-	rows, err = db.Query("select xid from filemeta")
-	for rows.Next() {
-		var xid string
-		err = rows.Scan(&xid)
-		if err != nil {
-			elog.Fatal(err)
-		}
-		delete(filexids, xid)
-	}
-	rows.Close()
-	tx, err := g_blobdb.Begin()
-	if err != nil {
-		elog.Fatal(err)
-	}
-	for xid := range filexids {
-		_, err = tx.Exec("delete from filedata where xid = ?", xid)
-		if err != nil {
-			elog.Fatal(err)
-		}
-	}
-	err = tx.Commit()
-	if err != nil {
-		elog.Fatal(err)
-	}
-	closedatabases()
+	cleanupfiles()
 }
 
 func getusercount() int {

          
@@ 1239,8 1104,8 @@ var stmtHonksByOntology, stmtHonksForUse
 var sqlHonksFromLongAgo string
 var stmtHonksByHonker, stmtSaveHonk, stmtUserByName, stmtUserByNumber *sql.Stmt
 var stmtEventHonks, stmtOneBonk, stmtFindZonk, stmtFindXonk, stmtSaveDonk *sql.Stmt
-var stmtGetFileInfo, stmtFindFile, stmtFindFileId, stmtGetFileData, stmtSaveFileData, stmtSaveFile *sql.Stmt
-var stmtCheckFileData *sql.Stmt
+var stmtGetFileInfo, stmtFindFile, stmtFindFileId, stmtSaveFile *sql.Stmt
+var stmtGetFileMedia, stmtSaveFileHash, stmtCheckFileHash *sql.Stmt
 var stmtAddDoover, stmtGetDoovers, stmtLoadDoover, stmtZapDoover, stmtOneHonker *sql.Stmt
 var stmtUntagged, stmtDeleteHonk, stmtDeleteDonks, stmtDeleteOnts, stmtSaveZonker *sql.Stmt
 var stmtGetZonkers, stmtRecentHonkers, stmtGetXonker, stmtSaveXonker, stmtDeleteXonker, stmtDeleteOldXonkers *sql.Stmt

          
@@ 1254,6 1119,7 @@ var stmtGetUserCount *sql.Stmt
 var stmtGetActiveUserCount *sql.Stmt
 var stmtGetLocalHonkCount *sql.Stmt
 var stmtDeliquentCheck, stmtDeliquentUpdate *sql.Stmt
+var stmtGetBlobData, stmtSaveBlobData *sql.Stmt
 
 func preparetodie(db *sql.DB, s string) *sql.Stmt {
 	stmt, err := db.Prepare(s)

          
@@ 1330,10 1196,9 @@ func prepareStatements(db *sql.DB) {
 	stmtSaveDonk = preparetodie(db, "insert into donks (honkid, chonkid, fileid) values (?, ?, ?)")
 	stmtDeleteDonks = preparetodie(db, "delete from donks where honkid = ?")
 	stmtSaveFile = preparetodie(db, "insert into filemeta (xid, name, description, url, media, local, meta) values (?, ?, ?, ?, ?, ?, ?)")
-	g_blobdb = openblobdb()
-	stmtSaveFileData = preparetodie(g_blobdb, "insert into filedata (xid, media, hash, content) values (?, ?, ?, ?)")
-	stmtCheckFileData = preparetodie(g_blobdb, "select xid from filedata where hash = ?")
-	stmtGetFileData = preparetodie(g_blobdb, "select media, content from filedata where xid = ?")
+	stmtSaveFileHash = preparetodie(db, "insert into filehashes (xid, hash, media) values (?, ?, ?)")
+	stmtCheckFileHash = preparetodie(db, "select xid from filehashes where hash = ?")
+	stmtGetFileMedia = preparetodie(db, "select media from filehashes where xid = ?")
 	stmtFindXonk = preparetodie(db, "select honkid from honks where userid = ? and xid = ?")
 	stmtGetFileInfo = preparetodie(db, "select url from filemeta where xid = ?")
 	stmtFindFile = preparetodie(db, "select fileid, xid from filemeta where url = ? and local = 1")

          
@@ 1362,11 1227,18 @@ func prepareStatements(db *sql.DB) {
 	stmtDeleteFilter = preparetodie(db, "delete from hfcs where userid = ? and hfcsid = ?")
 	stmtGetTracks = preparetodie(db, "select fetches from tracks where xid = ?")
 	stmtSaveChonk = preparetodie(db, "insert into chonks (userid, xid, who, target, dt, noise, format) values (?, ?, ?, ?, ?, ?, ?)")
-	stmtLoadChonks = preparetodie(db, "select chonkid, userid, xid, who, target, dt, noise, format from chonks where userid = ? and dt > ? order by chonkid asc")
+	stmtLoadChonks = preparetodie(db, "select chonkid, userid, xid, who, target, dt, noise, format from chonks where userid = ? and dt > ? and chonkid > ? order by chonkid asc")
 	stmtGetChatters = preparetodie(db, "select distinct(target) from chonks where userid = ?")
 	stmtGetUserCount = preparetodie(db, "select count(*) from users where userid > 0")
 	stmtGetActiveUserCount = preparetodie(db, "select count(distinct honker) from honks where whofore = 2 and dt > ?")
 	stmtGetLocalHonkCount = preparetodie(db, "select count(*) from honks where whofore = 2")
 	stmtDeliquentCheck = preparetodie(db, "select dooverid, msg from doovers where userid = ? and rcpt = ?")
 	stmtDeliquentUpdate = preparetodie(db, "update doovers set msg = ? where dooverid = ?")
+	g_blobdb = openblobdb()
+	if g_blobdb != nil {
+		stmtSaveBlobData = preparetodie(g_blobdb, "insert into filedata (xid, content) values (?, ?)")
+		stmtGetBlobData = preparetodie(g_blobdb, "select content from filedata where xid = ?")
+	} else if !storeTheFilesInTheFileSystem {
+		elog.Fatal("the blob.db has disappeared")
+	}
 }

          
M deliverator.go +2 -2
@@ 42,7 42,7 @@ func sayitagain(doover Doover) {
 		drift = time.Duration(doover.Tries*5) * time.Minute
 	} else if doover.Tries <= 6 { // 1, 2, 3 hours
 		drift = time.Duration(doover.Tries-3) * time.Hour
-	} else if doover.Tries <= 9 { // 12, 12, 12 hours
+	} else if doover.Tries <= 16 { // 12 hours
 		drift = time.Duration(12) * time.Hour
 	} else {
 		ilog.Printf("he's dead jim: %s", doover.Rcpt)

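For scale: under the widened schedule, tries 1-3 wait 5 + 10 + 15 minutes, tries 4-6 wait 1 + 2 + 3 hours, and tries 7-16 each wait 12 hours, so delivery to a dead recipient is retried for roughly 0.5 + 6 + 120 = 126.5 hours (a bit over five days) before giving up, versus about 42 hours under the old cap of 9 tries.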
          
@@ 61,7 61,7 @@ func sayitagain(doover Doover) {
 	}
 }
 
-const nearlyDead = 8
+const nearlyDead = 15
 
 func lethaldose(err error) int64 {
 	str := err.Error()

          
M docs/changelog.txt +4 -0
@@ 2,6 2,10 @@ changelog
 
 ### next
 
++ Switch to storing attachments in the file system.
+
++ Some styling changes.
+
 + Experimental support for C2S activities.
 
 + Try harder to retrieve threads from the database.

          
M docs/encrypted-messages.txt +9 -0
@@ 27,6 27,15 @@ 5. Replace message content.
 
 The public key is duplicated in the actor and the message.
 
+Implementation
+
+By default, the private keys are stored on the server, so this isn't
+end-to-end. This makes it easier to deploy, as it's transparent to the user.
+
+By running the extractchatkey command, the key is removed from the database
+and printed to the terminal, where it can be added to end devices. After this, the
+web interface is no longer able to read encrypted chats.
+
 Notes
 
 This doesn't support shared group keys. Messages need to be encrypted per recipient.

          
M docs/honk.8 +38 -0
@@ 153,6 153,24 @@ Follow and unfollow requests can be sent
 .Ic follow Ar username Ar url
 and
 .Ic unfollow Ar username Ar url .
+.Ss Storage
+By default,
+.Nm
+will store attachments in a blob database,
+.Pa blob.db .
+It may be more convenient to store attachments in the file system,
+in the
+.Pa attachments
+directory.
+Related commands:
+.Bl -tag -width extractblobs
+.It Ic storefiles
+Switch to storing attachments in the file system.
+.It Ic storeblobs
+Switch to storing attachments in the blob.db.
+.It Ic extractblobs
+Copy blobs from the blob.db into the file system.
+.El
 .Ss Maintenance
 The database may grow large over time.
 The

          
@@ 242,6 260,24 @@ Custom URL seperators (not "u" and "h") 
 "usersep" and "honksep" options to the config table.
 e.g. example.com/users/username/honk/somehonk instead of
 example.com/u/username/h/somehonk.
+.Bl -tag -width collectforwards
+.It fasttimeout
+Short timeout for fetching activities.
+(Default: 5 seconds)
+.It slowtimeout
+Long timeout for posting activities.
+(Default: 30 seconds)
+.It honkwindow
+How many days to display in a timeline.
+(Default: 7)
+.It collectforwards
+Fetch reply activities forwarded from other servers.
+(Default: true)
+.It usersep
+(Default: u)
+.It honksep
+(Default: h)
+.El
 .Sh FILES
 .Nm
 files are split between the data directory and the view directory.

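These options live in the same config table the usersep/honksep note above refers to, so setting one is a single row against honk.db while honk is stopped, e.g. (hypothetical value; use update instead if the key already exists):

    insert into config (key, value) values ('fasttimeout', 10);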
          
@@ 253,6 289,8 @@ The data directory contains:
 The main database.
 .It Pa blob.db
 Media and attachment storage.
+.It Pa attachments
+Alternate location for media and attachment storage.
 .It Pa emus
 Custom emoji.
 .It Pa memes

          
M encrypt.go +6 -0
@@ 36,6 36,9 @@ type boxPubKey struct {
 }
 
 func encryptString(plain string, seckey boxSecKey, pubkey boxPubKey) (string, error) {
+	if seckey.key == nil {
+		return "", fmt.Errorf("no secret key")
+	}
 	var nonce [24]byte
 	rand.Read(nonce[:])
 	out := box.Seal(nil, []byte(plain), &nonce, pubkey.key, seckey.key)

          
@@ 49,6 52,9 @@ func encryptString(plain string, seckey 
 }
 
 func decryptString(encmsg string, seckey boxSecKey, pubkey boxPubKey) (string, error) {
+	if seckey.key == nil {
+		return "", fmt.Errorf("no secret key")
+	}
 	var buf bytes.Buffer
 	b64 := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encmsg))
 	io.Copy(&buf, b64)

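With the nil-key guards above, both helpers fail cleanly once the secret key has been extracted from the server. For reference, a standalone round trip with nacl/box that mirrors the pubkey/seckey pairing encryptString and decryptString use (keys generated on the spot, so this is independent of honk's key storage and wire format):

    package main

    import (
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/nacl/box"
    )

    func main() {
        alicePub, aliceSec, _ := box.GenerateKey(rand.Reader)
        bobPub, bobSec, _ := box.GenerateKey(rand.Reader)

        var nonce [24]byte
        rand.Read(nonce[:])

        // the sender seals with the recipient's public key and its own secret key
        sealed := box.Seal(nil, []byte("hello chonk"), &nonce, bobPub, aliceSec)

        // the recipient opens with the sender's public key and its own secret key
        opened, ok := box.Open(nil, sealed, &nonce, alicePub, bobSec)
        fmt.Println(string(opened), ok)
    }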
          
A => filestoragemanagerfactory.go +320 -0
@@ 0,0 1,320 @@ 
+//
+// Copyright (c) 2024 Ted Unangst <tedu@tedunangst.com>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+package main
+
+import (
+	"crypto/sha512"
+	"database/sql"
+	"errors"
+	"fmt"
+	"io/fs"
+	"net/http"
+	"os"
+	"path"
+	"strings"
+)
+
+var storeTheFilesInTheFileSystem = false
+
+func hashfiledata(data []byte) string {
+	h := sha512.New512_256()
+	h.Write(data)
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func filepath(xid string) string {
+	parts := strings.SplitN(xid, ".", 2)
+	subdir := "xx"
+	if len(parts[0]) == 21 {
+		subdir = xid[:2]
+	}
+	fname := fmt.Sprintf("%s/attachments/%s/%s", dataDir, subdir, xid)
+	return fname
+}
+
+func savefile(name string, desc string, url string, media string, local bool, data []byte, meta *DonkMeta) (int64, error) {
+	fileid, _, err := savefileandxid(name, desc, url, media, local, data, meta)
+	return fileid, err
+}
+
+func savefiledata(xid string, data []byte) error {
+	if storeTheFilesInTheFileSystem {
+		fname := filepath(xid)
+		os.Mkdir(fname[:strings.LastIndexByte(fname, '/')], 0700)
+		err := os.WriteFile(fname, data, 0700)
+		return err
+	} else {
+		_, err := stmtSaveBlobData.Exec(xid, data)
+		return err
+	}
+}
+
+func savefileandxid(name string, desc string, url string, media string, local bool, data []byte, meta *DonkMeta) (int64, string, error) {
+	var xid string
+	if local {
+		hash := hashfiledata(data)
+		row := stmtCheckFileHash.QueryRow(hash)
+		err := row.Scan(&xid)
+		if err == sql.ErrNoRows {
+			xid = xfildate()
+			switch media {
+			case "image/png":
+				xid += ".png"
+			case "image/jpeg":
+				xid += ".jpg"
+			case "image/svg+xml":
+				xid += ".svg"
+			case "application/pdf":
+				xid += ".pdf"
+			case "text/plain":
+				xid += ".txt"
+			}
+			err = savefiledata(xid, data)
+			if err == nil {
+				_, err = stmtSaveFileHash.Exec(xid, hash, media)
+			}
+			if err != nil {
+				return 0, "", err
+			}
+		} else if err != nil {
+			elog.Printf("error checking file hash: %s", err)
+			return 0, "", err
+		}
+		if url == "" {
+			url = serverURL("/d/%s", xid)
+		}
+	}
+
+	j := "{}"
+	if meta != nil {
+		j, _ = jsonify(meta)
+	}
+	res, err := stmtSaveFile.Exec(xid, name, desc, url, media, local, j)
+	if err != nil {
+		return 0, "", err
+	}
+	fileid, _ := res.LastInsertId()
+	return fileid, xid, nil
+}
+
+func getfileinfo(xid string) *Donk {
+	donk := new(Donk)
+	row := stmtGetFileInfo.QueryRow(xid)
+	err := row.Scan(&donk.URL)
+	if err == nil {
+		donk.XID = xid
+		return donk
+	}
+	if err != sql.ErrNoRows {
+		elog.Printf("error finding file: %s", err)
+	}
+	return nil
+}
+
+func finddonkid(fileid int64, url string) *Donk {
+	donk := new(Donk)
+	row := stmtFindFileId.QueryRow(fileid, url)
+	err := row.Scan(&donk.XID, &donk.Local, &donk.Desc)
+	if err == nil {
+		donk.FileID = fileid
+		return donk
+	}
+	if err != sql.ErrNoRows {
+		elog.Printf("error finding file: %s", err)
+	}
+	return nil
+}
+
+func finddonk(url string) *Donk {
+	donk := new(Donk)
+	row := stmtFindFile.QueryRow(url)
+	err := row.Scan(&donk.FileID, &donk.XID)
+	if err == nil {
+		return donk
+	}
+	if err != sql.ErrNoRows {
+		elog.Printf("error finding file: %s", err)
+	}
+	return nil
+}
+
+func loadfiledata(xid string) ([]byte, func(), error) {
+	fname := filepath(xid)
+	data, err := os.ReadFile(fname)
+	return data, func() {}, err
+}
+
+var errNoBlob = errors.New("no blobdb")
+
+func loadblobdata(xid string) ([]byte, func(), error) {
+	if g_blobdb == nil {
+		return nil, nil, errNoBlob
+	}
+
+	var data sql.RawBytes
+	rows, err := stmtGetBlobData.Query(xid)
+	if err != nil {
+		return nil, nil, err
+	}
+	if rows.Next() {
+		err = rows.Scan(&data)
+	} else {
+		err = errors.New("blob not found")
+	}
+	return data, func() { rows.Close() }, err
+}
+
+func loaddata(xid string) ([]byte, func(), error) {
+	if storeTheFilesInTheFileSystem {
+		data, closer, err := loadfiledata(xid)
+		if err == nil {
+			return data, closer, err
+		}
+		return loadblobdata(xid)
+	} else {
+		data, closer, err := loadblobdata(xid)
+		if err == nil {
+			return data, closer, err
+		}
+		return loadfiledata(xid)
+	}
+}
+
+func servefiledata(w http.ResponseWriter, r *http.Request, xid string) {
+	var media string
+	row := stmtGetFileMedia.QueryRow(xid)
+	err := row.Scan(&media)
+	if err != nil {
+		elog.Printf("error loading file: %s", err)
+		http.NotFound(w, r)
+		return
+	}
+	data, closer, err := loaddata(xid)
+	if err != nil {
+		elog.Printf("error loading file: %s", err)
+		http.NotFound(w, r)
+		return
+	}
+	defer closer()
+	preview := r.FormValue("preview") == "1"
+	if preview && strings.HasPrefix(media, "image") {
+		img, err := lilshrink(data)
+		if err == nil {
+			data = img.Data
+		}
+	}
+	w.Header().Set("Content-Type", media)
+	w.Header().Set("X-Content-Type-Options", "nosniff")
+	w.Header().Set("Cache-Control", "max-age="+somedays())
+	w.Write(data)
+}
+
+func checkErr(err error) {
+	if err != nil {
+		elog.Fatal(err)
+	}
+}
+
+func cleanupfiles() {
+	var rows *sql.Rows
+	var err error
+	scan := func() string {
+		var xid string
+		err = rows.Scan(&xid)
+		checkErr(err)
+		return xid
+	}
+
+	filexids := make(map[string]bool)
+	if storeTheFilesInTheFileSystem {
+		walker := func(pathname string, ent fs.DirEntry, err error) error {
+			if ent.IsDir() {
+				return nil
+			}
+			fname := path.Base(pathname)
+			filexids[fname] = true
+			return nil
+		}
+		dir := os.DirFS(dataDir)
+		fs.WalkDir(dir, "attachments", walker)
+	} else {
+		rows, err = g_blobdb.Query("select xid from filedata")
+		checkErr(err)
+		for rows.Next() {
+			xid := scan()
+			filexids[xid] = true
+		}
+		rows.Close()
+	}
+
+	db := opendatabase()
+	rows, err = db.Query("select xid from filemeta")
+	checkErr(err)
+	for rows.Next() {
+		xid := scan()
+		delete(filexids, xid)
+	}
+	rows.Close()
+
+	tx, err := db.Begin()
+	checkErr(err)
+	for xid := range filexids {
+		_, err = tx.Exec("delete from filehashes where xid = ?", xid)
+		checkErr(err)
+	}
+	err = tx.Commit()
+	checkErr(err)
+
+	if storeTheFilesInTheFileSystem {
+		for xid := range filexids {
+			fname := filepath(xid)
+			ilog.Printf("should remove %s", fname)
+			//os.Remove(fname)
+		}
+	} else {
+		tx, err = g_blobdb.Begin()
+		checkErr(err)
+		for xid := range filexids {
+			_, err = tx.Exec("delete from filedata where xid = ?", xid)
+			checkErr(err)
+		}
+		err = tx.Commit()
+		checkErr(err)
+	}
+
+	closedatabases()
+}
+
+func extractblobs() {
+	if !storeTheFilesInTheFileSystem {
+		elog.Fatal("can only extract blobs when using filestore")
+	}
+	if g_blobdb == nil {
+		elog.Fatal("the blob.db is already gone")
+	}
+	rows, err := g_blobdb.Query("select xid, content from filedata")
+	checkErr(err)
+	defer rows.Close()
+	for rows.Next() {
+		var xid string
+		var data sql.RawBytes
+		err = rows.Scan(&xid, &data)
+		checkErr(err)
+		err = savefiledata(xid, data)
+		checkErr(err)
+	}
+	fmt.Printf("extraction complete. blob.db is redundant.\n")
+}

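Worth noting: filepath() buckets attachments by the first two characters of the xid, and the new xfildate() helper in fun.go (below) folds the current day count into those leading bytes, so files created around the same time appear to share a subdirectory (assuming xcelerate encodes one character per byte, which the 21-character check suggests). Anything that is not a 21-character xid falls back to the xx bucket. Illustrative layout with hypothetical names:

    <datadir>/attachments/AB/ABcdefghijklmnopqrstu.png    new-style dated xid
    <datadir>/attachments/xx/some-older-or-imported-xid   everything else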
          
M fun.go +11 -1
@@ 127,7 127,7 @@ func reverbolate(userid UserID, honks []
 		h.Open = "open"
 		var misto string
 		for _, m := range h.Mentions {
-			if m.Where != h.Honker && !m.IsPresent(h.Noise) {
+			if m.Where != h.Honker && m.Where != h.Oonker && !m.IsPresent(h.Noise) {
 				misto += " " + m.Who
 			}
 		}

          
@@ 418,6 418,16 @@ func xfiltrate() string {
 	return xcelerate(b[:])
 }
 
+func xfildate() string {
+	var b [21]byte
+	rand.Read(b[:])
+	now := time.Now().Unix() / 60 / 60 / 24
+	b[2] = byte(now & 63)
+	b[1] = byte((now / 64) & 63)
+	b[0] = byte((now / 64 / 64) & 63)
+	return xcelerate(b[:])
+}
+
 func grapevine(mentions []Mention) []string {
 	var s []string
 	for _, m := range mentions {

          
M import.go +2 -3
@@ 641,20 641,19 @@ func export(username, file string) {
 		if donk == "" {
 			continue
 		}
-		var media string
 		var data []byte
 		w, err := zd.Create("media/" + donk)
 		if err != nil {
 			elog.Printf("error creating %s: %s", donk, err)
 			continue
 		}
-		row := stmtGetFileData.QueryRow(donk)
-		err = row.Scan(&media, &data)
+		data, closer, err := loaddata(donk)
 		if err != nil {
 			elog.Printf("error scanning file %s: %s", donk, err)
 			continue
 		}
 		w.Write(data)
+		closer()
 	}
 	zd.Close()
 	fd.Close()

          
M main.go +9 -0
@@ 16,9 16,11 @@ 
 package main
 
 import (
+	"errors"
 	"flag"
 	"fmt"
 	"html/template"
+	"io/fs"
 	golog "log"
 	"log/syslog"
 	notrand "math/rand"

          
@@ 46,6 48,7 @@ var iconName = "icon.png"
 var serverMsg template.HTML
 var aboutMsg template.HTML
 var loginMsg template.HTML
+var collectForwards = true
 
 func serverURL(u string, args ...interface{}) string {
 	return fmt.Sprintf("https://"+serverName+u, args...)

          
@@ 131,6 134,10 @@ func main() {
 	if os.Geteuid() == 0 {
 		elog.Fatalf("do not run honk as root")
 	}
+	err := os.Mkdir(dataDir+"/attachments", 0700)
+	if err != nil && !errors.Is(err, fs.ErrExist) {
+		errx("can't create attachments directory: %s", err)
+	}
 
 	args := flag.Args()
 	cmd := "run"

          
@@ 151,6 158,7 @@ func main() {
 	if dbversion != myVersion {
 		elog.Fatal("incorrect database version. run upgrade.")
 	}
+	getconfig("usefilestore", &storeTheFilesInTheFileSystem)
 	getconfig("servermsg", &serverMsg)
 	getconfig("aboutmsg", &aboutMsg)
 	getconfig("loginmsg", &loginMsg)

          
@@ 170,6 178,7 @@ func main() {
 	getconfig("slowtimeout", &slowTimeout)
 	getconfig("honkwindow", &honkwindow)
 	honkwindow *= 24 * time.Hour
+	getconfig("collectforwards", &collectForwards)
 
 	prepareStatements(db)
 

          
M schema.sql +3 -0
@@ 2,6 2,7 @@ create table honks (honkid integer prima
 create table chonks (chonkid integer primary key, userid integer, xid text, who txt, target text, dt text, noise text, format text);
 create table donks (honkid integer, chonkid integer, fileid integer);
 create table filemeta (fileid integer primary key, xid text, name text, description text, url text, media text, local integer, meta text);
+create table filehashes (xid text, hash text, media text);
 create table honkers (honkerid integer primary key, userid integer, name text, xid text, flavor text, combos text, owner text, meta text, folxid text);
 create table xonkers (xonkerid integer primary key, name text, info text, flavor text, dt text);
 create table zonkers (zonkerid integer primary key, userid integer, name text, wherefore text);

          
@@ 26,6 27,8 @@ create index idx_xonkername on xonkers(n
 create index idx_zonkersname on zonkers(name);
 create index idx_filesxid on filemeta(xid);
 create index idx_filesurl on filemeta(url);
+create index idx_filehashes on filehashes(hash);
+create index idx_filehashesxid on filehashes(xid);
 create index idx_ontology on onts(ontology);
 create index idx_onthonkid on onts(honkid);
 create index idx_honkmetaid on honkmeta(honkid);

          
M upgradedb.go +43 -40
@@ 23,7 23,7 @@ import (
 	"humungus.tedunangst.com/r/webs/htfilter"
 )
 
-var myVersion = 51 // filemeta.meta
+var myVersion = 53 // index filehashes.xid
 
 type dbexecer interface {
 	Exec(query string, args ...interface{}) (sql.Result, error)

          
@@ 74,34 74,24 @@ func upgradedb() {
 	switch dbversion {
 	case 41:
 		tx, err := db.Begin()
-		if err != nil {
-			elog.Fatal(err)
-		}
+		checkErr(err)
 		rows, err := tx.Query("select honkid, noise from honks where format = 'markdown' and precis <> ''")
-		if err != nil {
-			elog.Fatal(err)
-		}
+		checkErr(err)
 		m := make(map[int64]string)
 		var dummy Honk
 		for rows.Next() {
 			err = rows.Scan(&dummy.ID, &dummy.Noise)
-			if err != nil {
-				elog.Fatal(err)
-			}
+			checkErr(err)
 			precipitate(&dummy)
 			m[dummy.ID] = dummy.Noise
 		}
 		rows.Close()
 		for id, noise := range m {
 			_, err = tx.Exec("update honks set noise = ? where honkid = ?", noise, id)
-			if err != nil {
-				elog.Fatal(err)
-			}
+			checkErr(err)
 		}
 		err = tx.Commit()
-		if err != nil {
-			elog.Fatal(err)
-		}
+		checkErr(err)
 		doordie(db, "update config set value = 42 where key = 'dbversion'")
 		fallthrough
 	case 42:

          
@@ 132,35 122,25 @@ func upgradedb() {
 			return plain
 		}
 		tx, err = db.Begin()
-		if err != nil {
-			elog.Fatal(err)
-		}
+		checkErr(err)
 		plainmap := make(map[int64][]string)
 		rows, err := tx.Query("select honkid, noise, precis, format from honks")
-		if err != nil {
-			elog.Fatal(err)
-		}
+		checkErr(err)
 		for rows.Next() {
 			var honkid int64
 			var noise, precis, format string
 			err = rows.Scan(&honkid, &noise, &precis, &format)
-			if err != nil {
-				elog.Fatal(err)
-			}
+			checkErr(err)
 			plainmap[honkid] = makeplain(noise, precis, format)
 		}
 		rows.Close()
 		rows, err = tx.Query("select honkid, name, description from donks join filemeta on donks.fileid = filemeta.fileid")
-		if err != nil {
-			elog.Fatal(err)
-		}
+		checkErr(err)
 		for rows.Next() {
 			var honkid int64
 			var name, desc string
 			err = rows.Scan(&honkid, &name, &desc)
-			if err != nil {
-				elog.Fatal(err)
-			}
+			checkErr(err)
 			plainmap[honkid] = append(plainmap[honkid], name)
 			plainmap[honkid] = append(plainmap[honkid], desc)
 		}

          
@@ 170,9 150,7 @@ func upgradedb() {
 		}
 		setV(45)
 		err = tx.Commit()
-		if err != nil {
-			elog.Fatal(err)
-		}
+		checkErr(err)
 		tx = nil
 		fallthrough
 	case 45:

          
@@ 190,13 168,9 @@ func upgradedb() {
 			var user WhatAbout
 			var jopt string
 			err = rows.Scan(&user.ID, &jopt)
-			if err != nil {
-				elog.Fatal(err)
-			}
+			checkErr(err)
 			err = unjsonify(jopt, &user.Options)
-			if err != nil {
-				elog.Fatal(err)
-			}
+			checkErr(err)
 			users = append(users, &user)
 		}
 		rows.Close()

          
@@ 223,6 197,35 @@ func upgradedb() {
 		setV(51)
 		fallthrough
 	case 51:
+		hashes := make(map[string]string)
+		blobdb := openblobdb()
+		rows, err := blobdb.Query("select xid, hash, media from filedata")
+		checkErr(err)
+		for rows.Next() {
+			var xid, hash, media string
+			err = rows.Scan(&xid, &hash, &media)
+			checkErr(err)
+			hashes[xid] = hash + " " + media
+		}
+		rows.Close()
+		tx, err = db.Begin()
+		checkErr(err)
+		try("create table filehashes (xid text, hash text, media text)")
+		try("create index idx_filehashes on filehashes(hash)")
+		for xid, data := range hashes {
+			parts := strings.Split(data, " ")
+			try("insert into filehashes (xid, hash, media) values (?, ?, ?)", xid, parts[0], parts[1])
+		}
+		setV(52)
+		err = tx.Commit()
+		checkErr(err)
+		tx = nil
+		fallthrough
+	case 52:
+		try("create index idx_filehashesxid on filehashes(xid)")
+		setV(53)
+		fallthrough
+	case 53:
 		try("analyze")
 		closedatabases()
 

          
M util.go +3 -7
@@ 179,7 179,7 @@ func initblobdb(blobdbname string) {
 		elog.Print(err)
 		return
 	}
-	_, err = blobdb.Exec("create table filedata (xid text, media text, hash text, content blob)")
+	_, err = blobdb.Exec("create table filedata (xid text, content blob)")
 	if err != nil {
 		elog.Print(err)
 		return

          
@@ 189,11 189,6 @@ func initblobdb(blobdbname string) {
 		elog.Print(err)
 		return
 	}
-	_, err = blobdb.Exec("create index idx_filehash on filedata(hash)")
-	if err != nil {
-		elog.Print(err)
-		return
-	}
 	blobdb.Close()
 }
 

          
@@ 397,7 392,8 @@ func openblobdb() *sql.DB {
 	blobdbname := dataDir + "/blob.db"
 	_, err := os.Stat(blobdbname)
 	if err != nil {
-		elog.Fatalf("unable to open database: %s", err)
+		return nil
+		//elog.Fatalf("unable to open database: %s", err)
 	}
 	db, err := sql.Open("sqlite3", blobdbname)
 	if err != nil {

          
M web.go +19 -29
@@ 499,7 499,7 @@ func postinbox(w http.ResponseWriter, r 
 	origin := keymatch(keyname, who)
 	if origin == "" {
 		ilog.Printf("keyname actor mismatch: %s <> %s", keyname, who)
-		if what == "Create" {
+		if collectForwards && what == "Create" {
 			var xid string
 			obj, ok := j.GetMap("object")
 			if ok {

          
@@ 2260,7 2260,7 @@ func showhonkers(w http.ResponseWriter, 
 func showchatter(w http.ResponseWriter, r *http.Request) {
 	u := login.GetUserInfo(r)
 	chatnewnone(UserID(u.UserID))
-	chatter := loadchatter(UserID(u.UserID))
+	chatter := loadchatter(UserID(u.UserID), 0)
 	for _, chat := range chatter {
 		for _, ch := range chat.Chonks {
 			filterchonk(ch)

          
@@ 2764,33 2764,8 @@ func servefile(w http.ResponseWriter, r 
 		return
 	}
 	xid := mux.Vars(r)["xid"]
-	preview := r.FormValue("preview") == "1"
-	var media string
-	var data sql.RawBytes
-	rows, err := stmtGetFileData.Query(xid)
-	if err == nil {
-		defer rows.Close()
-		if rows.Next() {
-			err = rows.Scan(&media, &data)
-		} else {
-			data, err = refetchfile(xid)
-		}
-	}
-	if err != nil {
-		elog.Printf("error loading file: %s", err)
-		http.NotFound(w, r)
-		return
-	}
-	if preview && strings.HasPrefix(media, "image") {
-		img, err := lilshrink(data)
-		if err == nil {
-			data = img.Data
-		}
-	}
-	w.Header().Set("Content-Type", media)
-	w.Header().Set("X-Content-Type-Options", "nosniff")
-	w.Header().Set("Cache-Control", "max-age="+somedays())
-	w.Write(data)
+
+	servefiledata(w, r, xid)
 }
 
 func nomoroboto(w http.ResponseWriter, r *http.Request) {

          
@@ 3026,6 3001,21 @@ func apihandler(w http.ResponseWriter, r
 			return
 		}
 		fmt.Fprintf(w, "%d", h.ID)
+	case "getchatter":
+		wanted, _ := strconv.ParseInt(r.FormValue("after"), 10, 0)
+		chatnewnone(UserID(u.UserID))
+		user, _ := butwhatabout(u.Username)
+		chatter := loadchatter(UserID(u.UserID), wanted)
+		for _, chat := range chatter {
+			for _, ch := range chat.Chonks {
+				filterchonk(ch)
+			}
+		}
+		j := junk.New()
+		j["chatter"] = chatter
+		j["mecount"] = user.Options.MeCount
+		j["chatcount"] = user.Options.ChatCount
+		j.Write(w)
 	default:
 		http.Error(w, "unknown action", http.StatusNotFound)
 		return
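A minimal client-side sketch of the new getchatter action, assuming honk's existing token-authenticated /api endpoint (the endpoint path and token flow are not part of this diff); after is the largest chonkid the client has already seen:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "net/url"
        "os"
    )

    func main() {
        // hypothetical values; the token comes from honk's normal API login flow
        server := "https://example.com/api"
        token := os.Getenv("HONK_TOKEN")
        after := "12345" // only chonks with a larger chonkid are returned

        resp, err := http.PostForm(server, url.Values{
            "action": {"getchatter"},
            "token":  {token},
            "after":  {after},
        })
        if err != nil {
            fmt.Println(err)
            return
        }
        defer resp.Body.Close()
        body, _ := io.ReadAll(resp.Body)
        fmt.Println(string(body)) // JSON with chatter, mecount, chatcount
    }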