import fs from "fs";
import path from "path";
import wordCounting from "word-counting";
import { walk } from "./utils/fs-utils.js";
import util from 'node:util';
import child_process from 'node:child_process';

const exec = util.promisify(child_process.exec);

function toSlug(string: string) {
  return string.toLowerCase().replaceAll(' ', '-');
}

(async () => {
  const blockRefs: Record<string, string> = {};
  const blockLinks: Record<string, string> = {};
  const indices: string[] = [];

  // First pass over the exported pages: collect block IDs, their permalinks, and index pages
  await walk("./garden-output/logseq-pages", (dir, file, resolve) => {
    const filePath = path.resolve(dir, file);
    const data = fs.readFileSync(filePath).toString();
    const slug = path.basename(file, ".md")
      .replaceAll('___', '/')
      .replaceAll(/%3F/gi, '')
      .replace('what-is-content-', 'what-is-content');

    for (const match of data.matchAll(/(.*)\n\s*id:: (.*)/gm)) {
      const text = match[1];
      const id = match[2];
      const link = `/garden/${slug}#${id}`;
      blockLinks[id] = link;
      blockRefs[id] = `[${text}](${link})`;
    }

    if (data.match(/index: "true"/g)) {
      indices.push(slug);
    }

    resolve();
  });

  const pageLinks: Record<string, string> = {};
  const taggedBy: Record<string, string[]> = {};
  const tagged: Record<string, string[]> = {};
  const referencedBy: Record<string, string[]> = {};

  // Walk through the pages to make sure we get the canonical page name (pre-slug).
  // The logseq-export README made it sound like even the title property is transformed sometimes.
  await walk("./Garden/pages", (dir, file, resolve) => {
    const filePath = path.resolve(dir, file);
    let data = fs.readFileSync(filePath).toString();

    // Only public pages are processed
    if (data.match(/public::/g) == null) {
      resolve();
      return;
    }

    // Drop everything after the "- private" marker
    const startPrivate = data.indexOf("- private");
    if (startPrivate > 0) {
      data = data.slice(0, startPrivate);
    }

    const name = path.basename(file, ".md").replaceAll('___', '/');
    const slug = toSlug(name).replaceAll(/%3F/gi, '').replaceAll('\'', '-');
    const link = `/garden/${slug}`;
    pageLinks[name.replaceAll(/%3F/gi, '?')] = link;

    for (const match of data.matchAll(/alias:: (.*)/g)) {
      match[1].split(", ").forEach(page => (pageLinks[page] = link));
    }

    for (const match of data.matchAll(/tags:: (.*)/g)) {
      match[1].split(", ").forEach(page => {
        const pageSlug = toSlug(page);
        taggedBy[pageSlug] = [...(taggedBy[pageSlug] ?? []), name];
        tagged[slug] = [...(tagged[slug] ?? []), page];
      });
    }

    resolve();
  });

  // Second pass over the raw pages: collect backlinks from [[wiki links]] on non-index pages
  await walk("./Garden/pages", (dir, file, resolve) => {
    const filePath = path.resolve(dir, file);
    let data = fs.readFileSync(filePath).toString();

    if (data.match(/public::/g) == null) {
      resolve();
      return;
    }

    const name = path.basename(file, ".md").replaceAll('___', '/');
    const slug = toSlug(name).replaceAll(/%3F/gi, '').replaceAll('\'', '-');

    if (!indices.includes(slug)) {
      for (const match of data.matchAll(/\[\[([^\[\]]*)\]\]/g)) {
        const pageSlug = pageLinks[match[1].replaceAll(/%3F/gi, '?')];
        referencedBy[pageSlug] = [...(referencedBy[pageSlug] ?? []), name.replaceAll(/%3F/gi, '?')];
      }
    }

    resolve();
  });

  // De-duplicate the backlink lists
  Object.keys(referencedBy).forEach(page => {
    referencedBy[page] = Array.from(new Set(referencedBy[page]));
  });

  // Move everything from ./garden-output/logseq-assets into ./public/garden
  fs.mkdirSync("./content/garden", { recursive: true });

  await walk("./garden-output/logseq-pages", async (dir, file, resolve) => {
    const filePath = path.resolve(dir, file);
    let data = fs.readFileSync(filePath).toString();

    // Count words with a special set of transformations that should make the count more accurate:
    // strip the frontmatter, property lines, and link targets before counting
    const strippedData = data
      .replace(/---\n[\S\s]*\n---/gm, '')
      .replaceAll(/.*::.*/g, '')
      .replaceAll(/\[([^\]]*)\]\(.*\)/g, '$1');
    const wc = wordCounting(strippedData).wordsCount;
    data = data.replace(/---\n\n/gm, `wordCount: ${wc}\n---\n\n`);

    // Remove properties that shouldn't end up in the published frontmatter
    data = data.replace(/public: .*\n/, '');
    data = data.replace(/slug: .*\n/, '');
    data = data.replace(/alias: .*\n/, '');

    const contentPath = path.resolve("./content/garden", path.relative("./garden-output/logseq-pages", file));

    // Derive published/edited metadata from the file's git history
    const firstCommit = exec(`git log -n 1 --diff-filter=A --format="%H,%at" -- "${contentPath}"`)
      .then(output => output.stdout)
      .catch(err => console.warn(`Error calculating first commit for ${contentPath}:\n${err}`));
    const lastCommit = exec(`git log -n 1 --diff-filter=M --format="%H,%at" -- "${contentPath}"`)
      .then(output => output.stdout)
      .catch(err => console.warn(`Error calculating last commit for ${contentPath}:\n${err}`));

    const [hash, timestampString] = (await firstCommit)?.trim().split(",") ?? ["", ""];
    const timestamp = parseInt(timestampString);
    data = data.replace(/---\n\n/gm, `published:\n hash: ${hash}\n timestamp: ${timestamp * 1000}\n---\n\n`);

    if (await lastCommit) {
      const [hash, timestampString] = (await lastCommit)!.trim().split(",");
      const timestamp = parseInt(timestampString);
      data = data.replace(/---\n\n/gm, `edited:\n hash: ${hash}\n timestamp: ${timestamp * 1000}\n---\n\n`);
    }

    // Replace youtube embeds
    data = data.replaceAll(
      /{{video https:\/\/(?:www\.)?youtube\.com\/watch\?v=(.*)}}/g,
      '