gronk/notes2web.py

318 lines
12 KiB
Python
Raw Normal View History

2021-06-29 13:35:21 +00:00
#!/usr/bin/env python3
from bs4 import BeautifulSoup as bs
2021-08-21 01:14:12 +00:00
import subprocess
2021-08-15 19:40:13 +00:00
import frontmatter
2021-06-29 13:35:21 +00:00
import magic
import sys
import pathlib
import pypandoc
import shutil
import os
import re
2021-08-19 13:43:42 +00:00
import json
2021-06-29 13:35:21 +00:00
2021-06-29 13:35:21 +00:00
# Template fragments, populated from template files at runtime.
# NOTE(review): main() assigns same-named *local* variables without a
# `global` statement, so these module-level values appear to stay None
# and unused — confirm before relying on them elsewhere.
TEXT_ARTICLE_TEMPLATE_FOOT = None
TEXT_ARTICLE_TEMPLATE_HEAD = None
INDEX_TEMPLATE_FOOT = None
INDEX_TEMPLATE_HEAD = None
EXTRA_INDEX_CONTENT = None
2021-06-29 13:35:21 +00:00
2021-06-29 13:35:21 +00:00
def get_files(folder):
    """Recursively partition the files under *folder* into three lists.

    Returns a tuple ``(markdown, plaintext, other)``:
      - markdown:  files with a ``.md`` extension
      - plaintext: non-markdown files whose MIME type is ``text/*``
      - other:     everything else, PLUS every plaintext file (so the raw
        text file is copied to the output alongside its HTML wrapper)

    Anything inside a ``.git`` directory is skipped.
    """
    markdown = []
    plaintext = []
    other = []
    for root, _subdirs, basenames in os.walk(folder):
        # Skip the entire contents of any .git directory.
        if '/.git' in root:
            continue
        for basename in basenames:
            path = os.path.join(root, basename)
            if os.path.splitext(path)[1] == '.md':
                markdown.append(path)
            elif magic.from_file(path, mime=True).startswith('text/'):
                # Plaintext goes in both lists: wrapped as HTML *and*
                # copied verbatim for the "raw" link.
                plaintext.append(path)
                other.append(path)
            else:
                other.append(path)
    return markdown, plaintext, other
2021-08-21 01:14:12 +00:00
def git_filehistory(working_dir, filename):
    """Return the ``git log -p`` history of *filename* as a string.

    *filename* must live inside *working_dir* (``relative_to`` raises
    ValueError otherwise).  Falls back to an explanatory message when git
    exits non-zero, and to a "no history" note when git succeeds but
    prints nothing (file not tracked).
    """
    print(f"{pathlib.Path(filename).relative_to(working_dir)=}")
    git_response = subprocess.run(
        [
            'git',
            f"--git-dir={os.path.join(working_dir, '.git')}",
            "log",
            "-p",
            "--",
            pathlib.Path(filename).relative_to(working_dir)
        ],
        stdout=subprocess.PIPE
    )
    # BUG FIX: in the original, the second line of this message was a
    # free-standing string expression on its own line (a no-op), not part
    # of the assignment.  Parenthesised concatenation joins both parts.
    filehistory = (
        f"File history not available: git log returned code {git_response.returncode}."
        "\nIf this is not a git repository, this is not a problem."
    )
    if git_response.returncode == 0:
        filehistory = git_response.stdout.decode('utf-8')
        if filehistory == "":
            filehistory = "This file has no history (it may not be part of the git repository)."
    return filehistory
2021-06-29 13:35:21 +00:00
def get_dirs(folder):
    """Return every directory under *folder*, recursively, as joined paths.

    IDIOM FIX: the original built the result with a list comprehension
    executed purely for its ``append`` side effect; ``extend`` expresses
    the same accumulation directly.
    """
    found = []
    for root, subdirs, _files in os.walk(folder):
        found.extend(os.path.join(root, subdir) for subdir in subdirs)
    return found
2021-08-15 18:34:29 +00:00
def update_required(src_filename, output_filename):
    """Return True when *output_filename* is missing or is older than
    *src_filename* (i.e. the output needs regenerating)."""
    if not os.path.exists(output_filename):
        return True
    return os.path.getmtime(src_filename) > os.path.getmtime(output_filename)
2021-06-29 13:35:21 +00:00
def get_args():
    """Parse and return the command line arguments."""
    import argparse
    ap = argparse.ArgumentParser()
    # Required input directory of notes.
    ap.add_argument('notes', type=pathlib.Path)
    ap.add_argument('-o', '--output-dir', type=pathlib.Path, default='web')
    # Template locations (installed under /opt/notes2web by default).
    ap.add_argument('-t', '--template', type=pathlib.Path,
                    default=pathlib.Path('/opt/notes2web/templates/article.html'))
    ap.add_argument('-H', '--template-text-head', type=pathlib.Path,
                    default=pathlib.Path('/opt/notes2web/templates/textarticlehead.html'))
    ap.add_argument('-f', '--template-text-foot', type=pathlib.Path,
                    default=pathlib.Path('/opt/notes2web/templates/textarticlefoot.html'))
    ap.add_argument('-i', '--template-index-head', type=pathlib.Path,
                    default=pathlib.Path('/opt/notes2web/templates/indexhead.html'))
    ap.add_argument('-I', '--template-index-foot', type=pathlib.Path,
                    default=pathlib.Path('/opt/notes2web/templates/indexfoot.html'))
    ap.add_argument('-s', '--stylesheet', type=pathlib.Path,
                    default=pathlib.Path('/opt/notes2web/styles.css'))
    ap.add_argument('--home_index', type=pathlib.Path,
                    default=pathlib.Path('/opt/notes2web/templates/home_index.html'))
    ap.add_argument('-e', '--extra-index-content', type=pathlib.Path,
                    default=pathlib.Path('/opt/notes2web/templates/extra_index_content.html'))
    # Filenames treated as a directory's index article.
    ap.add_argument('-n', '--index-article-names', action='append', default=['index.md'])
    ap.add_argument('-F', '--force', action="store_true",
                    help="Generate new output html even if source file was modified before output html")
    # Client-side search assets.
    ap.add_argument('--fuse', type=pathlib.Path, default=pathlib.Path('/opt/notes2web/fuse.js'))
    ap.add_argument('--searchjs', type=pathlib.Path, default=pathlib.Path('/opt/notes2web/search.js'))
    return ap.parse_args()
def main(args):
    """Entry point for the script: render the notes tree to static HTML.

    Converts markdown via pandoc, wraps plaintext files in an HTML
    template, copies everything else verbatim, then generates per-tag
    index pages, per-directory listings, and a search-enabled home page.
    Returns 0 on success.
    """
    # Load template fragments once up front.  These locals deliberately
    # shadow the module-level names of the same spelling.
    with open(args.template_text_foot) as fp:
        TEXT_ARTICLE_TEMPLATE_FOOT = fp.read()
    with open(args.template_text_head) as fp:
        TEXT_ARTICLE_TEMPLATE_HEAD = fp.read()
    with open(args.template_index_foot) as fp:
        INDEX_TEMPLATE_FOOT = fp.read()
    with open(args.template_index_head) as fp:
        INDEX_TEMPLATE_HEAD = fp.read()
    with open(args.extra_index_content) as fp:
        EXTRA_INDEX_CONTENT = fp.read()

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    if os.path.isfile(args.output_dir):
        # BUG FIX: the original string had no f-prefix and referenced an
        # undefined name, so it printed the literal text "{output_dir}".
        print(f"Output directory ({args.output_dir}) cannot be a file.")

    markdown_files, plaintext_files, other_files = get_files(args.notes)

    all_entries = []  # search-index entries for every generated page

    print(f"{args.index_article_names=}")
    dirs_with_index_article = []

    print(f"{markdown_files=}")

    tag_dict = {}  # tag -> list of {'path': ..., 'title': ...} entries

    for filename in markdown_files:
        print(f"{filename=}")
        print(f"{os.path.basename(filename)=}")
        if os.path.basename(filename) in args.index_article_names:
            # Index articles become their directory's index.html.
            # BUG FIX: the original re.sub() call was missing its third
            # (string) argument — `filename` had been joined into the
            # replacement path instead of being the substitution target —
            # raising TypeError for every index article.
            output_filename = os.path.join(
                os.path.dirname(re.sub(f"^{args.notes.name}", os.path.join(args.output_dir.name, 'notes'), filename)),
                'index.html'
            )
            dirs_with_index_article.append(os.path.dirname(re.sub(f"^{args.notes.name}", os.path.join(args.output_dir.name, 'notes'), filename)))
        else:
            output_filename = os.path.splitext(re.sub(f"^{args.notes.name}", os.path.join(args.output_dir.name, 'notes'), filename))[0] + '.html'

        # Record this article under each of its frontmatter tags.
        fm = frontmatter.load(filename)
        if isinstance(fm.get('tags'), list):
            for tag in fm.get('tags'):
                if tag in tag_dict.keys():
                    tag_dict[tag].append({
                        'path': str(pathlib.Path(*pathlib.Path(output_filename).parts[1:])),
                        'title': fm.get('title')
                    })
                else:
                    tag_dict[tag] = [{
                        'path': str(pathlib.Path(*pathlib.Path(output_filename).parts[1:])),
                        'title': fm.get('title')
                    }]

        # Collect ATX headers (# .. ######) for the search index.
        with open(filename) as fp:
            lines = fp.read().split('\n')
        header_lines = []
        for line in lines:
            # raw string: '\S' in a plain literal is an invalid escape
            if re.match(r'^#{1,6} \S', line):
                header_lines.append(" ".join(line.split(" ")[1:]))
        print(f"{output_filename=}")

        all_entries.append({
            'path': str(pathlib.Path(*pathlib.Path(output_filename).parts[1:])),
            'title': fm.get('title'),
            'tags': fm.get('tags'),
            'headers': header_lines
        })

        filehistory = git_filehistory(args.notes, filename)
        # Skip pandoc (slow) when the output is already up to date.
        if update_required(filename, output_filename) or args.force:
            html = pypandoc.convert_file(filename, 'html', extra_args=[f'--template={args.template}', '-V', f'filehistory={filehistory}'])
            os.makedirs(os.path.dirname(output_filename), exist_ok=True)
            with open(output_filename, 'w+') as fp:
                fp.write(html)

    print(f"{plaintext_files=}")
    for filename in plaintext_files:
        filehistory = git_filehistory(args.notes, filename)
        title = os.path.basename(re.sub(f"^{args.notes.name}", args.output_dir.name, filename))
        output_filename = re.sub(f"^{args.notes.name}", os.path.join(args.output_dir.name, 'notes'), filename) + '.html'

        os.makedirs(os.path.dirname(output_filename), exist_ok=True)
        # Wrap the raw text in the plaintext-article template.
        html = re.sub(r'\$title\$', title, TEXT_ARTICLE_TEMPLATE_HEAD)
        html = re.sub(r'\$h1title\$', title, html)
        html = re.sub(r'\$raw\$', os.path.basename(filename), html)
        # str.replace, not re.sub: file history contains arbitrary text
        # (backslashes would be treated as regex replacement escapes).
        html = html.replace('$filehistory$', filehistory)
        with open(filename) as fp:
            html += fp.read()
        html += TEXT_ARTICLE_TEMPLATE_FOOT
        with open(output_filename, 'w+') as fp:
            fp.write(html)

        all_entries.append({
            'path': str(pathlib.Path(*pathlib.Path(output_filename).parts[1:])),
            'title': title,
            'tags': [],
            'headers': []
        })

    print(f"{other_files=}")
    for filename in other_files:
        output_filename = re.sub(f"^{args.notes.name}", os.path.join(args.output_dir.name, 'notes'), filename)

        os.makedirs(os.path.dirname(output_filename), exist_ok=True)

        all_entries.append({
            'path': str(pathlib.Path(*pathlib.Path(output_filename).parts[1:])),
            'title': str(pathlib.Path(*pathlib.Path(output_filename).parts[1:])),
            'tags': [],
            'headers': []
        })

        shutil.copyfile(filename, output_filename)

    # One index page per tag, under <output>/.tags/.
    tagdir = os.path.join(args.output_dir, '.tags')
    os.makedirs(tagdir, exist_ok=True)
    for tag in tag_dict.keys():
        html = re.sub(r'\$title\$', f'{tag}', INDEX_TEMPLATE_HEAD)
        html = re.sub(r'\$h1title\$', f'tag: {tag}', html)
        html = re.sub(r'\$extra_content\$', '', html)
        for entry in tag_dict[tag]:
            html += f"<div class=\"article\"><a href=\"/{entry['path']}\">{entry['title']}</a></div>"
        html += INDEX_TEMPLATE_FOOT
        with open(os.path.join(tagdir, f'{tag}.html'), 'w+') as fp:
            fp.write(html)

    # Generate a listing for every directory that has no index article.
    dirs_to_index = [args.output_dir.name] + get_dirs(args.output_dir)
    print(f"{dirs_to_index=}")
    print(f"{os.path.commonpath(dirs_to_index)=}")

    for directory in dirs_to_index:
        if directory in dirs_with_index_article:
            continue

        paths = os.listdir(directory)
        print(f"{paths=}")
        indexentries = []
        for path in paths:
            print(f"{path=}")
            if path in ['index.html', '.git']:
                continue
            fullpath = os.path.join(directory, path)
            if os.path.splitext(path)[1] == '.html':
                # Use the page's <title> when it has one.
                with open(fullpath) as fp:
                    soup = bs(fp.read(), 'html.parser')
                try:
                    title = soup.find('title').get_text()
                except AttributeError:
                    title = path
            elif os.path.isdir(fullpath):
                title = path
            else:
                # don't add plaintext files to index, since they have a html wrapper
                continue

            if title.strip() == '':
                title = path
            indexentries.append({
                'title': title,
                'path': path,
                'isdirectory': os.path.isdir(fullpath)
            })
        # Stable sort twice: alphabetical by title, directories first.
        indexentries.sort(key=lambda entry: entry['title'])
        indexentries.sort(key=lambda entry: entry['isdirectory'], reverse=True)

        html = re.sub(r'\$title\$', directory, INDEX_TEMPLATE_HEAD)
        html = re.sub(r'\$h1title\$', directory, html)
        # Extra content only appears on the top-level (common-path) index.
        html = re.sub(r'\$extra_content\$',
            EXTRA_INDEX_CONTENT if directory == os.path.commonpath(dirs_to_index) else '',
            html
        )
        for entry in indexentries:
            html += f"<div class=\"article\"><a href=\"{entry['path']}\">{entry['title']}{'/' if entry['isdirectory'] else ''}</a></div>"
        html += INDEX_TEMPLATE_FOOT
        with open(os.path.join(directory, 'index.html'), 'w+') as fp:
            fp.write(html)

    # Static assets plus the search-enabled home page.
    shutil.copyfile(args.stylesheet, os.path.join(args.output_dir.name, 'styles.css'))
    shutil.copyfile(args.fuse, os.path.join(args.output_dir.name, 'fuse.js'))
    shutil.copyfile(args.searchjs, os.path.join(args.output_dir.name, 'search.js'))
    with open(os.path.join(args.output_dir.name, 'index.html'), 'w+') as fp:
        with open(args.home_index) as fp2:
            html = re.sub(r'\$title\$', args.output_dir.parts[0], fp2.read())
        html = re.sub(r'\$h1title\$', args.output_dir.parts[0], html)
        # BUG FIX: the original used re.sub here, which interprets the
        # backslash escapes inside the JSON dump (e.g. \n, \") as regex
        # replacement escapes and corrupts the search data.  Plain
        # str.replace matches how $filehistory$ is substituted above.
        html = html.replace('$data$', json.dumps(all_entries))
        fp.write(html)

    print(tag_dict)
    return 0
if __name__ == '__main__':
    # Run the generator with CLI arguments; exit with main()'s return
    # code, and treat Ctrl-C (KeyboardInterrupt) as a clean exit (0).
    try:
        sys.exit(main(get_args()))
    except KeyboardInterrupt:
        sys.exit(0)