Compare commits

...

30 Commits

Author SHA1 Message Date
0ae9d3ddd7 updated Makefile 2023-10-02 20:44:49 -07:00
84e647a390 (minor) re-added blue border to profile 2023-09-30 18:56:22 -07:00
9fea166b94 getting ready for merge to master 2023-09-30 18:34:56 -07:00
2ff8027d34 added try-except to sort function 2023-09-30 09:53:57 -07:00
4c16afebf9 readme conflict resolved 2023-09-30 04:37:07 -07:00
4e4f2f2a7a squash merge writepage-fixup + webring supports more than one page 2023-09-30 04:35:34 -07:00
9b2e9ed74d clearer variable names 2023-09-30 01:45:42 -07:00
db14ea6b3d replaced placeholder function names 2023-09-30 01:24:46 -07:00
eec7bc5fff fixed typo 2023-09-29 18:50:06 -07:00
0e80b8220a Merge branch 'master' into syndication 2023-09-29 18:49:04 -07:00
9fea7d8275 squash merge readme-update 2023-09-29 17:47:12 -07:00
likhy 57bd52e537 added option to skip fetching remote profile data 2023-09-28 20:06:52 -07:00
likhy 0eb134136f updated example settings.toml 2023-09-03 13:42:51 -07:00
likhy fd96708790 does not save file if it already exists 2023-09-02 11:17:20 -07:00
likhy 0bccc33730 added functionality to avoid hotlinking to avatars 2023-09-01 23:55:06 -07:00
likhy de78ad42b8 simplified export_profile() 2023-08-30 00:07:37 -07:00
likhy 9fbdf2a8a7 generalized output of fetch 2023-08-29 23:56:49 -07:00
likhy 26b7d47996 added proxy settings for upload script 2023-08-15 21:57:14 -07:00
7d474af899 added tor check to settings, webring disabled by default 2023-08-09 02:09:50 -07:00
3bf37386fc using pycurl; dont need urllib.request or pysocks 2023-08-09 02:00:43 -07:00
e4af5bbd6a added proxy and tor support 2023-06-03 21:56:15 -07:00
972165018c handled exception for dns resolution + webring errors dont halt page gen. 2023-05-19 18:03:59 -07:00
likhy 8a723c2bc8 sort webring users by most recently updated 2023-05-04 15:49:34 -07:00
likhy f2afe19bc9 fixed example settings file 2023-05-01 17:03:12 -07:00
likhy 10740cae92 fixed variable names 2023-04-30 11:11:44 -07:00
likhy d8c4cb47fa tested for http requests (404s) 2023-04-30 10:32:50 -07:00
likhy 9905a15ace updated name of config table from syndication to webring 2023-04-30 09:58:51 -07:00
likhy 427d4ff972 generates follow page mostly the way I want 2023-04-30 09:31:01 -07:00
d9e998750c added http requests for remote profiles 2023-04-16 10:25:20 -07:00
0b8b5ce498 json profile export 2023-04-15 15:09:54 -07:00
6 changed files with 284 additions and 55 deletions

README.md

@@ -7,9 +7,9 @@ Simple and stylish text-to-html microblog generator.
 python3 dateutil toml make curl pycurl urllib
-* `dateutil`, `toml` are Python modules.
+* `dateutil`, `toml`, `json`, `pycurl`, `hashlib` are Python modules.
 * `make` (optional), method for invoking the script.
-* `curl`, `pycurl` and `urllib` (optional), for uploading multiple files to neocities (`neouploader.py`).
+* `urllib` (optional), for uploading multiple files to neocities (`neouploader.py`).
 ### Usage

Makefile

@@ -1,22 +1,23 @@
-all: template.tpl content.txt timeline.css
+all: demo tpl css settings
-	python microblog.py ./template.tpl ./content.txt > result.html
+# for people who don't want to read the README
+# and want to hit `make` to see how things work.
-template.tpl:
+tpl:
 	cp ./example/default.tpl ./template.tpl
-timeline.css:
+css:
 	cp ./example/timeline.css ./timeline.css
-content.txt:
+demo:
 	cp ./example/demo.txt ./content.txt
+settings:
+	cp ./example/settings.toml ./settings.toml
 .PHONY: clean
 clean:
 	rm ./pages/*.html
 	rm ./tags/*/*.html
 	rm lastfullpage.txt
-	rmdir ./pages ./tags/* ./tags
+	rm ./webring/*.html
+	rmdir ./pages ./tags/* ./tags ./webring

example/settings.toml

@@ -1,4 +1,5 @@
-latestpage="result.html"
+# latestpage="result.html"
+latestpages=["meta.json", "result.html"]
 [page]
 postsperpage = 20
@@ -28,3 +29,37 @@ interact = "https://yoursite.tld/cgi?postid="
 [post.gallery]
 path_to_thumb="./thumbs"
 path_to_fullsize="./images"
+[webring]
+enabled=false
+file_output="meta.json"
+[webring.profile]
+username="Your name here"
+url="https://yourdomain.tld/microblog/"
+avatar="https://yourdomain.tld/microblog/images/avatar.jpg"
+short-bio= "Your self-description. Anything longer than 150 characters is truncated."
+[webring.following]
+list= ["https://likho.neocities.org/microblog/meta.json"]
+format="""
+<div class="fill">
+	<div class="postcell">
+		<img src="{__avatar__}" alt="Avatar" class="avatar">
+		<span class="wrapper">
+			<div class="handle">
+				<a href="{__url__}">{__handle__}</a>
+			</div>
+			<div class="last-updated">Last Update: {__lastupdated__}</div>
+			<span class="post-count">Posts: {__post_count__}</span>
+		</span>
+		<p class="short-bio">{__shortbio__}</p>
+	</div>
+</div>
+"""
+# internally link avatars - avoids hotlinks
+[webring.following.internal-avatars]
+enabled=false
+path_to_avatars="/microblog/avatars" # link rendered on page
+local_path_to_avatars="./avatars" # destination folder on pc
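For orientation, a minimal sketch of the `meta.json` document this configuration revolves around: `export_profile()` in `microblog.py` serializes the `[webring.profile]` table plus two computed fields, and `get_webring()` expects the same keys from every URL under `[webring.following]`. The values below are illustrative only, not taken from a real profile.

```python
import json, time

# Shape of the exported/consumed webring profile (illustrative values).
profile = {
    "username": "Your name here",
    "url": "https://yourdomain.tld/microblog/",
    "avatar": "https://yourdomain.tld/microblog/images/avatar.jpg",
    "short-bio": "Truncated to 150 characters when rendered.",
    "post-count": 42,                  # computed: number of parsed posts
    "last-updated": int(time.time()),  # computed: epoch time of newest post
}

with open("meta.json", "w") as f:
    json.dump(profile, f)
```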

example/timeline.css

@@ -31,28 +31,29 @@
 	color: green;
 	font-weight: bold;
 }
-.profile {
-	vertical-align: middle;
-	padding-left: 10px;
-	border:1px solid blue;
-}
 .avatar {
 	vertical-align: middle;
 	width: 50px;
 	height: 50px;
 }
-.handle{
+.column .profile {
+	vertical-align: middle;
+	padding-left: 10px;
+	padding:1%;
+	border:1px solid blue;
+}
+.column .profile .handle{
 	font-size: 1.1em;
 	font-weight: bold;
 }
-.email{
-	text-align:left;
+.column .profile .email{
 	font-size: 0.8em;
+	text-align:left;
 	text-decoration:none;
 }
-.bio {
-	vertical-align: middle;
+.column .profile .bio {
 	font-size: 0.9em;
+	vertical-align: middle;
 	margin: 1em
 }
 .gallery {
@@ -73,6 +74,28 @@
 	border: 1px solid #777;
 	filter: invert(100%);
 }
+.postcell .avatar {
+	margin-left:3%;
+	margin-top:2%;
+	height: 4em;
+	width:auto;
+	vertical-align:top;
+}
+.postcell .wrapper {
+	margin-top:2%;
+	display: inline-block;
+}
+.postcell .wrapper .last-updated,
+.postcell .wrapper .post-count {
+	font-size: 1em;
+	color:grey;
+}
+.postcell .short-bio{
+	padding-left: 3%;
+	padding-right: 2%;
+	font-style: italic;
+	word-wrap: break-word;
+}
 /* Clear floats after the columns */
 .row:after {
 	content: "";

microblog.py

@@ -1,6 +1,7 @@
 import sys, os, traceback
 import dateutil.parser
+from time import strftime, localtime
 # returns html-formatted string
 def make_buttons(btn_dict, msg_id):
@@ -52,6 +53,8 @@ def make_gallery(indices, w, conf=None):
     tag.append("</div>")
     return tag
+# apply basic HTML formatting - only div class here is gallery
+from html import escape
 def markup(message, config):
     def is_image(s, image_formats):
         l = s.rsplit('.', maxsplit=1)
@@ -116,8 +119,6 @@ def markup(message, config):
         gallery = []
     return sep.join(output), tags
-# apply basic HTML formatting - only div class here is gallery
-from html import escape
 class Post:
     def __init__(self, ts, msg):
         self.timestamp = ts.strip() # string
@@ -160,8 +161,7 @@ def parse_txt(filename):
             state = 0
     return posts
-def get_posts(filename, config):
-    posts = parse_txt(filename)
+def get_posts(posts, config):
     taginfos = []
     tagcloud = dict() # (tag, count)
     tagged = dict() # (tag, index of message)
@@ -277,11 +277,15 @@ if __name__ == "__main__":
             help="sorts content from oldest to newest"
                 " (this is a separate operation from page generation)", \
             action="store_true")
+        p.add_argument("--skip-fetch", \
+            help="skips fetching profile data from remote sources;"
+                " has no effect if webring is not enabled",\
+            action="store_true")
         args = p.parse_args()
         if args.sort:
            sort(args.content)
            exit()
-        return args.template, args.content
+        return args.template, args.content, args.skip_fetch
     # assume relative path
     def demote_css(template, css_list, level=1):
@@ -296,14 +300,12 @@ if __name__ == "__main__":
             tpl = tpl.replace(css, ("%s%s" % (prepend, css) ))
         return tpl
     # needs review / clean-up
-    # ideally relate 'lvl' with sub dir instead of hardcoding
     def writepage(template, timeline, tagcloud, config, subdir = None):
+        count = len(timeline)
         html = ""
         with open(template,'r') as f:
             html = f.read()
         try:
-            count = len(timeline)
             p = config["postsperpage"]
             pagectrl = Paginator(count, p, subdir)
         except ZeroDivisionError as e:
@@ -312,28 +314,33 @@ if __name__ == "__main__":
         except Exception as e:
             print("error: ",e, ("(number of posts = %i)" % count), file=sys.stderr)
             exit()
-        latest = timeline if count <= pagectrl.PPP else timeline[:pagectrl.PPP]
+        latest = timeline[:pagectrl.PPP]
+        link_from_top = "./tags/%s/latest.html"
+        link_from_subdir = "../tags/%s/latest.html"
+        link_from_tagdir = "../%s/latest.html"
+        cloud = ""
+        level = 1
+        is_tagline = False
         if subdir == None: # if top level page
-            lvl = 1
-            tcloud = make_tagcloud(tagcloud, "./tags/%s/latest.html")
-            print(pagectrl.singlepage(html, tcloud, latest))
-            tcloud = make_tagcloud(tagcloud, "../tags/%s/latest.html")
-            pagectrl.paginate(
-                demote_css(html, config["relative_css"], lvl),
-                tcloud, timeline
-            )
-        else: # if timelines per tag
+            cloud = make_tagcloud(tagcloud, link_from_top)
+            print(pagectrl.singlepage(html, cloud, latest))
+            cloud = make_tagcloud(tagcloud, link_from_subdir)
+        else:
+            if subdir != "webring": # timelines per tag
+                is_tagline = True
-            lvl = 2
-            newhtml = demote_css(html, config["relative_css"], lvl)
-            tcloud = make_tagcloud(tagcloud, "../%s/latest.html")
-            fn = "%s/latest.html" % subdir
-            with open(fn, 'w') as f:
-                pagectrl.written.append(fn)
-                f.write(
-                    pagectrl.singlepage(newhtml, tcloud, latest, p=".")
-                )
-            pagectrl.paginate(newhtml, tcloud, timeline, is_tagline)
+                level += 1
+                cloud = make_tagcloud(tagcloud, link_from_tagdir)
+            else:
+                cloud = make_tagcloud(tagcloud, link_from_subdir)
+            demoted = demote_css(html, config["relative_css"], level)
+            filename = "%s/latest.html" % subdir
+            with open(filename, 'w') as f: # landing page for tag
+                pagectrl.written.append(filename)
+                page = pagectrl.singlepage(demoted, cloud, latest, p=".")
+                f.write(page)
+        pagectrl.paginate(
+            demote_css(html, config["relative_css"], level),
+            cloud, timeline, is_tagline)
         return pagectrl.written
     import toml
@@ -347,8 +354,133 @@ if __name__ == "__main__":
             s = None
         return s
     import json
+    def export_profile(post_count, last_update, config):
+        if "profile" not in config:
+            return
+        p = config["profile"]
+        p["post-count"] = post_count
+        p["last-updated"] = last_update
+        if "username" not in p or "url" not in p:
+            print("Warning: no profile exported", file=sys.stderr)
+            return
+        with open(config["file_output"], 'w') as f:
+            print(json.dumps(p), file=f)
+    def get_webring(f_cfg):
+        import pycurl
+        from io import BytesIO
+        def get_proxy():
+            proxy = ""
+            if "http_proxy" in os.environ:
+                proxy = os.environ["http_proxy"]
+            elif "https_proxy" in os.environ:
+                proxy = os.environ["https_proxy"]
+            host = proxy[proxy.rfind('/') + 1: proxy.rfind(':')]
+            port = proxy[proxy.rfind(':') + 1:]
+            foo = proxy.find("socks://") >= 0 or proxy.find("socks5h://") >= 0
+            return host, int(port), foo
+        def fetch(url_list):
+            curl = pycurl.Curl()
+            if "http_proxy" in os.environ or "https_proxy" in os.environ:
+                hostname, port_no, is_socks = get_proxy()
+                curl.setopt(pycurl.PROXY, hostname)
+                curl.setopt(pycurl.PROXYPORT, port_no)
+                if is_socks:
+                    curl.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5_HOSTNAME)
+            datum = []
+            meta = []
+            for url in url_list:
+                buf = BytesIO()
+                curl.setopt(curl.WRITEDATA, buf)
+                curl.setopt(pycurl.URL, url)
+                try:
+                    curl.perform()
+                    datum.append(buf)
+                    meta.append(curl.getinfo(curl.CONTENT_TYPE))
+                except pycurl.error as e:
+                    print(e,": ", url, file=sys.stderr)
+                    # print(buf.getvalue(),"\n\t", curl.getinfo(curl.CONTENT_TYPE), file=sys.stderr)
+            curl.close()
+            assert(len(datum) == len(meta))
+            return datum, meta
+        def to_json(curl_outs):
+            json_objs = []
+            for buf in curl_outs:
+                try:
+                    json_objs.append(json.loads(buf.getvalue()))
+                except Exception as e:
+                    print(e)
+            return json_objs
+        def render(profiles, template):
+            rendered = []
+            SHORT_BIO_LIMIT = 150
+            for profile in profiles:
+                try:
+                    epoch_timestamp = profile["last-updated"]
+                    if not isinstance(epoch_timestamp, int):
+                        epoch_timestamp = 0
+                    post_count = profile["post-count"]
+                    if not isinstance(post_count, int):
+                        post_count = 0
+                    self_desc = profile["short-bio"]
+                    if len(profile["short-bio"]) >= SHORT_BIO_LIMIT:
+                        self_desc = profile["short-bio"][:SHORT_BIO_LIMIT] + "..."
+                    foo = template.format(
+                        __avatar__=escape(profile["avatar"]),
+                        __handle__=escape(profile["username"]),
+                        __url__=escape(profile["url"]),
+                        __post_count__ = post_count,
+                        __shortbio__= escape(self_desc),
+                        __lastupdated__= strftime(
+                            "%Y %b %d", localtime(epoch_timestamp)) )
+                    rendered.append(foo)
+                except KeyError as e:
+                    print("remote profile is missing key: ", e, file=sys.stderr)
+                    print("\tsource: ", profile, file=sys.stderr)
+            return rendered
+        def get_avatars(profiles, save_path, img_src):
+            import hashlib
+            imgs, info = fetch([p["avatar"] for p in profiles])
+            length = len(imgs)
+            if length != len(profiles) or length == 0:
+                print("error in retrieving images", file=sys.stderr)
+                return
+            for i in range(0,length):
+                content_type = info[i].split('/')
+                ext = content_type.pop()
+                if content_type.pop() != "image":
+                    print("\tskip: not an image", file=sys.stderr)
+                    continue
+                data = imgs[i].getvalue()
+                h = hashlib.sha1(data).hexdigest()
+                filename = "%s.%s" % (h, ext)
+                path = "%s/%s" % (save_path, filename)
+                profiles[i]["avatar"] = "%s/%s" % (img_src, filename)
+                if not os.path.isfile(path):
+                    with open(path, "wb") as f:
+                        f.write(data)
+        j, m = fetch(f_cfg["list"])
+        list_of_json_objs = to_json(j)
+        if list_of_json_objs == []:
+            print("no remote profiles loaded", file=sys.stderr)
+            return []
+        if f_cfg["internal-avatars"]["enabled"]:
+            a = f_cfg["internal-avatars"]["local_path_to_avatars"]
+            b = f_cfg["internal-avatars"]["path_to_avatars"]
+            get_avatars(list_of_json_objs, a, b)
+        try:
+            list_of_json_objs.sort(key=lambda e: e["last-updated"], reverse=True)
+        except KeyError: pass
+        return render(list_of_json_objs, f_cfg["format"])
     def main():
-        tpl, content = get_args()
+        tpl, content, skip_fetch = get_args()
         cfg = load_settings()
         if cfg == None:
             print("exit: no settings.toml found.", file=sys.stderr)
@@ -359,7 +491,8 @@ if __name__ == "__main__":
         if "page" not in cfg:
             print("exit: table 'page' absent in settings.toml", file=sys.stderr)
             return
-        tl, tc, tg = get_posts(content, cfg["post"])
+        p = parse_txt(content)
+        tl, tc, tg = get_posts(p, cfg["post"])
         if tl == []:
             return
         # main timeline
@@ -378,18 +511,36 @@ if __name__ == "__main__":
                 tpl, tagline, tc, cfg["page"], \
                 subdir="tags/%s" % key[1:] \
             )
+        if "webring" in cfg:
+            if cfg["webring"]["enabled"] == True:
+                export_profile(
+                    len(p), p[0].get_epoch_time(), cfg["webring"] )
+                if not skip_fetch:
+                    fellows = get_webring(cfg["webring"]["following"] )
+                    if fellows != []:
+                        updated += writepage(
+                            tpl, fellows, tc, cfg["page"], subdir="webring")
         with open("updatedfiles.txt", 'w') as f:
             for filename in updated:
                 print(filename, file=f) # sys.stderr)
-            if "latestpage" in cfg:
-                print(cfg["latestpage"], file=f)
+            if "latestpages" in cfg:
+                for page in cfg["latestpages"]:
+                    print(page, file=f)
     try:
         main()
     except KeyError as e:
         traceback.print_exc()
         print("\n\tA key may be missing from your settings file.", file=sys.stderr)
-    except dateutil.parser._parser.ParserError as e:
+    except dateutil.parser._parser.ParserError:
         traceback.print_exc()
         print("\n\tFailed to interpret a date from string..",
             "\n\tYour file of posts may be malformed.",
             "\n\tCheck if your file starts with a line break.", file=sys.stderr)
+    except toml.decoder.TomlDecodeError:
+        traceback.print_exc()
+        print("\n\tYour configuration file is malformed.")
+    except FileNotFoundError as e:
+        traceback.print_exc()
+        print("\n\tA potential cause is attempting to save a file to a folder that does not exist.")

neouploader.py

@ -1,11 +1,30 @@
import sys, subprocess, getpass, pycurl, urllib.parse
import sys, os, subprocess, getpass, pycurl, urllib.parse
if __name__ == "__main__":
def get_proxy():
proxy = ""
if "http_proxy" in os.environ:
proxy = os.environ["http_proxy"]
elif "https_proxy" in os.environ:
proxy = os.environ["https_proxy"]
host = proxy[proxy.rfind('/') + 1: proxy.rfind(':')]
port = proxy[proxy.rfind(':') + 1:]
foo = proxy.find("socks://") >= 0 or proxy.find("socks5h://")
return host, int(port), foo
def api_upload(endpoint, dest_fmt = "/microblog/%s"):
pages = []
with open("updatedfiles.txt") as f:
pages = f.readlines()
c = pycurl.Curl()
if "http_proxy" in os.environ or "https_proxy" in os.environ:
hostname, port_no, is_socks = get_proxy()
c.setopt(pycurl.PROXY, hostname)
c.setopt(pycurl.PROXYPORT, port_no)
if is_socks:
c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5_HOSTNAME)
c.setopt(c.URL, endpoint)
c.setopt(c.POST, 1)
for page in pages:
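The diff is truncated here, mid-loop. For context, a hedged sketch of how one per-file upload iteration can be expressed with pycurl's multipart form support, assuming a neocities-style `/api/upload` endpoint where the form field name is the destination path; `upload_one` and the field layout are assumptions, not code from this repository:

```python
import pycurl

def upload_one(c, endpoint, local_path, dest_path):
    # POST one file as a multipart form field whose name is the
    # destination path on the server (neocities-style upload API).
    c.setopt(c.URL, endpoint)
    c.setopt(c.HTTPPOST, [(dest_path, (c.FORM_FILE, local_path))])
    c.perform()

# e.g. upload_one(c, "https://neocities.org/api/upload",
#                 "result.html", "/microblog/result.html")
```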