commit mixups, again. I'll get this down at some point
This commit is contained in:
commit
0f3188c4cd
|
@ -0,0 +1,4 @@
|
|||
# Downloaded cat-of-the-day images (written by main.py)
img/
# Python bytecode cache
__pycache__/
# Generated RSS feed (rewritten in place by rss_update.py)
rss.xml
# Holds The Cat API key (read by main.py); must never be committed
.secrets
|
|
@ -0,0 +1,18 @@
|
|||
# onecatper.day
|
||||
|
||||
A small website where you can see one cat image, once per day. With the ability to (hopefully) subscribe via RSS as well!
|
||||
|
||||
TODO:
|
||||
- Add RSS support
|
||||
|
||||
## Things I used for this that you might wanna check out
|
||||
|
||||
[The Cat API](https://thecatapi.com/) - Kind of the heart of this whole website. You can play around with this without signing up for an API key. For this project I think the free tier with an API key is more than enough.
|
||||
|
||||
[fonts.coollabs.io](https://fonts.coollabs.io/) - A CDN replacement for Google Fonts, currently used for the Material Icons on the buttons. Found on the [32bit cafe resource page](https://discourse.32bit.cafe/t/resources-list-for-the-personal-web/49)
|
||||
|
||||
Color scheme for index.html: [AG-500Redux Palette](https://lospec.com/palette-list/ag-500redux)
|
||||
|
||||
## check it out
|
||||
|
||||
You can see the site currently here - [onecatper.day](https://onecatper.day)
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/sh
# Daily entry point: fetch a new cat image, then refresh the RSS feed.
# Abort if the site directory is missing -- both scripts use paths relative
# to public_html/, so running them anywhere else would corrupt nothing but
# also do nothing useful (and previously cd's failure was silently ignored).
cd public_html/ || exit 1
python3 main.py && python3 rss_update.py
|
|
@ -0,0 +1,30 @@
|
|||
<!DOCTYPE html>
<html lang="en">
<head>
    <title>One cat per day!</title>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <link rel="stylesheet" href="style.css">
    <!-- Self-hosted Google Fonts mirror; provides the Material Icons used on the nav buttons -->
    <link rel="stylesheet" href="https://api.fonts.coollabs.io/icon?family=Material+Icons">
</head>
<body>
    <header>
        <nav>
            <div class="greeting">
                <h1>One cat per day!</h1>
            </div>
            <div class="buttons">
                <!-- RSS isn't ready yet, so let's disable it since the site is public now -->
                <!-- <a href="rss.xml"><button><span class="material-icons">rss_feed</span><span>Subscribe via RSS</span></button></a> -->
                <a href="https://git.32bit.cafe/zepp/onecatper.day"><button><span class="material-icons">terminal</span><span>Check out the code</span></button></a>
            </div>
        </nav>
    </header>
    <main>
        <!-- a more descriptive alt text is not possible, I can only know ahead of time that one or more cats will be in the image -->
        <!-- NOTE: main.py rewrites the src attribute of this tag daily; keep the whole tag on a single line -->
        <img class="cat-img" src="img/cat2024-07-25-08:58:14.jpg" alt="A picture of a cat">
    </main>
    <footer>
        <p>Made by <a href="https://zepp.omg.lol">zepp</a> for the love of cats. </p>
    </footer>
</body>
</html>
|
|
@ -0,0 +1,111 @@
|
|||
# uses https://thecatapi.com/ to get an image of a cat
|
||||
|
||||
# todo on python side
|
||||
# learn about rss and how to use this script to update a feed
|
||||
|
||||
import requests
|
||||
import datetime
|
||||
|
||||
# Timestamp used in the saved image filename and the <img> tag, e.g.
# "2024-07-25-08:58:14".  Computed once at import time so every use in this
# run (and in rss_update.py, which imports it) sees one consistent value.
# strftime is used instead of slicing str(datetime.today()) because str()
# omits the ".ffffff" microseconds part when it is exactly zero, so a fixed
# [:-7] slice would then chop off the seconds instead of the microseconds.
DATETIME_STR = datetime.datetime.today().strftime("%Y-%m-%d-%H:%M:%S")
|
||||
|
||||
def get_image_url():
    """Ask The Cat API for candidate images and return the URL of one
    suitable cat picture, or "" if no candidate qualified.

    A candidate qualifies when its height is between 500 and 800 px (so the
    index page layout stays stable) and its URL contains ".jpg" (so an
    animated gif is never saved as the image of the day).
    """
    # You can ask for a cat without a key, but the free tier has some nice
    # benefits.  Note below that I ask for 10 cats, which is the maximum with
    # no api key; having an api key gets you access to more cats however,
    # which is purrfect for our needs.
    with open(".secrets", "r") as f:
        # the first line of .secrets is the API key
        api_key = f.readline().strip()

    # I ask for 10 cats because I don't want an animated gif as the cat image
    # of the day -- I'm specifically looking for a jpg, and may filter on
    # image dimensions as well in the future.
    # timeout added so a stalled API call cannot hang the daily run forever.
    response = requests.get(
        "https://api.thecatapi.com/v1/images/search?limit=10",
        headers={"x-api-key": api_key},
        timeout=30,
    )
    data = response.json()

    # Each entry is a dict with (at least) "url", "height" and "width" keys.
    # We only want images of a specific height so a new image fits the index
    # page every day; width is available in the response if ever needed.
    urls = [entry["url"] for entry in data if 500 <= entry["height"] <= 800]

    # Not a big deal if several happen to be jpgs, but this eliminates the
    # possibility of saving a gif to disk.  As in the original selection, the
    # last matching url wins; "" is returned when nothing matched.
    image_url = ""
    for url in urls:
        if ".jpg" in url:
            image_url = url
    return image_url
|
||||
|
||||
def update_html_img_tag(index):
    """Point the cat <img> tag in *index* at today's image file.

    Rewrites only the src attribute on the first line containing "<img",
    replacing it with img/cat<DATETIME_STR>.jpg; every other line is written
    back unchanged.
    """
    with open(index, "r") as f:
        lines = f.readlines()

    # Find the cat image line and swap out just its src token.  Only the
    # matched line is re-split and re-joined; the rest of the file is written
    # back verbatim.  (The previous version re-joined ALL lines with " ",
    # which inserted an extra leading space on every line each time this
    # script ran, and it also computed an unused timestamp and performed a
    # no-op split/join "cleanup".)
    for i, line in enumerate(lines):
        if "<img" in line:
            parts = line.split(" ")
            for j, part in enumerate(parts):
                if "src=" in part:
                    # quoted for valid HTML; matches the old unquoted token too
                    parts[j] = f'src="img/cat{DATETIME_STR}.jpg"'
            lines[i] = " ".join(parts)
            break

    with open(index, "w") as f:
        f.writelines(lines)
|
||||
|
||||
def main():
    """Fetch today's cat image, save it under img/, then point index.html at it."""
    # With the image-picking requirements in get_image_url it is possible we
    # end up with no url (image_url == "").  Retry a bounded number of times
    # instead of forever, so a bad API day cannot leave this script spinning
    # and hammering the API.
    image_url = get_image_url()
    attempts = 1
    while image_url == "" and attempts < 10:
        image_url = get_image_url()
        attempts += 1
    if image_url == "":
        raise RuntimeError("could not find a suitable cat image after 10 attempts")

    # Download the image and write it to disk FIRST, and only then update the
    # page, so index.html never points at an image that failed to download.
    # img/ should be a folder relative to main.py.
    image = requests.get(image_url, timeout=30)
    path = "img/"

    # A consistent name and placement of the image makes it easy for the
    # index page to update when this script is run.
    with open(f"{path}cat{DATETIME_STR}.jpg", "wb") as f:
        f.write(image.content)

    update_html_img_tag("index.html")


# Guarded so that `from main import DATETIME_STR` (done by rss_update.py)
# does not re-run the whole download as an import side effect.
if __name__ == "__main__":
    main()
|
|
@ -0,0 +1,90 @@
|
|||
from random import randint
|
||||
import datetime
|
||||
#might as well use the datetime we made in main for consistency
|
||||
from main import DATETIME_STR
|
||||
|
||||
# updates the guid given in two places with the RSS file, essential for letting readers know
|
||||
# a new picture has been published
|
||||
|
||||
# reference: https://cyber.harvard.edu/rss/rss.html#ltguidgtSubelementOfLtitemgt
|
||||
|
||||
# also updates pubDate and lastBuildDate
|
||||
# finally, updates the image name as well
|
||||
|
||||
def update_rss_feed(rss_file_name):
    """Rewrite rss.xml in place so feed readers pick up today's cat.

    Updates both <guid> values (randomizing the digits after '#'), the
    <lastBuildDate> and <pubDate> timestamps, and the enclosure image URL.
    The file is parsed line-by-line with substring matching rather than an
    XML parser, so it depends on the exact layout of the existing rss.xml.
    """
    rss_file = ""

    # Read the whole feed into memory; it is rewritten at the end.
    with open(rss_file_name, "r") as f:
        rss_file = f.read()
    element_list = rss_file.split('\n')

    # Collect the lines of interest by substring match, in file order.
    # NOTE(review): the items[N] indexing below assumes the matches occur in
    # a fixed order in rss.xml -- apparently items[1] is <lastBuildDate>,
    # items[2]/items[4] are the two <guid> lines, items[3] is <pubDate>, and
    # items[-1] is the <enclosure> line.  rss.xml itself is not visible here;
    # confirm against the actual file before changing this.
    items = []
    for el in element_list:
        if "link" in el:
            items.append(el)
        if "guid" in el:
            items.append(el)
        if "enclosure" in el:
            items.append(el)
        if "<lastBuildDate>" in el:
            items.append(el)
        if "<pubDate>" in el:
            items.append(el)

    # The guid lines are the ones carrying a "#fragment" on the site URL.
    guid_items = []
    for el in items:
        if "https://onecatper.day#" in el:
            guid_items.append(el)

    # Position of '#' on each guid line; the digits after it get randomized.
    hash_located_at = []
    for el in guid_items:
        hash_located_at.append(el.index('#'))

    # len("</guid>") -- stop randomizing before the closing tag starts.
    CLOSING_TAG_LENGTH = 7
    # Randomize every character between '#' and '</guid>' on the first guid
    # line so aggregators treat the entry as newly published.
    as_char_list = list(items[2])
    for i in range(hash_located_at[0] + 1, len(as_char_list) - CLOSING_TAG_LENGTH):
        as_char_list[i] = str(randint(0, 9))
    new_guid_link_one = "".join(as_char_list)

    # Same treatment for the second guid line.
    as_char_list = list(items[4])
    for i in range(hash_located_at[1] + 1, len(as_char_list) - CLOSING_TAG_LENGTH):
        as_char_list[i] = str(randint(0, 9))
    new_guid_link_two = "".join(as_char_list)

    # Re-scan the raw file for the old guid lines so they can be replaced
    # verbatim further down.
    split_file = rss_file.split("\n")
    old_guid_links = []

    for element in split_file:
        if "https://onecatper.day#" in element:
            old_guid_links.append(element)

    # preparing replacement strings for the old ones

    # cat image stored in img/; pull the quoted URL out of the enclosure line
    # (assumes the url="..." attribute is the last space-separated token)
    old_img_link = items[-1].split(" ")
    old_cat_image = old_img_link[-1].split("\"")
    old_cat_image_url = old_cat_image[1]
    new_cat_image_url = f"https://onecatper.day/img/cat{DATETIME_STR}.jpg"

    # the feed's stored dates/times must change for readers to see an update
    old_last_build_date = items[1]
    old_pub_date = items[3]
    # RSS readers expect an RFC-822-style date, so DATETIME_STR can't be
    # reused here; format a fresh timestamp instead.
    # NOTE(review): the "-0500" UTC offset is hard-coded; it will be wrong
    # half the year under US daylight saving -- confirm the intended zone.
    rss_date = datetime.datetime.strftime(datetime.datetime.now(), "%a, %d %b %Y %H:%M:%S -0500")
    # create the new date/time lines (leading tabs match the rss.xml layout)
    new_last_build_date = f"\t\t<lastBuildDate>{rss_date}</lastBuildDate>"
    new_pub_date = f"\t\t\t<pubDate>{rss_date}</pubDate>"

    # replace what is necessary to make the feed update
    rss_file = rss_file.replace(old_guid_links[0], new_guid_link_one)
    rss_file = rss_file.replace(old_guid_links[1], new_guid_link_two)
    rss_file = rss_file.replace(old_cat_image_url, new_cat_image_url)
    rss_file = rss_file.replace(old_last_build_date, new_last_build_date)
    rss_file = rss_file.replace(old_pub_date, new_pub_date)

    # write the changes to disk
    with open(rss_file_name, "w") as f:
        f.write(rss_file)
|
||||
|
||||
# Guarded so that importing this module does not rewrite rss.xml as a side
# effect; run.sh still triggers it via `python3 rss_update.py`.
if __name__ == "__main__":
    update_rss_feed("rss.xml")
|
|
@ -0,0 +1,73 @@
|
|||
/* onecatper.day stylesheet.  Colors come from the AG-500Redux palette
   linked in the README. */

/* Strip default browser margins/padding on every element. */
* {
    margin: 0;
    padding: 0;
}

/* Top nav: site title on one side, buttons on the other. */
nav {
    display: flex;
    justify-content: space-around;
}

header {
    padding: 0.5em;
}

/* Shared banner coloring for the top and bottom of the page. */
header, footer {
    background-color: #4c8ed2;
    color: #403f46;
}

header div h1 {
    padding: 0.1em;
    font-size: 3em;
    display: inline;
}

.greeting {
    width: 50%;
    display: flex;
    flex-direction: column;
}

/* Nav button group, vertically centered against the title. */
.buttons {
    display: flex;
    align-items: center;
    justify-content: flex-end;
}

button {
    font-size: 1.25em;
    margin: 5px;
    padding: 10px;
    border-radius: 10px;
    color: #403f46;
    background-color: #1cb9b5;
    box-shadow: 10px 5px 5px #67cbc4;
}

/* Aligns the Material Icons glyph with the button label text. */
button span {
    vertical-align: middle;
    padding-left: 5px;
    padding-right: 5px;
}

/* Centers the daily cat image horizontally. */
main {
    background-color: #9a6db6;
    display: flex;
    justify-content: center ;
}

/* Fixed height keeps the layout stable as the image changes each day;
   main.py only selects source images between 500 and 800 px tall. */
.cat-img {
    height: 500px;
    margin: 15px;
    width: auto;
    border-radius: 10px;
    border: 5px solid #6d5a8c;
}

footer p {
    margin: auto;
    padding: 1em;
    text-align: center;
    font-size: 1.5em;
}
|
Loading…
Reference in New Issue