aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xdocker-compose.yml140
-rw-r--r--edaweb/app.py516
-rw-r--r--edaweb/cache.py2
-rw-r--r--edaweb/database.py502
-rw-r--r--edaweb/services.py818
-rw-r--r--edaweb/static/cow.txt58
-rw-r--r--edaweb/static/index.md68
-rw-r--r--ffs.md108
-rw-r--r--homelab-wiki/Dockerfile38
-rw-r--r--homelab-wiki/LocalSettings.php366
-rwxr-xr-xscripts/export.sh24
-rwxr-xr-xscripts/update.sh12
12 files changed, 1304 insertions, 1348 deletions
diff --git a/docker-compose.yml b/docker-compose.yml
index 01bf365..b1d984f 100755
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,70 +1,70 @@
-services:
- edaweb:
- build:
- context: .
- dockerfile: Dockerfile
- image: reg.reaweb.uk/edaweb
- volumes:
- - /tmp/:/media/ISOs/
- - ./edaweb/static/:/app/edaweb/static/
- - ./edaweb.conf:/app/edaweb.conf
- - edaweb-tmp:/tmp/
- ports:
- - "6969:6969"
- external_links:
- - mariadb:mysql
- - transmission_1:transmission
- mac_address: 44:c8:09:a7:d0:93
- networks:
- db-network:
- rr-net:
- ipv4_address: "192.168.23.13"
- restart: unless-stopped
-
- edaweb_cron:
- build:
- context: .
- dockerfile: Dockerfile_cron
- image: reg.reaweb.uk/edaweb_cron
- volumes:
- - /tmp/:/media/ISOs/
- - ./edaweb/static/:/app/edaweb/static/
- - ./edaweb.conf:/app/edaweb.conf
- - ./edaweb-docker.pem:/keys/docker-key.pem
- - edaweb-tmp:/tmp/
- networks:
- - db-network
- external_links:
- - mariadb:mysql
- restart: unless-stopped
-
- homelab-wiki:
- build:
- context: ./homelab-wiki
- dockerfile: Dockerfile
- image: reg.reaweb.uk/edawiki2
- volumes:
- - ./homelab-wiki/images:/var/www/html/images
- - ./homelab-wiki/LocalSettings.php:/var/www/html/LocalSettings.php
- env_file:
- - ./homelab-wiki/wiki.env
- ports:
- - "6970:80"
- networks:
- - db-network
- external_links:
- - mariadb:mysql
- restart: unless-stopped
-
-volumes:
- edaweb-tmp:
-
-networks:
- db-network:
- external: true
- name: mariadb
-
- rr-net:
- external: true
- name: rr-net
-
+services:
+ edaweb:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ image: reg.reaweb.uk/edaweb
+ volumes:
+ - /tmp/:/media/ISOs/
+ - ./edaweb/static/:/app/edaweb/static/
+ - ./edaweb.conf:/app/edaweb.conf
+ - edaweb-tmp:/tmp/
+ ports:
+ - "6969:6969"
+ external_links:
+ - mariadb:mysql
+ - transmission_1:transmission
+ mac_address: 44:c8:09:a7:d0:93
+ networks:
+ db-network:
+ rr-net:
+ ipv4_address: "192.168.23.13"
+ restart: unless-stopped
+
+ edaweb_cron:
+ build:
+ context: .
+ dockerfile: Dockerfile_cron
+ image: reg.reaweb.uk/edaweb_cron
+ volumes:
+ - /tmp/:/media/ISOs/
+ - ./edaweb/static/:/app/edaweb/static/
+ - ./edaweb.conf:/app/edaweb.conf
+ - ./edaweb-docker.pem:/keys/docker-key.pem
+ - edaweb-tmp:/tmp/
+ networks:
+ - db-network
+ external_links:
+ - mariadb:mysql
+ restart: unless-stopped
+
+ homelab-wiki:
+ build:
+ context: ./homelab-wiki
+ dockerfile: Dockerfile
+ image: reg.reaweb.uk/edawiki2
+ volumes:
+ - ./homelab-wiki/images:/var/www/html/images
+ - ./homelab-wiki/LocalSettings.php:/var/www/html/LocalSettings.php
+ env_file:
+ - ./homelab-wiki/wiki.env
+ ports:
+ - "6970:80"
+ networks:
+ - db-network
+ external_links:
+ - mariadb:mysql
+ restart: unless-stopped
+
+volumes:
+ edaweb-tmp:
+
+networks:
+ db-network:
+ external: true
+ name: mariadb
+
+ rr-net:
+ external: true
+ name: rr-net
+
diff --git a/edaweb/app.py b/edaweb/app.py
index 36fe8bb..923b792 100644
--- a/edaweb/app.py
+++ b/edaweb/app.py
@@ -1,256 +1,260 @@
-from paste.translogger import TransLogger
-from waitress import serve
-from PIL import Image
-import configparser
-import transmission_rpc
-import downloader
-import datetime
-import database
-import services
-import urllib
-import random
-import parser
-import flask
-import sys
-import os
-import io
-
-app = flask.Flask(__name__)
-CONFIG = configparser.ConfigParser(interpolation = None)
-CONFIG.read(os.path.join(os.path.dirname(__file__), "edaweb.conf"))
-shown_images = set()
-shown_sidebar_images = set()
-
-def get_pfp_img(db:database.Database):
- global shown_images
- dbimg = db.get_pfp_images()
- if len(shown_images) == len(dbimg):
- shown_images = set()
- folder = set(dbimg).difference(shown_images)
- choice = random.choice(list(folder))
- shown_images.add(choice)
- return choice
-
-def get_sidebar_img(db:database.Database):
- global shown_sidebar_images
- dbimg = db.get_sidebar_images()
- if len(shown_sidebar_images) == len(dbimg):
- shown_sidebar_images = set()
- folder = set(dbimg).difference(shown_sidebar_images)
- choice = random.choice(list(folder))
- shown_sidebar_images.add(choice)
- return choice
-
-def get_correct_article_headers(db:database.Database, title):
- db_headers = list(db.get_header_articles())
- if title in [i[0] for i in db_headers]:
- out = []
- for i in db_headers:
- if i[0] != title:
- out.append(i)
- return out + [("index", "/~")]
- else:
- return db_headers + [("index", "/~")]
-
-def get_template_items(title, db):
- return {
- "links": db.get_header_links(),
- "image": get_pfp_img(db),
- "title": title,
- "articles": get_correct_article_headers(db, title)
- }
-
-@app.route("/")
-@app.route("/~")
-def index():
- with database.Database() as db:
- with open(os.path.join(os.path.dirname(__file__), "static", "index.md"), "r") as f:
- return flask.render_template(
- "index.html.j2",
- **get_template_items("eden's site :3", db),
- days_till_ffs = datetime.datetime(2025, 11, 8) - datetime.datetime.now(),
- markdown = parser.parse_text(f.read())[0],
- featured_thoughts = db.get_featured_thoughts(),
- commits = services.get_recent_commits(db)[:15],
- sidebar_img = get_sidebar_img(db)
- )
-
-@app.route("/robots.txt")
-def robots():
- return flask.send_from_directory("static", "robots.txt")
-
-@app.route("/services")
-def serve_services():
- with database.Database() as db:
- return flask.render_template(
- "services.html.j2",
- **get_template_items("services", db),
- docker = services.get_all_docker_containers(),
- trans = services.get_torrent_stats(),
- pihole = services.get_pihole_stats()
- )
-
-@app.route("/discord")
-def discord():
- with database.Database() as db:
- return flask.render_template(
- "discord.html.j2",
- **get_template_items("discord", db),
- discord = CONFIG["discord"]["username"]
- )
-
-@app.route("/thought")
-def get_thought():
- thought_id = flask.request.args.get("id", type=int)
- with database.Database() as db:
- try:
- category_name, title, dt, parsed, headers, redirect = parser.get_thought_from_id(db, thought_id)
- # print(headers)
- except TypeError:
- flask.abort(404)
- return
-
- if redirect is not None:
- return flask.redirect(redirect, code = 301)
-
- return flask.render_template(
- "thought.html.j2",
- **get_template_items(title, db),
- md_html = parsed,
- contents_html = headers,
- dt = "published: " + str(dt),
- category = category_name,
- othercategories = db.get_categories_not(category_name),
- related = db.get_similar_thoughts(category_name, thought_id)
- )
-
-@app.route("/thoughts")
-def get_thoughts():
- with database.Database() as db:
- all_ = db.get_all_thoughts()
- tree = {}
- for id_, title, dt, category in all_:
- if category not in tree.keys():
- tree[category] = [(id_, title, dt)]
- else:
- tree[category].append((id_, title, str(dt)))
-
- return flask.render_template(
- "thoughts.html.j2",
- **get_template_items("thoughts", db),
- tree = tree
- )
-
-@app.route("/img/<filename>")
-def serve_image(filename):
- imdirpath = os.path.join(os.path.dirname(__file__), "static", "images")
- if filename in os.listdir(imdirpath):
- try:
- w = int(flask.request.args['w'])
- h = int(flask.request.args['h'])
- except (KeyError, ValueError):
- return flask.send_from_directory(imdirpath, filename)
-
- img = Image.open(os.path.join(imdirpath, filename))
- img.thumbnail((w, h), Image.LANCZOS)
- io_ = io.BytesIO()
- img.save(io_, format='JPEG')
- return flask.Response(io_.getvalue(), mimetype='image/jpeg')
- else:
- flask.abort(404)
-
-@app.route("/nhdl")
-def serve_nhdl():
- with database.Database() as db:
- try:
- nhentai_id = int(flask.request.args["id"])
- with downloader.CompressedImages(nhentai_id) as zippath:
- # return app.send_static_file(os.path.split(zippath)[-1])
- return flask.redirect("/zip/%s" % os.path.split(zippath)[-1])
-
- except (KeyError, ValueError):
- return flask.render_template(
- "nhdl.html.j2",
- **get_template_items("Hentai Downloader", db)
- )
-
-@app.route("/isocd")
-def serve_iso_form():
- with database.Database() as db:
- return flask.render_template(
- "isocd.html.j2",
- **get_template_items("Get a GNU/Linux install CD", db),
- iso_options = db.get_iso_cd_options()
- )
-
-@app.route("/zip/<zipfile>")
-def serve_zip(zipfile):
- return flask.send_from_directory(os.path.join(os.path.dirname(__file__), "static", "zips"), zipfile)
-
-@app.route("/pdf/<pdfname>")
-def serve_pdf(pdfname):
- return flask.send_from_directory(os.path.join(os.path.dirname(__file__), "static", "papers"), pdfname)
-
-@app.route("/nhdlredirect", methods = ["POST"])
-def redirect_nhdl():
- req = dict(flask.request.form)
- try:
- return flask.redirect("/nhdl?id=%i" % int(req["number_input"]))
- except (TypeError, ValueError, KeyError):
- flask.abort(400)
-
-@app.route("/getisocd", methods = ["POST"])
-def get_iso_cd():
- req = dict(flask.request.form)
- print(req)
- with database.Database() as db:
- id_ = db.append_cd_orders(**req)
- print(id_)
- return flask.render_template(
- "isocd_confirmation.html.j2",
- **get_template_items("Get a GNU/Linux install CD", db),
- email = req["email"],
- req = req,
- id_ = id_
- )
-
-#@app.route("/random")
-#def serve_random():
-# try:
-# tags = flask.request.args['tags'].split(" ")
-# except KeyError:
-# flask.abort(400)
-#
-# sbi = services.get_random_image(tags)
-# req = urllib.request.Request(sbi.imurl)
-# mediaContent = urllib.request.urlopen(req).read()
-# with open(os.path.join(os.path.dirname(__file__), "static", "images", "random.jpg"), "wb") as f:
-# f.write(mediaContent)
-#
-# with database.Database() as db:
-# return flask.render_template(
-# "random.html.j2",
-# **get_template_items("random image", db),
-# sbi = sbi,
-# localimg = "/img/random.jpg?seed=%i" % random.randint(0, 9999)
-# )
-
-@app.route("/questions")
-def serve_questions():
- with database.Database() as db:
- return flask.render_template(
- "questions.html.j2",
- **get_template_items("questions and answers", db),
- qnas_link = CONFIG.get("qnas", "url"),
- qnas = db.get_qnas()
- )
-
-if __name__ == "__main__":
- try:
- if sys.argv[1] == "--production":
- #serve(TransLogger(app), host='127.0.0.1', port = 6969)
- serve(TransLogger(app), host='0.0.0.0', port = 6969, threads = 32)
- else:
- app.run(host = "0.0.0.0", port = 5001, debug = True)
- except IndexError:
- app.run(host = "0.0.0.0", port = 5001, debug = True)
+from paste.translogger import TransLogger
+from waitress import serve
+from PIL import Image
+import configparser
+import transmission_rpc
+import downloader
+import datetime
+import database
+import services
+import urllib
+import random
+import parser
+import flask
+import sys
+import os
+import io
+
+app = flask.Flask(__name__)
+CONFIG = configparser.ConfigParser(interpolation = None)
+CONFIG.read(os.path.join(os.path.dirname(__file__), "edaweb.conf"))
+shown_images = set()
+shown_sidebar_images = set()
+
+def get_pfp_img(db:database.Database):
+ global shown_images
+ dbimg = db.get_pfp_images()
+ if len(shown_images) == len(dbimg):
+ shown_images = set()
+ folder = set(dbimg).difference(shown_images)
+ choice = random.choice(list(folder))
+ shown_images.add(choice)
+ return choice
+
+def get_sidebar_img(db:database.Database):
+ global shown_sidebar_images
+ dbimg = db.get_sidebar_images()
+ if len(shown_sidebar_images) == len(dbimg):
+ shown_sidebar_images = set()
+ folder = set(dbimg).difference(shown_sidebar_images)
+ choice = random.choice(list(folder))
+ shown_sidebar_images.add(choice)
+ return choice
+
+def get_correct_article_headers(db:database.Database, title):
+ db_headers = list(db.get_header_articles())
+ if title in [i[0] for i in db_headers]:
+ out = []
+ for i in db_headers:
+ if i[0] != title:
+ out.append(i)
+ return out + [("index", "/~")]
+ else:
+ return db_headers + [("index", "/~")]
+
+def get_template_items(title, db):
+ return {
+ "links": db.get_header_links(),
+ "image": get_pfp_img(db),
+ "title": title,
+ "articles": get_correct_article_headers(db, title)
+ }
+
+@app.route("/")
+@app.route("/~")
+def index():
+ with database.Database() as db:
+ with open(os.path.join(os.path.dirname(__file__), "static", "index.md"), "r") as f:
+ return flask.render_template(
+ "index.html.j2",
+ **get_template_items("eden's site :3", db),
+ days_till_ffs = datetime.datetime(2025, 11, 8) - datetime.datetime.now(),
+ markdown = parser.parse_text(f.read())[0],
+ featured_thoughts = db.get_featured_thoughts(),
+ commits = services.get_recent_commits(db)[:15],
+ sidebar_img = get_sidebar_img(db)
+ )
+
+@app.route("/robots.txt")
+def robots():
+ return flask.send_from_directory("static", "robots.txt")
+
+@app.route("/cow.txt")
+def moo():
+ return flask.send_from_directory("static", "cow.txt")
+
+@app.route("/services")
+def serve_services():
+ with database.Database() as db:
+ return flask.render_template(
+ "services.html.j2",
+ **get_template_items("services", db),
+ docker = services.get_all_docker_containers(),
+ trans = services.get_torrent_stats(),
+ pihole = services.get_pihole_stats()
+ )
+
+@app.route("/discord")
+def discord():
+ with database.Database() as db:
+ return flask.render_template(
+ "discord.html.j2",
+ **get_template_items("discord", db),
+ discord = CONFIG["discord"]["username"]
+ )
+
+@app.route("/thought")
+def get_thought():
+ thought_id = flask.request.args.get("id", type=int)
+ with database.Database() as db:
+ try:
+ category_name, title, dt, parsed, headers, redirect = parser.get_thought_from_id(db, thought_id)
+ # print(headers)
+ except TypeError:
+ flask.abort(404)
+ return
+
+ if redirect is not None:
+ return flask.redirect(redirect, code = 301)
+
+ return flask.render_template(
+ "thought.html.j2",
+ **get_template_items(title, db),
+ md_html = parsed,
+ contents_html = headers,
+ dt = "published: " + str(dt),
+ category = category_name,
+ othercategories = db.get_categories_not(category_name),
+ related = db.get_similar_thoughts(category_name, thought_id)
+ )
+
+@app.route("/thoughts")
+def get_thoughts():
+ with database.Database() as db:
+ all_ = db.get_all_thoughts()
+ tree = {}
+ for id_, title, dt, category in all_:
+ if category not in tree.keys():
+ tree[category] = [(id_, title, dt)]
+ else:
+ tree[category].append((id_, title, str(dt)))
+
+ return flask.render_template(
+ "thoughts.html.j2",
+ **get_template_items("thoughts", db),
+ tree = tree
+ )
+
+@app.route("/img/<filename>")
+def serve_image(filename):
+ imdirpath = os.path.join(os.path.dirname(__file__), "static", "images")
+ if filename in os.listdir(imdirpath):
+ try:
+ w = int(flask.request.args['w'])
+ h = int(flask.request.args['h'])
+ except (KeyError, ValueError):
+ return flask.send_from_directory(imdirpath, filename)
+
+ img = Image.open(os.path.join(imdirpath, filename))
+ img.thumbnail((w, h), Image.LANCZOS)
+ io_ = io.BytesIO()
+ img.save(io_, format='JPEG')
+ return flask.Response(io_.getvalue(), mimetype='image/jpeg')
+ else:
+ flask.abort(404)
+
+@app.route("/nhdl")
+def serve_nhdl():
+ with database.Database() as db:
+ try:
+ nhentai_id = int(flask.request.args["id"])
+ with downloader.CompressedImages(nhentai_id) as zippath:
+ # return app.send_static_file(os.path.split(zippath)[-1])
+ return flask.redirect("/zip/%s" % os.path.split(zippath)[-1])
+
+ except (KeyError, ValueError):
+ return flask.render_template(
+ "nhdl.html.j2",
+ **get_template_items("Hentai Downloader", db)
+ )
+
+@app.route("/isocd")
+def serve_iso_form():
+ with database.Database() as db:
+ return flask.render_template(
+ "isocd.html.j2",
+ **get_template_items("Get a GNU/Linux install CD", db),
+ iso_options = db.get_iso_cd_options()
+ )
+
+@app.route("/zip/<zipfile>")
+def serve_zip(zipfile):
+ return flask.send_from_directory(os.path.join(os.path.dirname(__file__), "static", "zips"), zipfile)
+
+@app.route("/pdf/<pdfname>")
+def serve_pdf(pdfname):
+ return flask.send_from_directory(os.path.join(os.path.dirname(__file__), "static", "papers"), pdfname)
+
+@app.route("/nhdlredirect", methods = ["POST"])
+def redirect_nhdl():
+ req = dict(flask.request.form)
+ try:
+ return flask.redirect("/nhdl?id=%i" % int(req["number_input"]))
+ except (TypeError, ValueError, KeyError):
+ flask.abort(400)
+
+@app.route("/getisocd", methods = ["POST"])
+def get_iso_cd():
+ req = dict(flask.request.form)
+ print(req)
+ with database.Database() as db:
+ id_ = db.append_cd_orders(**req)
+ print(id_)
+ return flask.render_template(
+ "isocd_confirmation.html.j2",
+ **get_template_items("Get a GNU/Linux install CD", db),
+ email = req["email"],
+ req = req,
+ id_ = id_
+ )
+
+#@app.route("/random")
+#def serve_random():
+# try:
+# tags = flask.request.args['tags'].split(" ")
+# except KeyError:
+# flask.abort(400)
+#
+# sbi = services.get_random_image(tags)
+# req = urllib.request.Request(sbi.imurl)
+# mediaContent = urllib.request.urlopen(req).read()
+# with open(os.path.join(os.path.dirname(__file__), "static", "images", "random.jpg"), "wb") as f:
+# f.write(mediaContent)
+#
+# with database.Database() as db:
+# return flask.render_template(
+# "random.html.j2",
+# **get_template_items("random image", db),
+# sbi = sbi,
+# localimg = "/img/random.jpg?seed=%i" % random.randint(0, 9999)
+# )
+
+@app.route("/questions")
+def serve_questions():
+ with database.Database() as db:
+ return flask.render_template(
+ "questions.html.j2",
+ **get_template_items("questions and answers", db),
+ qnas_link = CONFIG.get("qnas", "url"),
+ qnas = db.get_qnas()
+ )
+
+if __name__ == "__main__":
+ try:
+ if sys.argv[1] == "--production":
+ #serve(TransLogger(app), host='127.0.0.1', port = 6969)
+ serve(TransLogger(app), host='0.0.0.0', port = 6969, threads = 32)
+ else:
+ app.run(host = "0.0.0.0", port = 5001, debug = True)
+ except IndexError:
+ app.run(host = "0.0.0.0", port = 5001, debug = True)
diff --git a/edaweb/cache.py b/edaweb/cache.py
index 5b66e43..8694666 100644
--- a/edaweb/cache.py
+++ b/edaweb/cache.py
@@ -1,5 +1,6 @@
import database
import services
+import json
def update_cache():
print("Updating cache...")
@@ -7,6 +8,7 @@ def update_cache():
db.update_commit_cache(services.request_recent_commits(since = db.get_last_commit_time()))
print("Finished adding github commits...")
db.append_qnas(services.scrape_whispa(db.config.get("qnas", "url"), since = db.get_oldest_qna()))
+ # print(json.dumps(services.scrape_whispa(db.config.get("qnas", "url"), since = db.get_oldest_qna()), indent = 4))
print("Finished parsing Q&As...")
print("Started getting docker information with SSH...")
diff --git a/edaweb/database.py b/edaweb/database.py
index c6553a6..8bb9a60 100644
--- a/edaweb/database.py
+++ b/edaweb/database.py
@@ -1,251 +1,251 @@
-from urllib.parse import urlparse
-from dataclasses import dataclass
-from lxml import html
-import configparser
-import threading
-import services
-import operator
-import datetime
-import requests
-import twython
-import pymysql
-import random
-import os
-import re
-
-@dataclass
-class Database:
- safeLogin:bool = True #automatically login with the user in the config file, who is read only
- user:str = None #otherwise, login with the given username and passwd
- passwd:str = None
-
- def __enter__(self):
- config_path = os.path.join(os.path.dirname(__file__), "..", "edaweb.conf")
- if not os.path.exists(config_path):
- raise FileNotFoundError("Could not find edaweb.conf config file")
- self.config = configparser.ConfigParser(interpolation = None)
- self.config.read(config_path)
-
- if self.safeLogin:
- self.__connection = pymysql.connect(
- **self.config["mysql"],
- charset = "utf8mb4"
- )
- else:
- self.__connection = pymysql.connect(
- user = self.user,
- passwd = self.passwd,
- host = self.config["mysql"]["host"],
- db = self.config["mysql"]["db"],
- charset = "utf8mb4"
- )
- return self
-
- def __exit__(self, type, value, traceback):
- self.__connection.commit()
- self.__connection.close()
-
- def get_header_links(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT name, link FROM headerLinks WHERE display = true ORDER BY name;")
- return cursor.fetchall()
-
- def get_image(self, imageName):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT alt, url FROM images WHERE imageName = %s;", (imageName, ))
- return cursor.fetchone()
-
- def get_pfp_images(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT alt, url FROM images WHERE pfp_img = 1;")
- return cursor.fetchall()
-
- def get_sidebar_images(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT alt, url FROM images WHERE sidebar_image = 1;")
- return cursor.fetchall()
-
- def get_header_articles(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT articleName, link FROM headerArticles WHERE display = true;")
- return cursor.fetchall()
-
- def get_all_categories(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT category_name FROM categories;")
- return [i[0] for i in cursor.fetchall()]
-
- def add_category(self, category):
- if not category in self.get_all_categories():
- with self.__connection.cursor() as cursor:
- cursor.execute("INSERT INTO categories (category_name) VALUES (%s);", (category, ))
-
- self.__connection.commit()
- return True
-
- return False
-
- def add_thought(self, category, title, markdown):
- with self.__connection.cursor() as cursor:
- cursor.execute("""
- INSERT INTO thoughts (category_id, title, markdown_text)
- VALUES ((
- SELECT category_id FROM categories WHERE category_name = %s
- ), %s, %s);""", (category, title, markdown))
- self.__connection.commit()
-
- def get_thought(self, id_):
- with self.__connection.cursor() as cursor:
- cursor.execute("""
- SELECT categories.category_name, thoughts.title, thoughts.dt, thoughts.markdown_text, thoughts.redirect
- FROM thoughts INNER JOIN categories
- ON thoughts.category_id = categories.category_id
- WHERE thought_id = %s;""", (id_, ))
- return cursor.fetchone()
-
- def get_similar_thoughts(self, category, id_):
- with self.__connection.cursor() as cursor:
- cursor.execute("""
- SELECT thought_id, title, dt, category_name FROM thoughts
- INNER JOIN categories ON thoughts.category_id = categories.category_id
- WHERE category_name = %s AND thought_id != %s;""",
- (category, id_))
- return cursor.fetchall()
-
- def get_featured_thoughts(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT thought_id, title FROM thoughts WHERE featured = 1;")
- return cursor.fetchall()
-
- def update_thought_markdown(self, id_, markdown):
- with self.__connection.cursor() as cursor:
- cursor.execute("UPDATE thoughts SET markdown_text = %s WHERE thought_id = %s;", (markdown, id_))
- self.__connection.commit()
-
- def get_categories_not(self, category_name):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT category_name FROM categories WHERE category_name != %s;", (category_name, ))
- return [i[0] for i in cursor.fetchall()]
-
- def get_all_thoughts(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("""
- SELECT thought_id, title, dt, category_name FROM thoughts
- INNER JOIN categories ON thoughts.category_id = categories.category_id;
- """)
- return cursor.fetchall()
-
- def get_cached_tweets(self, numToGet = None):
- with self.__connection.cursor() as cursor:
- sql = "SELECT tweet, tweet_id, account FROM diary WHERE account = %s ORDER BY tweeted_at DESC"
- args = (self.config.get("twitter", "main_account"), )
- if numToGet is not None:
- sql += " LIMIT %s;"
- args = (self.config.get("twitter", "main_account"), numToGet)
- else:
- sql += ";"
- cursor.execute(sql, args)
-
- return [(i[0], "https://%s/%s/status/%d" % (self.config.get("nitter", "outsideurl"), i[2], i[1])) for i in cursor.fetchall()]
-
- def get_cached_commits(self, since = None, recurse = True):
- with self.__connection.cursor() as cursor:
- if since is not None:
- cursor.execute("SELECT DISTINCT message, url, commitTime, additions, deletions, total FROM commitCache WHERE commitTime > %s ORDER BY commitTime DESC;", (since, ))
- else:
- cursor.execute("SELECT DISTINCT message, url, commitTime, additions, deletions, total FROM commitCache ORDER BY commitTime DESC;")
- # i think i might have spent too long doing functional programming
- return [{
- "repo": urlparse(i[1]).path.split("/")[2],
- "github_repo_url": "https://github.com" + "/".join(urlparse(i[1]).path.split("/")[:3]),
- "git_repo_url": "https://%s/%s.git/about" % (self.config.get("github", "personal_domain"), urlparse(i[1]).path.split("/")[2]),
- "message": i[0],
- "github_commit_url": i[1],
- "git_commit_url": "https://%s/%s.git/commit/?id=%s" % (
- self.config.get("github", "personal_domain"),
- urlparse(i[1]).path.split("/")[2],
- urlparse(i[1]).path.split("/")[-1]
- ),
- "datetime": i[2].timestamp(),
- "stats": {
- "additions": i[3],
- "deletions": i[4],
- "total": i[5]
- }
- } for i in cursor.fetchall()]
-
- def update_commit_cache(self, requested):
- with self.__connection.cursor() as cursor:
- for commit in requested:
- cursor.execute("SELECT DISTINCT url FROM commitCache;")
- urls = [i[0] for i in cursor.fetchall()]
-
- if commit["url"] not in urls:
- cursor.execute("""
- INSERT INTO commitCache (message, url, commitTime, additions, deletions, total)
- VALUES (%s, %s, %s, %s, %s, %s)""",
- (commit["message"], commit["url"], commit["datetime"], commit["stats"]["additions"], commit["stats"]["deletions"], commit["stats"]["total"])
- )
- self.__connection.commit()
-
- def get_last_commit_time(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT MAX(commitTime) FROM commitCache;")
- return cursor.fetchone()[0]
-
- def get_my_twitter(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT link FROM headerLinks WHERE name = 'twitter';")
- return cursor.fetchone()[0]
-
- def get_my_diary_twitter(self):
- return self.config.get("twitter", "diary_account")
-
- def get_iso_cd_options(self):
- iso_dir = self.config.get("cds", "location")
- return [
- i
- for i in os.listdir(iso_dir)
- if os.path.splitext(i)[-1].lower() in [".iso"]
- and os.path.getsize(os.path.join(iso_dir, i)) < self.config.getint("cds", "maxsize")
- ]
-
- def append_cd_orders(self, iso, email, house, street, city, county, postcode, name):
- with self.__connection.cursor() as cursor:
- cursor.execute("""
- INSERT INTO cd_orders_2 (iso, email, house, street, city, county, postcode, name)
- VALUES (%s, %s, %s, %s, %s, %s, %s, %s);
- """, (iso, email, house, street, city, county, postcode, name))
- id_ = cursor.lastrowid
- self.__connection.commit()
- return id_
-
- def append_qnas(self, qnas):
- with self.__connection.cursor() as cursor:
- for qna in qnas:
- cursor.execute("SELECT curiouscat_id FROM qnas WHERE curiouscat_id = %s;", (qna["id"], ))
- if cursor.fetchone() is None:
-
- cursor.execute("INSERT INTO `qnas` VALUES (%s, %s, %s, %s, %s, %s);", (
- qna["id"], qna["link"], qna["datetime"], qna["question"], qna["answer"], qna["host"]
- ))
- print("Appended question with timestamp %s" % qna["datetime"].isoformat())
-
- else:
- print("Skipped question with timestamp %s" % qna["datetime"].isoformat())
- self.__connection.commit()
-
- def get_oldest_qna(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT MAX(timestamp) FROM qnas;")
- return cursor.fetchone()[0]
-
- def get_qnas(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT * FROM qnas;")
- return sorted(cursor.fetchall(), key = operator.itemgetter(2), reverse = True)
-
-
-
-
-
+from urllib.parse import urlparse
+from dataclasses import dataclass
+from lxml import html
+import configparser
+import threading
+import services
+import operator
+import datetime
+import requests
+import twython
+import pymysql
+import random
+import os
+import re
+
+@dataclass
+class Database:
+ safeLogin:bool = True #automatically login with the user in the config file, who is read only
+ user:str = None #otherwise, login with the given username and passwd
+ passwd:str = None
+
+ def __enter__(self):
+ config_path = os.path.join(os.path.dirname(__file__), "..", "edaweb.conf")
+ if not os.path.exists(config_path):
+ raise FileNotFoundError("Could not find edaweb.conf config file")
+ self.config = configparser.ConfigParser(interpolation = None)
+ self.config.read(config_path)
+
+ if self.safeLogin:
+ self.__connection = pymysql.connect(
+ **self.config["mysql"],
+ charset = "utf8mb4"
+ )
+ else:
+ self.__connection = pymysql.connect(
+ user = self.user,
+ passwd = self.passwd,
+ host = self.config["mysql"]["host"],
+ db = self.config["mysql"]["db"],
+ charset = "utf8mb4"
+ )
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.__connection.commit()
+ self.__connection.close()
+
+ def get_header_links(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT name, link FROM headerLinks WHERE display = true ORDER BY name;")
+ return cursor.fetchall()
+
+ def get_image(self, imageName):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT alt, url FROM images WHERE imageName = %s;", (imageName, ))
+ return cursor.fetchone()
+
+ def get_pfp_images(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT alt, url FROM images WHERE pfp_img = 1;")
+ return cursor.fetchall()
+
+ def get_sidebar_images(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT alt, url FROM images WHERE sidebar_image = 1;")
+ return cursor.fetchall()
+
+ def get_header_articles(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT articleName, link FROM headerArticles WHERE display = true;")
+ return cursor.fetchall()
+
+ def get_all_categories(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT category_name FROM categories;")
+ return [i[0] for i in cursor.fetchall()]
+
+ def add_category(self, category):
+ if not category in self.get_all_categories():
+ with self.__connection.cursor() as cursor:
+ cursor.execute("INSERT INTO categories (category_name) VALUES (%s);", (category, ))
+
+ self.__connection.commit()
+ return True
+
+ return False
+
+ def add_thought(self, category, title, markdown):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("""
+ INSERT INTO thoughts (category_id, title, markdown_text)
+ VALUES ((
+ SELECT category_id FROM categories WHERE category_name = %s
+ ), %s, %s);""", (category, title, markdown))
+ self.__connection.commit()
+
+ def get_thought(self, id_):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("""
+ SELECT categories.category_name, thoughts.title, thoughts.dt, thoughts.markdown_text, thoughts.redirect
+ FROM thoughts INNER JOIN categories
+ ON thoughts.category_id = categories.category_id
+ WHERE thought_id = %s;""", (id_, ))
+ return cursor.fetchone()
+
+ def get_similar_thoughts(self, category, id_):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("""
+ SELECT thought_id, title, dt, category_name FROM thoughts
+ INNER JOIN categories ON thoughts.category_id = categories.category_id
+ WHERE category_name = %s AND thought_id != %s;""",
+ (category, id_))
+ return cursor.fetchall()
+
+ def get_featured_thoughts(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT thought_id, title FROM thoughts WHERE featured = 1;")
+ return cursor.fetchall()
+
+ def update_thought_markdown(self, id_, markdown):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("UPDATE thoughts SET markdown_text = %s WHERE thought_id = %s;", (markdown, id_))
+ self.__connection.commit()
+
+ def get_categories_not(self, category_name):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT category_name FROM categories WHERE category_name != %s;", (category_name, ))
+ return [i[0] for i in cursor.fetchall()]
+
+ def get_all_thoughts(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("""
+ SELECT thought_id, title, dt, category_name FROM thoughts
+ INNER JOIN categories ON thoughts.category_id = categories.category_id;
+ """)
+ return cursor.fetchall()
+
+ def get_cached_tweets(self, numToGet = None):
+ with self.__connection.cursor() as cursor:
+ sql = "SELECT tweet, tweet_id, account FROM diary WHERE account = %s ORDER BY tweeted_at DESC"
+ args = (self.config.get("twitter", "main_account"), )
+ if numToGet is not None:
+ sql += " LIMIT %s;"
+ args = (self.config.get("twitter", "main_account"), numToGet)
+ else:
+ sql += ";"
+ cursor.execute(sql, args)
+
+ return [(i[0], "https://%s/%s/status/%d" % (self.config.get("nitter", "outsideurl"), i[2], i[1])) for i in cursor.fetchall()]
+
+ def get_cached_commits(self, since = None, recurse = True):
+ with self.__connection.cursor() as cursor:
+ if since is not None:
+ cursor.execute("SELECT DISTINCT message, url, commitTime, additions, deletions, total FROM commitCache WHERE commitTime > %s ORDER BY commitTime DESC;", (since, ))
+ else:
+ cursor.execute("SELECT DISTINCT message, url, commitTime, additions, deletions, total FROM commitCache ORDER BY commitTime DESC;")
+ # i think i might have spent too long doing functional programming
+ return [{
+ "repo": urlparse(i[1]).path.split("/")[2],
+ "github_repo_url": "https://github.com" + "/".join(urlparse(i[1]).path.split("/")[:3]),
+ "git_repo_url": "https://%s/%s.git/about" % (self.config.get("github", "personal_domain"), urlparse(i[1]).path.split("/")[2]),
+ "message": i[0],
+ "github_commit_url": i[1],
+ "git_commit_url": "https://%s/%s.git/commit/?id=%s" % (
+ self.config.get("github", "personal_domain"),
+ urlparse(i[1]).path.split("/")[2],
+ urlparse(i[1]).path.split("/")[-1]
+ ),
+ "datetime": i[2].timestamp(),
+ "stats": {
+ "additions": i[3],
+ "deletions": i[4],
+ "total": i[5]
+ }
+ } for i in cursor.fetchall()]
+
+ def update_commit_cache(self, requested):
+ with self.__connection.cursor() as cursor:
+ for commit in requested:
+ cursor.execute("SELECT DISTINCT url FROM commitCache;")
+ urls = [i[0] for i in cursor.fetchall()]
+
+ if commit["url"] not in urls:
+ cursor.execute("""
+ INSERT INTO commitCache (message, url, commitTime, additions, deletions, total)
+ VALUES (%s, %s, %s, %s, %s, %s)""",
+ (commit["message"], commit["url"], commit["datetime"], commit["stats"]["additions"], commit["stats"]["deletions"], commit["stats"]["total"])
+ )
+ self.__connection.commit()
+
+ def get_last_commit_time(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT MAX(commitTime) FROM commitCache;")
+ return cursor.fetchone()[0]
+
+ def get_my_twitter(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT link FROM headerLinks WHERE name = 'twitter';")
+ return cursor.fetchone()[0]
+
+ def get_my_diary_twitter(self):
+ return self.config.get("twitter", "diary_account")
+
+ def get_iso_cd_options(self):
+ iso_dir = self.config.get("cds", "location")
+ return [
+ i
+ for i in os.listdir(iso_dir)
+ if os.path.splitext(i)[-1].lower() in [".iso"]
+ and os.path.getsize(os.path.join(iso_dir, i)) < self.config.getint("cds", "maxsize")
+ ]
+
+ def append_cd_orders(self, iso, email, house, street, city, county, postcode, name):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("""
+ INSERT INTO cd_orders_2 (iso, email, house, street, city, county, postcode, name)
+ VALUES (%s, %s, %s, %s, %s, %s, %s, %s);
+ """, (iso, email, house, street, city, county, postcode, name))
+ id_ = cursor.lastrowid
+ self.__connection.commit()
+ return id_
+
+ def append_qnas(self, qnas):
+ with self.__connection.cursor() as cursor:
+ for qna in qnas:
+ cursor.execute("SELECT curiouscat_id FROM qnas WHERE curiouscat_id = %s;", (qna["id"], ))
+ if cursor.fetchone() is None:
+
+ cursor.execute("INSERT INTO `qnas` VALUES (%s, %s, %s, %s, %s, %s);", (
+ qna["id"], qna["link"], qna["datetime"], qna["question"], qna["answer"], qna["host"]
+ ))
+ print("Appended question with timestamp %s" % qna["datetime"].isoformat())
+
+ else:
+ print("Skipped question with timestamp %s" % qna["datetime"].isoformat())
+ self.__connection.commit()
+
+ def get_oldest_qna(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT MAX(timestamp) FROM qnas;")
+ return cursor.fetchone()[0]
+
+ def get_qnas(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT * FROM qnas;")
+ return sorted(cursor.fetchall(), key = operator.itemgetter(2), reverse = True)
+
+
+
+
+
diff --git a/edaweb/services.py b/edaweb/services.py
index 50eed45..11f21fc 100644
--- a/edaweb/services.py
+++ b/edaweb/services.py
@@ -1,409 +1,409 @@
-from dataclasses import dataclass
-from io import StringIO
-from lxml import html, etree
-from github import Github
-import multiprocessing
-import paramiko.client
-from APiHole import PiHole
-import transmission_rpc
-import configparser
-import math as maths
-import requests
-import datetime
-import urllib
-import docker
-import random
-import subprocess
-import fabric
-import pickle
-import queue
-import json
-import time
-import os
-
-theLastId = 0
-config_path = os.path.join(os.path.dirname(__file__), "..", "edaweb.conf")
-if not os.path.exists(config_path):
- raise FileNotFoundError("Could not find edaweb.conf config file")
-CONFIG = configparser.ConfigParser(interpolation = None)
-CONFIG.read(config_path)
-
-def humanbytes(B):
- 'Return the given bytes as a human friendly KB, MB, GB, or TB string'
- B = float(B)
- KB = float(1024)
- MB = float(KB ** 2) # 1,048,576
- GB = float(KB ** 3) # 1,073,741,824
- TB = float(KB ** 4) # 1,099,511,627,776
-
- if B < KB:
- return '{0} {1}'.format(B,'Bytes' if 0 == B > 1 else 'Byte')
- elif KB <= B < MB:
- return '{0:.2f} KB'.format(B/KB)
- elif MB <= B < GB:
- return '{0:.2f} MB'.format(B/MB)
- elif GB <= B < TB:
- return '{0:.2f} GB'.format(B/GB)
- elif TB <= B:
- return '{0:.2f} TB'.format(B/TB)
-
-@dataclass
-class SafebooruImage:
- id_: int
- url: str
- searchTags: list
- tags: list
- source: str
- imurl: str
-
- def remove_tag(self, tag):
- return list(set(self.searchTags).difference(set([tag])))
-
-@dataclass
-class DownloadedImage:
- imurl: str
-
- def __enter__(self):
- self.filename = os.path.join("static", "images", "random.jpg")
-
- req = urllib.request.Request(self.imurl, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_5_8) AppleWebKit/534.50.2 (KHTML, like Gecko) Version/5.0.6 Safari/533.22.3'})
- mediaContent = urllib.request.urlopen(req).read()
- with open(self.filename, "wb") as f:
- f.write(mediaContent)
- return self.filename
-
- def __exit__(self, type, value, traceback):
- os.remove(self.filename)
-
-def get_num_pages(tags):
- pages_url = "https://safebooru.org/index.php?page=post&s=list&tags=%s" % "+".join(tags)
- tree = html.fromstring(requests.get(pages_url).content)
- try:
- finalpage_element = tree.xpath("/html/body/div[6]/div/div[2]/div[2]/div/a[12]")[0]
- except IndexError:
- return 1
- else:
- return int(int(urllib.parse.parse_qs(finalpage_element.get("href"))["pid"][0]) / (5*8))
-
-def get_id_from_url(url):
- return int(urllib.parse.parse_qs(url)["id"][0])
-
-def get_random_image(tags):
- global theLastId
- searchPage = random.randint(1, get_num_pages(tags)) * 5 * 8
- url = "https://safebooru.org/index.php?page=post&s=list&tags=%s&pid=%i" % ("+".join(tags), searchPage)
- tree = html.fromstring(requests.get(url).content)
-
- imageElements = [e for e in tree.xpath("/html/body/div[6]/div/div[2]/div[1]")[0].iter(tag = "a")]
- try:
- element = random.choice(imageElements)
- except IndexError:
- # raise ConnectionError("Couldn't find any images")
- return get_random_image(tags)
-
- url = "https://safebooru.org/" + element.get("href")
- if get_id_from_url(url) == theLastId:
- return get_random_image(tags)
- theLastId = get_id_from_url(url)
-
- try:
- sbi = SafebooruImage(
- id_ = get_id_from_url(url),
- url = url,
- tags = element.find("img").get("alt").split(),
- searchTags = tags,
- source = fix_source_url(get_source(url)),
- imurl = get_imurl(url)
- )
- except (ConnectionError, KeyError) as e:
- print("[ERROR]", e)
- return get_random_image(tags)
-
- if link_deleted(sbi.url):
- print("Retried since the source was deleted...")
- return get_random_image(tags)
-
- return sbi
-
-def get_source(url):
- tree = html.fromstring(requests.get(url).content)
- for element in tree.xpath('//*[@id="stats"]')[0].iter("li"):
- if element.text.startswith("Source: h"):
- return element.text[8:]
- elif element.text.startswith("Source:"):
- for child in element.iter():
- if child.get("href") is not None:
- return child.get("href")
- raise ConnectionError("Couldn't find source image for id %i" % get_id_from_url(url))
-
-def fix_source_url(url):
- parsed = urllib.parse.urlparse(url)
- if parsed.netloc == "www.pixiv.net":
- return "https://www.pixiv.net/en/artworks/" + urllib.parse.parse_qs(parsed.query)["illust_id"][0]
- elif parsed.netloc in ["bishie.booru.org", "www.secchan.net"]:
- return ConnectionError("Couldn't get source")
- elif "pximg.net" in parsed.netloc or "pixiv.net" in parsed.netloc:
- return "https://www.pixiv.net/en/artworks/" + parsed.path.split("/")[-1][:8]
- elif parsed.netloc == "twitter.com":
- return url.replace("twitter.com", "nitter.eda.gay")
- return url
-
-def get_imurl(url):
- tree = html.fromstring(requests.get(url).content)
- return tree.xpath('//*[@id="image"]')[0].get("src")
-
-def link_deleted(url):
- text = requests.get(url).text
- return text[text.find("<title>") + 7 : text.find("</title>")] in ["Error | nitter", "イラストコミュニケーションサービス[pixiv]"]
-
-def request_recent_commits(since = datetime.datetime.now() - datetime.timedelta(days=7)):
- g = Github(CONFIG.get("github", "access_code"))
- out = []
- for repo in g.get_user().get_repos():
- # print(repo.name, list(repo.get_branches()))
- try:
- for commit in repo.get_commits(since = since):
- out.append({
- "repo": repo.name,
- "message": commit.commit.message,
- "url": commit.html_url,
- "datetime": commit.commit.author.date,
- "stats": {
- "additions": commit.stats.additions,
- "deletions": commit.stats.deletions,
- "total": commit.stats.total
- }
- })
- except Exception as e:
- print(repo, e)
-
- return sorted(out, key = lambda a: a["datetime"], reverse = True)
-
-def scrape_nitter(username, get_until:int):
- new_tweets = []
- nitter_url = CONFIG.get("nitter", "internalurl")
- nitter_port = CONFIG.getint("nitter", "internalport")
- scrape_new_pages = True
- url = "http://%s:%d/%s" % (nitter_url, nitter_port, username)
-
- while scrape_new_pages:
- tree = html.fromstring(requests.get(url).content)
- for i, tweetUrlElement in enumerate(tree.xpath('//*[@class="tweet-link"]'), 0):
- if i > 0 and tweetUrlElement.get("href").split("/")[1] == username:
- id_ = int(urllib.parse.urlparse(tweetUrlElement.get("href")).path.split("/")[-1])
- tweet_link = "http://%s:%d%s" % (nitter_url, nitter_port, tweetUrlElement.get("href"))
-
- if id_ == get_until:
- scrape_new_pages = False
- break
-
- try:
- dt, replying_to, text, images = parse_tweet(tweet_link)
- new_tweets.append((id_, dt, replying_to, text, username, images))
- print(dt, "'%s'" % text)
- except IndexError:
- print("Couldn't get any more tweets")
- scrape_new_pages = False
- break
- except ConnectionError:
- print("Rate limited, try again later")
- return []
-
-
- try:
- cursor = tree.xpath('//*[@class="show-more"]/a')[0].get("href")
- except IndexError:
- # no more elements
- break
- url = "http://%s:%d/%s%s" % (nitter_url, nitter_port, username, cursor)
-
- return new_tweets
-
-def parse_tweet(tweet_url):
- # print(tweet_url)
- tree = html.fromstring(requests.get(tweet_url).content)
- # with open("2images.html", "r") as f:
- # tree = html.fromstring(f.read())
-
- rate_limited_elem = tree.xpath("/html/body/div/div/div/span")
- if rate_limited_elem != []:
- if rate_limited_elem[0].text == "Instance has been rate limited.":
- raise ConnectionError("Instance has been rate limited.")
-
- main_tweet_elem = tree.xpath('//*[@class="main-tweet"]')[0]
-
- dt_str = main_tweet_elem.xpath('//*[@class="tweet-published"]')[0].text
- dt = datetime.datetime.strptime(dt_str.replace("Â", ""), "%b %d, %Y · %I:%M %p UTC")
- text = tree.xpath('//*[@class="main-tweet"]/div/div/div[2]')[0].text_content()
- if text == "":
- text = "[Image only]"
- replying_to_elems = tree.xpath('//*[@class="before-tweet thread-line"]/div/a')
- if replying_to_elems != []:
- replying_to = int(urllib.parse.urlparse(replying_to_elems[-1].get("href")).path.split("/")[-1])
- else:
- replying_to = None
-
- images = []
- images_elems = tree.xpath('//*[@class="main-tweet"]/div/div/div[3]/div/div/a/img')
- for image_elem in images_elems:
- images.append("https://" + CONFIG.get("nitter", "outsideurl") + urllib.parse.urlparse(image_elem.get("src")).path)
-
- return dt, replying_to, text, images
-
-def scrape_whispa(whispa_url, since = None):
- def query_answer(answer_url, max_retries = 10):
- for i in range(max_retries):
- try:
- return requests.get(answer_url)
- except requests.exceptions.ConnectionError:
- s = 5.05 * (i + 1)
- print("Connection timed out, retrying in %.2fs" % s)
- time.sleep(s)
- continue
-
- # add a bit of wiggle room in case i don't answer the questions in order (i often do this)
- if since is None:
- stop_at = datetime.datetime(year = 2001, month = 8, day = 12)
- else:
- stop_at = since - datetime.timedelta(days = 14)
- print("The newest Q&A timestamp in the database was %s, we will stop looking at %s." % (since.astimezone().isoformat(), stop_at.astimezone().isoformat()))
-
- html_ = requests.get(whispa_url).content.decode()
- # with open("temp.html", "w") as f:
- # f.write(html_)
-
- tree = html.fromstring(html_)
- qnas = []
- # we're not doing proper HTML scraping here really... since the site uses client side rendering
- # we rather parse the JS scripts to get the JSON payload of useful information... sadly this looks horrible
- for i, script in enumerate(tree.xpath("/html/body/script"), 0):
- js = str(script.text)
- if "receivedFeedback" in js:
- # my god this is horrible...
- parsed_json = json.loads(json.loads(js[19:-1])[1][2:])[0][3]["loadedUser"]["receivedFeedback"]
- # print(json.dumps(parsed_json, indent = 4))
- # with open("whispas_%i.json" % i, "w") as f:
- # json.dump(parsed_json, f, indent = 4)
- for j in parsed_json:
- if j["_count"]["childFeedback"] < 0:
- continue
-
- answer_url = "https://apiv4.whispa.sh/feedbacks/%s/children/public" % j["id"]
- req = query_answer(answer_url)
- try:
- firstanswer = req.json()["data"][0]
- except IndexError:
- continue
- dt = datetime.datetime.fromisoformat(firstanswer["createdAt"][:-1])
-
- qna = {
- # "id": int(j["id"], base = 16),
- "id": int(dt.timestamp()),
- "link": answer_url,
- "datetime": dt,
- "question": j["content"],
- "answer": firstanswer["content"],
- "host": "whispa.sh"
- }
- print(qna)
- qnas.append(qna)
- time.sleep(2.03)
- if dt <= stop_at:
- print("Met the threshold for oldest Q&A, so stopped looking.")
- break
- return qnas
-
-def get_docker_containers(host, ssh_key_path):
- result = fabric.Connection(
- host = host,
- user = "root",
- connect_kwargs = {
- "key_filename": ssh_key_path,
- "look_for_keys": False
- }
- ).run('docker ps -a -s --format "table {{.Names}};{{.Status}};{{.Image}}"', hide = True)
- return [line.split(";") for line in result.stdout.split("\n")[1:-1]]
-
-def cache_all_docker_containers(ssh_key_path):
- containers = {}
- containers["containers"] = {}
- for host, name in CONFIG["docker_hosts"].items():
- print(host)
- containers["containers"][(host, name)] = get_docker_containers(host, ssh_key_path)
-
- containers["cachetime"] = "Docker information last updated at %s" % str(datetime.datetime.now())
- with open("/tmp/docker-cache.json", "wb") as f:
- pickle.dump(containers, f)
-
-def get_all_docker_containers():
- if not os.path.exists("/tmp/docker-cache.json"):
- return {"containers": {}, "cachetime": "No cached docker information"}
-
- with open("/tmp/docker-cache.json", "rb") as f:
- return pickle.load(f)
-
-def timeout(func):
- # cant get this to work with queue.Queue() for some reason?
- # this works but Manager() uses an extra thread than Queue()
- manager = multiprocessing.Manager()
- returnVan = manager.list()
- # ti = time.time()
-
- def runFunc(q, func):
- q.append(func())
-
- def beginTimeout():
- t = multiprocessing.Process(target = runFunc, args = (returnVan, func))
- t.start()
-
- t.join(timeout = CONFIG["servicetimeout"].getint("seconds"))
-
- # print("Request took:", time.time() - ti)
- try:
- return returnVan[0]
- except IndexError:
- if t.is_alive():
- t.terminate()
-
- return beginTimeout
-
-@timeout
-def get_torrent_stats():
- client = transmission_rpc.client.Client(
- host = CONFIG.get("transmission", "host")
- )
- s = vars(client.session_stats())["fields"]
- return {
- "Active torrents:": s["activeTorrentCount"],
- "Downloaded:": humanbytes(s["cumulative-stats"]["downloadedBytes"]),
- "Uploaded:": humanbytes(s["cumulative-stats"]["uploadedBytes"]),
- "Active time:": str(datetime.timedelta(seconds = s["cumulative-stats"]["secondsActive"])),
- "Files added:": s["cumulative-stats"]["filesAdded"],
- "Current upload speed:": humanbytes(s["uploadSpeed"]) + "s/S",
- "Current download speed:": humanbytes(s["downloadSpeed"]) + "s/S"
- }
-
-@timeout
-def get_pihole_stats():
- return PiHole.GetSummary(CONFIG.get("pihole", "url"), CONFIG.get("pihole", "key"), True)
-
-def get_recent_commits(db, max_per_repo = 3):
- cache = db.get_cached_commits()
- num_per_repo = {}
- out = []
- for commit in cache:
- if commit["repo"] not in num_per_repo.keys():
- num_per_repo[commit["repo"]] = 0
-
- num_per_repo[commit["repo"]] += 1
- if num_per_repo[commit["repo"]] <= max_per_repo:
- out.append(commit)
-
- return sorted(out, key = lambda a: a["datetime"], reverse = True)
-
-if __name__ == "__main__":
- print(scrape_whispa(CONFIG.get("qnas", "url")))
- # import database
-
- # with database.Database() as db:
- # print(json.dumps(get_recent_commits(db), indent=4))
+from dataclasses import dataclass
+from io import StringIO
+from lxml import html, etree
+from github import Github
+import multiprocessing
+import paramiko.client
+from APiHole import PiHole
+import transmission_rpc
+import configparser
+import math as maths
+import requests
+import datetime
+import urllib
+import docker
+import random
+import subprocess
+import fabric
+import pickle
+import queue
+import json
+import time
+import os
+
# id of the last safebooru post served; used by get_random_image to
# avoid handing out the same post twice in a row
theLastId = 0
# the config file sits one directory above the package
config_path = os.path.join(os.path.dirname(__file__), "..", "edaweb.conf")
if not os.path.exists(config_path):
    raise FileNotFoundError("Could not find edaweb.conf config file")
# interpolation disabled so raw '%' characters in values survive
CONFIG = configparser.ConfigParser(interpolation = None)
CONFIG.read(config_path)
+
def humanbytes(B):
    """Return the given bytes as a human friendly KB, MB, GB, or TB string.

    B may be an int or float byte count; values >= 1 KB are formatted
    with two decimal places and a binary-prefix unit.
    """
    B = float(B)
    KB = float(1024)
    MB = float(KB ** 2) # 1,048,576
    GB = float(KB ** 3) # 1,073,741,824
    TB = float(KB ** 4) # 1,099,511,627,776

    if B < KB:
        # BUG FIX: the old expression `0 == B > 1` was a chained
        # comparison ((0 == B) and (B > 1)) that is never true, so the
        # unit was always "Byte"; pluralise for everything except 1.
        return '{0} {1}'.format(B, 'Byte' if B == 1 else 'Bytes')
    elif KB <= B < MB:
        return '{0:.2f} KB'.format(B/KB)
    elif MB <= B < GB:
        return '{0:.2f} MB'.format(B/MB)
    elif GB <= B < TB:
        return '{0:.2f} GB'.format(B/GB)
    elif TB <= B:
        return '{0:.2f} TB'.format(B/TB)
+
@dataclass
class SafebooruImage:
    """Metadata scraped for a single safebooru post."""
    id_: int
    url: str
    searchTags: list
    tags: list
    source: str
    imurl: str

    def remove_tag(self, tag):
        """Return the search tags with *tag* removed.

        The result is deduplicated via a set, so ordering is unspecified.
        """
        remaining = set(self.searchTags) - {tag}
        return list(remaining)
+
@dataclass
class DownloadedImage:
    """Context manager: downloads imurl to static/images/random.jpg on
    entry (returning the path) and deletes the file on exit.

    NOTE(review): the destination filename is fixed, so concurrent
    uses overwrite each other — confirm single-threaded usage.
    """
    imurl: str

    def __enter__(self):
        self.filename = os.path.join("static", "images", "random.jpg")

        # spoof a browser user agent; some hosts refuse the default one
        req = urllib.request.Request(self.imurl, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_5_8) AppleWebKit/534.50.2 (KHTML, like Gecko) Version/5.0.6 Safari/533.22.3'})
        mediaContent = urllib.request.urlopen(req).read()
        with open(self.filename, "wb") as f:
            f.write(mediaContent)
        return self.filename

    def __exit__(self, type, value, traceback):
        os.remove(self.filename)
+
def get_num_pages(tags):
    """Return the number of safebooru result pages for *tags*.

    Falls back to 1 when the paginator's "last page" link is absent
    (i.e. only a single page of results).
    """
    pages_url = "https://safebooru.org/index.php?page=post&s=list&tags=%s" % "+".join(tags)
    tree = html.fromstring(requests.get(pages_url).content)
    try:
        # a[12] is presumably the paginator's last-page link — brittle,
        # breaks if safebooru changes its layout
        finalpage_element = tree.xpath("/html/body/div[6]/div/div[2]/div[2]/div/a[12]")[0]
    except IndexError:
        return 1
    else:
        # pid is a post offset; 5*8 = 40 thumbnails per page
        return int(int(urllib.parse.parse_qs(finalpage_element.get("href"))["pid"][0]) / (5*8))
+
def get_id_from_url(url):
    """Extract the numeric ``id`` query parameter from a safebooru URL."""
    query_params = urllib.parse.parse_qs(url)
    return int(query_params["id"][0])
+
def get_random_image(tags):
    """Pick a random safebooru post matching *tags* and return a SafebooruImage.

    Recursively retries when: the chosen page has no images, the same id
    as the previous call comes up (tracked in module-global theLastId),
    source/image scraping raises, or the source link is dead.
    NOTE(review): no recursion depth limit — a persistent failure mode
    (e.g. layout change) would recurse until RecursionError.
    """
    global theLastId
    # pid is a post offset: 5*8 = 40 posts per page
    searchPage = random.randint(1, get_num_pages(tags)) * 5 * 8
    url = "https://safebooru.org/index.php?page=post&s=list&tags=%s&pid=%i" % ("+".join(tags), searchPage)
    tree = html.fromstring(requests.get(url).content)

    imageElements = [e for e in tree.xpath("/html/body/div[6]/div/div[2]/div[1]")[0].iter(tag = "a")]
    try:
        element = random.choice(imageElements)
    except IndexError:
        # empty page: retry with a fresh random page
        return get_random_image(tags)

    url = "https://safebooru.org/" + element.get("href")
    # avoid serving the same post twice in a row
    if get_id_from_url(url) == theLastId:
        return get_random_image(tags)
    theLastId = get_id_from_url(url)

    try:
        sbi = SafebooruImage(
            id_ = get_id_from_url(url),
            url = url,
            tags = element.find("img").get("alt").split(),
            searchTags = tags,
            source = fix_source_url(get_source(url)),
            imurl = get_imurl(url)
        )
    except (ConnectionError, KeyError) as e:
        print("[ERROR]", e)
        return get_random_image(tags)

    if link_deleted(sbi.url):
        print("Retried since the source was deleted...")
        return get_random_image(tags)

    return sbi
+
def get_source(url):
    """Scrape the "Source:" entry from a safebooru post's stats box.

    Returns the source as plain text (URL) or, for linked entries, the
    first href found. Raises ConnectionError when no source is listed.
    """
    tree = html.fromstring(requests.get(url).content)
    for element in tree.xpath('//*[@id="stats"]')[0].iter("li"):
        if element.text.startswith("Source: h"):
            # plain-text URL: drop the 8-character "Source: " prefix
            return element.text[8:]
        elif element.text.startswith("Source:"):
            # linked source: return the first child that carries an href
            for child in element.iter():
                if child.get("href") is not None:
                    return child.get("href")
    raise ConnectionError("Couldn't find source image for id %i" % get_id_from_url(url))
+
def fix_source_url(url):
    """Normalise an artwork source URL to a stable, viewable form.

    pixiv/pximg URLs become canonical pixiv artwork links, twitter links
    are rewritten to the nitter mirror, anything else passes through.
    Raises ConnectionError for hosts the source cannot be recovered from.
    """
    parsed = urllib.parse.urlparse(url)
    if parsed.netloc == "www.pixiv.net":
        return "https://www.pixiv.net/en/artworks/" + urllib.parse.parse_qs(parsed.query)["illust_id"][0]
    elif parsed.netloc in ["bishie.booru.org", "www.secchan.net"]:
        # BUG FIX: this used to *return* the exception instance instead
        # of raising it, so callers received a ConnectionError object as
        # the source string; get_random_image catches ConnectionError
        # and retries, which is the intended behaviour.
        raise ConnectionError("Couldn't get source")
    elif "pximg.net" in parsed.netloc or "pixiv.net" in parsed.netloc:
        # pximg filenames start with the 8-digit illustration id
        return "https://www.pixiv.net/en/artworks/" + parsed.path.split("/")[-1][:8]
    elif parsed.netloc == "twitter.com":
        return url.replace("twitter.com", "nitter.eda.gay")
    return url
+
def get_imurl(url):
    """Return the direct image URL (the #image element's src) from a
    safebooru post page."""
    tree = html.fromstring(requests.get(url).content)
    return tree.xpath('//*[@id="image"]')[0].get("src")
+
def link_deleted(url):
    """Heuristic: True when the page's <title> matches nitter's error
    page or pixiv's generic (work removed) title."""
    text = requests.get(url).text
    # crude title extraction — avoids parsing the whole document
    return text[text.find("<title>") + 7 : text.find("</title>")] in ["Error | nitter", "イラストコミュニケーションサービス[pixiv]"]
+
def request_recent_commits(since = None):
    """Fetch the authenticated GitHub user's commits across all repos.

    since: datetime lower bound. BUG FIX: the old default
    (`datetime.now() - timedelta(days=7)`) was evaluated once at module
    import, so a long-running process drifted further and further into
    the past; None now means "7 days before the call".

    Returns a list of commit dicts sorted newest first; repositories
    that error (e.g. empty repos) are skipped with a printed warning.
    """
    if since is None:
        since = datetime.datetime.now() - datetime.timedelta(days = 7)

    g = Github(CONFIG.get("github", "access_code"))
    out = []
    for repo in g.get_user().get_repos():
        try:
            for commit in repo.get_commits(since = since):
                out.append({
                    "repo": repo.name,
                    "message": commit.commit.message,
                    "url": commit.html_url,
                    "datetime": commit.commit.author.date,
                    "stats": {
                        "additions": commit.stats.additions,
                        "deletions": commit.stats.deletions,
                        "total": commit.stats.total
                    }
                })
        except Exception as e:
            # e.g. GithubException for empty repositories — log and move on
            print(repo, e)

    return sorted(out, key = lambda a: a["datetime"], reverse = True)
+
def scrape_nitter(username, get_until:int):
    """Scrape tweets of *username* from the internal nitter instance,
    following pagination until tweet id *get_until* is reached.

    Returns a list of (id, datetime, replying_to, text, username, images)
    tuples, or [] when the instance reports rate limiting.
    """
    new_tweets = []
    nitter_url = CONFIG.get("nitter", "internalurl")
    nitter_port = CONFIG.getint("nitter", "internalport")
    scrape_new_pages = True
    url = "http://%s:%d/%s" % (nitter_url, nitter_port, username)

    while scrape_new_pages:
        tree = html.fromstring(requests.get(url).content)
        for i, tweetUrlElement in enumerate(tree.xpath('//*[@class="tweet-link"]'), 0):
            # i > 0 skips the first tweet-link (presumably a pinned tweet
            # or profile link — confirm); the username check drops retweets
            if i > 0 and tweetUrlElement.get("href").split("/")[1] == username:
                id_ = int(urllib.parse.urlparse(tweetUrlElement.get("href")).path.split("/")[-1])
                tweet_link = "http://%s:%d%s" % (nitter_url, nitter_port, tweetUrlElement.get("href"))

                if id_ == get_until:
                    # reached the newest tweet already stored — stop
                    scrape_new_pages = False
                    break

                try:
                    dt, replying_to, text, images = parse_tweet(tweet_link)
                    new_tweets.append((id_, dt, replying_to, text, username, images))
                    print(dt, "'%s'" % text)
                except IndexError:
                    print("Couldn't get any more tweets")
                    scrape_new_pages = False
                    break
                except ConnectionError:
                    # raised by parse_tweet on nitter rate limiting
                    print("Rate limited, try again later")
                    return []


        try:
            # "show more" link carries the pagination cursor
            cursor = tree.xpath('//*[@class="show-more"]/a')[0].get("href")
        except IndexError:
            # no more elements
            break
        url = "http://%s:%d/%s%s" % (nitter_url, nitter_port, username, cursor)

    return new_tweets
+
def parse_tweet(tweet_url):
    """Scrape a single tweet page from nitter.

    Returns (datetime, replying_to id or None, text, image URLs).
    Raises ConnectionError when the instance reports rate limiting;
    IndexError propagates when the main tweet element is absent.
    """
    tree = html.fromstring(requests.get(tweet_url).content)

    rate_limited_elem = tree.xpath("/html/body/div/div/div/span")
    if rate_limited_elem != []:
        if rate_limited_elem[0].text == "Instance has been rate limited.":
            raise ConnectionError("Instance has been rate limited.")

    main_tweet_elem = tree.xpath('//*[@class="main-tweet"]')[0]

    dt_str = main_tweet_elem.xpath('//*[@class="tweet-published"]')[0].text
    # strip the stray "Â" left by nitter's mis-decoded "·" separator
    dt = datetime.datetime.strptime(dt_str.replace("Â", ""), "%b %d, %Y · %I:%M %p UTC")
    text = tree.xpath('//*[@class="main-tweet"]/div/div/div[2]')[0].text_content()
    if text == "":
        text = "[Image only]"
    # last link in the "before" thread line is the tweet being replied to
    replying_to_elems = tree.xpath('//*[@class="before-tweet thread-line"]/div/a')
    if replying_to_elems != []:
        replying_to = int(urllib.parse.urlparse(replying_to_elems[-1].get("href")).path.split("/")[-1])
    else:
        replying_to = None

    images = []
    images_elems = tree.xpath('//*[@class="main-tweet"]/div/div/div[3]/div/div/a/img')
    for image_elem in images_elems:
        # rewrite image src to the public nitter instance
        images.append("https://" + CONFIG.get("nitter", "outsideurl") + urllib.parse.urlparse(image_elem.get("src")).path)

    return dt, replying_to, text, images
+
def scrape_whispa(whispa_url, since = None):
    """Scrape answered Q&As from a whispa.sh profile page.

    since: newest Q&A datetime already stored; scraping stops two weeks
    before it. None scrapes everything back to a sentinel date.
    Returns a list of dicts (id, link, datetime, question, answer, host)
    shaped for Database.append_qnas.
    """
    def query_answer(answer_url, max_retries = 10):
        # best-effort GET with linear backoff; returns None when every
        # retry fails (caller would then crash on req.json())
        for i in range(max_retries):
            try:
                return requests.get(answer_url)
            except requests.exceptions.ConnectionError:
                s = 5.05 * (i + 1)
                print("Connection timed out, retrying in %.2fs" % s)
                time.sleep(s)
                continue

    # add a bit of wiggle room in case i don't answer the questions in order (i often do this)
    if since is None:
        stop_at = datetime.datetime(year = 2001, month = 8, day = 12)
    else:
        stop_at = since - datetime.timedelta(days = 14)
        print("The newest Q&A timestamp in the database was %s, we will stop looking at %s." % (since.astimezone().isoformat(), stop_at.astimezone().isoformat()))

    html_ = requests.get(whispa_url).content.decode()

    tree = html.fromstring(html_)
    qnas = []
    # we're not doing proper HTML scraping here really... since the site uses client side rendering
    # we rather parse the JS scripts to get the JSON payload of useful information... sadly this looks horrible
    for i, script in enumerate(tree.xpath("/html/body/script"), 0):
        js = str(script.text)
        if "receivedFeedback" in js:
            # doubly-encoded JSON embedded in a JS assignment; the magic
            # offsets strip the assignment prefix/suffix
            parsed_json = json.loads(json.loads(js[19:-1])[1][2:])[0][3]["loadedUser"]["receivedFeedback"]
            for j in parsed_json:
                # NOTE(review): `< 0` can presumably never be true for a
                # count — was this meant to skip unanswered questions
                # (== 0)? confirm intent
                if j["_count"]["childFeedback"] < 0:
                    continue

                answer_url = "https://apiv4.whispa.sh/feedbacks/%s/children/public" % j["id"]
                req = query_answer(answer_url)
                try:
                    firstanswer = req.json()["data"][0]
                except IndexError:
                    # question has no public answers yet
                    continue
                # createdAt ends with a literal "Z"; strip it for fromisoformat
                dt = datetime.datetime.fromisoformat(firstanswer["createdAt"][:-1])

                qna = {
                    # id is derived from the answer timestamp, not the
                    # site's hex id (see commented-out alternative)
                    # "id": int(j["id"], base = 16),
                    "id": int(dt.timestamp()),
                    "link": answer_url,
                    "datetime": dt,
                    "question": j["content"],
                    "answer": firstanswer["content"],
                    "host": "whispa.sh"
                }
                print(qna)
                qnas.append(qna)
                # be polite to the API between requests
                time.sleep(2.03)
                if dt <= stop_at:
                    print("Met the threshold for oldest Q&A, so stopped looking.")
                    break
    return qnas
+
def get_docker_containers(host, ssh_key_path):
    """SSH to *host* as root and list all docker containers there.

    Returns a list of [name, status, image] triples parsed from
    `docker ps` output; the header row and trailing newline are dropped.
    """
    result = fabric.Connection(
        host = host,
        user = "root",
        connect_kwargs = {
            "key_filename": ssh_key_path,
            "look_for_keys": False
        }
    ).run('docker ps -a -s --format "table {{.Names}};{{.Status}};{{.Image}}"', hide = True)
    return [line.split(";") for line in result.stdout.split("\n")[1:-1]]
+
def cache_all_docker_containers(ssh_key_path):
    """Query every host in CONFIG's [docker_hosts] section and write the
    combined results to /tmp/docker-cache.json.

    NOTE(review): despite the .json extension, the file is a pickle —
    get_all_docker_containers unpickles it. The dict keys are
    (host, name) tuples, which plain JSON could not represent anyway.
    """
    containers = {}
    containers["containers"] = {}
    for host, name in CONFIG["docker_hosts"].items():
        print(host)
        containers["containers"][(host, name)] = get_docker_containers(host, ssh_key_path)

    containers["cachetime"] = "Docker information last updated at %s" % str(datetime.datetime.now())
    with open("/tmp/docker-cache.json", "wb") as f:
        pickle.dump(containers, f)
+
def get_all_docker_containers():
    """Load the pickled docker-container cache written by
    cache_all_docker_containers, or an empty placeholder when no cache
    file exists yet."""
    cache_path = "/tmp/docker-cache.json"
    if os.path.exists(cache_path):
        with open(cache_path, "rb") as f:
            return pickle.load(f)
    return {"containers": {}, "cachetime": "No cached docker information"}
+
def timeout(func):
    """Decorator: run *func* in a separate process and give up after
    the number of seconds configured in [servicetimeout].

    Returns func's result, or None when it timed out or raised (the
    IndexError path) — callers must tolerate None. The decorated
    function is called with no arguments.
    """
    # cant get this to work with queue.Queue() for some reason?
    # this works but Manager() uses an extra thread than Queue()
    manager = multiprocessing.Manager()
    returnVan = manager.list()

    def runFunc(q, func):
        # child process: stash the result in the managed list
        q.append(func())

    def beginTimeout():
        t = multiprocessing.Process(target = runFunc, args = (returnVan, func))
        t.start()

        t.join(timeout = CONFIG["servicetimeout"].getint("seconds"))

        try:
            return returnVan[0]
        except IndexError:
            # nothing was appended: the child is still running (timeout)
            # or died — kill it if needed and fall through to None
            if t.is_alive():
                t.terminate()

    return beginTimeout
+
@timeout
def get_torrent_stats():
    """Fetch transmission session statistics as a display-ready dict.

    Wrapped in @timeout, so callers get None when transmission does not
    answer within the configured service timeout.
    """
    client = transmission_rpc.client.Client(
        host = CONFIG.get("transmission", "host")
    )
    # reach into the response object for the raw stats fields
    s = vars(client.session_stats())["fields"]
    return {
        "Active torrents:": s["activeTorrentCount"],
        "Downloaded:": humanbytes(s["cumulative-stats"]["downloadedBytes"]),
        "Uploaded:": humanbytes(s["cumulative-stats"]["uploadedBytes"]),
        "Active time:": str(datetime.timedelta(seconds = s["cumulative-stats"]["secondsActive"])),
        "Files added:": s["cumulative-stats"]["filesAdded"],
        "Current upload speed:": humanbytes(s["uploadSpeed"]) + "s/S",
        "Current download speed:": humanbytes(s["downloadSpeed"]) + "s/S"
    }
+
@timeout
def get_pihole_stats():
    """Fetch the Pi-hole summary via its API (None on timeout, via @timeout)."""
    return PiHole.GetSummary(CONFIG.get("pihole", "url"), CONFIG.get("pihole", "key"), True)
+
def get_recent_commits(db, max_per_repo = 3):
    """Return cached commits keeping at most *max_per_repo* per
    repository (first occurrences in cache order win), sorted newest
    first by their "datetime" key."""
    seen_counts = {}
    kept = []
    for commit in db.get_cached_commits():
        repo = commit["repo"]
        seen_counts[repo] = seen_counts.get(repo, 0) + 1
        if seen_counts[repo] <= max_per_repo:
            kept.append(commit)

    return sorted(kept, key = lambda c: c["datetime"], reverse = True)
+
if __name__ == "__main__":
    # ad-hoc manual test: scrape the Q&A site configured in [qnas]
    print(scrape_whispa(CONFIG.get("qnas", "url")))
    # import database

    # with database.Database() as db:
    #     print(json.dumps(get_recent_commits(db), indent=4))
diff --git a/edaweb/static/cow.txt b/edaweb/static/cow.txt
new file mode 100644
index 0000000..41aa536
--- /dev/null
+++ b/edaweb/static/cow.txt
@@ -0,0 +1,58 @@
+
+
+
+
+
+
+ ..........................
+ ....*o|||||||8#@@@@@@@@@@@@@@@@@@@@@@@###&|o:_..
+ ..*:o|||&8##@###8888888######@#@###########################|*...
+ .:o|||8#####8888|:::**. *&########################@@################&o_
+ .*o&8###@#8&o*_. :###@##############@########################@@##&o_
+ .*o8########& :##@#@##############@############################@###|_
+ .*o|8##########8o .#######################################################&o_
+ *&##|_ ..*&##8&o*|88888|_ _#######################################@##################|.
+ *#####& *&######&o_..*o|o:_ .&##o _###########################################################&_
+ _##8*##8 .|88|:::|#######8###8|*:_ .&#@@8 _##@@@########################################################&_
+ _#@8_##8_ *8#8|*_ _:|#####&&####8 .&##############################################################|
+ _#@8.|##8_ _::o###8&##8 .|##@############################8###########################@@#|_
+ *###o.|88o ..*&####|..##& _|##########################8|_ .|#############################8
+ *|###|_ ._&####8|*_ _*_ _::&8888888888888888|::*_ .|##@####@@@##################|
+ *&###|_ _:_ .&88###8|*_ ..... .|#####@@@##################8
+ .##@#& _##& .|##o _#@@#@#| .|#######&:_ _|###@####################8
+ .:8##8o _o:*&##| *##8_.&@@##@#| _::o8#8|::|#####|_ _|#################88###8
+ .&##&*_ *###o_###| .|##8*&##|*###o _###8####8|_ _:|###|_ .*o|||o:_ _:::&8888888888|_ _##8
+ .###|_. _###o *###|*&#######8 *##8 .##8_ _:|###|_ _|###|_ .&########o _##&
+ _|####8|&##8:_ _|#########88o .##8 *##& _|###o .|###o .#########| .o##o
+ o#8|*:#@@###o _:::*__*_ _##8_ _##8_ _&##|_ *##8_ *8#####8|_ .*oo:_ o##|
+ *###o.&#####& _oo* .8##& .8##8_ .|##& o##& _::::_.*o|8######|_ .##8.
+ _###&o&##8_:*_ .###& .###|_&#####| _##8 :###o *ooo&#########@#@#& ....:##&
+ .|8||###&. _**_ .###88##|*&###|*._&##& *|##8_ *o&####@@####@@######& .*o||||||&#######8_
+ *&###o _|88##8_ _:8######|*:###|_ _##################88|_ *&#################&
+ *#####o *&8o *##& _:::*_.&##|_ _#@##############8_ :##################8*
+ .###&##8_.|88o *&8o _@@& .###| .&####@#########|_ .####@###@@########8*
+ _##&.|###|_.... .|88o _##8* *&###|_ *###o _:&88######8|_ .*o|||##################o
+ _##8_ _|########|_ .*o8####&#@@#@##o *#@8 _*:*. .&###@###################|
+ .|##& _:::::&##& .*&##############@#8_ .###o _#@####################|_
+ .&##|_ .&##8_ *o&####################8_ *##& .&#####@@############8*
+ .|##8_.&###&####8_ _########################8**##& _##################|_
+ .&###&##888888##8_ .|88######@###########|*######o _|8###############|_
+ .&#####o *###|_ _::::::*o##8**o##8 .|###8o .&##@#############&*
+ .|####o _|###|_ _##8.*&##& _*_ .#################|
+ _*_ _|###|_.. .|#####8|_ *&#@#########8###&
+ *#######&|o:_... ..*:::*. ......._:o&8####888&o:#####8_
+ _###|&888#####@#####&|||o:_........................._:o||||8##@@@@####8|:*_ _:::*_
+ .|#@###o _:::o#@#888######@@@@@@@@@@@@@@@@@@@@@@@@#####888|::::::**_
+ _::*_ :##& *&8|_:::::::::::::::::::::::::**_
+ .###8||&##8o
+ _|888888|_
+
+
+
+
+
+
+
+
+
+
diff --git a/edaweb/static/index.md b/edaweb/static/index.md
index 0d45f12..6259ba6 100644
--- a/edaweb/static/index.md
+++ b/edaweb/static/index.md
@@ -1,34 +1,34 @@
-site now also avaliable under the domain [boymoder.blog](https://boymoder.blog)!
-
-![yaoi](/img/shun-hashimoto-mio-chibana.gif)
-
-## haiiiiiii
-my name is eden and im a 23yo (boymoder/[fujoshi](https://www.urbandictionary.com/define.php?term=fujoshi)) computer science/robotics PhD student. i made my own website to encourage others to do so too.
-i'll post my thoughts on here sometimes, and use this site to link to other stuff i host [more about me](/thought?id=2).
-
-## FOSS alternative services
-
-- [nextcloud - dropbox (+ much more!) alternative](https://nc.eda.gay)
-- [git server - github alternative](https://git.eda.gay/)
-- [jellyfin - web player for ~~legally downloaded~~ TV and films](https://jellyfin.eda.gay) - RIP emby!
-
-[see the services im running right now](/services) (takes a couple seconds to load)
-
-these sites are hosted on my [homelab system](https://wiki.eda.gay)
-
-![startech 8u rack cropped](/img/GcyexeCW0AAYssz.jpg?w=300&h=5000)
-![startech 12u rack cropped](/img/Startech.jpg?h=250&w=5000)
-
-## nice websites
-- [wiby.me](http://wiby.me/) - search engine for old style websites with limited javascript (my site used to be on here but it got blacklisted for some reason?)
-- [dysmorph.nekoweb.org](https://dysmorph.nekoweb.org/) - a site that is very based because it looks similar
-- [transsexual.org](https://web.archive.org/web/20010802032136/http://transsexual.org/Toon.html) - awesome and relatable transsexual comics from a website that's slightly older than me
-- [norfolkchurches.co.uk](http://www.norfolkchurches.co.uk/norwichintro.htm) - site about all the churches in norwich (and norfolk!), the city that has far too many medieval churches than it knows what to do with. this site is preciesly what the internet should be, the muted ramblings of someone with an expert knowledge on his preferred niche interest. without any javascript. nice if, like me, you have a middling interest in theology
-- [boymoder.network](https://boymoder.network/) - website for boymoder awareness
-- [4chan.org/lgbt/](https://boards.4channel.org/lgbt/) - but dont blame me if u catch brainworms
-- [https://www.math.uni-bielefeld.de/~sillke/Twister/fun/elevator-fun90.html](https://www.math.uni-bielefeld.de/~sillke/Twister/fun/elevator-fun90.html) any website with a URL like this is gonna be good
-- [boymoder.moe](https://nyaomidev.github.io/boymoder.moe/)
-- [boymoders.com](https://boymoders.com)
-- [john.citrons.xyz](https://john.citrons.xyz/) - for the nice 'ads' featured at the bottom of my page
-
-
+site now also available under the domain [boymoder.blog](https://boymoder.blog)!
+
+![yaoi](/img/shun-hashimoto-mio-chibana.gif)
+
+## haiiiiiii
+my name is eden and im a 23yo (boymoder/[fujoshi](https://www.urbandictionary.com/define.php?term=fujoshi)) computer science/robotics PhD student. i made my own website to encourage others to do so too.
+i'll post my thoughts on here sometimes, and use this site to link to other stuff i host [more about me](/thought?id=2).
+
+## FOSS alternative services
+
+- [nextcloud - dropbox (+ much more!) alternative](https://nc.eda.gay)
+- [git server - github alternative](https://git.eda.gay/)
+- [jellyfin - web player for ~~legally downloaded~~ TV and films](https://jellyfin.eda.gay) - RIP emby!
+
+[see the services im running right now](/services) (takes a couple seconds to load)
+
+these sites are hosted on my [homelab system](https://wiki.eda.gay)
+
+![startech 8u rack cropped](/img/GcyexeCW0AAYssz.jpg?w=300&h=5000)
+![startech 12u rack cropped](/img/Startech.jpg?h=250&w=5000)
+
+## nice websites
+- [wiby.me](http://wiby.me/) - search engine for old style websites with limited javascript (my site used to be on here but it got blacklisted for some reason?)
+- [dysmorph.nekoweb.org](https://dysmorph.nekoweb.org/) - a site that is very based because it looks similar
+- [transsexual.org](https://web.archive.org/web/20010802032136/http://transsexual.org/Toon.html) - awesome and relatable transsexual comics from a website that's slightly older than me
+- [norfolkchurches.co.uk](http://www.norfolkchurches.co.uk/norwichintro.htm) - site about all the churches in norwich (and norfolk!), the city that has more medieval churches than it knows what to do with. this site is precisely what the internet should be, the muted ramblings of someone with an expert knowledge on his preferred niche interest. without any javascript. nice if, like me, you have a middling interest in theology
+- [boymoder.network](https://boymoder.network/) - website for boymoder awareness
+- [4chan.org/lgbt/](https://boards.4channel.org/lgbt/) - but dont blame me if u catch brainworms
+- [https://www.math.uni-bielefeld.de/~sillke/Twister/fun/elevator-fun90.html](https://www.math.uni-bielefeld.de/~sillke/Twister/fun/elevator-fun90.html) any website with a URL like this is gonna be good
+- [boymoder.moe](https://nyaomidev.github.io/boymoder.moe/)
+- [boymoders.com](https://boymoders.com)
+- [john.citrons.xyz](https://john.citrons.xyz/) - for the nice 'ads' featured at the bottom of my page
+
+
diff --git a/ffs.md b/ffs.md
deleted file mode 100644
index 9710cd1..0000000
--- a/ffs.md
+++ /dev/null
@@ -1,108 +0,0 @@
-![lobotomy](/img/photo_2025-12-04_22-34-24.jpg)
-
-Quite a lot of people have asked me questions about my FFS experience, so I don't have to repeat myself,
-I write this detailed post about how it went for me, the questions to ask, and the recovery process etc.
-
-## Selecting your surgeon and what procedures to get
-
-For me, I was raised on transgender imageboards where surgeons are openly discussed, where people know
-every single FFS surgeon in the world, call them by their first name, and have opinions on every single
-one of them. It's almost funny that there is this group of people, who are extremely famous to this tiny
-niche audience. But for those of us who are unfamiliar with surgeons, and who don't know what to look for
-in their results, this section describes your options.
-
-I consider there to be three categories of surgeons:
-
-1. Big-name American surgeons who also do midface (maxillofacial/double jaw surgery work)
-2. European surgeons, and local (state-level/no-name) American surgeons
-3. South American surgeons
-
-With price reducing proportionally for each category. American surgeons are generally significantly more
-expensive, since the assumption is that they're being paid for by insurance. The top level surgeons
-could cost you $80-100k, and are thus basically out of reach unless you have an America-tier salary or an
-American insurance plan who agrees to pay for it.
-
-If you're American, you can probably afford to shop round, and choose your surgeon in accordance to the results
-you like, but the rest of us (me) had to dictate their choice by budget. I knew my only real choice was to
-go to a European surgeon, but my personal choices are discussed later.
-
-How do you decide if a surgeon's results are good? I think most of us have an implicit, subconscious, vibes-based impression of facial sexual dimorphism, but if you're like me and spent too much time on /lgbt/, we can get more specific on the features that define how we are gendered.
-
-![skullzie](/img/1583581996540.jpg?h=400&w=5000)
-
-I wish I could use a real human face instead of a skull, but I feel weird about using someone else's picture,
-so for now a skull will do. In this picture above, we basically can't help A and E, unless we find some way of
-shrinking the brain, and C doesn't matter imo, but FFS surgery aims to fix the bone-based deviations of the others:
-
-- Orbitoplasty fixes B, by shaving down the orbital bones, which really help bring out the eyes, it is normally done in conjunction with a frontoplasty
-- V-line genioplasty (right) and mandible angle contouring (left) fixes D, and is imo one of the most important procedures. A genioplasty shifts round bones and re-attaches them with metal plates, other times if chin projection doesn't need to be changed, surgeons simply just remove and cut away mandible bone, thus making a 'V' shape instead of a 'U' shape. The incision is made inside the mouth, at the gumline and thus their is no visible scar.
-
-![mandible angle contouring](/img/2684330373.jpg?h=400&w=400) ![v line (narrowing) genioplasty](/img/2209509307.jpg?h=400&w=400)
-
-- Frontoplasty reduces the browbone, which is especially obvious in profile. This can either consist of just shaving down the bone, or can be *Type-3 forehead construction* for cases in which there is a cavity/sinus in the browbone, in this case the bone above the sinus is removed completely, shaved down, and then reinserted and set with metalwork. The way this is achieved is rather scary- the incision is made at the hairline (some surgeons also do a "coronal" incision, in which the incision is made at the top of the head), and the skin is pulled almost as down as the eyes to expose the browbone. See the below pictures.
-
-![spoopy](/img/20251111_102045.jpg)
-
-- Not discussed by the top image, but important nonetheless is *midface*, by which I mean overall face length. Some tgirls can look uncanny if they get other aspects of their face fixed, but leave the midface untouched. It can be fixed with orthadontic techniques, like *Le Fort* by taking out bits of bone above the top jaw, but only a few American FFS surgeons do this, because it is usually considered a separate medical dicipline to the other stuff.
-
-![bdd fuel](/img/bdd_fuel.png?h=300&w=5000)
-
-So, so far we've talked about bone differences, but what about soft tissue differences? What procedures are available then? (I really wish I could use pictures to demonstrate here)
-
-- Forehead shape: The female hairline is different to the male hairline, the latter is more round, and less square. This is the case even if you don't have hair loss. You can essentially get this fixed for free if you're having forehead work done anyway, the surgeon just stitches your hairline back in a different shape.
-- Nose: despite what some wokescolds on twitter would tell you, yes, human noses are sexually dimorphic, and getting a rhinoplasty is not enforcing "western european beauty standards", especially if you happen to be a western european. The main sexual difference is nasal projection and the shape of the tip, the female nose is tipped up more at the end. If you're getting a rhinoplasty at the same time anyway, you might as well get other aesthetic improvements, such as dorsal hump reduction, width reduction, and nostril size reduction. American surgeons love to do this beverly-hills look, I call it a "ski slope nose", in which the nose is made completely concave, but unless you're lucky to have a tiny round face with zero midface, I think a simple straight nose is going to suit you better. European surgeons are better about this imo.
-- Cheeks: A rarer procedure that some people get is implants or fat grafting into the cheeks to make them look fuller. Perhaps this could help with midface, but this is just a theory of mine, the aesthetic improvements are besides this.
-- Blepharoplasty: Reduction of excess skin above the eyes. This is an age related surgery that you don't need to get done if you're young, but some people get it done at the same time as FFS.
-- Lip lift: An underrated dimorphic feature is the size of the lips, the top lift especially. Males have a bigger philtrum length (the space between the bottom of the nose and the top of the lip) and a lip lift can improve this, and make the lips look fuller. Lip injections can also help with this, but in this aspect you should be conservative imo unless you want to look like a bimbo. Just be tasteful with it. Things to look out for here are that the shape of the top lip is preserved, a nice "bow" shape.
-- Tracheal shave: Males obviously have a bigger Adam's apple. Some people are scared it affects voice and are nervous to get it done, especially after FFS, but personally my surgeon advised me that it deals with tissue far away from the tissue that affects voice and was extremely confident my voice would be unaffected. Nonetheless, it is under the area in which an ENT surgeon would be its speciality, thus FFS surgeons may not be as familiar with this region. I would advise that if you're getting VFS from an ENT surgeon anyway, you get a trach shave done by them instead. My VFS surgeon did offer this but I wasn't aware to ask for it at the time.
-
-Now we know what to look for in a surgeon's results pictures, we can better gauge the surgeon we want to pick. But still, it is common to have consultations with multiple surgeons so you can decide which one you want to choose that way.
-
-## My FFS journey
-
-![the results from my nose and forehead](/img/faceoff.jpg?h=400&w=5000)
-
-*I am extremely pleased with the results from my forehead and nose. At the time of writing it is too soon to see the results from the jaw work.*
-
-This section discusses my personal experiences, which might be a good way of knowing what to expect during the recovery process. My experiences remain personal, and your recovery could well be different.
-
-I had wanted to get FFS for years. However, I had always considered it out of reach- I'd even planned my life around moving to America to get it with insurance. After I gave up on trying to get an American work visa, I instead started to look at cheaper european surgeons. Whilst most people shop around with lots of surgeons, my choice was dictated by my budget, so in some ways my choice was easier. I basically had to pick a French surgeon, either Qassemyar or Lachiver. For a long time the most popular european choice was Facialteam in Spain, and once upon a time they were considered a budget option, but nowadays they're not even much cheaper than American surgeons. Moreover, they are often considered overly conservative. Picking a French surgeon was a bit of a strange choice at the time; even though French surgeons are increasing in popularity now, francophone surgeons hadn't had much discussion in english-speaking spaces. I knew people who had been to both surgeons and had excellent results, but I chose Qassemyar over Lachiver because in the before and after pictures on his website, the girls in the after pictures were wearing heavy makeup, which rubbed me the wrong way a little, whereas Qassemyar's instagram pictures were simply taken on the operating table with zero anglefrauding. I also thought Lachiver's results were a bit more inconsistant, although that could have been the patients asking for fewer procedures or for the surgeon to be conservative.
-
-I procrastinated for a long time before trying to book a consultation. I was a bit nervous because I thought they might have found it rude for a random person speaking english to speak to them out of the blue. Nonetheless, I first messged them on instagram, they sent me an e-mail address, and then a Whatsapp number to message, of his assistant who was going to organize everything. She asked me if I had a recent CT scan, which she said wasn't a problem when I said I hadn't, and asked me to take some pictures of my face. Taking these pictures was really traumatic- I basically had to take non-anglefrauded pictures of myself for the first time in years, specifically highlighting my worst features that I normally try to keep hidden. It made me extremely dysphoric. In the end, I had an online consulation booked for the end of January. The time from first message to consultation was less than two weeks. I now know that nowadays this process takes multiple months because of the increased popularity of the surgeon. When the time for the consultation came, I was pretty nervous, but I knew what I wanted to say. It made me laugh a little that he was two and a half hours late, but I suppose it shows that he takes care of his patients that he's happy to discuss in detail with them for that long. My own consultation was pretty short, it surprised me how fast he started talking about my face. In my previous surgery, my VFS, the surgeon asked me a bunch of canned questions about if I was dysphoric about my voice etc., how long I'd been a tranny for etc., I guess to guage if I was a real tranny or not. In this case however, the only question in this regard he asked me was if I'd been on hormones for over two years, which I replied in the affirmation. I suppose this question could also have been interpreted as being about facial changes from hormones. Nonetheless, neither of the surgeons tried to gatekeep me or asked for letters from psychiatrists etc. 
He asked me about my facial features I disliked, I talked about my jawline, my adam's apple, and the size of my forehead despite having no hair loss. In this time he nodded undersandingly. I said I wasn't sure if I needed brow work or a rhinoplasty, and asked him for his opinion if I needed work there or not. FFS consultations are pretty funny when you think about it. You basically send pictures of yourself to some guy, and he makes eye contact with you and politely describes everything wrong with your face. I felt validated that he agreed with me about what was wrong with my face, and described how he thought brow work and rhinoplasty would be good for me. At the time, I'd hardly even thought about my nose, since I'd been most brainwormed about my jawline instead. I consider that the consultation is sorta a balancing act between hiding your power level that you know all of the techical terms and don't appear BDD, and appearing to be not sufficiently dysphoric and leading the surgeon to not take you seriously. I think in this consultation I was too far on the latter of this scale- my consulation was only 20-30 minutes long where I think most are longer. How was it possible that something I hate this much, I could only talk about for that short amount of time? It rubbed me the wrong way a little bit that we didn't discuss surgery risks etc. in the first consultation like I did with my VFS, I put this down to differences in the English and French medical systems.
-
-I recieved a quote for the surgery soon after, containing a list of the procedures he'd recommend and an according price. I signed it and sent it off, and paid my deposit 2 days later. I felt a bit strange that I only had a consultation with one surgeon, but as I said earlier, my choices were rather limited and I'd probably choose him anyway. The assistant asked me if I had a date I wanted for the surgery, I said any time after mid-May. They replied to me with a date at the start of November, 11 months after my initial consultation. I was pretty upset with how long I had to wait for. Perhaps it was because I said I wanted a date after mid-May, but I think waiting times for Qassemyar are entirely arbitrary and random. People who had consultations *after* me got surgery dates in July. I probably could have gotten an earlier date if I asked for one, but I was too polite to do so. Waiting for the surgery was pretty hard. Of course I'd wanted to get FFS for years, but with a specific date to it now, I felt like I'd never wanted anything as much in my life before. I had an app in my phone that counted down the number of days, and looked at it multiple times a day for the 250+ days. I felt like such a loser. In the upcoming months, I paid off the rest of the surgery plenty early, since I had no reason to do otherwise. As early as I could, I also booked tickets on the eurostar to Paris. I knew I was going to get the Eurostar over an aeroplane, since it worked out the same price with baggage since I didn't want to deal with airport security whilst in a post-FFS recovery state. A few months before the surgery, my friend and her bf (who I am also friends with) asked if they could join me in Paris, I of course agreed, since the more people taking care of me the better. Qassemyar patients often use a 'villa' for their recovery, but with these additional people joining me, I instead opted to just use an AirBNB instead, which actually worked out cheaper anyway. 
My surgery date was on Saturday, with a CT scan and pre-op appointment the day before on Friday, and a post-op appointment on the Friday next week. So I booked the eurostar tickets accordingly, traveling from Norwich to London and London to Paris on Thursday, and back a week later on the Friday after my post-op. I found it funny that the price of the train ticket from Norwich to London was cheaper than the ticket from London to Paris. In the months I was waiting for my surgery date, I found myself becoming even more dysphoric about my facial features, especially the ones the surgeon had pointed out to me, like my nose, but at least this time I had something to look forward to. I was also terrified of being dissapointed with the results as I was for my VFS, but this time I improved my mental attitude, saying to myself that after this, there will be realistically speaking nothing more I can change about my face, so I might as well like it.
-
-When the time came on the Thursday, after packing my bags the night before, I travelled down to London on the train and met my friend at St. Pancras station. I had actually never used the eurostar before, so I was interested by the experience. It is true that there is less security than at an airport, but there is still some. You go through passport control before you arrive at your destination, and their is x-rays for your luggage. At the London side at least, I didn't need to take anything out of my bag. Once you're through security, you have to wait for ages in a really crammed waiting space. Imagine Eindhoven airport but a million times more hot and crammed. The train journey was pretty cool, its the fastest you can travel on trains in the UK. You can also buy a 'Navigo' Paris metro card on the train so you don't have to do it in Paris. It was fun until the train broke down and we were stuck just outside of Paris for two hours, I was very glad I decided to travel down a day earlier than required. Also, if the train is delayed by more than two hours, you get 50% of the ticket price refunded. We got to the AirBNB pretty late, but all was well in the end.
-
-The next day we got up to go to my CT scanner appointment. Using public transport in a new city when you don't speak the language can be pretty scary, but we worked it out in the end. We got to the radiology clinic over an hour early, so we just spent a while walking round killing time.
-
-
-I was pretty nervous about interacting with people since I don't speak french very well, but the entire time I had zero issues. I showed my prescription to the desk person and was sent upstairs to wait. The actual CT scan was pretty easy, it only lasted like 5 minutes and I only had to focus on keeping my head straight. MRI scans last much longer I believe. Once that was done I wask shown into another room where the radiologist looked at my scans on this powerful computer. It had a really cool 3D rendering of my face and skull, it was rather surreal seeping my own face up on there. After staring at my face for a while, the radiologist was like "yeah you have a normal skull" (one of the best compliments i've recieved) and sent me out to wait again. i wasn't expecting this but i was handed a huge A3 enveolope of my CT imagery to take with me.
-
-i left to go and find my boyfriend where he was waiting for me in a cafe, where I had some time to kill until my pre-op. it was very surreal, me walking around paris with a huge envelope of pictures of my skull. Anyway after that it was time for me to go to my appointment. After walking to his office I felt, dare I say, the most mogged i've ever felt in my life. there was all these beautiful dolls and i was out there looking like an autistic sperg loser. I felt very out of place. I was thinking that if you were a chaser you could sit in the cafe outside his office and oogle all of the women going past every day. in the appointment, it was just me sitting across from him and his two assistants. the chair was one of those ones you sink into with the back really far back that I hate, so I just perched on the end and tried to keep my back straight. I'm really insecure in this situation because I've heard horror stories of surgeons not trying hard because they're not attracted to the girl they're working on. the surgeon said a sarcastic joke to try to help me relax but I was so nervous it fell completely flat on my ears. he had the 3D scan of me up and was describing the procedures he was going to do to me, e.g. type III forehead reconstruction because of my forehead sinus. again, I was rather at a loss at what to say: i described the nose style I wanted, a straight nose, pointed up at the end, with the projection reduced a little, with my nostrils smaller too, but besides that I just wanted as much bone reduced as possible. some girls get really specific about the type of jaw and forehead they want, but I simply wanted as much bone removed as possible. I felt as if I hadn't been explicit enough in my previous consultation, so this time I explicitally said basically "yeah, go ham".
-
-After my pre-op, basically nothing remained but to show up for my surgery. So after a night with little sleep due to nerves, I showed up at the hospital over an hour early. So early in fact that the hospital wasn't open yet. So my and my boyfriend went around on a nice walk. The hospital was close to the viewing area for the effiel tower, so we shared a tender moment there. Walking round Paris in the very early morning was really peaceful and really helped with my nerves. Inside the hospital, I introduced myself and was taken to a side room where they took copies of my identification and I paid for the anaethstatist. Again, in the hospital I felt very out of place. It was very extravigantely decorated and luxourious, nothing at all like an NHS hospital. Because after all it is a cosmetics-only hospital made for middle aged-french housewives, I felt out of place as a mid-working-class-british tranny. Once my room was ready, I was taken to it and dressed in a fetching blue gown, tied at the waist made my hips look surprisingly good. At some point qassemyar and his assistants came to see me; and they said there would be a minor delay due to some issue with the hospital. I didn't really mind, but I had to wait for an hour or so. When the time came, I kissed goodbye to my boyfriend, which was a very strange moment. It was sort of like saying goodbye to him with my old face, I was shortly going to become a different person. The nurse walked me downstairs to the basement where the operating theatres were. it was a hub of activity, loads of nurses diligently working away. in that moment i felt very vulnerable; naked besides my gown and underwear, not even wearing my glasses, and everyone was speaking a language i didn't understand. walking into the operating room, i recognised the anethetist who smiled at me which made me feel a little more relaxed. 
the operating table looked pretty scary; it reminded me of those american execution tables, there was a pad for the left arm out to the side, no straps though. i remember the room was freezing cold, but they had my lie on the bed and covered me with a thin blanket. there was a heater blowing near my feet that warmed me up, which I was grateful for. the anaethatatist asked me if i wanted to play music and handed me his iphone with spotify on it, i wondered what his search history would be like so i was a tiny bit nosey and it was all classical music, which relieved me slightly. I didn't think they would appreciate hyperpop, so i selected canon in d. Its traditionally a wedding song but I find its repeating cycles very relaxing. the anasthatist's assistant started working on me. my only other experience with surgery was for my VFS (glottoplasty) which was a different experience. im not sure if it's a language barrier thing, or a culture difference thing, but in england they're always like "im going to put this needle in you now." "im going to put this blood pressure cuff on you". "we're putting this drug in you which will make you feel wierd". whereas here they sorta just... did stuff to you. i suppose I don't really mind; just an interesting observation. when I was poked with a needle, i was thinking ""you'd better man up over this minor pain; you're going to be experiencing a hell of a lot worse pain soon". i smelled my surgeon before i saw or heard him, i could smell his cigarrettes, he tenderly touched me on my shoulder and the combination of that and the smell was just what i needed to comfort me when i was nervous and starting to feel woozy. he said he was just talking to my friend in the smoking area, and that he promised him he would take care of me. i wondered if my body was shown to them when they were putting the monitoring on me, but i figured they wouldn't care either way since they've presumabley seen plenty of trannies' bodies before. 
i never breathed into a mask like for VFS, i just slowly drifted off to sleep while i felt my surgeon brushing my hair from behind.
-
-![the last pre-ffs picture of me that exists](/img/PXL_20251108_063442686.MP.jpg?h=400&w=5000)
-
-*The last picture of me that exists pre-FFS*
-
-when i awoke, i remembered where i was pretty quickly. i remember there was a curtain to my left, and some silvery equipment in the wall in front of me. i was feeling extremely exhausted and tired, and i had nothing else to do but close my eyes and go back to sleep. at this stage, the only pain i had was in my throat, it just felt like a slightly worse VFS. i couldn't feel my face at all. it felt very strange. i awoke again to the feeling of someone shaking my shoulder and asking me for my pain out of ten, i was so high i couldn't even really comprehend the question or speak so i just shrugged my shoulders. i never saw the nurses around me really, i only felt and heard them. i remember them yelling the name of a girl next to me trying to get her to wake up. i wondered if they were doing the same to me earlier. i awoke again retching, sitting up violently. at first there was nothing, but soon a paper kidney dish was thrust upon me and i vomited into it. i was at first scared to see that what had just come out of me was brown blood, but the unconcerned reaction of the nurse led me to be unconcerned with this too. i later learned that vomiting blood is fine, it's just the blood from your rhinoplasty and jaw work during surgery going down your throat and into your stomach. it may surprise you to hear that i didn't really mind vomiting that much. for me, the feeling of nausea is a lot worse than the actual act of vomiting. and i didn't feel any nausea at all. it's a bit like vomiting when you're drunk. or high. which i suppose i was. i think the anaesthetist did a good job in making me know what to expect; i knew that i would be taken to a recovery room before going back to my room, and would vomit a lot. i think my experience would have been a lot worse if he didn't tell me what to expect beforehand. the nurse again asked me for my pain out of ten, i said four because my pain was mild. honestly over the whole time the pain was manageable, it mostly just felt uncomfortable.
i was always breathing through my mouth. i knew i wouldn't be able to breathe through my nose, so i didnt try.
-
-my next memory was back in my room being ushered to wiggle my bum onto my bed from the trolley bed. i was surprised at myself that i was able to move, but the action made me vomit again. i saw my boyfriend across the room from me looking at my phone, which made me feel nice. outside it was pitch dark, which was the only indication that a lot of time had passed. i later learned that my surgery was eight hours long. it was only supposed to be six, but my surgeon said that everything was normal, he just spent that extra time trying to remove as much extra bone as possible from my jaw. i also learned that the first thing he did after he finished working on me was to call another one of his patients to comfort her. i really got the impression that he tried to do as much as possible for people. at this stage i was going through this cycle of waking up needing to vomit, and being exhausted and going back to sleep straight away. at some point my boyfriend told me that he was leaving, i didn't really mind because i was mostly sleeping anyway. the next time i woke i was happy to see my phone on charge next to me. at one point i tried to read the messages on my phone, but even the act of sitting up and reading made me feel so exhausted. i supposed this was what chronic fatigue syndrome was like, it really wasn't nice. it is a weird thing to vomit when you can't really open your mouth, it covers your lips and chin in blood. but i couldn't feel any of my face anyway. because i was breathing through my mouth, it felt extremely dry. like as dry as the sahara desert. this was the main source of discomfort for the next few weeks, it was really bad. i would awake with my mouth feeling insanely dry, vomit, and the blood would dry on my lips. i had no sensation in my face at all, like i had no sensation when i touched it and had no sensation of the bandages and gauze on my face.
if i touched where i thought my mouth would be, i didn't recognise what i was touching, which was my very swollen bottom lip area. i spent the next few hours in this cycle of waking up, puking, and lying back with my eyes closed in this fugue-like state between consciousness and unconsciousness. at some point i could no longer ignore the fact i needed to urinate, and asked the nurse to help me use the toilet. she undid the compression things on my legs, and undid my drip so i could use the toilet. throughout this whole stage my voice was extremely fucked up, i could barely speak. so later i gave up on trying to speak at all for the next few days and just communicated with my phone or by gesturing and pointing. i inadvertently saw my face in the mirror and was aghast at what looked back at me, i looked bloodied and bruised and was wearing bandages i didn't realize i was wearing. the actual act of peeing was pretty strange. i assume it's something to do with being on a drip, but like the pressure was really low, and it took a long time to get everything out. i wasn't sure if i should leave the door to the bathroom open or closed. i decided to leave it open whilst i peed in case i passed out or something. i felt so awful that any normal politeness i would usually be concerned with no longer mattered to me. I can't really overstate how awful the dry mouth was. it was certainly the most uncomfortable part of ffs for me. once i got back into bed i vomited a lot more blood. i figured the movement was making me motion sick, so i tried not to move at all. the nurse left me a big stack of kidney dishes on the side for me to vomit into. i awoke every few hours, but mostly just tried to sleep through the worst of it.
-
-![the only picture i have after the first day](/img/PXL_20251109_072147683.jpg?h=400&w=5000)
-
-*the only picture i have from the first day. interestingly my surgeon doesn't seem to go for the big thick bandages other surgeons do, only this white bandage that was removed after the first night, plus the compression garment.*
-
-
-
-
-
-
-
-
-
-
-
-**To be continued...**
diff --git a/homelab-wiki/Dockerfile b/homelab-wiki/Dockerfile
index 62b1f60..391654c 100644
--- a/homelab-wiki/Dockerfile
+++ b/homelab-wiki/Dockerfile
@@ -1,19 +1,19 @@
-FROM mediawiki:1.43.3
-
-ENV MW_HOME=/var/www/html
-
-# download and add composer to path
-RUN set -x; \
- php -r "readfile('https://getcomposer.org/installer');" | php \
- && mv composer.phar /usr/local/bin/composer
-
-# get extensions
-RUN set -x; \
- cd $MW_HOME/extensions \
- && git clone --depth 1 -b REL1_39 https://github.com/wikimedia/mediawiki-extensions-TemplateStyles \
- && git clone --depth 1 https://github.com/Universal-Omega/PortableInfobox.git
-
-# install extensions
-RUN set -x; \
- cd $MW_HOME/extensions/mediawiki-extensions-TemplateStyles \
- && composer install --no-dev
+FROM mediawiki:1.43.3
+
+ENV MW_HOME=/var/www/html
+
+# download and add composer to path
+RUN set -x; \
+ php -r "readfile('https://getcomposer.org/installer');" | php \
+ && mv composer.phar /usr/local/bin/composer
+
+# get extensions
+RUN set -x; \
+ cd $MW_HOME/extensions \
+ && git clone --depth 1 -b REL1_39 https://github.com/wikimedia/mediawiki-extensions-TemplateStyles \
+ && git clone --depth 1 https://github.com/Universal-Omega/PortableInfobox.git
+
+# install extensions
+RUN set -x; \
+ cd $MW_HOME/extensions/mediawiki-extensions-TemplateStyles \
+ && composer install --no-dev
diff --git a/homelab-wiki/LocalSettings.php b/homelab-wiki/LocalSettings.php
index e8e5eb2..13d0dfa 100644
--- a/homelab-wiki/LocalSettings.php
+++ b/homelab-wiki/LocalSettings.php
@@ -1,183 +1,183 @@
-<?php
-# This file was automatically generated by the MediaWiki 1.39.3
-# installer. If you make manual changes, please keep track in case you
-# need to recreate them later.
-#
-# See docs/Configuration.md for all configurable settings
-# and their default values, but don't forget to make changes in _this_
-# file, not there.
-#
-# Further documentation for configuration settings may be found at:
-# https://www.mediawiki.org/wiki/Manual:Configuration_settings
-
-# Protect against web entry
-if ( !defined( 'MEDIAWIKI' ) ) {
- exit;
-}
-
-
-## Uncomment this to disable output compression
-# $wgDisableOutputCompression = true;
-
-$wgSitename = "Eden's Homelab Wiki";
-$wgMetaNamespace = "Eden's_Homelab_Wiki";
-
-## The URL base path to the directory containing the wiki;
-## defaults for all runtime URL paths are based off of this.
-## For more information on customizing the URLs
-## (like /w/index.php/Page_title to /wiki/Page_title) please see:
-## https://www.mediawiki.org/wiki/Manual:Short_URL
-$wgScriptPath = "";
-
-## The protocol and server name to use in fully-qualified URLs
-$wgServer = "https://homelabwiki.boymoder.blog";
-
-## The URL path to static resources (images, scripts, etc.)
-$wgResourceBasePath = $wgScriptPath;
-
-## The URL paths to the logo. Make sure you change this from the default,
-## or else you'll overwrite your logo when you upgrade!
-$wgLogos = [
- '1x' => "$wgResourceBasePath/images/c/c9/Logo.png",
-];
-
-## UPO means: this is also a user preference option
-
-$wgEnableEmail = false;
-$wgEnableUserEmail = true; # UPO
-
-$wgEmergencyContact = "";
-$wgPasswordSender = "";
-
-$wgEnotifUserTalk = false; # UPO
-$wgEnotifWatchlist = false; # UPO
-$wgEmailAuthentication = true;
-
-## Database settings
-$wgDBtype = "mysql";
-$wgDBserver = "mysql";
-$wgDBname = "homelabwiki2";
-$wgDBuser = "root";
-$wgDBpassword = getenv( "WG_DB_PASSWORD" );
-
-# MySQL specific settings
-$wgDBprefix = "";
-
-# MySQL table options to use during installation or update
-$wgDBTableOptions = "ENGINE=InnoDB, DEFAULT CHARSET=binary";
-
-# Shared database table
-# This has no effect unless $wgSharedDB is also set.
-$wgSharedTables[] = "actor";
-
-## Shared memory settings
-$wgMainCacheType = CACHE_ACCEL;
-$wgMemCachedServers = [];
-
-## To enable image uploads, make sure the 'images' directory
-## is writable, then set this to true:
-$wgEnableUploads = true;
-$wgUseImageMagick = true;
-$wgImageMagickConvertCommand = "/usr/bin/convert";
-
-# InstantCommons allows wiki to use images from https://commons.wikimedia.org
-$wgUseInstantCommons = true;
-
-# Periodically send a pingback to https://www.mediawiki.org/ with basic data
-# about this MediaWiki instance. The Wikimedia Foundation shares this data
-# with MediaWiki developers to help guide future development efforts.
-$wgPingback = false;
-
-# Site language code, should be one of the list in ./includes/languages/data/Names.php
-$wgLanguageCode = "en-gb";
-
-# Time zone
-$wgLocaltimezone = "UTC";
-
-## Set $wgCacheDirectory to a writable directory on the web server
-## to make your wiki go slightly faster. The directory should not
-## be publicly accessible from the web.
-#$wgCacheDirectory = "$IP/cache";
-
-$wgSecretKey = getenv( "WG_SECRET_KEY" );
-
-# Changing this will log out all existing sessions.
-$wgAuthenticationTokenVersion = "1";
-
-# Site upgrade key. Must be set to a string (default provided) to turn on the
-# web installer while LocalSettings.php is in place
-$wgUpgradeKey = getenv( "WG_UPGRADE_KEY" );
-
-## For attaching licensing metadata to pages, and displaying an
-## appropriate copyright notice / icon. GNU Free Documentation
-## License and Creative Commons licenses are supported so far.
-$wgRightsPage = ""; # Set to the title of a wiki page that describes your license/copyright
-$wgRightsUrl = "https://www.gnu.org/copyleft/fdl.html";
-$wgRightsText = "GNU Free Documentation Licence 1.3 or later";
-$wgRightsIcon = "$wgResourceBasePath/resources/assets/licenses/gnu-fdl.png";
-
-# Path to the GNU diff3 utility. Used for conflict resolution.
-$wgDiff3 = "/usr/bin/diff3";
-
-# The following permissions were set based on your choice in the installer
-$wgGroupPermissions['*']['createaccount'] = false;
-$wgGroupPermissions['*']['edit'] = false;
-
-## Default skin: you can change the default skin. Use the internal symbolic
-## names, e.g. 'vector' or 'monobook':
-$wgDefaultSkin = "monobook";
-
-# Enabled skins.
-# The following skins were automatically enabled:
-wfLoadSkin( 'MinervaNeue' );
-wfLoadSkin( 'MonoBook' );
-wfLoadSkin( 'Timeless' );
-wfLoadSkin( 'Vector' );
-
-
-# Enabled extensions. Most of the extensions are enabled by adding
-# wfLoadExtension( 'ExtensionName' );
-# to LocalSettings.php. Check specific extension documentation for more details.
-# The following extensions were automatically enabled:
-wfLoadExtension( 'AbuseFilter' );
-wfLoadExtension( 'CategoryTree' );
-wfLoadExtension( 'Cite' );
-wfLoadExtension( 'CiteThisPage' );
-wfLoadExtension( 'CodeEditor' );
-wfLoadExtension( 'ConfirmEdit' );
-wfLoadExtension( 'Gadgets' );
-wfLoadExtension( 'ImageMap' );
-wfLoadExtension( 'InputBox' );
-wfLoadExtension( 'Interwiki' );
-wfLoadExtension( 'Math' );
-wfLoadExtension( 'mediawiki-extensions-TemplateStyles' );
-wfLoadExtension( 'MultimediaViewer' );
-wfLoadExtension( 'Nuke' );
-wfLoadExtension( 'OATHAuth' );
-wfLoadExtension( 'PageImages' );
-wfLoadExtension( 'ParserFunctions' );
-wfLoadExtension( 'PdfHandler' );
-wfLoadExtension( 'Poem' );
-wfLoadExtension( 'PortableInfobox' );
-wfLoadExtension( 'ReplaceText' );
-wfLoadExtension( 'Scribunto' );
-wfLoadExtension( 'SecureLinkFixer' );
-wfLoadExtension( 'SpamBlacklist' );
-wfLoadExtension( 'SyntaxHighlight_GeSHi' );
-wfLoadExtension( 'TemplateData' );
-wfLoadExtension( 'TextExtracts' );
-wfLoadExtension( 'TitleBlacklist' );
-wfLoadExtension( 'VisualEditor' );
-wfLoadExtension( 'WikiEditor' );
-
-
-# End of automatically generated settings.
-# Add more configuration options below.
-# $wgShowDebug = false;
-# $wgDevelopmentWarnings = false;
-# $wgShowExceptionDetails = false;
-# $wgDebugToolbar = false;
-
-$wgShowExceptionDetails = true;
-$wgShowDBErrorBacktrace = true;
-$wgShowSQLErrors = true;
+<?php
+# This file was automatically generated by the MediaWiki 1.39.3
+# installer. If you make manual changes, please keep track in case you
+# need to recreate them later.
+#
+# See docs/Configuration.md for all configurable settings
+# and their default values, but don't forget to make changes in _this_
+# file, not there.
+#
+# Further documentation for configuration settings may be found at:
+# https://www.mediawiki.org/wiki/Manual:Configuration_settings
+
+# Protect against web entry
+if ( !defined( 'MEDIAWIKI' ) ) {
+ exit;
+}
+
+
+## Uncomment this to disable output compression
+# $wgDisableOutputCompression = true;
+
+$wgSitename = "Eden's Homelab Wiki";
+$wgMetaNamespace = "Eden's_Homelab_Wiki";
+
+## The URL base path to the directory containing the wiki;
+## defaults for all runtime URL paths are based off of this.
+## For more information on customizing the URLs
+## (like /w/index.php/Page_title to /wiki/Page_title) please see:
+## https://www.mediawiki.org/wiki/Manual:Short_URL
+$wgScriptPath = "";
+
+## The protocol and server name to use in fully-qualified URLs
+$wgServer = "https://homelabwiki.boymoder.blog";
+
+## The URL path to static resources (images, scripts, etc.)
+$wgResourceBasePath = $wgScriptPath;
+
+## The URL paths to the logo. Make sure you change this from the default,
+## or else you'll overwrite your logo when you upgrade!
+$wgLogos = [
+ '1x' => "$wgResourceBasePath/images/c/c9/Logo.png",
+];
+
+## UPO means: this is also a user preference option
+
+$wgEnableEmail = false;
+$wgEnableUserEmail = true; # UPO
+
+$wgEmergencyContact = "";
+$wgPasswordSender = "";
+
+$wgEnotifUserTalk = false; # UPO
+$wgEnotifWatchlist = false; # UPO
+$wgEmailAuthentication = true;
+
+## Database settings
+$wgDBtype = "mysql";
+$wgDBserver = "mysql";
+$wgDBname = "homelabwiki2";
+$wgDBuser = "root";
+$wgDBpassword = getenv( "WG_DB_PASSWORD" );
+
+# MySQL specific settings
+$wgDBprefix = "";
+
+# MySQL table options to use during installation or update
+$wgDBTableOptions = "ENGINE=InnoDB, DEFAULT CHARSET=binary";
+
+# Shared database table
+# This has no effect unless $wgSharedDB is also set.
+$wgSharedTables[] = "actor";
+
+## Shared memory settings
+$wgMainCacheType = CACHE_ACCEL;
+$wgMemCachedServers = [];
+
+## To enable image uploads, make sure the 'images' directory
+## is writable, then set this to true:
+$wgEnableUploads = true;
+$wgUseImageMagick = true;
+$wgImageMagickConvertCommand = "/usr/bin/convert";
+
+# InstantCommons allows wiki to use images from https://commons.wikimedia.org
+$wgUseInstantCommons = true;
+
+# Periodically send a pingback to https://www.mediawiki.org/ with basic data
+# about this MediaWiki instance. The Wikimedia Foundation shares this data
+# with MediaWiki developers to help guide future development efforts.
+$wgPingback = false;
+
+# Site language code, should be one of the list in ./includes/languages/data/Names.php
+$wgLanguageCode = "en-gb";
+
+# Time zone
+$wgLocaltimezone = "UTC";
+
+## Set $wgCacheDirectory to a writable directory on the web server
+## to make your wiki go slightly faster. The directory should not
+## be publicly accessible from the web.
+#$wgCacheDirectory = "$IP/cache";
+
+$wgSecretKey = getenv( "WG_SECRET_KEY" );
+
+# Changing this will log out all existing sessions.
+$wgAuthenticationTokenVersion = "1";
+
+# Site upgrade key. Must be set to a string (default provided) to turn on the
+# web installer while LocalSettings.php is in place
+$wgUpgradeKey = getenv( "WG_UPGRADE_KEY" );
+
+## For attaching licensing metadata to pages, and displaying an
+## appropriate copyright notice / icon. GNU Free Documentation
+## License and Creative Commons licenses are supported so far.
+$wgRightsPage = ""; # Set to the title of a wiki page that describes your license/copyright
+$wgRightsUrl = "https://www.gnu.org/copyleft/fdl.html";
+$wgRightsText = "GNU Free Documentation Licence 1.3 or later";
+$wgRightsIcon = "$wgResourceBasePath/resources/assets/licenses/gnu-fdl.png";
+
+# Path to the GNU diff3 utility. Used for conflict resolution.
+$wgDiff3 = "/usr/bin/diff3";
+
+# The following permissions were set based on your choice in the installer
+$wgGroupPermissions['*']['createaccount'] = false;
+$wgGroupPermissions['*']['edit'] = false;
+
+## Default skin: you can change the default skin. Use the internal symbolic
+## names, e.g. 'vector' or 'monobook':
+$wgDefaultSkin = "monobook";
+
+# Enabled skins.
+# The following skins were automatically enabled:
+wfLoadSkin( 'MinervaNeue' );
+wfLoadSkin( 'MonoBook' );
+wfLoadSkin( 'Timeless' );
+wfLoadSkin( 'Vector' );
+
+
+# Enabled extensions. Most of the extensions are enabled by adding
+# wfLoadExtension( 'ExtensionName' );
+# to LocalSettings.php. Check specific extension documentation for more details.
+# The following extensions were automatically enabled:
+wfLoadExtension( 'AbuseFilter' );
+wfLoadExtension( 'CategoryTree' );
+wfLoadExtension( 'Cite' );
+wfLoadExtension( 'CiteThisPage' );
+wfLoadExtension( 'CodeEditor' );
+wfLoadExtension( 'ConfirmEdit' );
+wfLoadExtension( 'Gadgets' );
+wfLoadExtension( 'ImageMap' );
+wfLoadExtension( 'InputBox' );
+wfLoadExtension( 'Interwiki' );
+wfLoadExtension( 'Math' );
+wfLoadExtension( 'mediawiki-extensions-TemplateStyles' );
+wfLoadExtension( 'MultimediaViewer' );
+wfLoadExtension( 'Nuke' );
+wfLoadExtension( 'OATHAuth' );
+wfLoadExtension( 'PageImages' );
+wfLoadExtension( 'ParserFunctions' );
+wfLoadExtension( 'PdfHandler' );
+wfLoadExtension( 'Poem' );
+wfLoadExtension( 'PortableInfobox' );
+wfLoadExtension( 'ReplaceText' );
+wfLoadExtension( 'Scribunto' );
+wfLoadExtension( 'SecureLinkFixer' );
+wfLoadExtension( 'SpamBlacklist' );
+wfLoadExtension( 'SyntaxHighlight_GeSHi' );
+wfLoadExtension( 'TemplateData' );
+wfLoadExtension( 'TextExtracts' );
+wfLoadExtension( 'TitleBlacklist' );
+wfLoadExtension( 'VisualEditor' );
+wfLoadExtension( 'WikiEditor' );
+
+
+# End of automatically generated settings.
+# Add more configuration options below.
+# $wgShowDebug = false;
+# $wgDevelopmentWarnings = false;
+# $wgShowExceptionDetails = false;
+# $wgDebugToolbar = false;
+
+$wgShowExceptionDetails = true;
+$wgShowDBErrorBacktrace = true;
+$wgShowSQLErrors = true;
diff --git a/scripts/export.sh b/scripts/export.sh
index a2ecb6f..082c908 100755
--- a/scripts/export.sh
+++ b/scripts/export.sh
@@ -1,12 +1,12 @@
-#!/bin/bash
-
-echo -n "Input blog post ID to export: "
-read id
-
-echo -n "Input export file name: "
-read export_name
-
-echo "Exporting blog post " $id " to " $export_name
-
-touch $export_name
-sudo docker run -it --entrypoint python3 -v "$(pwd)/edaweb.conf":/app/edaweb.conf -v "$(pwd)/edaweb.conf":/app/edaweb/edaweb.conf -v "$(pwd)/$export_name":/app/$export_name --network mariadb --rm reg.reaweb.uk/edaweb /app/edaweb/parser.py export -i $id -u root -o $export_name
+#!/bin/bash
+
+echo -n "Input blog post ID to export: "
+read id
+
+echo -n "Input export file name: "
+read export_name
+
+echo "Exporting blog post " $id " to " $export_name
+
+touch $export_name
+sudo docker run -it --entrypoint python3 -v "$(pwd)/edaweb.conf":/app/edaweb.conf -v "$(pwd)/edaweb.conf":/app/edaweb/edaweb.conf -v "$(pwd)/$export_name":/app/$export_name --network mariadb --rm reg.reaweb.uk/edaweb /app/edaweb/parser.py export -i $id -u root -o $export_name
diff --git a/scripts/update.sh b/scripts/update.sh
index 32b3b2a..2498c4e 100755
--- a/scripts/update.sh
+++ b/scripts/update.sh
@@ -1,6 +1,6 @@
-#!/bin/bash
-
-echo -n "Input blog post ID to update: "
-read id
-
-sudo docker run -it --entrypoint python3 -v "$(pwd)/edaweb.conf":/app/edaweb.conf -v "$(pwd)/edaweb.conf":/app/edaweb/edaweb.conf -v "$(pwd)/$1":/app/$1 --network mariadb --rm reg.reaweb.uk/edaweb /app/edaweb/parser.py update -i $id -u root -m $1
+#!/bin/bash
+
+echo -n "Input blog post ID to update: "
+read id
+
+sudo docker run -it --entrypoint python3 -v "$(pwd)/edaweb.conf":/app/edaweb.conf -v "$(pwd)/edaweb.conf":/app/edaweb/edaweb.conf -v "$(pwd)/$1":/app/$1 --network mariadb --rm reg.reaweb.uk/edaweb /app/edaweb/parser.py update -i $id -u root -m $1