aboutsummaryrefslogtreecommitdiffstats
path: root/edaweb
diff options
context:
space:
mode:
authorjwansek <eddie.atten.ea29@gmail.com>2026-02-24 15:24:02 +0000
committerjwansek <eddie.atten.ea29@gmail.com>2026-02-24 15:24:02 +0000
commitd6bf03230346e504a18f238668bf6370f5f2068c (patch)
treef577aef62b158f1444215c415f04408a67c300d8 /edaweb
parentb504077c13ab79c1e899b8402d3afdbf4d4da3f6 (diff)
downloadboymoder.blog-d6bf03230346e504a18f238668bf6370f5f2068c.tar.gz
boymoder.blog-d6bf03230346e504a18f238668bf6370f5f2068c.zip
Appended some imagesHEADmaster
Diffstat (limited to 'edaweb')
-rw-r--r--edaweb/app.py520
-rw-r--r--edaweb/database.py502
-rw-r--r--edaweb/services.py818
-rw-r--r--edaweb/static/images/PXL_20251111_125628695.jpgbin0 -> 1608118 bytes
-rw-r--r--edaweb/static/images/PXL_20251115_180322252.jpgbin0 -> 1549576 bytes
-rw-r--r--edaweb/static/images/PXL_20251115_180349152.jpgbin0 -> 1244189 bytes
-rw-r--r--edaweb/static/images/PXL_20251130_222326376.jpgbin0 -> 1749091 bytes
-rw-r--r--edaweb/static/images/PXL_20260210_231506089.jpgbin0 -> 1546275 bytes
8 files changed, 920 insertions, 920 deletions
diff --git a/edaweb/app.py b/edaweb/app.py
index 923b792..a7a0b1a 100644
--- a/edaweb/app.py
+++ b/edaweb/app.py
@@ -1,260 +1,260 @@
-from paste.translogger import TransLogger
-from waitress import serve
-from PIL import Image
-import configparser
-import transmission_rpc
-import downloader
-import datetime
-import database
-import services
-import urllib
-import random
-import parser
-import flask
-import sys
-import os
-import io
-
-app = flask.Flask(__name__)
-CONFIG = configparser.ConfigParser(interpolation = None)
-CONFIG.read(os.path.join(os.path.dirname(__file__), "edaweb.conf"))
-shown_images = set()
-shown_sidebar_images = set()
-
-def get_pfp_img(db:database.Database):
- global shown_images
- dbimg = db.get_pfp_images()
- if len(shown_images) == len(dbimg):
- shown_images = set()
- folder = set(dbimg).difference(shown_images)
- choice = random.choice(list(folder))
- shown_images.add(choice)
- return choice
-
-def get_sidebar_img(db:database.Database):
- global shown_sidebar_images
- dbimg = db.get_sidebar_images()
- if len(shown_sidebar_images) == len(dbimg):
- shown_sidebar_images = set()
- folder = set(dbimg).difference(shown_sidebar_images)
- choice = random.choice(list(folder))
- shown_sidebar_images.add(choice)
- return choice
-
-def get_correct_article_headers(db:database.Database, title):
- db_headers = list(db.get_header_articles())
- if title in [i[0] for i in db_headers]:
- out = []
- for i in db_headers:
- if i[0] != title:
- out.append(i)
- return out + [("index", "/~")]
- else:
- return db_headers + [("index", "/~")]
-
-def get_template_items(title, db):
- return {
- "links": db.get_header_links(),
- "image": get_pfp_img(db),
- "title": title,
- "articles": get_correct_article_headers(db, title)
- }
-
-@app.route("/")
-@app.route("/~")
-def index():
- with database.Database() as db:
- with open(os.path.join(os.path.dirname(__file__), "static", "index.md"), "r") as f:
- return flask.render_template(
- "index.html.j2",
- **get_template_items("eden's site :3", db),
- days_till_ffs = datetime.datetime(2025, 11, 8) - datetime.datetime.now(),
- markdown = parser.parse_text(f.read())[0],
- featured_thoughts = db.get_featured_thoughts(),
- commits = services.get_recent_commits(db)[:15],
- sidebar_img = get_sidebar_img(db)
- )
-
-@app.route("/robots.txt")
-def robots():
- return flask.send_from_directory("static", "robots.txt")
-
-@app.route("/cow.txt")
-def moo():
- return flask.send_from_directory("static", "cow.txt")
-
-@app.route("/services")
-def serve_services():
- with database.Database() as db:
- return flask.render_template(
- "services.html.j2",
- **get_template_items("services", db),
- docker = services.get_all_docker_containers(),
- trans = services.get_torrent_stats(),
- pihole = services.get_pihole_stats()
- )
-
-@app.route("/discord")
-def discord():
- with database.Database() as db:
- return flask.render_template(
- "discord.html.j2",
- **get_template_items("discord", db),
- discord = CONFIG["discord"]["username"]
- )
-
-@app.route("/thought")
-def get_thought():
- thought_id = flask.request.args.get("id", type=int)
- with database.Database() as db:
- try:
- category_name, title, dt, parsed, headers, redirect = parser.get_thought_from_id(db, thought_id)
- # print(headers)
- except TypeError:
- flask.abort(404)
- return
-
- if redirect is not None:
- return flask.redirect(redirect, code = 301)
-
- return flask.render_template(
- "thought.html.j2",
- **get_template_items(title, db),
- md_html = parsed,
- contents_html = headers,
- dt = "published: " + str(dt),
- category = category_name,
- othercategories = db.get_categories_not(category_name),
- related = db.get_similar_thoughts(category_name, thought_id)
- )
-
-@app.route("/thoughts")
-def get_thoughts():
- with database.Database() as db:
- all_ = db.get_all_thoughts()
- tree = {}
- for id_, title, dt, category in all_:
- if category not in tree.keys():
- tree[category] = [(id_, title, dt)]
- else:
- tree[category].append((id_, title, str(dt)))
-
- return flask.render_template(
- "thoughts.html.j2",
- **get_template_items("thoughts", db),
- tree = tree
- )
-
-@app.route("/img/<filename>")
-def serve_image(filename):
- imdirpath = os.path.join(os.path.dirname(__file__), "static", "images")
- if filename in os.listdir(imdirpath):
- try:
- w = int(flask.request.args['w'])
- h = int(flask.request.args['h'])
- except (KeyError, ValueError):
- return flask.send_from_directory(imdirpath, filename)
-
- img = Image.open(os.path.join(imdirpath, filename))
- img.thumbnail((w, h), Image.LANCZOS)
- io_ = io.BytesIO()
- img.save(io_, format='JPEG')
- return flask.Response(io_.getvalue(), mimetype='image/jpeg')
- else:
- flask.abort(404)
-
-@app.route("/nhdl")
-def serve_nhdl():
- with database.Database() as db:
- try:
- nhentai_id = int(flask.request.args["id"])
- with downloader.CompressedImages(nhentai_id) as zippath:
- # return app.send_static_file(os.path.split(zippath)[-1])
- return flask.redirect("/zip/%s" % os.path.split(zippath)[-1])
-
- except (KeyError, ValueError):
- return flask.render_template(
- "nhdl.html.j2",
- **get_template_items("Hentai Downloader", db)
- )
-
-@app.route("/isocd")
-def serve_iso_form():
- with database.Database() as db:
- return flask.render_template(
- "isocd.html.j2",
- **get_template_items("Get a GNU/Linux install CD", db),
- iso_options = db.get_iso_cd_options()
- )
-
-@app.route("/zip/<zipfile>")
-def serve_zip(zipfile):
- return flask.send_from_directory(os.path.join(os.path.dirname(__file__), "static", "zips"), zipfile)
-
-@app.route("/pdf/<pdfname>")
-def serve_pdf(pdfname):
- return flask.send_from_directory(os.path.join(os.path.dirname(__file__), "static", "papers"), pdfname)
-
-@app.route("/nhdlredirect", methods = ["POST"])
-def redirect_nhdl():
- req = dict(flask.request.form)
- try:
- return flask.redirect("/nhdl?id=%i" % int(req["number_input"]))
- except (TypeError, ValueError, KeyError):
- flask.abort(400)
-
-@app.route("/getisocd", methods = ["POST"])
-def get_iso_cd():
- req = dict(flask.request.form)
- print(req)
- with database.Database() as db:
- id_ = db.append_cd_orders(**req)
- print(id_)
- return flask.render_template(
- "isocd_confirmation.html.j2",
- **get_template_items("Get a GNU/Linux install CD", db),
- email = req["email"],
- req = req,
- id_ = id_
- )
-
-#@app.route("/random")
-#def serve_random():
-# try:
-# tags = flask.request.args['tags'].split(" ")
-# except KeyError:
-# flask.abort(400)
-#
-# sbi = services.get_random_image(tags)
-# req = urllib.request.Request(sbi.imurl)
-# mediaContent = urllib.request.urlopen(req).read()
-# with open(os.path.join(os.path.dirname(__file__), "static", "images", "random.jpg"), "wb") as f:
-# f.write(mediaContent)
-#
-# with database.Database() as db:
-# return flask.render_template(
-# "random.html.j2",
-# **get_template_items("random image", db),
-# sbi = sbi,
-# localimg = "/img/random.jpg?seed=%i" % random.randint(0, 9999)
-# )
-
-@app.route("/questions")
-def serve_questions():
- with database.Database() as db:
- return flask.render_template(
- "questions.html.j2",
- **get_template_items("questions and answers", db),
- qnas_link = CONFIG.get("qnas", "url"),
- qnas = db.get_qnas()
- )
-
-if __name__ == "__main__":
- try:
- if sys.argv[1] == "--production":
- #serve(TransLogger(app), host='127.0.0.1', port = 6969)
- serve(TransLogger(app), host='0.0.0.0', port = 6969, threads = 32)
- else:
- app.run(host = "0.0.0.0", port = 5001, debug = True)
- except IndexError:
- app.run(host = "0.0.0.0", port = 5001, debug = True)
+from paste.translogger import TransLogger
+from waitress import serve
+from PIL import Image
+import configparser
+import transmission_rpc
+import downloader
+import datetime
+import database
+import services
+import urllib
+import random
+import parser
+import flask
+import sys
+import os
+import io
+
+app = flask.Flask(__name__)
+CONFIG = configparser.ConfigParser(interpolation = None)
+CONFIG.read(os.path.join(os.path.dirname(__file__), "edaweb.conf"))
+shown_images = set()
+shown_sidebar_images = set()
+
+def get_pfp_img(db:database.Database):
+ global shown_images
+ dbimg = db.get_pfp_images()
+ if len(shown_images) == len(dbimg):
+ shown_images = set()
+ folder = set(dbimg).difference(shown_images)
+ choice = random.choice(list(folder))
+ shown_images.add(choice)
+ return choice
+
+def get_sidebar_img(db:database.Database):
+ global shown_sidebar_images
+ dbimg = db.get_sidebar_images()
+ if len(shown_sidebar_images) == len(dbimg):
+ shown_sidebar_images = set()
+ folder = set(dbimg).difference(shown_sidebar_images)
+ choice = random.choice(list(folder))
+ shown_sidebar_images.add(choice)
+ return choice
+
+def get_correct_article_headers(db:database.Database, title):
+ db_headers = list(db.get_header_articles())
+ if title in [i[0] for i in db_headers]:
+ out = []
+ for i in db_headers:
+ if i[0] != title:
+ out.append(i)
+ return out + [("index", "/~")]
+ else:
+ return db_headers + [("index", "/~")]
+
+def get_template_items(title, db):
+ return {
+ "links": db.get_header_links(),
+ "image": get_pfp_img(db),
+ "title": title,
+ "articles": get_correct_article_headers(db, title)
+ }
+
+@app.route("/")
+@app.route("/~")
+def index():
+ with database.Database() as db:
+ with open(os.path.join(os.path.dirname(__file__), "static", "index.md"), "r") as f:
+ return flask.render_template(
+ "index.html.j2",
+ **get_template_items("eden's site :3", db),
+ days_till_ffs = datetime.datetime(2025, 11, 8) - datetime.datetime.now(),
+ markdown = parser.parse_text(f.read())[0],
+ featured_thoughts = db.get_featured_thoughts(),
+ commits = services.get_recent_commits(db)[:15],
+ sidebar_img = get_sidebar_img(db)
+ )
+
+@app.route("/robots.txt")
+def robots():
+ return flask.send_from_directory("static", "robots.txt")
+
+@app.route("/cow.txt")
+def moo():
+ return flask.send_from_directory("static", "cow.txt")
+
+@app.route("/services")
+def serve_services():
+ with database.Database() as db:
+ return flask.render_template(
+ "services.html.j2",
+ **get_template_items("services", db),
+ docker = services.get_all_docker_containers(),
+ trans = services.get_torrent_stats(),
+ pihole = services.get_pihole_stats()
+ )
+
+@app.route("/discord")
+def discord():
+ with database.Database() as db:
+ return flask.render_template(
+ "discord.html.j2",
+ **get_template_items("discord", db),
+ discord = CONFIG["discord"]["username"]
+ )
+
+@app.route("/thought")
+def get_thought():
+ thought_id = flask.request.args.get("id", type=int)
+ with database.Database() as db:
+ try:
+ category_name, title, dt, parsed, headers, redirect = parser.get_thought_from_id(db, thought_id)
+ # print(headers)
+ except TypeError:
+ flask.abort(404)
+ return
+
+ if redirect is not None:
+ return flask.redirect(redirect, code = 301)
+
+ return flask.render_template(
+ "thought.html.j2",
+ **get_template_items(title, db),
+ md_html = parsed,
+ contents_html = headers,
+ dt = "published: " + str(dt),
+ category = category_name,
+ othercategories = db.get_categories_not(category_name),
+ related = db.get_similar_thoughts(category_name, thought_id)
+ )
+
+@app.route("/thoughts")
+def get_thoughts():
+ with database.Database() as db:
+ all_ = db.get_all_thoughts()
+ tree = {}
+ for id_, title, dt, category in all_:
+ if category not in tree.keys():
+ tree[category] = [(id_, title, dt)]
+ else:
+ tree[category].append((id_, title, str(dt)))
+
+ return flask.render_template(
+ "thoughts.html.j2",
+ **get_template_items("thoughts", db),
+ tree = tree
+ )
+
+@app.route("/img/<filename>")
+def serve_image(filename):
+ imdirpath = os.path.join(os.path.dirname(__file__), "static", "images")
+ if filename in os.listdir(imdirpath):
+ try:
+ w = int(flask.request.args['w'])
+ h = int(flask.request.args['h'])
+ except (KeyError, ValueError):
+ return flask.send_from_directory(imdirpath, filename)
+
+ img = Image.open(os.path.join(imdirpath, filename))
+ img.thumbnail((w, h), Image.LANCZOS)
+ io_ = io.BytesIO()
+ img.save(io_, format='JPEG')
+ return flask.Response(io_.getvalue(), mimetype='image/jpeg')
+ else:
+ flask.abort(404)
+
+@app.route("/nhdl")
+def serve_nhdl():
+ with database.Database() as db:
+ try:
+ nhentai_id = int(flask.request.args["id"])
+ with downloader.CompressedImages(nhentai_id) as zippath:
+ # return app.send_static_file(os.path.split(zippath)[-1])
+ return flask.redirect("/zip/%s" % os.path.split(zippath)[-1])
+
+ except (KeyError, ValueError):
+ return flask.render_template(
+ "nhdl.html.j2",
+ **get_template_items("Hentai Downloader", db)
+ )
+
+@app.route("/isocd")
+def serve_iso_form():
+ with database.Database() as db:
+ return flask.render_template(
+ "isocd.html.j2",
+ **get_template_items("Get a GNU/Linux install CD", db),
+ iso_options = db.get_iso_cd_options()
+ )
+
+@app.route("/zip/<zipfile>")
+def serve_zip(zipfile):
+ return flask.send_from_directory(os.path.join(os.path.dirname(__file__), "static", "zips"), zipfile)
+
+@app.route("/pdf/<pdfname>")
+def serve_pdf(pdfname):
+ return flask.send_from_directory(os.path.join(os.path.dirname(__file__), "static", "papers"), pdfname)
+
+@app.route("/nhdlredirect", methods = ["POST"])
+def redirect_nhdl():
+ req = dict(flask.request.form)
+ try:
+ return flask.redirect("/nhdl?id=%i" % int(req["number_input"]))
+ except (TypeError, ValueError, KeyError):
+ flask.abort(400)
+
+@app.route("/getisocd", methods = ["POST"])
+def get_iso_cd():
+ req = dict(flask.request.form)
+ print(req)
+ with database.Database() as db:
+ id_ = db.append_cd_orders(**req)
+ print(id_)
+ return flask.render_template(
+ "isocd_confirmation.html.j2",
+ **get_template_items("Get a GNU/Linux install CD", db),
+ email = req["email"],
+ req = req,
+ id_ = id_
+ )
+
+#@app.route("/random")
+#def serve_random():
+# try:
+# tags = flask.request.args['tags'].split(" ")
+# except KeyError:
+# flask.abort(400)
+#
+# sbi = services.get_random_image(tags)
+# req = urllib.request.Request(sbi.imurl)
+# mediaContent = urllib.request.urlopen(req).read()
+# with open(os.path.join(os.path.dirname(__file__), "static", "images", "random.jpg"), "wb") as f:
+# f.write(mediaContent)
+#
+# with database.Database() as db:
+# return flask.render_template(
+# "random.html.j2",
+# **get_template_items("random image", db),
+# sbi = sbi,
+# localimg = "/img/random.jpg?seed=%i" % random.randint(0, 9999)
+# )
+
+@app.route("/questions")
+def serve_questions():
+ with database.Database() as db:
+ return flask.render_template(
+ "questions.html.j2",
+ **get_template_items("questions and answers", db),
+ qnas_link = CONFIG.get("qnas", "url"),
+ qnas = db.get_qnas()
+ )
+
+if __name__ == "__main__":
+ try:
+ if sys.argv[1] == "--production":
+ #serve(TransLogger(app), host='127.0.0.1', port = 6969)
+ serve(TransLogger(app), host='0.0.0.0', port = 6969, threads = 32)
+ else:
+ app.run(host = "0.0.0.0", port = 5001, debug = True)
+ except IndexError:
+ app.run(host = "0.0.0.0", port = 5001, debug = True)
diff --git a/edaweb/database.py b/edaweb/database.py
index 8bb9a60..c6553a6 100644
--- a/edaweb/database.py
+++ b/edaweb/database.py
@@ -1,251 +1,251 @@
-from urllib.parse import urlparse
-from dataclasses import dataclass
-from lxml import html
-import configparser
-import threading
-import services
-import operator
-import datetime
-import requests
-import twython
-import pymysql
-import random
-import os
-import re
-
-@dataclass
-class Database:
- safeLogin:bool = True #automatically login with the user in the config file, who is read only
- user:str = None #otherwise, login with the given username and passwd
- passwd:str = None
-
- def __enter__(self):
- config_path = os.path.join(os.path.dirname(__file__), "..", "edaweb.conf")
- if not os.path.exists(config_path):
- raise FileNotFoundError("Could not find edaweb.conf config file")
- self.config = configparser.ConfigParser(interpolation = None)
- self.config.read(config_path)
-
- if self.safeLogin:
- self.__connection = pymysql.connect(
- **self.config["mysql"],
- charset = "utf8mb4"
- )
- else:
- self.__connection = pymysql.connect(
- user = self.user,
- passwd = self.passwd,
- host = self.config["mysql"]["host"],
- db = self.config["mysql"]["db"],
- charset = "utf8mb4"
- )
- return self
-
- def __exit__(self, type, value, traceback):
- self.__connection.commit()
- self.__connection.close()
-
- def get_header_links(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT name, link FROM headerLinks WHERE display = true ORDER BY name;")
- return cursor.fetchall()
-
- def get_image(self, imageName):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT alt, url FROM images WHERE imageName = %s;", (imageName, ))
- return cursor.fetchone()
-
- def get_pfp_images(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT alt, url FROM images WHERE pfp_img = 1;")
- return cursor.fetchall()
-
- def get_sidebar_images(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT alt, url FROM images WHERE sidebar_image = 1;")
- return cursor.fetchall()
-
- def get_header_articles(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT articleName, link FROM headerArticles WHERE display = true;")
- return cursor.fetchall()
-
- def get_all_categories(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT category_name FROM categories;")
- return [i[0] for i in cursor.fetchall()]
-
- def add_category(self, category):
- if not category in self.get_all_categories():
- with self.__connection.cursor() as cursor:
- cursor.execute("INSERT INTO categories (category_name) VALUES (%s);", (category, ))
-
- self.__connection.commit()
- return True
-
- return False
-
- def add_thought(self, category, title, markdown):
- with self.__connection.cursor() as cursor:
- cursor.execute("""
- INSERT INTO thoughts (category_id, title, markdown_text)
- VALUES ((
- SELECT category_id FROM categories WHERE category_name = %s
- ), %s, %s);""", (category, title, markdown))
- self.__connection.commit()
-
- def get_thought(self, id_):
- with self.__connection.cursor() as cursor:
- cursor.execute("""
- SELECT categories.category_name, thoughts.title, thoughts.dt, thoughts.markdown_text, thoughts.redirect
- FROM thoughts INNER JOIN categories
- ON thoughts.category_id = categories.category_id
- WHERE thought_id = %s;""", (id_, ))
- return cursor.fetchone()
-
- def get_similar_thoughts(self, category, id_):
- with self.__connection.cursor() as cursor:
- cursor.execute("""
- SELECT thought_id, title, dt, category_name FROM thoughts
- INNER JOIN categories ON thoughts.category_id = categories.category_id
- WHERE category_name = %s AND thought_id != %s;""",
- (category, id_))
- return cursor.fetchall()
-
- def get_featured_thoughts(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT thought_id, title FROM thoughts WHERE featured = 1;")
- return cursor.fetchall()
-
- def update_thought_markdown(self, id_, markdown):
- with self.__connection.cursor() as cursor:
- cursor.execute("UPDATE thoughts SET markdown_text = %s WHERE thought_id = %s;", (markdown, id_))
- self.__connection.commit()
-
- def get_categories_not(self, category_name):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT category_name FROM categories WHERE category_name != %s;", (category_name, ))
- return [i[0] for i in cursor.fetchall()]
-
- def get_all_thoughts(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("""
- SELECT thought_id, title, dt, category_name FROM thoughts
- INNER JOIN categories ON thoughts.category_id = categories.category_id;
- """)
- return cursor.fetchall()
-
- def get_cached_tweets(self, numToGet = None):
- with self.__connection.cursor() as cursor:
- sql = "SELECT tweet, tweet_id, account FROM diary WHERE account = %s ORDER BY tweeted_at DESC"
- args = (self.config.get("twitter", "main_account"), )
- if numToGet is not None:
- sql += " LIMIT %s;"
- args = (self.config.get("twitter", "main_account"), numToGet)
- else:
- sql += ";"
- cursor.execute(sql, args)
-
- return [(i[0], "https://%s/%s/status/%d" % (self.config.get("nitter", "outsideurl"), i[2], i[1])) for i in cursor.fetchall()]
-
- def get_cached_commits(self, since = None, recurse = True):
- with self.__connection.cursor() as cursor:
- if since is not None:
- cursor.execute("SELECT DISTINCT message, url, commitTime, additions, deletions, total FROM commitCache WHERE commitTime > %s ORDER BY commitTime DESC;", (since, ))
- else:
- cursor.execute("SELECT DISTINCT message, url, commitTime, additions, deletions, total FROM commitCache ORDER BY commitTime DESC;")
- # i think i might have spent too long doing functional programming
- return [{
- "repo": urlparse(i[1]).path.split("/")[2],
- "github_repo_url": "https://github.com" + "/".join(urlparse(i[1]).path.split("/")[:3]),
- "git_repo_url": "https://%s/%s.git/about" % (self.config.get("github", "personal_domain"), urlparse(i[1]).path.split("/")[2]),
- "message": i[0],
- "github_commit_url": i[1],
- "git_commit_url": "https://%s/%s.git/commit/?id=%s" % (
- self.config.get("github", "personal_domain"),
- urlparse(i[1]).path.split("/")[2],
- urlparse(i[1]).path.split("/")[-1]
- ),
- "datetime": i[2].timestamp(),
- "stats": {
- "additions": i[3],
- "deletions": i[4],
- "total": i[5]
- }
- } for i in cursor.fetchall()]
-
- def update_commit_cache(self, requested):
- with self.__connection.cursor() as cursor:
- for commit in requested:
- cursor.execute("SELECT DISTINCT url FROM commitCache;")
- urls = [i[0] for i in cursor.fetchall()]
-
- if commit["url"] not in urls:
- cursor.execute("""
- INSERT INTO commitCache (message, url, commitTime, additions, deletions, total)
- VALUES (%s, %s, %s, %s, %s, %s)""",
- (commit["message"], commit["url"], commit["datetime"], commit["stats"]["additions"], commit["stats"]["deletions"], commit["stats"]["total"])
- )
- self.__connection.commit()
-
- def get_last_commit_time(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT MAX(commitTime) FROM commitCache;")
- return cursor.fetchone()[0]
-
- def get_my_twitter(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT link FROM headerLinks WHERE name = 'twitter';")
- return cursor.fetchone()[0]
-
- def get_my_diary_twitter(self):
- return self.config.get("twitter", "diary_account")
-
- def get_iso_cd_options(self):
- iso_dir = self.config.get("cds", "location")
- return [
- i
- for i in os.listdir(iso_dir)
- if os.path.splitext(i)[-1].lower() in [".iso"]
- and os.path.getsize(os.path.join(iso_dir, i)) < self.config.getint("cds", "maxsize")
- ]
-
- def append_cd_orders(self, iso, email, house, street, city, county, postcode, name):
- with self.__connection.cursor() as cursor:
- cursor.execute("""
- INSERT INTO cd_orders_2 (iso, email, house, street, city, county, postcode, name)
- VALUES (%s, %s, %s, %s, %s, %s, %s, %s);
- """, (iso, email, house, street, city, county, postcode, name))
- id_ = cursor.lastrowid
- self.__connection.commit()
- return id_
-
- def append_qnas(self, qnas):
- with self.__connection.cursor() as cursor:
- for qna in qnas:
- cursor.execute("SELECT curiouscat_id FROM qnas WHERE curiouscat_id = %s;", (qna["id"], ))
- if cursor.fetchone() is None:
-
- cursor.execute("INSERT INTO `qnas` VALUES (%s, %s, %s, %s, %s, %s);", (
- qna["id"], qna["link"], qna["datetime"], qna["question"], qna["answer"], qna["host"]
- ))
- print("Appended question with timestamp %s" % qna["datetime"].isoformat())
-
- else:
- print("Skipped question with timestamp %s" % qna["datetime"].isoformat())
- self.__connection.commit()
-
- def get_oldest_qna(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT MAX(timestamp) FROM qnas;")
- return cursor.fetchone()[0]
-
- def get_qnas(self):
- with self.__connection.cursor() as cursor:
- cursor.execute("SELECT * FROM qnas;")
- return sorted(cursor.fetchall(), key = operator.itemgetter(2), reverse = True)
-
-
-
-
-
+from urllib.parse import urlparse
+from dataclasses import dataclass
+from lxml import html
+import configparser
+import threading
+import services
+import operator
+import datetime
+import requests
+import twython
+import pymysql
+import random
+import os
+import re
+
+@dataclass
+class Database:
+ safeLogin:bool = True #automatically login with the user in the config file, who is read only
+ user:str = None #otherwise, login with the given username and passwd
+ passwd:str = None
+
+ def __enter__(self):
+ config_path = os.path.join(os.path.dirname(__file__), "..", "edaweb.conf")
+ if not os.path.exists(config_path):
+ raise FileNotFoundError("Could not find edaweb.conf config file")
+ self.config = configparser.ConfigParser(interpolation = None)
+ self.config.read(config_path)
+
+ if self.safeLogin:
+ self.__connection = pymysql.connect(
+ **self.config["mysql"],
+ charset = "utf8mb4"
+ )
+ else:
+ self.__connection = pymysql.connect(
+ user = self.user,
+ passwd = self.passwd,
+ host = self.config["mysql"]["host"],
+ db = self.config["mysql"]["db"],
+ charset = "utf8mb4"
+ )
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.__connection.commit()
+ self.__connection.close()
+
+ def get_header_links(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT name, link FROM headerLinks WHERE display = true ORDER BY name;")
+ return cursor.fetchall()
+
+ def get_image(self, imageName):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT alt, url FROM images WHERE imageName = %s;", (imageName, ))
+ return cursor.fetchone()
+
+ def get_pfp_images(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT alt, url FROM images WHERE pfp_img = 1;")
+ return cursor.fetchall()
+
+ def get_sidebar_images(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT alt, url FROM images WHERE sidebar_image = 1;")
+ return cursor.fetchall()
+
+ def get_header_articles(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT articleName, link FROM headerArticles WHERE display = true;")
+ return cursor.fetchall()
+
+ def get_all_categories(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT category_name FROM categories;")
+ return [i[0] for i in cursor.fetchall()]
+
+ def add_category(self, category):
+ if not category in self.get_all_categories():
+ with self.__connection.cursor() as cursor:
+ cursor.execute("INSERT INTO categories (category_name) VALUES (%s);", (category, ))
+
+ self.__connection.commit()
+ return True
+
+ return False
+
+ def add_thought(self, category, title, markdown):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("""
+ INSERT INTO thoughts (category_id, title, markdown_text)
+ VALUES ((
+ SELECT category_id FROM categories WHERE category_name = %s
+ ), %s, %s);""", (category, title, markdown))
+ self.__connection.commit()
+
+ def get_thought(self, id_):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("""
+ SELECT categories.category_name, thoughts.title, thoughts.dt, thoughts.markdown_text, thoughts.redirect
+ FROM thoughts INNER JOIN categories
+ ON thoughts.category_id = categories.category_id
+ WHERE thought_id = %s;""", (id_, ))
+ return cursor.fetchone()
+
+ def get_similar_thoughts(self, category, id_):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("""
+ SELECT thought_id, title, dt, category_name FROM thoughts
+ INNER JOIN categories ON thoughts.category_id = categories.category_id
+ WHERE category_name = %s AND thought_id != %s;""",
+ (category, id_))
+ return cursor.fetchall()
+
+ def get_featured_thoughts(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT thought_id, title FROM thoughts WHERE featured = 1;")
+ return cursor.fetchall()
+
+ def update_thought_markdown(self, id_, markdown):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("UPDATE thoughts SET markdown_text = %s WHERE thought_id = %s;", (markdown, id_))
+ self.__connection.commit()
+
+ def get_categories_not(self, category_name):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT category_name FROM categories WHERE category_name != %s;", (category_name, ))
+ return [i[0] for i in cursor.fetchall()]
+
+ def get_all_thoughts(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("""
+ SELECT thought_id, title, dt, category_name FROM thoughts
+ INNER JOIN categories ON thoughts.category_id = categories.category_id;
+ """)
+ return cursor.fetchall()
+
+ def get_cached_tweets(self, numToGet = None):
+ with self.__connection.cursor() as cursor:
+ sql = "SELECT tweet, tweet_id, account FROM diary WHERE account = %s ORDER BY tweeted_at DESC"
+ args = (self.config.get("twitter", "main_account"), )
+ if numToGet is not None:
+ sql += " LIMIT %s;"
+ args = (self.config.get("twitter", "main_account"), numToGet)
+ else:
+ sql += ";"
+ cursor.execute(sql, args)
+
+ return [(i[0], "https://%s/%s/status/%d" % (self.config.get("nitter", "outsideurl"), i[2], i[1])) for i in cursor.fetchall()]
+
+ def get_cached_commits(self, since = None, recurse = True):
+ with self.__connection.cursor() as cursor:
+ if since is not None:
+ cursor.execute("SELECT DISTINCT message, url, commitTime, additions, deletions, total FROM commitCache WHERE commitTime > %s ORDER BY commitTime DESC;", (since, ))
+ else:
+ cursor.execute("SELECT DISTINCT message, url, commitTime, additions, deletions, total FROM commitCache ORDER BY commitTime DESC;")
+ # i think i might have spent too long doing functional programming
+ return [{
+ "repo": urlparse(i[1]).path.split("/")[2],
+ "github_repo_url": "https://github.com" + "/".join(urlparse(i[1]).path.split("/")[:3]),
+ "git_repo_url": "https://%s/%s.git/about" % (self.config.get("github", "personal_domain"), urlparse(i[1]).path.split("/")[2]),
+ "message": i[0],
+ "github_commit_url": i[1],
+ "git_commit_url": "https://%s/%s.git/commit/?id=%s" % (
+ self.config.get("github", "personal_domain"),
+ urlparse(i[1]).path.split("/")[2],
+ urlparse(i[1]).path.split("/")[-1]
+ ),
+ "datetime": i[2].timestamp(),
+ "stats": {
+ "additions": i[3],
+ "deletions": i[4],
+ "total": i[5]
+ }
+ } for i in cursor.fetchall()]
+
+ def update_commit_cache(self, requested):
+ with self.__connection.cursor() as cursor:
+ for commit in requested:
+ cursor.execute("SELECT DISTINCT url FROM commitCache;")
+ urls = [i[0] for i in cursor.fetchall()]
+
+ if commit["url"] not in urls:
+ cursor.execute("""
+ INSERT INTO commitCache (message, url, commitTime, additions, deletions, total)
+ VALUES (%s, %s, %s, %s, %s, %s)""",
+ (commit["message"], commit["url"], commit["datetime"], commit["stats"]["additions"], commit["stats"]["deletions"], commit["stats"]["total"])
+ )
+ self.__connection.commit()
+
+ def get_last_commit_time(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT MAX(commitTime) FROM commitCache;")
+ return cursor.fetchone()[0]
+
+ def get_my_twitter(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT link FROM headerLinks WHERE name = 'twitter';")
+ return cursor.fetchone()[0]
+
+ def get_my_diary_twitter(self):
+ return self.config.get("twitter", "diary_account")
+
+ def get_iso_cd_options(self):
+ iso_dir = self.config.get("cds", "location")
+ return [
+ i
+ for i in os.listdir(iso_dir)
+ if os.path.splitext(i)[-1].lower() in [".iso"]
+ and os.path.getsize(os.path.join(iso_dir, i)) < self.config.getint("cds", "maxsize")
+ ]
+
+ def append_cd_orders(self, iso, email, house, street, city, county, postcode, name):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("""
+ INSERT INTO cd_orders_2 (iso, email, house, street, city, county, postcode, name)
+ VALUES (%s, %s, %s, %s, %s, %s, %s, %s);
+ """, (iso, email, house, street, city, county, postcode, name))
+ id_ = cursor.lastrowid
+ self.__connection.commit()
+ return id_
+
+ def append_qnas(self, qnas):
+ with self.__connection.cursor() as cursor:
+ for qna in qnas:
+ cursor.execute("SELECT curiouscat_id FROM qnas WHERE curiouscat_id = %s;", (qna["id"], ))
+ if cursor.fetchone() is None:
+
+ cursor.execute("INSERT INTO `qnas` VALUES (%s, %s, %s, %s, %s, %s);", (
+ qna["id"], qna["link"], qna["datetime"], qna["question"], qna["answer"], qna["host"]
+ ))
+ print("Appended question with timestamp %s" % qna["datetime"].isoformat())
+
+ else:
+ print("Skipped question with timestamp %s" % qna["datetime"].isoformat())
+ self.__connection.commit()
+
+ def get_oldest_qna(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT MAX(timestamp) FROM qnas;")
+ return cursor.fetchone()[0]
+
+ def get_qnas(self):
+ with self.__connection.cursor() as cursor:
+ cursor.execute("SELECT * FROM qnas;")
+ return sorted(cursor.fetchall(), key = operator.itemgetter(2), reverse = True)
+
+
+
+
+
diff --git a/edaweb/services.py b/edaweb/services.py
index 11f21fc..50eed45 100644
--- a/edaweb/services.py
+++ b/edaweb/services.py
@@ -1,409 +1,409 @@
-from dataclasses import dataclass
-from io import StringIO
-from lxml import html, etree
-from github import Github
-import multiprocessing
-import paramiko.client
-from APiHole import PiHole
-import transmission_rpc
-import configparser
-import math as maths
-import requests
-import datetime
-import urllib
-import docker
-import random
-import subprocess
-import fabric
-import pickle
-import queue
-import json
-import time
-import os
-
-theLastId = 0
-config_path = os.path.join(os.path.dirname(__file__), "..", "edaweb.conf")
-if not os.path.exists(config_path):
- raise FileNotFoundError("Could not find edaweb.conf config file")
-CONFIG = configparser.ConfigParser(interpolation = None)
-CONFIG.read(config_path)
-
-def humanbytes(B):
- 'Return the given bytes as a human friendly KB, MB, GB, or TB string'
- B = float(B)
- KB = float(1024)
- MB = float(KB ** 2) # 1,048,576
- GB = float(KB ** 3) # 1,073,741,824
- TB = float(KB ** 4) # 1,099,511,627,776
-
- if B < KB:
- return '{0} {1}'.format(B,'Bytes' if 0 == B > 1 else 'Byte')
- elif KB <= B < MB:
- return '{0:.2f} KB'.format(B/KB)
- elif MB <= B < GB:
- return '{0:.2f} MB'.format(B/MB)
- elif GB <= B < TB:
- return '{0:.2f} GB'.format(B/GB)
- elif TB <= B:
- return '{0:.2f} TB'.format(B/TB)
-
-@dataclass
-class SafebooruImage:
- id_: int
- url: str
- searchTags: list
- tags: list
- source: str
- imurl: str
-
- def remove_tag(self, tag):
- return list(set(self.searchTags).difference(set([tag])))
-
-@dataclass
-class DownloadedImage:
- imurl: str
-
- def __enter__(self):
- self.filename = os.path.join("static", "images", "random.jpg")
-
- req = urllib.request.Request(self.imurl, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_5_8) AppleWebKit/534.50.2 (KHTML, like Gecko) Version/5.0.6 Safari/533.22.3'})
- mediaContent = urllib.request.urlopen(req).read()
- with open(self.filename, "wb") as f:
- f.write(mediaContent)
- return self.filename
-
- def __exit__(self, type, value, traceback):
- os.remove(self.filename)
-
-def get_num_pages(tags):
- pages_url = "https://safebooru.org/index.php?page=post&s=list&tags=%s" % "+".join(tags)
- tree = html.fromstring(requests.get(pages_url).content)
- try:
- finalpage_element = tree.xpath("/html/body/div[6]/div/div[2]/div[2]/div/a[12]")[0]
- except IndexError:
- return 1
- else:
- return int(int(urllib.parse.parse_qs(finalpage_element.get("href"))["pid"][0]) / (5*8))
-
-def get_id_from_url(url):
- return int(urllib.parse.parse_qs(url)["id"][0])
-
-def get_random_image(tags):
- global theLastId
- searchPage = random.randint(1, get_num_pages(tags)) * 5 * 8
- url = "https://safebooru.org/index.php?page=post&s=list&tags=%s&pid=%i" % ("+".join(tags), searchPage)
- tree = html.fromstring(requests.get(url).content)
-
- imageElements = [e for e in tree.xpath("/html/body/div[6]/div/div[2]/div[1]")[0].iter(tag = "a")]
- try:
- element = random.choice(imageElements)
- except IndexError:
- # raise ConnectionError("Couldn't find any images")
- return get_random_image(tags)
-
- url = "https://safebooru.org/" + element.get("href")
- if get_id_from_url(url) == theLastId:
- return get_random_image(tags)
- theLastId = get_id_from_url(url)
-
- try:
- sbi = SafebooruImage(
- id_ = get_id_from_url(url),
- url = url,
- tags = element.find("img").get("alt").split(),
- searchTags = tags,
- source = fix_source_url(get_source(url)),
- imurl = get_imurl(url)
- )
- except (ConnectionError, KeyError) as e:
- print("[ERROR]", e)
- return get_random_image(tags)
-
- if link_deleted(sbi.url):
- print("Retried since the source was deleted...")
- return get_random_image(tags)
-
- return sbi
-
-def get_source(url):
- tree = html.fromstring(requests.get(url).content)
- for element in tree.xpath('//*[@id="stats"]')[0].iter("li"):
- if element.text.startswith("Source: h"):
- return element.text[8:]
- elif element.text.startswith("Source:"):
- for child in element.iter():
- if child.get("href") is not None:
- return child.get("href")
- raise ConnectionError("Couldn't find source image for id %i" % get_id_from_url(url))
-
-def fix_source_url(url):
- parsed = urllib.parse.urlparse(url)
- if parsed.netloc == "www.pixiv.net":
- return "https://www.pixiv.net/en/artworks/" + urllib.parse.parse_qs(parsed.query)["illust_id"][0]
- elif parsed.netloc in ["bishie.booru.org", "www.secchan.net"]:
- return ConnectionError("Couldn't get source")
- elif "pximg.net" in parsed.netloc or "pixiv.net" in parsed.netloc:
- return "https://www.pixiv.net/en/artworks/" + parsed.path.split("/")[-1][:8]
- elif parsed.netloc == "twitter.com":
- return url.replace("twitter.com", "nitter.eda.gay")
- return url
-
-def get_imurl(url):
- tree = html.fromstring(requests.get(url).content)
- return tree.xpath('//*[@id="image"]')[0].get("src")
-
-def link_deleted(url):
- text = requests.get(url).text
- return text[text.find("<title>") + 7 : text.find("</title>")] in ["Error | nitter", "イラストコミュニケーションサービス[pixiv]"]
-
-def request_recent_commits(since = datetime.datetime.now() - datetime.timedelta(days=7)):
- g = Github(CONFIG.get("github", "access_code"))
- out = []
- for repo in g.get_user().get_repos():
- # print(repo.name, list(repo.get_branches()))
- try:
- for commit in repo.get_commits(since = since):
- out.append({
- "repo": repo.name,
- "message": commit.commit.message,
- "url": commit.html_url,
- "datetime": commit.commit.author.date,
- "stats": {
- "additions": commit.stats.additions,
- "deletions": commit.stats.deletions,
- "total": commit.stats.total
- }
- })
- except Exception as e:
- print(repo, e)
-
- return sorted(out, key = lambda a: a["datetime"], reverse = True)
-
-def scrape_nitter(username, get_until:int):
- new_tweets = []
- nitter_url = CONFIG.get("nitter", "internalurl")
- nitter_port = CONFIG.getint("nitter", "internalport")
- scrape_new_pages = True
- url = "http://%s:%d/%s" % (nitter_url, nitter_port, username)
-
- while scrape_new_pages:
- tree = html.fromstring(requests.get(url).content)
- for i, tweetUrlElement in enumerate(tree.xpath('//*[@class="tweet-link"]'), 0):
- if i > 0 and tweetUrlElement.get("href").split("/")[1] == username:
- id_ = int(urllib.parse.urlparse(tweetUrlElement.get("href")).path.split("/")[-1])
- tweet_link = "http://%s:%d%s" % (nitter_url, nitter_port, tweetUrlElement.get("href"))
-
- if id_ == get_until:
- scrape_new_pages = False
- break
-
- try:
- dt, replying_to, text, images = parse_tweet(tweet_link)
- new_tweets.append((id_, dt, replying_to, text, username, images))
- print(dt, "'%s'" % text)
- except IndexError:
- print("Couldn't get any more tweets")
- scrape_new_pages = False
- break
- except ConnectionError:
- print("Rate limited, try again later")
- return []
-
-
- try:
- cursor = tree.xpath('//*[@class="show-more"]/a')[0].get("href")
- except IndexError:
- # no more elements
- break
- url = "http://%s:%d/%s%s" % (nitter_url, nitter_port, username, cursor)
-
- return new_tweets
-
-def parse_tweet(tweet_url):
- # print(tweet_url)
- tree = html.fromstring(requests.get(tweet_url).content)
- # with open("2images.html", "r") as f:
- # tree = html.fromstring(f.read())
-
- rate_limited_elem = tree.xpath("/html/body/div/div/div/span")
- if rate_limited_elem != []:
- if rate_limited_elem[0].text == "Instance has been rate limited.":
- raise ConnectionError("Instance has been rate limited.")
-
- main_tweet_elem = tree.xpath('//*[@class="main-tweet"]')[0]
-
- dt_str = main_tweet_elem.xpath('//*[@class="tweet-published"]')[0].text
- dt = datetime.datetime.strptime(dt_str.replace("Â", ""), "%b %d, %Y · %I:%M %p UTC")
- text = tree.xpath('//*[@class="main-tweet"]/div/div/div[2]')[0].text_content()
- if text == "":
- text = "[Image only]"
- replying_to_elems = tree.xpath('//*[@class="before-tweet thread-line"]/div/a')
- if replying_to_elems != []:
- replying_to = int(urllib.parse.urlparse(replying_to_elems[-1].get("href")).path.split("/")[-1])
- else:
- replying_to = None
-
- images = []
- images_elems = tree.xpath('//*[@class="main-tweet"]/div/div/div[3]/div/div/a/img')
- for image_elem in images_elems:
- images.append("https://" + CONFIG.get("nitter", "outsideurl") + urllib.parse.urlparse(image_elem.get("src")).path)
-
- return dt, replying_to, text, images
-
-def scrape_whispa(whispa_url, since = None):
- def query_answer(answer_url, max_retries = 10):
- for i in range(max_retries):
- try:
- return requests.get(answer_url)
- except requests.exceptions.ConnectionError:
- s = 5.05 * (i + 1)
- print("Connection timed out, retrying in %.2fs" % s)
- time.sleep(s)
- continue
-
- # add a bit of wiggle room in case i don't answer the questions in order (i often do this)
- if since is None:
- stop_at = datetime.datetime(year = 2001, month = 8, day = 12)
- else:
- stop_at = since - datetime.timedelta(days = 14)
- print("The newest Q&A timestamp in the database was %s, we will stop looking at %s." % (since.astimezone().isoformat(), stop_at.astimezone().isoformat()))
-
- html_ = requests.get(whispa_url).content.decode()
- # with open("temp.html", "w") as f:
- # f.write(html_)
-
- tree = html.fromstring(html_)
- qnas = []
- # we're not doing proper HTML scraping here really... since the site uses client side rendering
- # we rather parse the JS scripts to get the JSON payload of useful information... sadly this looks horrible
- for i, script in enumerate(tree.xpath("/html/body/script"), 0):
- js = str(script.text)
- if "receivedFeedback" in js:
- # my god this is horrible...
- parsed_json = json.loads(json.loads(js[19:-1])[1][2:])[0][3]["loadedUser"]["receivedFeedback"]
- # print(json.dumps(parsed_json, indent = 4))
- # with open("whispas_%i.json" % i, "w") as f:
- # json.dump(parsed_json, f, indent = 4)
- for j in parsed_json:
- if j["_count"]["childFeedback"] < 0:
- continue
-
- answer_url = "https://apiv4.whispa.sh/feedbacks/%s/children/public" % j["id"]
- req = query_answer(answer_url)
- try:
- firstanswer = req.json()["data"][0]
- except IndexError:
- continue
- dt = datetime.datetime.fromisoformat(firstanswer["createdAt"][:-1])
-
- qna = {
- # "id": int(j["id"], base = 16),
- "id": int(dt.timestamp()),
- "link": answer_url,
- "datetime": dt,
- "question": j["content"],
- "answer": firstanswer["content"],
- "host": "whispa.sh"
- }
- print(qna)
- qnas.append(qna)
- time.sleep(2.03)
- if dt <= stop_at:
- print("Met the threshold for oldest Q&A, so stopped looking.")
- break
- return qnas
-
-def get_docker_containers(host, ssh_key_path):
- result = fabric.Connection(
- host = host,
- user = "root",
- connect_kwargs = {
- "key_filename": ssh_key_path,
- "look_for_keys": False
- }
- ).run('docker ps -a -s --format "table {{.Names}};{{.Status}};{{.Image}}"', hide = True)
- return [line.split(";") for line in result.stdout.split("\n")[1:-1]]
-
-def cache_all_docker_containers(ssh_key_path):
- containers = {}
- containers["containers"] = {}
- for host, name in CONFIG["docker_hosts"].items():
- print(host)
- containers["containers"][(host, name)] = get_docker_containers(host, ssh_key_path)
-
- containers["cachetime"] = "Docker information last updated at %s" % str(datetime.datetime.now())
- with open("/tmp/docker-cache.json", "wb") as f:
- pickle.dump(containers, f)
-
-def get_all_docker_containers():
- if not os.path.exists("/tmp/docker-cache.json"):
- return {"containers": {}, "cachetime": "No cached docker information"}
-
- with open("/tmp/docker-cache.json", "rb") as f:
- return pickle.load(f)
-
-def timeout(func):
- # cant get this to work with queue.Queue() for some reason?
- # this works but Manager() uses an extra thread than Queue()
- manager = multiprocessing.Manager()
- returnVan = manager.list()
- # ti = time.time()
-
- def runFunc(q, func):
- q.append(func())
-
- def beginTimeout():
- t = multiprocessing.Process(target = runFunc, args = (returnVan, func))
- t.start()
-
- t.join(timeout = CONFIG["servicetimeout"].getint("seconds"))
-
- # print("Request took:", time.time() - ti)
- try:
- return returnVan[0]
- except IndexError:
- if t.is_alive():
- t.terminate()
-
- return beginTimeout
-
-@timeout
-def get_torrent_stats():
- client = transmission_rpc.client.Client(
- host = CONFIG.get("transmission", "host")
- )
- s = vars(client.session_stats())["fields"]
- return {
- "Active torrents:": s["activeTorrentCount"],
- "Downloaded:": humanbytes(s["cumulative-stats"]["downloadedBytes"]),
- "Uploaded:": humanbytes(s["cumulative-stats"]["uploadedBytes"]),
- "Active time:": str(datetime.timedelta(seconds = s["cumulative-stats"]["secondsActive"])),
- "Files added:": s["cumulative-stats"]["filesAdded"],
- "Current upload speed:": humanbytes(s["uploadSpeed"]) + "s/S",
- "Current download speed:": humanbytes(s["downloadSpeed"]) + "s/S"
- }
-
-@timeout
-def get_pihole_stats():
- return PiHole.GetSummary(CONFIG.get("pihole", "url"), CONFIG.get("pihole", "key"), True)
-
-def get_recent_commits(db, max_per_repo = 3):
- cache = db.get_cached_commits()
- num_per_repo = {}
- out = []
- for commit in cache:
- if commit["repo"] not in num_per_repo.keys():
- num_per_repo[commit["repo"]] = 0
-
- num_per_repo[commit["repo"]] += 1
- if num_per_repo[commit["repo"]] <= max_per_repo:
- out.append(commit)
-
- return sorted(out, key = lambda a: a["datetime"], reverse = True)
-
-if __name__ == "__main__":
- print(scrape_whispa(CONFIG.get("qnas", "url")))
- # import database
-
- # with database.Database() as db:
- # print(json.dumps(get_recent_commits(db), indent=4))
+from dataclasses import dataclass
+from io import StringIO
+from lxml import html, etree
+from github import Github
+import multiprocessing
+import paramiko.client
+from APiHole import PiHole
+import transmission_rpc
+import configparser
+import math as maths
+import requests
+import datetime
+import urllib
+import docker
+import random
+import subprocess
+import fabric
+import pickle
+import queue
+import json
+import time
+import os
+
+theLastId = 0
+config_path = os.path.join(os.path.dirname(__file__), "..", "edaweb.conf")
+if not os.path.exists(config_path):
+ raise FileNotFoundError("Could not find edaweb.conf config file")
+CONFIG = configparser.ConfigParser(interpolation = None)
+CONFIG.read(config_path)
+
+def humanbytes(B):
+ 'Return the given bytes as a human friendly KB, MB, GB, or TB string'
+ B = float(B)
+ KB = float(1024)
+ MB = float(KB ** 2) # 1,048,576
+ GB = float(KB ** 3) # 1,073,741,824
+ TB = float(KB ** 4) # 1,099,511,627,776
+
+ if B < KB:
+ return '{0} {1}'.format(B,'Bytes' if 0 == B > 1 else 'Byte')
+ elif KB <= B < MB:
+ return '{0:.2f} KB'.format(B/KB)
+ elif MB <= B < GB:
+ return '{0:.2f} MB'.format(B/MB)
+ elif GB <= B < TB:
+ return '{0:.2f} GB'.format(B/GB)
+ elif TB <= B:
+ return '{0:.2f} TB'.format(B/TB)
+
+@dataclass
+class SafebooruImage:
+ id_: int
+ url: str
+ searchTags: list
+ tags: list
+ source: str
+ imurl: str
+
+ def remove_tag(self, tag):
+ return list(set(self.searchTags).difference(set([tag])))
+
+@dataclass
+class DownloadedImage:
+ imurl: str
+
+ def __enter__(self):
+ self.filename = os.path.join("static", "images", "random.jpg")
+
+ req = urllib.request.Request(self.imurl, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_5_8) AppleWebKit/534.50.2 (KHTML, like Gecko) Version/5.0.6 Safari/533.22.3'})
+ mediaContent = urllib.request.urlopen(req).read()
+ with open(self.filename, "wb") as f:
+ f.write(mediaContent)
+ return self.filename
+
+ def __exit__(self, type, value, traceback):
+ os.remove(self.filename)
+
+def get_num_pages(tags):
+ pages_url = "https://safebooru.org/index.php?page=post&s=list&tags=%s" % "+".join(tags)
+ tree = html.fromstring(requests.get(pages_url).content)
+ try:
+ finalpage_element = tree.xpath("/html/body/div[6]/div/div[2]/div[2]/div/a[12]")[0]
+ except IndexError:
+ return 1
+ else:
+ return int(int(urllib.parse.parse_qs(finalpage_element.get("href"))["pid"][0]) / (5*8))
+
+def get_id_from_url(url):
+ return int(urllib.parse.parse_qs(url)["id"][0])
+
+def get_random_image(tags):
+ global theLastId
+ searchPage = random.randint(1, get_num_pages(tags)) * 5 * 8
+ url = "https://safebooru.org/index.php?page=post&s=list&tags=%s&pid=%i" % ("+".join(tags), searchPage)
+ tree = html.fromstring(requests.get(url).content)
+
+ imageElements = [e for e in tree.xpath("/html/body/div[6]/div/div[2]/div[1]")[0].iter(tag = "a")]
+ try:
+ element = random.choice(imageElements)
+ except IndexError:
+ # raise ConnectionError("Couldn't find any images")
+ return get_random_image(tags)
+
+ url = "https://safebooru.org/" + element.get("href")
+ if get_id_from_url(url) == theLastId:
+ return get_random_image(tags)
+ theLastId = get_id_from_url(url)
+
+ try:
+ sbi = SafebooruImage(
+ id_ = get_id_from_url(url),
+ url = url,
+ tags = element.find("img").get("alt").split(),
+ searchTags = tags,
+ source = fix_source_url(get_source(url)),
+ imurl = get_imurl(url)
+ )
+ except (ConnectionError, KeyError) as e:
+ print("[ERROR]", e)
+ return get_random_image(tags)
+
+ if link_deleted(sbi.url):
+ print("Retried since the source was deleted...")
+ return get_random_image(tags)
+
+ return sbi
+
+def get_source(url):
+ tree = html.fromstring(requests.get(url).content)
+ for element in tree.xpath('//*[@id="stats"]')[0].iter("li"):
+ if element.text.startswith("Source: h"):
+ return element.text[8:]
+ elif element.text.startswith("Source:"):
+ for child in element.iter():
+ if child.get("href") is not None:
+ return child.get("href")
+ raise ConnectionError("Couldn't find source image for id %i" % get_id_from_url(url))
+
+def fix_source_url(url):
+ parsed = urllib.parse.urlparse(url)
+ if parsed.netloc == "www.pixiv.net":
+ return "https://www.pixiv.net/en/artworks/" + urllib.parse.parse_qs(parsed.query)["illust_id"][0]
+ elif parsed.netloc in ["bishie.booru.org", "www.secchan.net"]:
+ return ConnectionError("Couldn't get source")
+ elif "pximg.net" in parsed.netloc or "pixiv.net" in parsed.netloc:
+ return "https://www.pixiv.net/en/artworks/" + parsed.path.split("/")[-1][:8]
+ elif parsed.netloc == "twitter.com":
+ return url.replace("twitter.com", "nitter.eda.gay")
+ return url
+
+def get_imurl(url):
+ tree = html.fromstring(requests.get(url).content)
+ return tree.xpath('//*[@id="image"]')[0].get("src")
+
+def link_deleted(url):
+ text = requests.get(url).text
+ return text[text.find("<title>") + 7 : text.find("</title>")] in ["Error | nitter", "イラストコミュニケーションサービス[pixiv]"]
+
+def request_recent_commits(since = datetime.datetime.now() - datetime.timedelta(days=7)):
+ g = Github(CONFIG.get("github", "access_code"))
+ out = []
+ for repo in g.get_user().get_repos():
+ # print(repo.name, list(repo.get_branches()))
+ try:
+ for commit in repo.get_commits(since = since):
+ out.append({
+ "repo": repo.name,
+ "message": commit.commit.message,
+ "url": commit.html_url,
+ "datetime": commit.commit.author.date,
+ "stats": {
+ "additions": commit.stats.additions,
+ "deletions": commit.stats.deletions,
+ "total": commit.stats.total
+ }
+ })
+ except Exception as e:
+ print(repo, e)
+
+ return sorted(out, key = lambda a: a["datetime"], reverse = True)
+
+def scrape_nitter(username, get_until:int):
+ new_tweets = []
+ nitter_url = CONFIG.get("nitter", "internalurl")
+ nitter_port = CONFIG.getint("nitter", "internalport")
+ scrape_new_pages = True
+ url = "http://%s:%d/%s" % (nitter_url, nitter_port, username)
+
+ while scrape_new_pages:
+ tree = html.fromstring(requests.get(url).content)
+ for i, tweetUrlElement in enumerate(tree.xpath('//*[@class="tweet-link"]'), 0):
+ if i > 0 and tweetUrlElement.get("href").split("/")[1] == username:
+ id_ = int(urllib.parse.urlparse(tweetUrlElement.get("href")).path.split("/")[-1])
+ tweet_link = "http://%s:%d%s" % (nitter_url, nitter_port, tweetUrlElement.get("href"))
+
+ if id_ == get_until:
+ scrape_new_pages = False
+ break
+
+ try:
+ dt, replying_to, text, images = parse_tweet(tweet_link)
+ new_tweets.append((id_, dt, replying_to, text, username, images))
+ print(dt, "'%s'" % text)
+ except IndexError:
+ print("Couldn't get any more tweets")
+ scrape_new_pages = False
+ break
+ except ConnectionError:
+ print("Rate limited, try again later")
+ return []
+
+
+ try:
+ cursor = tree.xpath('//*[@class="show-more"]/a')[0].get("href")
+ except IndexError:
+ # no more elements
+ break
+ url = "http://%s:%d/%s%s" % (nitter_url, nitter_port, username, cursor)
+
+ return new_tweets
+
+def parse_tweet(tweet_url):
+ # print(tweet_url)
+ tree = html.fromstring(requests.get(tweet_url).content)
+ # with open("2images.html", "r") as f:
+ # tree = html.fromstring(f.read())
+
+ rate_limited_elem = tree.xpath("/html/body/div/div/div/span")
+ if rate_limited_elem != []:
+ if rate_limited_elem[0].text == "Instance has been rate limited.":
+ raise ConnectionError("Instance has been rate limited.")
+
+ main_tweet_elem = tree.xpath('//*[@class="main-tweet"]')[0]
+
+ dt_str = main_tweet_elem.xpath('//*[@class="tweet-published"]')[0].text
+ dt = datetime.datetime.strptime(dt_str.replace("Â", ""), "%b %d, %Y · %I:%M %p UTC")
+ text = tree.xpath('//*[@class="main-tweet"]/div/div/div[2]')[0].text_content()
+ if text == "":
+ text = "[Image only]"
+ replying_to_elems = tree.xpath('//*[@class="before-tweet thread-line"]/div/a')
+ if replying_to_elems != []:
+ replying_to = int(urllib.parse.urlparse(replying_to_elems[-1].get("href")).path.split("/")[-1])
+ else:
+ replying_to = None
+
+ images = []
+ images_elems = tree.xpath('//*[@class="main-tweet"]/div/div/div[3]/div/div/a/img')
+ for image_elem in images_elems:
+ images.append("https://" + CONFIG.get("nitter", "outsideurl") + urllib.parse.urlparse(image_elem.get("src")).path)
+
+ return dt, replying_to, text, images
+
+def scrape_whispa(whispa_url, since = None):
+ def query_answer(answer_url, max_retries = 10):
+ for i in range(max_retries):
+ try:
+ return requests.get(answer_url)
+ except requests.exceptions.ConnectionError:
+ s = 5.05 * (i + 1)
+ print("Connection timed out, retrying in %.2fs" % s)
+ time.sleep(s)
+ continue
+
+ # add a bit of wiggle room in case i don't answer the questions in order (i often do this)
+ if since is None:
+ stop_at = datetime.datetime(year = 2001, month = 8, day = 12)
+ else:
+ stop_at = since - datetime.timedelta(days = 14)
+ print("The newest Q&A timestamp in the database was %s, we will stop looking at %s." % (since.astimezone().isoformat(), stop_at.astimezone().isoformat()))
+
+ html_ = requests.get(whispa_url).content.decode()
+ # with open("temp.html", "w") as f:
+ # f.write(html_)
+
+ tree = html.fromstring(html_)
+ qnas = []
+ # we're not doing proper HTML scraping here really... since the site uses client side rendering
+ # instead we parse the JS scripts to get the JSON payload of useful information... sadly this looks horrible
+ for i, script in enumerate(tree.xpath("/html/body/script"), 0):
+ js = str(script.text)
+ if "receivedFeedback" in js:
+ # my god this is horrible...
+ parsed_json = json.loads(json.loads(js[19:-1])[1][2:])[0][3]["loadedUser"]["receivedFeedback"]
+ # print(json.dumps(parsed_json, indent = 4))
+ # with open("whispas_%i.json" % i, "w") as f:
+ # json.dump(parsed_json, f, indent = 4)
+ for j in parsed_json:
+ if j["_count"]["childFeedback"] < 0:
+ continue
+
+ answer_url = "https://apiv4.whispa.sh/feedbacks/%s/children/public" % j["id"]
+ req = query_answer(answer_url)
+ try:
+ firstanswer = req.json()["data"][0]
+ except IndexError:
+ continue
+ dt = datetime.datetime.fromisoformat(firstanswer["createdAt"][:-1])
+
+ qna = {
+ # "id": int(j["id"], base = 16),
+ "id": int(dt.timestamp()),
+ "link": answer_url,
+ "datetime": dt,
+ "question": j["content"],
+ "answer": firstanswer["content"],
+ "host": "whispa.sh"
+ }
+ print(qna)
+ qnas.append(qna)
+ time.sleep(2.03)
+ if dt <= stop_at:
+ print("Met the threshold for oldest Q&A, so stopped looking.")
+ break
+ return qnas
+
+def get_docker_containers(host, ssh_key_path):
+ result = fabric.Connection(
+ host = host,
+ user = "root",
+ connect_kwargs = {
+ "key_filename": ssh_key_path,
+ "look_for_keys": False
+ }
+ ).run('docker ps -a -s --format "table {{.Names}};{{.Status}};{{.Image}}"', hide = True)
+ return [line.split(";") for line in result.stdout.split("\n")[1:-1]]
+
+def cache_all_docker_containers(ssh_key_path):
+ containers = {}
+ containers["containers"] = {}
+ for host, name in CONFIG["docker_hosts"].items():
+ print(host)
+ containers["containers"][(host, name)] = get_docker_containers(host, ssh_key_path)
+
+ containers["cachetime"] = "Docker information last updated at %s" % str(datetime.datetime.now())
+ with open("/tmp/docker-cache.json", "wb") as f:
+ pickle.dump(containers, f)
+
+def get_all_docker_containers():
+ if not os.path.exists("/tmp/docker-cache.json"):
+ return {"containers": {}, "cachetime": "No cached docker information"}
+
+ with open("/tmp/docker-cache.json", "rb") as f:
+ return pickle.load(f)
+
+def timeout(func):
+ # can't get this to work with queue.Queue() for some reason?
+ # this works, but Manager() uses one more thread than Queue() would
+ manager = multiprocessing.Manager()
+ returnVan = manager.list()
+ # ti = time.time()
+
+ def runFunc(q, func):
+ q.append(func())
+
+ def beginTimeout():
+ t = multiprocessing.Process(target = runFunc, args = (returnVan, func))
+ t.start()
+
+ t.join(timeout = CONFIG["servicetimeout"].getint("seconds"))
+
+ # print("Request took:", time.time() - ti)
+ try:
+ return returnVan[0]
+ except IndexError:
+ if t.is_alive():
+ t.terminate()
+
+ return beginTimeout
+
+@timeout
+def get_torrent_stats():
+ client = transmission_rpc.client.Client(
+ host = CONFIG.get("transmission", "host")
+ )
+ s = vars(client.session_stats())["fields"]
+ return {
+ "Active torrents:": s["activeTorrentCount"],
+ "Downloaded:": humanbytes(s["cumulative-stats"]["downloadedBytes"]),
+ "Uploaded:": humanbytes(s["cumulative-stats"]["uploadedBytes"]),
+ "Active time:": str(datetime.timedelta(seconds = s["cumulative-stats"]["secondsActive"])),
+ "Files added:": s["cumulative-stats"]["filesAdded"],
+ "Current upload speed:": humanbytes(s["uploadSpeed"]) + "s/S",
+ "Current download speed:": humanbytes(s["downloadSpeed"]) + "s/S"
+ }
+
+@timeout
+def get_pihole_stats():
+ return PiHole.GetSummary(CONFIG.get("pihole", "url"), CONFIG.get("pihole", "key"), True)
+
+def get_recent_commits(db, max_per_repo = 3):
+ cache = db.get_cached_commits()
+ num_per_repo = {}
+ out = []
+ for commit in cache:
+ if commit["repo"] not in num_per_repo.keys():
+ num_per_repo[commit["repo"]] = 0
+
+ num_per_repo[commit["repo"]] += 1
+ if num_per_repo[commit["repo"]] <= max_per_repo:
+ out.append(commit)
+
+ return sorted(out, key = lambda a: a["datetime"], reverse = True)
+
+if __name__ == "__main__":
+ print(scrape_whispa(CONFIG.get("qnas", "url")))
+ # import database
+
+ # with database.Database() as db:
+ # print(json.dumps(get_recent_commits(db), indent=4))
diff --git a/edaweb/static/images/PXL_20251111_125628695.jpg b/edaweb/static/images/PXL_20251111_125628695.jpg
new file mode 100644
index 0000000..557de37
--- /dev/null
+++ b/edaweb/static/images/PXL_20251111_125628695.jpg
Binary files differ
diff --git a/edaweb/static/images/PXL_20251115_180322252.jpg b/edaweb/static/images/PXL_20251115_180322252.jpg
new file mode 100644
index 0000000..9eb2905
--- /dev/null
+++ b/edaweb/static/images/PXL_20251115_180322252.jpg
Binary files differ
diff --git a/edaweb/static/images/PXL_20251115_180349152.jpg b/edaweb/static/images/PXL_20251115_180349152.jpg
new file mode 100644
index 0000000..452d7ca
--- /dev/null
+++ b/edaweb/static/images/PXL_20251115_180349152.jpg
Binary files differ
diff --git a/edaweb/static/images/PXL_20251130_222326376.jpg b/edaweb/static/images/PXL_20251130_222326376.jpg
new file mode 100644
index 0000000..1f11f65
--- /dev/null
+++ b/edaweb/static/images/PXL_20251130_222326376.jpg
Binary files differ
diff --git a/edaweb/static/images/PXL_20260210_231506089.jpg b/edaweb/static/images/PXL_20260210_231506089.jpg
new file mode 100644
index 0000000..9d7816f
--- /dev/null
+++ b/edaweb/static/images/PXL_20260210_231506089.jpg
Binary files differ