--- /dev/null
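+"""nom: a small command-line RSS/Atom feed reader.
+
+Feeds listed in a local feed list are fetched into a cache directory and
+their entries printed to stdout as delimiter-separated lines.
+"""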
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Optional
+from urllib.parse import urlparse, quote, unquote
+
+import requests
+import feedparser
+
+
+FEED_CACHE = Path.home() / ".cache" / "nom" / "feeds"
+FEED_LIST = Path.home() / ".local" / "share" / "nom" / "feedlist" / "default"
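+# FEED_LIST is a plain-text file with one feed URL per line; fetched feeds are
+# cached under FEED_CACHE, one file per URL (named via url2filename).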
+
+
+class NomError(Exception):
+ pass
+
+def url2filename(url: str) -> str:
+    """Encode a feed URL as a flat, filesystem-safe cache filename."""
+    p = urlparse(url)
+    stringified = ''.join([p.netloc, p.path])
+    return quote(stringified, safe='').replace('%2F', '-')
+
+def filename2url(url: str) -> str:
+    """Recover a feed URL from a cache filename (the scheme is assumed to be https)."""
+    p = urlparse(url.replace('-', '%2f'))
+    return "https://" + unquote(''.join([p.netloc, p.path]))
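+# Illustrative round trip (note that a hyphen in the original URL would not
+# survive, since '-' stands in for the encoded '/'):
+#   url2filename("https://example.com/feed.xml")  -> "example.com-feed.xml"
+#   filename2url("example.com-feed.xml")          -> "https://example.com/feed.xml"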
+
+
+# TODO: This should probably use Pydantic.
+@dataclass
+class Entry:
+ title: str
+ url: str
+ updated: str # TODO: Make this datetime when I add filters
+ # summary: str # TODO: Add this when you feel like stripping HTML
+
+ # TODO: What if there's a pipe in one of the fields?
+    def write_line(self, delimiter: str = '|') -> str:
+ return delimiter.join([self.title, self.url, self.updated])
+
+class Feed:
+
+    def __init__(self, url: str):
+        # feedparser accepts either a URL or a local file path (we pass cached files).
+        d = feedparser.parse(url)
+        self.d = d
+        self.name = d.feed.get("title", url)  # fall back to the URL if the feed has no title
+        self.url = url  # how is this different from d.feed.link?
+        self.entries: list[Entry] = [
+            Entry(
+                e.title,
+                e.link,
+                e.get("updated", "")  # not every feed provides 'updated'
+            ) for e in d.entries]
+
+    # TODO: Write to `file` instead of stdout once there's a command-line option for it.
+    def to_stdout(self, file: Optional[Path] = None):
+        for entry in self.entries:
+            print(entry.write_line())
+
+class FeedList:
+
+    def __init__(self, file: Path):
+        with open(file, 'r') as f:
+            # Skip blank lines so trailing newlines don't produce empty URLs.
+            urls = [line.strip() for line in f if line.strip()]
+
+        self.name = file.name
+        self.urls = urls
+
+    def fetch_feeds(self, save_dir: Path = FEED_CACHE):
+        save_dir.mkdir(parents=True, exist_ok=True)
+
+        for url in self.urls:
+            filename = url2filename(url)
+            path = save_dir / filename
+            # Fetch before opening the cache file so a failed request
+            # doesn't truncate an existing cached copy.
+            try:
+                # Timeout so one dead feed can't hang the whole update.
+                r = requests.get(url, timeout=30)
+                r.raise_for_status()
+            except requests.RequestException as exc:
+                print(f"{path} not updated ({exc})")
+                continue
+            with open(path, 'w') as f:
+                f.write(r.text)
+            print(f"{path} updated")
+
+# TODO: Need to append feeds to one another (and save to entrylist)
+# TODO: Flesh out CLI.
+if __name__ == "__main__":
+
+ from argparse import ArgumentParser
+ parser = ArgumentParser(description="Nom Script")
+ subparsers = parser.add_subparsers(dest='command', help='Sub-command help')
+
+ # Entry subcommand
+ entry_parser = subparsers.add_parser('entry', help='Entry related commands')
+ entry_subparsers = entry_parser.add_subparsers(dest='entry_command', help='Entry sub-command help')
+ entry_show_parser = entry_subparsers.add_parser('show', help='Show entries')
+
+ # Feed subcommand
+ feed_parser = subparsers.add_parser('feed', help='Feed related commands')
+ feed_subparsers = feed_parser.add_subparsers(dest='feed_command', help='Feed sub-command help')
+ feed_update_parser = feed_subparsers.add_parser('update', help='Update feed')
+
+ # Parse Args
+ args = parser.parse_args()
+
+ # Direct Logic
+    feedlist = FeedList(FEED_LIST)
+ if args.command == "entry" and args.entry_command == "show":
+ for url in feedlist.urls:
+            feed = Feed(str(FEED_CACHE / url2filename(url)))
+ feed.to_stdout()
+ elif args.command == "feed" and args.feed_command == "update":
+ feedlist.fetch_feeds()
+ else:
+ raise NomError("That option is not yet supported.")
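+
+# Example usage (assuming this file is saved as nom.py):
+#   python nom.py feed update    # fetch every feed in the feed list into the cache
+#   python nom.py entry show     # print cached entries as title|url|updated lines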
\ No newline at end of file