.venv
+.vscode
+.pytest_cache
**/__pycache__/**
+**/nom.egg-info
--- /dev/null
+MIT License
+
+Copyright (c) 2025 Kyle Bowman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
--- /dev/null
+[build-system]
+requires = ["setuptools>=58"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "nom"
+version = "0.0.1"
+description = "Uses the command line to manage your RSS/Atom feeds."
+authors = [{ name = "Kyle Bowman", email = "kylebowman14@gmail.com" }]
+readme = "README.md"
+requires-python = ">=3.11.9"
+classifiers = [
+ "Programming Language :: Python :: 3",
+ "License :: OSI Approved :: MIT License",
+ "Operating System :: OS Independent",
+]
+dependencies = [
+ "requests >= 2.32",
+ "feedparser >= 6.0"
+]
+
+[project.scripts]
+nom = "nom.main:main"
+
+[project.optional-dependencies]
+dev = [
+ "pytest",
+]
+
+[tool.setuptools.packages.find]
+where = ["src"]
\ No newline at end of file
+++ /dev/null
-from dataclasses import dataclass
-from pathlib import Path
-from typing import Optional
-from urllib.parse import urlparse, quote, unquote
-
-import os
-import requests
-import feedparser
-
-
-FEED_CACHE=Path.home() / ".cache" / "nom" / "feeds"
-FEED_LIST=Path.home() / ".local" / "share" / "nom" / "feedlist" / "default"
-
-
-class NomError(Exception):
- pass
-
-def url2filename(url: str)->str:
- p = urlparse(url)
- stringified = ''.join([p.netloc, p.path])
- return quote(stringified, safe='',).replace('%2F','-')
-
-def filename2url(url: str)->str:
- p = urlparse(url.replace('-','%2f'))
- return "https://" + unquote(''.join([p.netloc, p.path]))
-
-
-# TODO: This should probably use Pydantic.
-@dataclass
-class Entry:
- title: str
- url: str
- updated: str # TODO: Make this datetime when I add filters
- # summary: str # TODO: Add this when you feel like stripping HTML
-
- # TODO: What if there's a pipe in one of the fields?
- def write_line(self, delimiter: str ='|'):
- return delimiter.join([self.title, self.url, self.updated])
-
-class Feed:
-
- def __init__(self, url: str):
- d = feedparser.parse(url)
- self.d = d
- self.name = d.feed.title
- self.url = url # how is this different from d.feed.link?
- self.entries : list[Entry] = [
- Entry(
- e.title,
- e.link,
- e.updated
- ) for e in d.entries]
-
- # TODO: Fix this with command line option
- def to_stdout(self, file: Optional[Path]=None):
- for entry in self.entries:
- if entry:
- print(entry.write_line())
-
-class FeedList:
-
- def __init__(self, file: Path):
- with open(file, 'r') as f:
- urls = f.read().splitlines()
-
- self.name = file.name
- self.urls = urls
-
- def fetch_feeds(self, save_dir: Path=FEED_CACHE):
- if not os.path.exists(save_dir):
- os.makedirs(save_dir)
-
- for url in self.urls:
- filename = url2filename(url)
- path = save_dir / filename
- with open(path, 'w') as f:
- # TODO: URL Error Handling
- r = requests.get(url)
- f.write(r.text)
- print(f"{path} updated")
-
-# TODO: Need to append feeds to one another (and save to entrylist)
-# TODO: Flesh out CLI.
-if __name__ == "__main__":
-
- from argparse import ArgumentParser
- parser = ArgumentParser(description="Nom Script")
- subparsers = parser.add_subparsers(dest='command', help='Sub-command help')
-
- # Entry subcommand
- entry_parser = subparsers.add_parser('entry', help='Entry related commands')
- entry_subparsers = entry_parser.add_subparsers(dest='entry_command', help='Entry sub-command help')
- entry_show_parser = entry_subparsers.add_parser('show', help='Show entries')
-
- # Feed subcommand
- feed_parser = subparsers.add_parser('feed', help='Feed related commands')
- feed_subparsers = feed_parser.add_subparsers(dest='feed_command', help='Feed sub-command help')
- feed_update_parser = feed_subparsers.add_parser('update', help='Update feed')
-
- # Parse Args
- args = parser.parse_args()
-
- # Direct Logic
- feedlist=FeedList(FEED_LIST)
- if args.command == "entry" and args.entry_command == "show":
- for url in feedlist.urls:
- feed=Feed(str(FEED_CACHE / url2filename(url)))
- feed.to_stdout()
- elif args.command == "feed" and args.feed_command == "update":
- feedlist.fetch_feeds()
- else:
- raise NomError("That option is not yet supported.")
\ No newline at end of file
--- /dev/null
+from argparse import ArgumentParser
+
+
def cli():
    """Build and return the nom argument parser.

    Defines two command groups: ``entry`` (with a ``show`` sub-command)
    and ``feed`` (with an ``update`` sub-command).
    """
    root = ArgumentParser(description="Nom Script")
    commands = root.add_subparsers(dest='command', help='Sub-command help')

    # "entry" command group
    entry_cmd = commands.add_parser('entry', help='Entry related commands')
    entry_cmds = entry_cmd.add_subparsers(dest='entry_command', help='Entry sub-command help')
    entry_cmds.add_parser('show', help='Show entries')

    # "feed" command group
    feed_cmd = commands.add_parser('feed', help='Feed related commands')
    feed_cmds = feed_cmd.add_subparsers(dest='feed_command', help='Feed sub-command help')
    feed_cmds.add_parser('update', help='Update feed')

    return root
--- /dev/null
+from dataclasses import dataclass
+
+
# TODO: This should probably use Pydantic.
@dataclass
class Entry:
    """A single feed item: its title, its link, and when it was updated."""

    title: str
    url: str
    updated: str  # TODO: Make this datetime when I add filters
    # summary: str  # TODO: Add this when you feel like stripping HTML

    # TODO: What if there's a pipe in one of the fields?
    def write_line(self, delimiter: str = '|'):
        """Render the entry as one delimiter-separated line of text."""
        return f"{self.title}{delimiter}{self.url}{delimiter}{self.updated}"
--- /dev/null
+import os
+from pathlib import Path
+from typing import Optional
+
+import feedparser
+import requests
+
+from nom.entry import Entry
+from nom.utils import url2filename
+
+
class Feed:
    """A parsed feed and its entries, built from a URL or local file path."""

    def __init__(self, url: str):
        parsed = feedparser.parse(url)
        self.d = parsed
        self.name = parsed.feed.title
        self.url = url  # how is this different from d.feed.link?
        items: list[Entry] = []
        for item in parsed.entries:
            items.append(Entry(item.title, item.link, item.updated))
        self.entries = items

    # TODO: Fix this with command line option
    def to_stdout(self, file: Optional[Path] = None):
        """Print every entry as a delimited line on stdout."""
        for item in self.entries:
            if item:
                print(item.write_line())
+
class FeedList:
    """The list of feed URLs tracked by nom, loaded from a plain-text file.

    The file holds one feed URL per line; the list takes its name from the
    file's own name.
    """

    def __init__(self, file: Path):
        """Read the feed URLs out of *file* and remember the list's name."""
        with open(file, 'r') as f:
            urls = f.read().splitlines()

        self.name = file.name
        self.urls = urls

    def fetch_feeds(self, save_dir: Path):
        """Download every feed and cache the raw response under *save_dir*.

        Each feed is written to ``save_dir / url2filename(url)``. The HTTP
        request is made *before* the cache file is opened, so a network
        failure cannot truncate an existing cached copy (the old code
        opened the file first and left it empty on error).

        Raises:
            requests.HTTPError: if a feed server returns an error status
                (we must not cache an error page as feed content).
        """
        save_dir.mkdir(parents=True, exist_ok=True)

        for url in self.urls:
            path = save_dir / url2filename(url)
            # Timeout keeps one dead server from hanging the whole update.
            r = requests.get(url, timeout=30)
            r.raise_for_status()  # don't cache error pages as feed content
            with open(path, 'w') as f:
                f.write(r.text)
            print(f"{path} updated")
--- /dev/null
+from pathlib import Path
+
+from nom.utils import url2filename, NomError
+from nom.entry import Entry
+from nom.feed import Feed, FeedList
+from nom.cli import cli
+
+# Globals. Sue me.
+FEED_CACHE=Path.home() / ".cache" / "nom" / "feeds"
+FEED_LIST=Path.home() / ".local" / "share" / "nom" / "feedlist" / "default"
+
+# TODO: Need to append feeds to one another (and save to entrylist)
+# TODO: Flesh out CLI.
def main():
    """Entry point: parse the command line and dispatch to the matching action.

    Raises:
        NomError: if the given command combination is not implemented yet.
    """
    args = cli().parse_args()

    feeds = FeedList(FEED_LIST)
    if args.command == "entry" and args.entry_command == "show":
        # Render every cached feed's entries, one delimited line each.
        for feed_url in feeds.urls:
            cached = FEED_CACHE / url2filename(feed_url)
            Feed(str(cached)).to_stdout()
    elif args.command == "feed" and args.feed_command == "update":
        feeds.fetch_feeds(FEED_CACHE)
    else:
        raise NomError("That option is not yet supported.")


if __name__ == "__main__":
    main()
\ No newline at end of file
--- /dev/null
+from urllib.parse import quote, unquote, urlparse
+
+
class NomError(Exception):
    """Base exception for nom-specific failures."""
+
def url2filename(url: str) -> str:
    """Turn a feed URL into a flat, filesystem-safe cache filename.

    The scheme is dropped, netloc+path are percent-encoded, and encoded
    slashes ("%2F") are swapped for hyphens for readability.

    Literal hyphens are percent-encoded first ("%2D"): ``quote`` never
    quotes "-" (it is unreserved), so without this step a hyphen in the
    URL (e.g. "my-site.com") would be indistinguishable from the hyphen
    standing in for "/", and filename2url() would wrongly turn it into a
    slash. filename2url() needs no change — unquote() already decodes
    "%2D" back to "-".
    """
    p = urlparse(url)
    stringified = ''.join([p.netloc, p.path])
    quoted = quote(stringified, safe='').replace('-', '%2D')
    return quoted.replace('%2F', '-')
+
def filename2url(url: str) -> str:
    """Invert url2filename(): recover an https URL from a cache filename."""
    restored = url.replace('-', '%2f')
    parts = urlparse(restored)
    return "https://" + unquote(parts.netloc + parts.path)
--- /dev/null
def test_pytest():
    """Smoke test: proves the pytest harness itself runs."""
    assert True