Merge pull request #72 from yedpodtrzitko/yed/readability

improve code readability
Authored by Travis Abendshien on 2024-04-27 15:27:58 -07:00, committed by GitHub
commit 2110592971
4 changed files with 87 additions and 116 deletions
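Most of the diff below is a mechanical application of a handful of Python readability idioms. As a quick orientation, here is a minimal, hypothetical sketch of those patterns; the variable names are illustrative only and are not taken from the codebase.

    # Hypothetical before/after sketch of the idioms applied throughout this PR.
    command = 'save'
    choices = ['Red', 'Green', 'Blue']
    tag = {'id': 4, 'name': 'New Tag'}

    # Membership test instead of repeated equality checks.
    is_save = command in ('save', 'write', 'w')     # was: command == 'save' or command == 'write' or ...

    # Chained comparison instead of two joined comparisons.
    col_num = 2
    in_range = 1 < col_num <= len(choices)          # was: col_num > 1 and col_num <= len(choices)

    # Identity check against None; truthiness instead of len(...) > 0 or == False.
    result = None
    if result is not None and choices:              # was: result != None and len(choices) > 0
        pass

    # dict.get() with a default instead of "if 'key' in d.keys():" guards.
    tag_id = int(tag.get('id', 0))                  # was: id = tag['id'] if 'id' in tag.keys() else 0
    name = tag.get('name', '')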

View file

@@ -493,7 +493,7 @@ class CliDriver:
# print(f'Char Limit: {char_limit}, Len: {len(text)}')
return char_limit
def truncate_text(self, text: str) -> int:
def truncate_text(self, text: str) -> str:
"""Returns a truncated string for displaying, calculated with `get_char_limit()`."""
if len(text) > self.get_char_limit(text):
# print(f'Char Limit: {self.get_char_limit(text)}, Len: {len(text)}')
@@ -761,7 +761,7 @@ class CliDriver:
offset = 1 if (index >= row_count) and (
row_number != row_count) else 0
elif displayable % table_size != 0:
if col_num > 1 and col_num <= displayable % table_size:
if 1 < col_num <= displayable % table_size:
offset += col_num - 1
elif col_num > 1 and col_num > displayable % table_size:
offset = displayable % table_size
@@ -1022,30 +1022,31 @@ class CliDriver:
"""
was_executed:bool = False
message:str = ''
com_name = com[0].lower()
# Backup Library =======================================================
if (com[0].lower() == 'backup'):
if com_name == 'backup':
self.backup_library(display_message=False)
was_executed = True
message=f'{INFO} Backed up Library to disk.'
# Create Collage =======================================================
elif (com[0].lower() == 'collage'):
elif com_name == 'collage':
filename = self.create_collage()
if filename:
was_executed = True
message = f'{INFO} Saved collage to \"{filename}\".'
# Save Library =========================================================
elif (com[0].lower() == 'save' or com[0].lower() == 'write' or com[0].lower() == 'w'):
elif com_name in ('save', 'write', 'w'):
self.save_library(display_message=False)
was_executed = True
message=f'{INFO} Library saved to disk.'
# Toggle Debug =========================================================
elif (com[0].lower() == 'toggle-debug'):
elif com_name == 'toggle-debug':
self.args.debug = not self.args.debug
was_executed = True
message=f'{INFO} Debug Mode Active.' if self.args.debug else f'{INFO} Debug Mode Deactivated.'
# Toggle External Preview ==============================================
elif (com[0].lower() == 'toggle-external-preview'):
elif com_name == 'toggle-external-preview':
self.args.external_preview = not self.args.external_preview
if self.args.external_preview:
self.init_external_preview()
@@ -1054,11 +1055,11 @@ class CliDriver:
was_executed = True
message=f'{INFO} External Preview Enabled.' if self.args.external_preview else f'{INFO} External Preview Disabled.'
# Quit =================================================================
elif com[0].lower() == 'quit' or com[0].lower() == 'q':
elif com_name in ('quit', 'q'):
self.exit(save=True, backup=False)
was_executed = True
# Quit without Saving ==================================================
elif com[0].lower() == 'quit!' or com[0].lower() == 'q!':
elif com_name in ('quit!', 'q!'):
self.exit(save=False, backup=False)
was_executed = True
@@ -1345,7 +1346,7 @@ class CliDriver:
# self.scr_library_home(clear_scr=False)
# Add New Entries ==================================================
elif ' '.join(com) == 'add new':
if self.is_new_file_count_init == False:
if not self.is_new_file_count_init:
print(
f'{INFO} Scanning for files in \'{self.lib.library_dir}\' (This may take a while)...')
# if not self.lib.files_not_in_library:
@@ -1390,7 +1391,7 @@ class CliDriver:
for unresolved in self.lib.missing_matches:
res = self.scr_choose_missing_match(
self.lib.get_entry_id_from_filepath(unresolved), clear_scr=False)
if res != None and int(res) >= 0:
if res is not None and int(res) >= 0:
clear()
print(
f'{INFO} Updated {self.lib.entries[self.lib.get_entry_id_from_filepath(unresolved)].path} -> {self.lib.missing_matches[unresolved][res]}')
@@ -1555,7 +1556,7 @@ class CliDriver:
print(self.format_title(title))
if len(self.filtered_entries) > 0:
if self.filtered_entries:
# entry = self.lib.get_entry_from_index(
# self.filtered_entries[index])
entry = self.lib.get_entry(self.filtered_entries[index][1])
@@ -1580,7 +1581,7 @@ class CliDriver:
self.print_fields(self.filtered_entries[index][1])
else:
if len(self.lib.entries) > 0:
if self.lib.entries:
print(self.format_h1('No Entry Results for Query', color=BRIGHT_RED_FG))
self.set_external_preview_default()
else:
@@ -2049,7 +2050,7 @@ class CliDriver:
'<#> Quit', BRIGHT_CYAN_FG))
print('> ', end='')
com: list[str] = input().lstrip().rstrip().split(' ')
com: list[str] = input().strip().split(' ')
gc, message = self.global_commands(com)
if gc:
if message:
@@ -2057,6 +2058,7 @@ class CliDriver:
print(message)
clear_scr=False
else:
com_name = com[0].lower()
try:
# # Quit =========================================================
@@ -2069,13 +2071,13 @@ class CliDriver:
# # self.cleanup()
# sys.exit()
# Cancel =======================================================
if (com[0].lower() == 'cancel' or com[0].lower() == 'c' or com[0] == '0') and required==False:
if com_name in ('cancel', 'c', '0') and not required:
clear()
return -1
# Selection ====================================================
elif int(com[0]) > 0 and int(com[0]) <= len(choices):
elif com_name.isdigit() and 0 < int(com_name) <= len(choices):
clear()
return int(com[0]) - 1
return int(com_name) - 1
else:
# invalid_input = True
# print(self.format_h1(str='Please Enter a Valid Selection Number', color=BRIGHT_RED_FG))
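The selection check above now guards with str.isdigit() before converting, instead of relying on int() raising for non-numeric input inside the surrounding try block. A minimal sketch of the pattern, with illustrative values:

    # Hypothetical sketch of the digit guard used for menu selection.
    choices = ['Red', 'Green', 'Blue']
    com_name = '2'

    if com_name.isdigit() and 0 < int(com_name) <= len(choices):
        print(choices[int(com_name) - 1])   # prints 'Green'
    else:
        print('invalid selection')          # non-numeric, zero, or out-of-range input falls through

Note that str.isdigit() is False for '', '-1', and '1.5', so int() is only reached for plain non-negative integers.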
@@ -2554,7 +2556,7 @@ class CliDriver:
f'Enter #{plural} Cancel', fg_color))
print('> ', end='')
com: list[int] = input().split(' ')
com: list[str] = input().split(' ')
selected_ids: list[int] = []
try:
for c in com:
@@ -2625,14 +2627,14 @@ class CliDriver:
self.lib.update_entry_field(
entry_index, field_index, new_content.rstrip('\n').rstrip('\r'), 'replace')
def scr_list_tags(self, query: str = '', tag_ids: list[int] = [], clear_scr=True) -> None:
def scr_list_tags(self, query: str = '', tag_ids: list[int] = None, clear_scr=True) -> None:
"""A screen for listing out and performing CRUD operations on Library Tags."""
# NOTE: While a screen that just displays the first 40 or so random tags on your screen
# isn't really that useful, this is just a temporary measure to provide a launchpad
# screen for necessary commands such as adding and editing tags.
# A more useful screen presentation might look like a list of ranked occurrences, but
# that can be figured out and implemented later.
tag_ids = tag_ids or []
title = f'{self.base_title} - Library \'{self.lib.library_dir}\''
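The scr_list_tags() change above swaps a mutable default argument (tag_ids: list[int] = []) for None plus `tag_ids = tag_ids or []`. A brief, hypothetical sketch of why a shared default list is a hazard; the function names here are made up for illustration:

    # Hypothetical sketch: default list objects are created once and shared across calls.
    def collect_bad(item, bucket: list = []):
        bucket.append(item)                 # mutates the single shared default list
        return bucket

    def collect_good(item, bucket: list = None):
        bucket = bucket or []               # fresh list per call, same pattern as scr_list_tags()
        bucket.append(item)
        return bucket

    assert collect_bad(1) == [1]
    assert collect_bad(2) == [1, 2]         # surprising: state leaked from the previous call
    assert collect_good(1) == [1]
    assert collect_good(2) == [2]           # calls stay independent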
@@ -2673,7 +2675,7 @@ class CliDriver:
'Create Edit <#> Delete <#> Search <Query> Close/Done', BRIGHT_MAGENTA_FG))
print('> ', end='')
com: list[str] = input().lstrip().rstrip().split(' ')
com: list[str] = input().strip().split(' ')
gc, message = self.global_commands(com)
if gc:
if message:
@@ -2681,9 +2683,9 @@ class CliDriver:
print(message)
clear_scr=False
else:
com_name = com[0].lower()
# Search Tags ==========================================================
if (com[0].lower() == 'search' or com[0].lower() == 's'):
if com_name in ('search', 's'):
if len(com) > 1:
new_query: str = ' '.join(com[1:])
# self.scr_list_tags(prev_scr, query=new_query,
@@ -2696,7 +2698,7 @@ class CliDriver:
tag_ids=self.lib.search_tags('')
# return
# Edit Tag ===========================================================
elif com[0].lower() == 'edit' or com[0].lower() == 'e':
elif com_name in ('edit', 'e'):
if len(com) > 1:
try:
index = int(com[1]) - 1
@@ -2720,7 +2722,7 @@ class CliDriver:
# return
# Create Tag ============================================================
elif com[0].lower() == 'create' or com[0].lower() == 'mk':
elif com_name in ('create', 'mk'):
tag = Tag(id=0, name='New Tag', shorthand='',
aliases=[], subtags_ids=[], color='')
self.scr_manage_tag(
@@ -2731,7 +2733,7 @@ class CliDriver:
# self.scr_list_tags(prev_scr, query=query, tag_ids=tag_ids)
# return
# Delete Tag ===========================================================
elif com[0].lower() == 'delete' or com[0].lower() == 'del':
elif com_name in ('delete', 'del'):
if len(com) > 1:
if len(com) > 1:
try:
@@ -2757,7 +2759,7 @@ class CliDriver:
# tag_ids=tag_ids, clear_scr=False)
# return
# Close View ===========================================================
elif (com[0].lower() == 'close' or com[0].lower() == 'c' or com[0].lower() == 'done'):
elif com_name in ('close', 'c', 'done'):
# prev_scr()
return
# # Quit =================================================================
@@ -3192,7 +3194,7 @@ class CliDriver:
selected: str = input()
try:
if int(selected) > 0 and int(selected) <= len(colors):
if selected.isdigit() and 0 < int(selected) <= len(colors):
selected = colors[int(selected)-1]
return selected
# except SystemExit:

View file

@@ -131,7 +131,7 @@ class Entry:
# if self.fields:
# if field_index != -1:
# logging.info(f'[LIBRARY] ADD TAG to E:{self.id}, F-DI:{field_id}, F-INDEX:{field_index}')
field_index = -1 if field_index == None else field_index
field_index = -1 if field_index is None else field_index
for i, f in enumerate(self.fields):
if library.get_field_attr(f, 'id') == field_id:
field_index = i
@@ -631,33 +631,21 @@ class Library:
# Step 2: Create a Tag object and append it to the internal Tags list,
# then map that Tag's ID to its index in the Tags list.
id = 0
if 'id' in tag.keys():
id = tag['id']
id = int(tag.get('id', 0))
# Don't load tags with duplicate IDs
if id not in [t.id for t in self.tags]:
if int(id) >= self._next_tag_id:
self._next_tag_id = int(id) + 1
if id not in {t.id for t in self.tags}:
if id >= self._next_tag_id:
self._next_tag_id = id + 1
name = ''
if 'name' in tag.keys():
name = tag['name']
shorthand = ''
if 'shorthand' in tag.keys():
shorthand = tag['shorthand']
aliases = []
if 'aliases' in tag.keys():
aliases = tag['aliases']
subtag_ids = []
if 'subtag_ids' in tag.keys():
subtag_ids = tag['subtag_ids']
color = ''
if 'color' in tag.keys():
color = tag['color']
name = tag.get('name', '')
shorthand = tag.get('shorthand', '')
aliases = tag.get('aliases', [])
subtag_ids = tag.get('subtag_ids', [])
color = tag.get('color', '')
t = Tag(
id=int(id),
id=id,
name=name,
shorthand=shorthand,
aliases=aliases,
@@ -683,12 +671,11 @@ class Library:
logging.info(f'[LIBRARY] Tags loaded in {(end_time - start_time):.3f} seconds')
# Parse Entries ------------------------------------------------
if 'entries' in json_dump.keys():
if entries := json_dump.get('entries'):
start_time = time.time()
for entry in json_dump['entries']:
for entry in entries:
id = 0
if 'id' in entry.keys():
if 'id' in entry:
id = int(entry['id'])
if id >= self._next_entry_id:
self._next_entry_id = id + 1
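The entries hunk above combines an assignment expression (the := walrus operator, Python 3.8+) with dict.get() defaults. A minimal sketch of the bind-and-test pattern, using made-up data:

    # Hypothetical sketch of the pattern used when parsing entries.
    json_dump = {'entries': [{'id': 1, 'filename': 'a.png', 'path': 'images'}]}

    if entries := json_dump.get('entries'):     # binds the value and runs the body only if it is truthy
        for entry in entries:
            entry_id = int(entry.get('id', 0))
            filename = entry.get('filename', '')
            e_path = entry.get('path', '')
            print(entry_id, filename, e_path)   # 1 a.png images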
@@ -697,16 +684,12 @@ class Library:
id = self._next_entry_id
self._next_entry_id += 1
filename = ''
if 'filename' in entry.keys():
filename = entry['filename']
e_path = ''
if 'path' in entry.keys():
e_path = entry['path']
filename = entry.get('filename', '')
e_path = entry.get('path', '')
fields = []
if 'fields' in entry.keys():
if 'fields' in entry:
# Cast JSON str keys to ints
for f in entry['fields']:
for f in fields:
f[int(list(f.keys())[0])
] = f[list(f.keys())[0]]
del f[list(f.keys())[0]]
@@ -768,28 +751,17 @@ class Library:
# the internal Collations list, then map that
# Collation's ID to its index in the Collations list.
id = 0
if 'id' in collation.keys():
id = collation['id']
id = int(collation.get('id', 0))
if id >= self._next_collation_id:
self._next_collation_id = id + 1
if int(id) >= self._next_collation_id:
self._next_collation_id = int(id) + 1
title = ''
if 'title' in collation.keys():
title = collation['title']
e_ids_and_pages = ''
if 'e_ids_and_pages' in collation.keys():
e_ids_and_pages = collation['e_ids_and_pages']
sort_order = []
if 'sort_order' in collation.keys():
sort_order = collation['sort_order']
cover_id = []
if 'cover_id' in collation.keys():
cover_id = collation['cover_id']
title = collation.get('title', '')
e_ids_and_pages = collation.get('e_ids_and_pages', '')
sort_order = collation.get('sort_order', [])
cover_id = collation.get('cover_id', [])
c = Collation(
id=int(id),
id=id,
title=title,
e_ids_and_pages=e_ids_and_pages,
sort_order=sort_order,
@@ -1395,20 +1367,20 @@ class Library:
query: str = query.strip().lower()
query_words: list[str] = query.split(' ')
all_tag_terms: list[str] = []
only_untagged: bool = True if 'untagged' in query or 'no tags' in query else False
only_empty: bool = True if 'empty' in query or 'no fields' in query else False
only_missing: bool = True if 'missing' in query or 'no file' in query else False
allow_adv: bool = True if 'filename:' in query_words else False
tag_only: bool = True if 'tag_id:' in query_words else False
only_untagged: bool = ('untagged' in query or 'no tags' in query)
only_empty: bool = ('empty' in query or 'no fields' in query)
only_missing: bool = ('missing' in query or 'no file' in query)
allow_adv: bool = 'filename:' in query_words
tag_only: bool = 'tag_id:' in query_words
if allow_adv:
query_words.remove('filename:')
if tag_only:
query_words.remove('tag_id:')
# TODO: Expand this to allow for dynamic fields to work.
only_no_author: bool = True if 'no author' in query or 'no artist' in query else False
only_no_author: bool = ('no author' in query or 'no artist' in query)
# Preprocess the Tag terms.
if len(query_words) > 0:
if query_words:
for i, term in enumerate(query_words):
for j, term in enumerate(query_words):
if query_words[i:j+1] and " ".join(query_words[i:j+1]) in self._tag_strings_to_id_map:
@@ -1428,7 +1400,7 @@ class Library:
# non_entry_count = 0
# Iterate over all Entries =============================================================
for entry in self.entries:
allowed_ext: bool = False if os.path.splitext(entry.filename)[1][1:].lower() in self.ignored_extensions else True
allowed_ext: bool = os.path.splitext(entry.filename)[1][1:].lower() not in self.ignored_extensions
# try:
# entry: Entry = self.entries[self.file_to_library_index_map[self._source_filenames[i]]]
# print(f'{entry}')
@@ -1550,7 +1522,7 @@ class Library:
for entry in self.entries:
added = False
allowed_ext: bool = False if os.path.splitext(entry.filename)[1][1:].lower() in self.ignored_extensions else True
allowed_ext: bool = os.path.splitext(entry.filename)[1][1:].lower() not in self.ignored_extensions
if allowed_ext:
for f in entry.fields:
if self.get_field_attr(f, 'type') == 'collation':
@@ -1566,7 +1538,7 @@ class Library:
results.reverse()
return results
def search_tags(self, query: str, include_cluster=False, ignore_builtin=False, threshold: int = 1, context: list[str] = []) -> list[int]:
def search_tags(self, query: str, include_cluster=False, ignore_builtin=False, threshold: int = 1, context: list[str] = None) -> list[int]:
"""Returns a list of Tag IDs returned from a string query."""
# tag_ids: list[int] = []
# if query:
@@ -1659,7 +1631,6 @@ class Library:
# Contextual Weighing
if context and ((len(id_weights) > 1 and len(priority_ids) > 1) or (len(priority_ids) > 1)):
context_ids: list[int] = []
context_strings: list[str] = [s.replace(' ', '').replace('_', '').replace('-', '').replace(
"'", '').replace('(', '').replace(')', '').replace('[', '').replace(']', '').lower() for s in context]
for term in context:
@@ -1833,16 +1804,16 @@ class Library:
# Step [3/7]:
# Remove ID -> cluster reference.
if tag_id in self._tag_id_to_cluster_map.keys():
if tag_id in self._tag_id_to_cluster_map:
del self._tag_id_to_cluster_map[tag.id]
# Remove mentions of this ID in all clusters.
for key in self._tag_id_to_cluster_map.keys():
if tag_id in self._tag_id_to_cluster_map[key]:
self._tag_id_to_cluster_map[key].remove(tag.id)
for key, values in self._tag_id_to_cluster_map.items():
if tag_id in values:
values.remove(tag.id)
# Step [4/7]:
# Remove mapping of this ID to its index in the tags list.
if tag.id in self._tag_id_to_index_map.keys():
if tag.id in self._tag_id_to_index_map:
del self._tag_id_to_index_map[tag.id]
# Step [5/7]:
@@ -1921,7 +1892,7 @@ class Library:
if data:
# Add a Title Field if the data doesn't already exist.
if "title" in data.keys() and data["title"]:
if data.get("title"):
field_id = 0 # Title Field ID
if not self.does_field_content_exist(entry_id, field_id, data['title']):
self.add_field_to_entry(entry_id, field_id)
@@ -1929,7 +1900,7 @@ class Library:
entry_id, -1, data["title"], 'replace')
# Add an Author Field if the data doesn't already exist.
if "author" in data.keys() and data["author"]:
if data.get("author"):
field_id = 1 # Author Field ID
if not self.does_field_content_exist(entry_id, field_id, data['author']):
self.add_field_to_entry(entry_id, field_id)
@@ -1937,7 +1908,7 @@ class Library:
entry_id, -1, data["author"], 'replace')
# Add an Artist Field if the data doesn't already exist.
if "artist" in data.keys() and data["artist"]:
if data.get("artist"):
field_id = 2 # Artist Field ID
if not self.does_field_content_exist(entry_id, field_id, data['artist']):
self.add_field_to_entry(entry_id, field_id)
@@ -1945,7 +1916,7 @@ class Library:
entry_id, -1, data["artist"], 'replace')
# Add a Date Published Field if the data doesn't already exist.
if "date_published" in data.keys() and data["date_published"]:
if data.get("date_published"):
field_id = 14 # Date Published Field ID
date = str(datetime.datetime.strptime(
data["date_published"], '%Y-%m-%d %H:%M:%S'))
@@ -1955,7 +1926,7 @@ class Library:
self.update_entry_field(entry_id, -1, date, 'replace')
# Process String Tags if the data doesn't already exist.
if "tags" in data.keys() and data["tags"]:
if data.get("tags"):
tags_field_id = 6 # Tags Field ID
content_tags_field_id = 7 # Content Tags Field ID
meta_tags_field_id = 8 # Meta Tags Field ID
@@ -1992,7 +1963,7 @@ class Library:
matching: list[int] = self.search_tags(
tag.replace('_', ' ').replace('-', ' '), include_cluster=False, ignore_builtin=True, threshold=2, context=tags)
priority_field_index = -1
if len(matching) > 0:
if matching:
# NOTE: The following commented-out code enables the ability
# to prefer an existing built-in tag_box field to add to

View file

@@ -137,12 +137,11 @@ class TagStudioCore:
# # # print("Could not resolve URL.")
# # pass
def match_conditions(self, entry_id: int) -> str:
def match_conditions(self, entry_id: int) -> None:
"""Matches defined conditions against a file to add Entry data."""
cond_file = os.path.normpath(f'{self.lib.library_dir}/{TS_FOLDER_NAME}/conditions.json')
# TODO: Make this stored somewhere better instead of temporarily in this JSON file.
json_dump = {}
entry: Entry = self.lib.get_entry(entry_id)
try:
if os.path.isfile(cond_file):
@@ -155,8 +154,8 @@ class TagStudioCore:
match = True
break
if match:
if 'fields' in c.keys() and c['fields']:
for field in c['fields']:
if fields := c.get('fields'):
for field in fields:
field_id = self.lib.get_field_attr(
field, 'id')

View file

@@ -1421,10 +1421,9 @@ class FixDupeFilesModal(QWidget):
os.path.normpath(self.lib.library_dir))
qfd.setFileMode(QFileDialog.FileMode.ExistingFile)
qfd.setNameFilter("DupeGuru Files (*.dupeguru)")
filename = []
if qfd.exec_():
filename = qfd.selectedFiles()
if len(filename) > 0:
if filename:
self.set_filename(filename[0])
def set_filename(self, filename:str):
@@ -3879,7 +3878,7 @@ class QtDriver(QObject):
'Open/Create Library',
'/',
QFileDialog.ShowDirsOnly)
if dir != None and dir != '':
if dir not in (None, ''):
self.open_library(dir)
def signal_handler(self, sig, frame):
@@ -4307,7 +4306,7 @@ class QtDriver(QObject):
# sleep(5)
# pb.deleteLater()
def run_macros(self, name: str, entry_ids: int):
def run_macros(self, name: str, entry_ids: list[int]):
"""Runs a specific Macro on a group of given entry_ids."""
for id in entry_ids:
self.run_macro(name, id)
@@ -4383,7 +4382,7 @@ class QtDriver(QObject):
trimmed = False
if len(self.nav_frames) > self.cur_frame_idx + 1:
if (frame_content != None):
if frame_content is not None:
# Trim the nav stack if user is taking a new route.
self.nav_frames = self.nav_frames[:self.cur_frame_idx+1]
if self.nav_frames and not self.nav_frames[self.cur_frame_idx].contents:
@@ -4395,7 +4394,7 @@ class QtDriver(QObject):
self.nav_frames[self.cur_frame_idx].scrollbar_pos = sb_pos
self.cur_frame_idx += 1 if not trimmed else 0
# Moving forward at the end of the stack with new content
elif (frame_content != None):
elif frame_content is not None:
# If the current page is empty, don't include it in the new stack.
if self.nav_frames and not self.nav_frames[self.cur_frame_idx].contents:
self.nav_frames.pop()
@@ -4406,7 +4405,7 @@ class QtDriver(QObject):
self.cur_frame_idx += 1 if not trimmed else 0
# if self.nav_stack[self.cur_page_idx].contents:
if (self.cur_frame_idx != original_pos) or (frame_content != None):
if (self.cur_frame_idx != original_pos) or (frame_content is not None):
self.update_thumbs()
sb.verticalScrollBar().setValue(
self.nav_frames[self.cur_frame_idx].scrollbar_pos)
@@ -4680,7 +4679,7 @@ class QtDriver(QObject):
for x in collation_entries])
# self.update_thumbs()
def get_frame_contents(self, index=0, query=str):
def get_frame_contents(self, index=0, query: str = None):
return ([] if not self.frame_dict[query] else self.frame_dict[query][index], index, len(self.frame_dict[query]))
def filter_items(self, query=''):