diff --git a/Pipfile b/Pipfile index 68e466b..1efaf38 100644 --- a/Pipfile +++ b/Pipfile @@ -8,6 +8,7 @@ flake8 = "*" black = "*" pytest = "*" pytest-cov = "*" +pylint = "*" [packages] natsort = "*" @@ -15,3 +16,6 @@ pillow = "*" [requires] python_version = "3.6" + +[pipenv] +allow_prereleases = true diff --git a/Pipfile.lock b/Pipfile.lock index 7ea641a..6926181 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "15b9977381b7a0f056df08f459574898d31b2c21bae5f0cff44c132dd97a3015" + "sha256": "007240e9eb41b8c64416c1efaa2bc217087c1495afe5ad114a4c0f349330200d" }, "pipfile-spec": 6, "requires": { @@ -61,6 +61,13 @@ ], "version": "==1.4.3" }, + "astroid": { + "hashes": [ + "sha256:71ea07f44df9568a75d0f354c49143a4575d90645e9fead6dfb52c26a85ed13a", + "sha256:840947ebfa8b58f318d42301cf8c0a20fd794a33b61cc4638e28e9e61ba32f42" + ], + "version": "==2.3.3" + }, "attrs": { "hashes": [ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", @@ -142,6 +149,39 @@ "markers": "python_version < '3.8'", "version": "==1.3.0" }, + "isort": { + "hashes": [ + "sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1", + "sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd" + ], + "version": "==4.3.21" + }, + "lazy-object-proxy": { + "hashes": [ + "sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d", + "sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449", + "sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08", + "sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a", + "sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50", + "sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd", + "sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239", + "sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb", + "sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea", + "sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e", + "sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156", + "sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142", + "sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442", + "sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62", + "sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db", + "sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531", + "sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383", + "sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a", + "sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357", + "sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4", + "sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0" + ], + "version": "==1.4.3" + }, "mccabe": { "hashes": [ "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42", @@ -198,6 +238,14 @@ ], "version": "==2.1.1" }, + "pylint": { + "hashes": [ + "sha256:3db5468ad013380e987410a8d6956226963aed94ecb5f9d3a28acca6d9ac36cd", + "sha256:886e6afc935ea2590b462664b161ca9a5e40168ea99e5300935f6591ad467df4" + ], + "index": "pypi", + "version": "==2.4.4" + }, "pyparsing": { "hashes": [ "sha256:4c830582a84fb022400b85429791bc551f1f4871c33f23e44f353119e92f969f", @@ -223,29 +271,29 @@ }, "regex": { "hashes": [ - 
"sha256:08047f4b31254489316b489c24983d72c0b9d520da084b8c624f45891a9c6da2", - "sha256:08d042155592c24cbdb81158a99aeeded4493381a1aba5eba9def6d29961042c", - "sha256:13901ac914de7a7e58a92f99c71415e268e88ac4be8b389d8360c38e64b2f1c5", - "sha256:15b6f7e10f764c5162242a7db89da51218a38299415ba5e70f235a6a83c53b94", - "sha256:46d01bb4139e7051470037f8b9a5b90c48cb77a3d307c2621bf3791bfae4d9d8", - "sha256:52814a8423d52a7e0f070dbb79f7bdfce5221992b881f83bad69f8daf4b831c3", - "sha256:6d999447f77b1b638ea620bde466b958144af90ac2e9b1f23b98a79ced14ce3f", - "sha256:7391eeee49bb3ce895ca43479eaca810f0c2608556711fa02a82075768f81a37", - "sha256:79530d60a8644f72f78834c01a2d70a60be110e2f4a0a612b78da23ef60c2730", - "sha256:841056961d441f05b949d9003e7f2b5d51a11dd52d8bd7c0a5325943b6a0ea6b", - "sha256:895f95344182b4ecb84044910e62ad33ca63a7e7b447c7ba858d24e9f1aad939", - "sha256:93e797cf16e07b315413d1157b5ce7a7c2b28b2b95768e25c0ccd290443661ad", - "sha256:a4677dc8245f1127b70fa79fb7f15a61eae0fee36ae15cbbe017207485fe9a5c", - "sha256:b2faf1dce478c0ca1c92575bdc48b7afdce3a887a02afb6342fae476af41bbe2", - "sha256:bcd9bcba67ae8d1e1b21426ea7995f7ca08260bea601ba15e13e5ca8588208ef", - "sha256:d47a89e6029852c88fff859dbc9a11dcec820413b4c2510e80ced1c99c3e79ea", - "sha256:dd69d165bee099b02d122d1e0dd55a85ebf9a65493dcd17124b628db9edfc833", - "sha256:e77f64a3ae8b9a555e170a3908748b4e2ccd0c58f8385f328baf8fc70f9ea497", - "sha256:ec75e8baa576aed6065b615a8f8e91a05e42b492b24ffd16cbb075ad62fb9185", - "sha256:ed75b64c6694bbe840b3340191b2039f633fd1ec6fc567454e47d7326eda557f", - "sha256:ef85a6a15342559bed737dc16dfb1545dc043ca5bf5bce6bff4830f0e7a74395" - ], - "version": "==2020.1.7" + "sha256:07b39bf943d3d2fe63d46281d8504f8df0ff3fe4c57e13d1656737950e53e525", + "sha256:0932941cdfb3afcbc26cc3bcf7c3f3d73d5a9b9c56955d432dbf8bbc147d4c5b", + "sha256:0e182d2f097ea8549a249040922fa2b92ae28be4be4895933e369a525ba36576", + "sha256:10671601ee06cf4dc1bc0b4805309040bb34c9af423c12c379c83d7895622bb5", + "sha256:23e2c2c0ff50f44877f64780b815b8fd2e003cda9ce817a7fd00dea5600c84a0", + "sha256:26ff99c980f53b3191d8931b199b29d6787c059f2e029b2b0c694343b1708c35", + "sha256:27429b8d74ba683484a06b260b7bb00f312e7c757792628ea251afdbf1434003", + "sha256:3e77409b678b21a056415da3a56abfd7c3ad03da71f3051bbcdb68cf44d3c34d", + "sha256:4e8f02d3d72ca94efc8396f8036c0d3bcc812aefc28ec70f35bb888c74a25161", + "sha256:4eae742636aec40cf7ab98171ab9400393360b97e8f9da67b1867a9ee0889b26", + "sha256:6a6ae17bf8f2d82d1e8858a47757ce389b880083c4ff2498dba17c56e6c103b9", + "sha256:6a6ba91b94427cd49cd27764679024b14a96874e0dc638ae6bdd4b1a3ce97be1", + "sha256:7bcd322935377abcc79bfe5b63c44abd0b29387f267791d566bbb566edfdd146", + "sha256:98b8ed7bb2155e2cbb8b76f627b2fd12cf4b22ab6e14873e8641f266e0fb6d8f", + "sha256:bd25bb7980917e4e70ccccd7e3b5740614f1c408a642c245019cff9d7d1b6149", + "sha256:d0f424328f9822b0323b3b6f2e4b9c90960b24743d220763c7f07071e0778351", + "sha256:d58e4606da2a41659c84baeb3cfa2e4c87a74cec89a1e7c56bee4b956f9d7461", + "sha256:e3cd21cc2840ca67de0bbe4071f79f031c81418deb544ceda93ad75ca1ee9f7b", + "sha256:e6c02171d62ed6972ca8631f6f34fa3281d51db8b326ee397b9c83093a6b7242", + "sha256:e7c7661f7276507bce416eaae22040fd91ca471b5b33c13f8ff21137ed6f248c", + "sha256:ecc6de77df3ef68fee966bb8cb4e067e84d4d1f397d0ef6fce46913663540d77" + ], + "version": "==2020.1.8" }, "six": { "hashes": [ @@ -284,6 +332,7 @@ "sha256:fdc1c9bbf79510b76408840e009ed65958feba92a88833cdceecff93ae8fff66", "sha256:ffde2fbfad571af120fcbfbbc61c72469e72f550d676c3342492a9dfdefb8f12" ], + "markers": "implementation_name == 'cpython' and python_version 
< '3.8'", "version": "==1.4.0" }, "wcwidth": { @@ -293,6 +342,12 @@ ], "version": "==0.1.8" }, + "wrapt": { + "hashes": [ + "sha256:565a021fd19419476b9362b05eeaa094178de64f8361e44468f9e9d7843901e1" + ], + "version": "==1.11.2" + }, "zipp": { "hashes": [ "sha256:3718b1cbcd963c7d4c5511a8240812904164b7f381b647143a89d3b98f9bcd8e", diff --git a/darkseid/__init__.py b/darkseid/__init__.py index 7dd029c..98926fe 100644 --- a/darkseid/__init__.py +++ b/darkseid/__init__.py @@ -1,3 +1,3 @@ """Projects version information used in setup.py""" -VERSION_INFO = (0, 5, 5) +VERSION_INFO = (0, 6, 0) VERSION = ".".join(str(c) for c in VERSION_INFO) diff --git a/darkseid/comicarchive.py b/darkseid/comicarchive.py index 66c1cc5..6a05ef3 100644 --- a/darkseid/comicarchive.py +++ b/darkseid/comicarchive.py @@ -1,6 +1,7 @@ """A class to represent a single comic, be it file or folder of images""" # Copyright 2012-2014 Anthony Beville +# Copyright 2019 Brian Pepple import io import os @@ -9,24 +10,19 @@ import zipfile from natsort import natsorted +from PIL import Image from .comicinfoxml import ComicInfoXml from .filenameparser import FileNameParser from .genericmetadata import GenericMetadata -try: - from PIL import Image - - pil_available = True -except ImportError: - pil_available = False - - sys.path.insert(0, os.path.abspath(".")) class MetaDataStyle: - # Only have ComicRack is supported for now, but if we want to add back other formats this will be useful. + # Only ComicRack is supported for now, + # if we want to add back other formats this + # will be useful to have CIX = 0 name = ["ComicRack"] @@ -38,61 +34,72 @@ class ZipArchiver: def __init__(self, path): self.path = path - def readArchiveFile(self, archive_file): + def read_archive_file(self, archive_file): + """Read the contents of a comic archive""" + data = "" - zf = zipfile.ZipFile(self.path, "r") + zip_file = zipfile.ZipFile(self.path, "r") try: - data = zf.read(archive_file) - except zipfile.BadZipfile as e: - print(f"bad zipfile [{e}]: {self.path} :: {archive_file}", file=sys.stderr) - zf.close() + data = zip_file.read(archive_file) + except zipfile.BadZipfile as bad_zip_error: + print( + f"bad zipfile [{bad_zip_error}]: {self.path} :: {archive_file}", + file=sys.stderr, + ) + zip_file.close() raise IOError - except Exception as e: - zf.close() - print(f"bad zipfile [{e}]: {self.path} :: {archive_file}", file=sys.stderr) + except Exception as exception_error: + zip_file.close() + print( + f"bad zipfile [{exception_error}]: {self.path} :: {archive_file}", + file=sys.stderr, + ) raise IOError finally: - zf.close() + zip_file.close() return data - def removeArchiveFile(self, archive_file): + def remove_archive_file(self, archive_file): try: - self.rebuildZipFile([archive_file]) + self.rebuild_zipfile([archive_file]) except zipfile.BadZipfile: return False else: return True - def writeArchiveFile(self, archive_file, data): + def write_archive_file(self, archive_file, data): # At the moment, no other option but to rebuild the whole # zip archive w/o the indicated file. 
Very sucky, but maybe # another solution can be found try: - self.rebuildZipFile([archive_file]) + self.rebuild_zipfile([archive_file]) # now just add the archive file as a new one - zf = zipfile.ZipFile( + zip_file = zipfile.ZipFile( self.path, mode="a", allowZip64=True, compression=zipfile.ZIP_DEFLATED ) - zf.writestr(archive_file, data) - zf.close() + zip_file.writestr(archive_file, data) + zip_file.close() return True - except (zipfile.BadZipfile, zipfile.LargeZipFile) as e: - print(f"Error writing zipfile: {e}.") + except (zipfile.BadZipfile, zipfile.LargeZipFile) as exception_error: + print(f"Error writing zipfile: {exception_error}.") return False - def getArchiveFilenameList(self): + def get_archive_filename_list(self): try: - zf = zipfile.ZipFile(self.path, "r") - namelist = zf.namelist() - zf.close() + zip_file = zipfile.ZipFile(self.path, "r") + namelist = zip_file.namelist() + zip_file.close() return namelist - except Exception as e: - print(f"Unable to get zipfile list [{e}]: {self.path}", file=sys.stderr) + except Exception as exception_error: + print( + f"Unable to get zipfile list [{exception_error}]: {self.path}", + file=sys.stderr, + ) return [] - def rebuildZipFile(self, exclude_list): + def rebuild_zipfile(self, exclude_list): """Zip helper func This recompresses the zip archive, without the files in the exclude_list @@ -119,18 +126,21 @@ def rebuildZipFile(self, exclude_list): os.remove(self.path) os.rename(tmp_name, self.path) - def copyFromArchive(self, otherArchive): + def copy_from_archive(self, other_archive): """Replace the current zip with one copied from another archive""" try: zout = zipfile.ZipFile(self.path, "w", allowZip64=True) - for fname in otherArchive.getArchiveFilenameList(): - data = otherArchive.readArchiveFile(fname) + for fname in other_archive.get_archive_filename_list(): + data = other_archive.read_archive_file(fname) if data is not None: zout.writestr(fname, data) zout.close() - except Exception as e: - print(f"Error while copying to {self.path}: {e}", file=sys.stderr) + except Exception as exception_error: + print( + f"Error while copying to {self.path}: {exception_error}", + file=sys.stderr, + ) return False else: return True @@ -147,19 +157,19 @@ def __init__(self, path): self.path = path @classmethod - def readArchiveFile(self): + def read_archive_file(cls): return "" @classmethod - def writeArchiveFile(self, archive_file, data): + def write_archive_file(cls, archive_file, data): return False @classmethod - def removeArchiveFile(self, archive_file): + def remove_archive_file(cls, archive_file): return False @classmethod - def getArchiveFilenameList(self): + def get_archive_filename_list(cls): return [] @@ -174,37 +184,40 @@ def __init__(self, path): self.path = path self.ci_xml_filename = "ComicInfo.xml" - self.resetCache() + self.has_cix = None + self.page_count = None + self.page_list = None + self.cix_md = None self.archive_type = self.ArchiveType.Unknown self.archiver = UnknownArchiver(self.path) - if self.zipTest(): + if self.zip_test(): self.archive_type = self.ArchiveType.Zip self.archiver = ZipArchiver(self.path) - def resetCache(self): + def reset_cache(self): """Clears the cached data""" self.has_cix = None self.page_count = None self.page_list = None self.cix_md = None - def loadCache(self, style_list): + def load_cache(self, style_list): for style in style_list: - self.readMetadata(style) + self.read_metadata(style) def rename(self, path): self.path = path self.archiver.path = path - def zipTest(self): + def zip_test(self): 
return zipfile.is_zipfile(self.path) - def isZip(self): + def is_zip(self): return self.archive_type == self.ArchiveType.Zip - def isWritable(self): + def is_writable(self): if self.archive_type == self.ArchiveType.Unknown: return False @@ -213,64 +226,64 @@ def isWritable(self): return True - def isWritableForStyle(self, data_style): + def is_writable_for_style(self, data_style): - return self.isWritable() + return self.is_writable() - def seemsToBeAComicArchive(self): + def seems_to_be_a_comic_archive(self): - if (self.isZip()) and (self.getNumberOfPages() > 0): + if (self.is_zip()) and (self.get_number_of_pages() > 0): return True else: return False - def readMetadata(self, style): + def read_metadata(self, style): if style == MetaDataStyle.CIX: - return self.readCIX() + return self.read_cix() else: return GenericMetadata() - def writeMetadata(self, metadata, style): + def write_metadata(self, metadata, style): retcode = None if style == MetaDataStyle.CIX: - retcode = self.writeCIX(metadata) + retcode = self.write_cix(metadata) return retcode - def hasMetadata(self, style): + def has_metadata(self, style): if style == MetaDataStyle.CIX: - return self.hasCIX() + return self.check_for_cix() else: return False - def removeMetadata(self, style): + def remove_metadata(self, style): retcode = True if style == MetaDataStyle.CIX: - retcode = self.removeCIX() + retcode = self.remove_cix() return retcode - def getPage(self, index): + def get_page(self, index): image_data = None - filename = self.getPageName(index) + filename = self.get_page_name(index) if filename is not None: try: - image_data = self.archiver.readArchiveFile(filename) + image_data = self.archiver.read_archive_file(filename) except IOError: print("Error reading in page.", file=sys.stderr) return image_data - def getPageName(self, index): + def get_page_name(self, index): if index is None: return None - page_list = self.getPageNameList() + page_list = self.get_page_name_list() num_pages = len(page_list) if num_pages == 0 or index >= num_pages: @@ -278,13 +291,13 @@ def getPageName(self, index): return page_list[index] - def getScannerPageIndex(self): + def get_scanner_page_index(self): scanner_page_index = None # make a guess at the scanner page - name_list = self.getPageNameList() - count = self.getNumberOfPages() + name_list = self.get_page_name_list() + count = self.get_number_of_pages() # too few pages to really know if count < 5: @@ -330,11 +343,11 @@ def getScannerPageIndex(self): return scanner_page_index - def getPageNameList(self, sort_list=True): + def get_page_name_list(self, sort_list=True): if self.page_list is None: # get the list file names in the archive, and sort - files = self.archiver.getArchiveFilenameList() + files = self.archiver.get_archive_filename_list() # seems like some archive creators are on Windows, and don't know # about case-sensitivity! 
@@ -360,114 +373,113 @@ def keyfunc(k): return self.page_list - def getNumberOfPages(self): + def get_number_of_pages(self): if self.page_count is None: - self.page_count = len(self.getPageNameList()) + self.page_count = len(self.get_page_name_list()) return self.page_count - def readCIX(self): + def read_cix(self): if self.cix_md is None: - raw_cix = self.readRawCIX() + raw_cix = self.read_raw_cix() if raw_cix is None or raw_cix == "": self.cix_md = GenericMetadata() else: - self.cix_md = ComicInfoXml().metadataFromString(raw_cix) + self.cix_md = ComicInfoXml().metadata_from_string(raw_cix) # validate the existing page list (make sure count is correct) if len(self.cix_md.pages) != 0: - if len(self.cix_md.pages) != self.getNumberOfPages(): + if len(self.cix_md.pages) != self.get_number_of_pages(): # pages array doesn't match the actual number of images we're seeing # in the archive, so discard the data self.cix_md.pages = [] if len(self.cix_md.pages) == 0: - self.cix_md.setDefaultPageList(self.getNumberOfPages()) + self.cix_md.set_default_page_list(self.get_number_of_pages()) return self.cix_md - def readRawCIX(self): - if not self.hasCIX(): + def read_raw_cix(self): + if not self.check_for_cix(): return None try: - raw_cix = self.archiver.readArchiveFile(self.ci_xml_filename) + raw_cix = self.archiver.read_archive_file(self.ci_xml_filename) except IOError: print("Error reading in raw CIX!") raw_cix = "" return raw_cix - def writeCIX(self, metadata): + def write_cix(self, metadata): if metadata is not None: - self.applyArchiveInfoToMetadata(metadata, calc_page_sizes=True) - cix_string = ComicInfoXml().stringFromMetadata(metadata) - write_success = self.archiver.writeArchiveFile( + self.apply_archive_info_to_metadata(metadata, calc_page_sizes=True) + cix_string = ComicInfoXml().string_from_metadata(metadata) + write_success = self.archiver.write_archive_file( self.ci_xml_filename, cix_string ) if write_success: self.has_cix = True self.cix_md = metadata - self.resetCache() + self.reset_cache() return write_success else: return False - def removeCIX(self): - if self.hasCIX(): - write_success = self.archiver.removeArchiveFile(self.ci_xml_filename) + def remove_cix(self): + if self.check_for_cix(): + write_success = self.archiver.remove_archive_file(self.ci_xml_filename) if write_success: self.has_cix = False self.cix_md = None - self.resetCache() + self.reset_cache() return write_success return True - def hasCIX(self): + def check_for_cix(self): if self.has_cix is None: - if not self.seemsToBeAComicArchive(): + if not self.seems_to_be_a_comic_archive(): self.has_cix = False - elif self.ci_xml_filename in self.archiver.getArchiveFilenameList(): + elif self.ci_xml_filename in self.archiver.get_archive_filename_list(): self.has_cix = True else: self.has_cix = False return self.has_cix - def applyArchiveInfoToMetadata(self, md, calc_page_sizes=False): - md.pageCount = self.getNumberOfPages() + def apply_archive_info_to_metadata(self, metadata, calc_page_sizes=False): + metadata.page_count = self.get_number_of_pages() if calc_page_sizes: - for p in md.pages: - idx = int(p["Image"]) - if pil_available: - if ( - "ImageSize" not in p - or "ImageHeight" not in p - or "ImageWidth" not in p - ): - data = self.getPage(idx) - if data is not None: - try: - im = Image.open(io.BytesIO(data)) - w, h = im.size - - p["ImageSize"] = str(len(data)) - p["ImageHeight"] = str(h) - p["ImageWidth"] = str(w) - except IOError: - p["ImageSize"] = str(len(data)) + for page in metadata.pages: + idx = int(page["Image"]) + 
if ( + "ImageSize" not in page + or "ImageHeight" not in page + or "ImageWidth" not in page + ): + data = self.get_page(idx) + if data is not None: + try: + page_image = Image.open(io.BytesIO(data)) + width, height = page_image.size + + page["ImageSize"] = str(len(data)) + page["ImageHeight"] = str(height) + page["ImageWidth"] = str(width) + except IOError: + page["ImageSize"] = str(len(data)) else: - if "ImageSize" not in p: - data = self.getPage(idx) - p["ImageSize"] = str(len(data)) + if "ImageSize" not in page: + data = self.get_page(idx) + page["ImageSize"] = str(len(data)) - def metadataFromFilename(self, parse_scan_info=True): + def metadata_from_filename(self, parse_scan_info=True): metadata = GenericMetadata() fnp = FileNameParser() - fnp.parseFilename(self.path) + fnp.parse_filename(self.path) if fnp.issue != "": metadata.issue = fnp.issue @@ -487,10 +499,10 @@ def metadataFromFilename(self, parse_scan_info=True): return metadata - def exportAsZip(self, zipfilename): + def export_as_zip(self, zipfilename): if self.archive_type == self.ArchiveType.Zip: # nothing to do, we're already a zip return True zip_archiver = ZipArchiver(zipfilename) - return zip_archiver.copyFromArchive(self.archiver) + return zip_archiver.copy_from_archive(self.archiver) diff --git a/darkseid/comicinfoxml.py b/darkseid/comicinfoxml.py index 7a3bc74..ebf463c 100644 --- a/darkseid/comicinfoxml.py +++ b/darkseid/comicinfoxml.py @@ -19,7 +19,7 @@ class ComicInfoXml: cover_synonyms = ["cover", "covers", "coverartist", "cover artist"] editor_synonyms = ["editor"] - def getParseableCredits(self): + def get_parseable_credits(self): parsable_credits = [] parsable_credits.extend(self.writer_synonyms) parsable_credits.extend(self.penciller_synonyms) @@ -30,23 +30,23 @@ def getParseableCredits(self): parsable_credits.extend(self.editor_synonyms) return parsable_credits - def metadataFromString(self, string): + def metadata_from_string(self, string): tree = ET.ElementTree(ET.fromstring(string)) - return self.convertXMLToMetadata(tree) + return self.convert_xml_to_metadata(tree) - def stringFromMetadata(self, metadata): + def string_from_metadata(self, metadata): header = '\n' - tree = self.convertMetadataToXML(self, metadata) + tree = self.convert_metadata_to_xml(self, metadata) tree_str = ET.tostring(tree.getroot()).decode() return header + tree_str def indent(self, elem, level=0): # for making the XML output readable i = "\n" + level * " " - if len(elem): + if elem: if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): @@ -59,10 +59,7 @@ def indent(self, elem, level=0): if level and (not elem.tail or not elem.tail.strip()): elem.tail = i - def convertMetadataToXML(self, filename, metadata): - - # shorthand for the metadata - md = metadata + def convert_metadata_to_xml(self, filename, metadata): # build a tree structure root = ET.Element("ComicInfo") @@ -74,21 +71,21 @@ def assign(cix_entry, md_entry): if md_entry is not None: ET.SubElement(root, cix_entry).text = "{0}".format(md_entry) - assign("Title", md.title) - assign("Series", md.series) - assign("Number", md.issue) - assign("Count", md.issueCount) - assign("Volume", md.volume) - assign("AlternateSeries", md.alternateSeries) - assign("AlternateNumber", md.alternateNumber) - assign("StoryArc", md.storyArc) - assign("SeriesGroup", md.seriesGroup) - assign("AlternateCount", md.alternateCount) - assign("Summary", md.comments) - assign("Notes", md.notes) - assign("Year", md.year) - assign("Month", md.month) - 
assign("Day", md.day) + assign("Title", metadata.title) + assign("Series", metadata.series) + assign("Number", metadata.issue) + assign("Count", metadata.issue_count) + assign("Volume", metadata.volume) + assign("AlternateSeries", metadata.alternate_series) + assign("AlternateNumber", metadata.alternate_number) + assign("StoryArc", metadata.story_arc) + assign("SeriesGroup", metadata.series_group) + assign("AlternateCount", metadata.alternate_count) + assign("Summary", metadata.comments) + assign("Notes", metadata.notes) + assign("Year", metadata.year) + assign("Month", metadata.month) + assign("Day", metadata.day) # need to specially process the credits, since they are structured # differently than CIX @@ -128,52 +125,52 @@ def assign(cix_entry, md_entry): # second, convert each list to string, and add to XML struct if len(credit_writer_list) > 0: node = ET.SubElement(root, "Writer") - node.text = utils.listToString(credit_writer_list) + node.text = utils.list_to_string(credit_writer_list) if len(credit_penciller_list) > 0: node = ET.SubElement(root, "Penciller") - node.text = utils.listToString(credit_penciller_list) + node.text = utils.list_to_string(credit_penciller_list) if len(credit_inker_list) > 0: node = ET.SubElement(root, "Inker") - node.text = utils.listToString(credit_inker_list) + node.text = utils.list_to_string(credit_inker_list) if len(credit_colorist_list) > 0: node = ET.SubElement(root, "Colorist") - node.text = utils.listToString(credit_colorist_list) + node.text = utils.list_to_string(credit_colorist_list) if len(credit_letterer_list) > 0: node = ET.SubElement(root, "Letterer") - node.text = utils.listToString(credit_letterer_list) + node.text = utils.list_to_string(credit_letterer_list) if len(credit_cover_list) > 0: node = ET.SubElement(root, "CoverArtist") - node.text = utils.listToString(credit_cover_list) + node.text = utils.list_to_string(credit_cover_list) if len(credit_editor_list) > 0: node = ET.SubElement(root, "Editor") - node.text = utils.listToString(credit_editor_list) - - assign("Publisher", md.publisher) - assign("Imprint", md.imprint) - assign("Genre", md.genre) - assign("Web", md.webLink) - assign("PageCount", md.pageCount) - assign("LanguageISO", md.language) - assign("Format", md.format) - assign("AgeRating", md.maturityRating) - if md.blackAndWhite is not None and md.blackAndWhite: + node.text = utils.list_to_string(credit_editor_list) + + assign("Publisher", metadata.publisher) + assign("Imprint", metadata.imprint) + assign("Genre", metadata.genre) + assign("Web", metadata.web_link) + assign("PageCount", metadata.page_count) + assign("LanguageISO", metadata.language) + assign("Format", metadata.format) + assign("AgeRating", metadata.maturity_rating) + if metadata.black_and_white is not None and metadata.black_and_white: ET.SubElement(root, "BlackAndWhite").text = "Yes" - assign("Manga", md.manga) - assign("Characters", md.characters) - assign("Teams", md.teams) - assign("Locations", md.locations) - assign("ScanInformation", md.scanInfo) + assign("Manga", metadata.manga) + assign("Characters", metadata.characters) + assign("Teams", metadata.teams) + assign("Locations", metadata.locations) + assign("ScanInformation", metadata.scan_info) # loop and add the page entries under pages node - if len(md.pages) > 0: + if len(metadata.pages) > 0: pages_node = ET.SubElement(root, "Pages") - for page_dict in md.pages: + for page_dict in metadata.pages: page_node = ET.SubElement(pages_node, "Page") page_node.attrib = page_dict @@ -185,7 +182,7 @@ def 
assign(cix_entry, md_entry): return tree @classmethod - def convertXMLToMetadata(cls, tree): + def convert_xml_to_metadata(cls, tree): root = tree.getroot() @@ -193,7 +190,6 @@ def convertXMLToMetadata(cls, tree): raise ValueError("Metadata is not ComicInfo format") metadata = GenericMetadata() - md = metadata # Helper function def xlate(tag): @@ -203,57 +199,57 @@ def xlate(tag): else: return None - md.series = xlate("Series") - md.title = xlate("Title") - md.issue = xlate("Number") - md.issueCount = xlate("Count") - md.volume = xlate("Volume") - md.alternateSeries = xlate("AlternateSeries") - md.alternateNumber = xlate("AlternateNumber") - md.alternateCount = xlate("AlternateCount") - md.comments = xlate("Summary") - md.notes = xlate("Notes") - md.year = xlate("Year") - md.month = xlate("Month") - md.day = xlate("Day") - md.publisher = xlate("Publisher") - md.imprint = xlate("Imprint") - md.genre = xlate("Genre") - md.webLink = xlate("Web") - md.language = xlate("LanguageISO") - md.format = xlate("Format") - md.manga = xlate("Manga") - md.characters = xlate("Characters") - md.teams = xlate("Teams") - md.locations = xlate("Locations") - md.pageCount = xlate("PageCount") - md.scanInfo = xlate("ScanInformation") - md.storyArc = xlate("StoryArc") - md.seriesGroup = xlate("SeriesGroup") - md.maturityRating = xlate("AgeRating") + metadata.series = xlate("Series") + metadata.title = xlate("Title") + metadata.issue = xlate("Number") + metadata.issue_count = xlate("Count") + metadata.volume = xlate("Volume") + metadata.alternate_series = xlate("AlternateSeries") + metadata.alternate_number = xlate("AlternateNumber") + metadata.alternate_count = xlate("AlternateCount") + metadata.comments = xlate("Summary") + metadata.notes = xlate("Notes") + metadata.year = xlate("Year") + metadata.month = xlate("Month") + metadata.day = xlate("Day") + metadata.publisher = xlate("Publisher") + metadata.imprint = xlate("Imprint") + metadata.genre = xlate("Genre") + metadata.web_link = xlate("Web") + metadata.language = xlate("LanguageISO") + metadata.format = xlate("Format") + metadata.manga = xlate("Manga") + metadata.characters = xlate("Characters") + metadata.teams = xlate("Teams") + metadata.locations = xlate("Locations") + metadata.page_count = xlate("PageCount") + metadata.scan_info = xlate("ScanInformation") + metadata.story_arc = xlate("StoryArc") + metadata.series_group = xlate("SeriesGroup") + metadata.maturity_rating = xlate("AgeRating") tmp = xlate("BlackAndWhite") - md.blackAndWhite = False + metadata.black_and_white = False if tmp is not None and tmp.lower() in ["yes", "true", "1"]: - md.blackAndWhite = True + metadata.black_and_white = True # Now extract the credit info - for n in root: + for credit_node in root: if ( - n.tag == "Writer" - or n.tag == "Penciller" - or n.tag == "Inker" - or n.tag == "Colorist" - or n.tag == "Letterer" - or n.tag == "Editor" + credit_node.tag == "Writer" + or credit_node.tag == "Penciller" + or credit_node.tag == "Inker" + or credit_node.tag == "Colorist" + or credit_node.tag == "Letterer" + or credit_node.tag == "Editor" ): - if n.text is not None: - for name in n.text.split(","): - metadata.addCredit(name.strip(), n.tag) + if credit_node.text is not None: + for name in credit_node.text.split(","): + metadata.add_credit(name.strip(), credit_node.tag) - if n.tag == "CoverArtist": - if n.text is not None: - for name in n.text.split(","): - metadata.addCredit(name.strip(), "Cover") + if credit_node.tag == "CoverArtist": + if credit_node.text is not None: + for name in 
credit_node.text.split(","): + metadata.add_credit(name.strip(), "Cover") # parse page data now pages_node = root.find("Pages") @@ -266,13 +262,13 @@ def xlate(tag): return metadata - def writeToExternalFile(self, filename, metadata): + def write_to_external_file(self, filename, metadata): - tree = self.convertMetadataToXML(self, metadata) + tree = self.convert_metadata_to_xml(self, metadata) # ET.dump(tree) tree.write(filename, encoding="utf-8") - def readFromExternalFile(self, filename): + def read_from_external_file(self, filename): tree = ET.parse(filename) - return self.convertXMLToMetadata(tree) + return self.convert_xml_to_metadata(tree) diff --git a/darkseid/filenameparser.py b/darkseid/filenameparser.py index e82d344..e938a15 100644 --- a/darkseid/filenameparser.py +++ b/darkseid/filenameparser.py @@ -11,26 +11,39 @@ class FileNameParser: + """Class to get parse the filename to get information about the comic.""" + + def __init__(self): + self.issue = "" + self.series = "" + self.volume = "" + self.issue_count = "" + self.year = "" + self.remainder = "" + @staticmethod - def repl(m): - return " " * len(m.group()) + def repl(match): + return " " * len(match.group()) + + def fix_spaces(self, string, remove_dashes=True): + """Returns a string with the spaces fixed""" - def fixSpaces(self, string, remove_dashes=True): if remove_dashes: placeholders = ["[-_]", " +"] else: placeholders = ["[_]", " +"] - for ph in placeholders: - string = re.sub(ph, self.repl, string) + for place_holder in placeholders: + string = re.sub(place_holder, self.repl, string) return string # .strip() - def getIssueCount(self, filename, issue_end): + def get_issue_count(self, filename, issue_end): + """Returns a string with the issue count""" count = "" filename = filename[issue_end:] # replace any name separators with spaces - tmpstr = self.fixSpaces(filename) + tmpstr = self.fix_spaces(filename) found = False match = re.search(r"(?<=\sof\s)\d+(?=\s)", tmpstr, re.IGNORECASE) @@ -48,7 +61,7 @@ def getIssueCount(self, filename, issue_end): return count - def getIssueNumber(self, filename): + def get_issue_number(self, filename): """Returns a tuple of issue number string, and start and end indexes in the filename (The indexes will be used to split the string up for further parsing) """ @@ -77,7 +90,7 @@ def getIssueNumber(self, filename): filename = re.sub(r"\[.*?\]", self.repl, filename) # replace any name separators with spaces - filename = self.fixSpaces(filename) + filename = self.fix_spaces(filename) # remove any "of NN" phrase with spaces (problem: this could break on # some titles) @@ -90,8 +103,8 @@ def getIssueNumber(self, filename): # make a list of each word and its position word_list = list() - for m in re.finditer(r"\S+", filename): - word_list.append((m.group(0), m.start(), m.end())) + for match in re.finditer(r"\S+", filename): + word_list.append((match.group(0), match.start(), match.end())) # remove the first word, since it can't be the issue number if len(word_list) > 1: @@ -104,35 +117,35 @@ def getIssueNumber(self, filename): # first look for a word with "#" followed by digits with optional suffix # this is almost certainly the issue number - for w in reversed(word_list): - if re.match(r"#[-]?(([0-9]*\.[0-9]+|[0-9]+)(\w*))", w[0]): + for word in reversed(word_list): + if re.match(r"#[-]?(([0-9]*\.[0-9]+|[0-9]+)(\w*))", word[0]): found = True break # same as above but w/o a '#', and only look at the last word in the # list if not found: - w = word_list[-1] - if 
re.match(r"[-]?(([0-9]*\.[0-9]+|[0-9]+)(\w*))", w[0]): + word = word_list[-1] + if re.match(r"[-]?(([0-9]*\.[0-9]+|[0-9]+)(\w*))", word[0]): found = True # now try to look for a # followed by any characters if not found: - for w in reversed(word_list): - if re.match(r"#\S+", w[0]): + for word in reversed(word_list): + if re.match(r"#\S+", word[0]): found = True break if found: - issue = w[0] - start = w[1] - end = w[2] + issue = word[0] + start = word[1] + end = word[2] if issue[0] == "#": issue = issue[1:] return issue, start, end - def getSeriesName(self, filename, issue_start): + def get_series_name(self, filename, issue_start): """Use the issue number string index to split the filename string""" if issue_start != 0: @@ -150,7 +163,7 @@ def getSeriesName(self, filename, issue_start): filename = re.sub("__.*", self.repl, filename) filename = filename.replace("+", " ") - tmpstr = self.fixSpaces(filename, remove_dashes=False) + tmpstr = self.fix_spaces(filename, remove_dashes=False) series = tmpstr volume = "" @@ -195,7 +208,8 @@ def getSeriesName(self, filename, issue_start): return series, volume.strip() @staticmethod - def getYear(filename, issue_end): + def get_year(filename, issue_end): + """Return the year from the filename""" filename = filename[issue_end:] @@ -208,7 +222,7 @@ def getYear(filename, issue_end): year = re.sub("[^0-9]", "", year) return year - def getRemainder(self, filename, year, count, volume, issue_end): + def get_remainder(self, filename, year, count, volume, issue_end): """Make a guess at where the the non-interesting stuff begins""" remainder = "" @@ -220,7 +234,7 @@ def getRemainder(self, filename, year, count, volume, issue_end): elif issue_end != 0: remainder = filename[issue_end:] - remainder = self.fixSpaces(remainder, remove_dashes=False) + remainder = self.fix_spaces(remainder, remove_dashes=False) if volume != "": remainder = remainder.replace("Vol." 
+ volume, "", 1) if year != "": @@ -233,7 +247,9 @@ def getRemainder(self, filename, year, count, volume, issue_end): return remainder.strip() - def parseFilename(self, filename): + def parse_filename(self, filename): + """Method to parse the filename.""" + # Get file name without path or extension filename = pathlib.Path(filename).stem @@ -247,16 +263,16 @@ def parseFilename(self, filename): filename = filename.replace("_28", "(") filename = filename.replace("_29", ")") - self.issue, issue_start, issue_end = self.getIssueNumber(filename) - self.series, self.volume = self.getSeriesName(filename, issue_start) + self.issue, issue_start, issue_end = self.get_issue_number(filename) + self.series, self.volume = self.get_series_name(filename, issue_start) # provides proper value when the filename doesn't have a issue number if issue_end == 0: issue_end = len(self.series) - self.year = self.getYear(filename, issue_end) - self.issue_count = self.getIssueCount(filename, issue_end) - self.remainder = self.getRemainder( + self.year = self.get_year(filename, issue_end) + self.issue_count = self.get_issue_count(filename, issue_end) + self.remainder = self.get_remainder( filename, self.year, self.issue_count, self.volume, issue_end ) diff --git a/darkseid/genericmetadata.py b/darkseid/genericmetadata.py index 16e37b8..f84ab6c 100644 --- a/darkseid/genericmetadata.py +++ b/darkseid/genericmetadata.py @@ -34,8 +34,8 @@ class PageType: class GenericMetadata: def __init__(self): - self.isEmpty = True - self.tagOrigin = None + self.is_empty = True + self.tag_origin = None self.series = None self.issue = None @@ -44,31 +44,31 @@ def __init__(self): self.month = None self.year = None self.day = None - self.issueCount = None + self.issue_count = None self.volume = None self.genre = None self.language = None # 2 letter iso code self.comments = None # use same way as Summary in CIX - self.volumeCount = None - self.criticalRating = None + self.volume_count = None + self.critical_rating = None self.country = None - self.alternateSeries = None - self.alternateNumber = None - self.alternateCount = None + self.alternate_series = None + self.alternate_number = None + self.alternate_count = None self.imprint = None self.notes = None - self.webLink = None + self.web_link = None self.format = None self.manga = None - self.blackAndWhite = None - self.pageCount = None - self.maturityRating = None + self.black_and_white = None + self.page_count = None + self.maturity_rating = None - self.storyArc = None - self.seriesGroup = None - self.scanInfo = None + self.story_arc = None + self.series_group = None + self.scan_info = None self.characters = None self.teams = None @@ -80,11 +80,11 @@ def __init__(self): # Some CoMet-only items self.price = None - self.isVersionOf = None + self.is_version_of = None self.rights = None self.identifier = None - self.lastMark = None - self.coverImage = None + self.last_mark = None + self.cover_image = None def overlay(self, new_md): """Overlay a metadata object on this one @@ -100,35 +100,35 @@ def assign(cur, new): else: setattr(self, cur, new) - if not new_md.isEmpty: - self.isEmpty = False + if not new_md.is_empty: + self.is_empty = False assign("series", new_md.series) assign("issue", new_md.issue) - assign("issueCount", new_md.issueCount) + assign("issue_count", new_md.issue_count) assign("title", new_md.title) assign("publisher", new_md.publisher) assign("day", new_md.day) assign("month", new_md.month) assign("year", new_md.year) assign("volume", new_md.volume) - assign("volumeCount", 
new_md.volumeCount) + assign("volume_count", new_md.volume_count) assign("genre", new_md.genre) assign("language", new_md.language) assign("country", new_md.country) - assign("criticalRating", new_md.criticalRating) - assign("alternateSeries", new_md.alternateSeries) - assign("alternateNumber", new_md.alternateNumber) - assign("alternateCount", new_md.alternateCount) + assign("critical_rating", new_md.critical_rating) + assign("alternate_series", new_md.alternate_series) + assign("alternate_number", new_md.alternate_number) + assign("alternate_count", new_md.alternate_count) assign("imprint", new_md.imprint) - assign("webLink", new_md.webLink) + assign("web_link", new_md.web_link) assign("format", new_md.format) assign("manga", new_md.manga) - assign("blackAndWhite", new_md.blackAndWhite) - assign("maturityRating", new_md.maturityRating) - assign("storyArc", new_md.storyArc) - assign("seriesGroup", new_md.seriesGroup) - assign("scanInfo", new_md.scanInfo) + assign("black_and_white", new_md.black_and_white) + assign("maturity_rating", new_md.maturity_rating) + assign("story_arc", new_md.story_arc) + assign("series_group", new_md.series_group) + assign("scan_info", new_md.scan_info) assign("characters", new_md.characters) assign("teams", new_md.teams) assign("locations", new_md.locations) @@ -136,12 +136,12 @@ def assign(cur, new): assign("notes", new_md.notes) assign("price", new_md.price) - assign("isVersionOf", new_md.isVersionOf) + assign("is_version_of", new_md.is_version_of) assign("rights", new_md.rights) assign("identifier", new_md.identifier) - assign("lastMark", new_md.lastMark) + assign("last_mark", new_md.last_mark) - self.overlayCredits(new_md.credits) + self.overlay_credits(new_md.credits) # TODO # not sure if the tags and pages should broken down, or treated @@ -155,23 +155,23 @@ def assign(cur, new): if len(new_md.pages) > 0: assign("pages", new_md.pages) - def overlayCredits(self, new_credits): - for c in new_credits: - if "primary" in c and c["primary"]: + def overlay_credits(self, new_credits): + for credit in new_credits: + if "primary" in credit and credit["primary"]: primary = True else: primary = False # Remove credit role if person is blank - if c["person"] == "": + if credit["person"] == "": for r in reversed(self.credits): - if r["role"].lower() == c["role"].lower(): + if r["role"].lower() == credit["role"].lower(): self.credits.remove(r) # otherwise, add it! 
else: - self.addCredit(c["person"], c["role"], primary) + self.add_credit(credit["person"], credit["role"], primary) - def setDefaultPageList(self, count): + def set_default_page_list(self, count): # generate a default page list, with the first page marked as the cover for i in range(count): page_dict = dict() @@ -180,7 +180,7 @@ def setDefaultPageList(self, count): page_dict["Type"] = PageType.FrontCover self.pages.append(page_dict) - def getArchivePageIndex(self, pagenum): + def get_archive_page_index(self, pagenum): # convert the displayed page number to the page index of the file in # the archive if pagenum < len(self.pages): @@ -188,19 +188,19 @@ def getArchivePageIndex(self, pagenum): else: return 0 - def getCoverPageIndexList(self): + def get_cover_page_index_list(self): # return a list of archive page indices of cover pages coverlist = [] - for p in self.pages: - if "Type" in p and p["Type"] == PageType.FrontCover: - coverlist.append(int(p["Image"])) + for page in self.pages: + if "Type" in page and page["Type"] == PageType.FrontCover: + coverlist.append(int(page["Image"])) if len(coverlist) == 0: coverlist.append(0) return coverlist - def addCredit(self, person, role, primary=False): + def add_credit(self, person, role, primary=False): credit = dict() credit["person"] = person @@ -225,7 +225,7 @@ def addCredit(self, person, role, primary=False): def __str__(self): vals = [] - if self.isEmpty: + if self.is_empty: return "No metadata" def add_string(tag, val): @@ -237,45 +237,45 @@ def add_attr_string(tag): add_attr_string("series") add_attr_string("issue") - add_attr_string("issueCount") + add_attr_string("issue_count") add_attr_string("title") add_attr_string("publisher") add_attr_string("year") add_attr_string("month") add_attr_string("day") add_attr_string("volume") - add_attr_string("volumeCount") + add_attr_string("volume_count") add_attr_string("genre") add_attr_string("language") add_attr_string("country") - add_attr_string("criticalRating") - add_attr_string("alternateSeries") - add_attr_string("alternateNumber") - add_attr_string("alternateCount") + add_attr_string("critical_rating") + add_attr_string("alternate_series") + add_attr_string("alternate_number") + add_attr_string("alternate_count") add_attr_string("imprint") - add_attr_string("webLink") + add_attr_string("web_link") add_attr_string("format") add_attr_string("manga") add_attr_string("price") - add_attr_string("isVersionOf") + add_attr_string("is_version_of") add_attr_string("rights") add_attr_string("identifier") - add_attr_string("lastMark") - - if self.blackAndWhite: - add_attr_string("blackAndWhite") - add_attr_string("maturityRating") - add_attr_string("storyArc") - add_attr_string("seriesGroup") - add_attr_string("scanInfo") + add_attr_string("last_mark") + + if self.black_and_white: + add_attr_string("black_and_white") + add_attr_string("maturity_rating") + add_attr_string("story_arc") + add_attr_string("series_group") + add_attr_string("scan_info") add_attr_string("characters") add_attr_string("teams") add_attr_string("locations") add_attr_string("comments") add_attr_string("notes") - add_string("tags", utils.listToString(self.tags)) + add_string("tags", utils.list_to_string(self.tags)) for c in self.credits: primary = "" diff --git a/darkseid/issuestring.py b/darkseid/issuestring.py index c3f2175..c0209b2 100644 --- a/darkseid/issuestring.py +++ b/darkseid/issuestring.py @@ -9,6 +9,8 @@ class IssueString: + """Class to handle various types of comic issue numbers.""" + def __init__(self, text): # break 
up the issue number string into 2 parts: the numeric and suffix string. @@ -71,7 +73,8 @@ def __init__(self, text): # print "num: {0} suf: {1}".format(self.num, self.suffix) - def asString(self, pad=0): + def as_string(self, pad=0): + """Returns a string with left-side zero padding""" # return the float, left side zero-padded, with suffix attached if self.num is None: return self.suffix @@ -89,9 +92,9 @@ def asString(self, pad=0): # create padding padding = "" - l = len(str(num_int)) - if l < pad: - padding = "0" * (pad - l) + length = len(str(num_int)) + if length < pad: + padding = "0" * (pad - length) num_s = padding + num_s if negative: @@ -99,8 +102,11 @@ def asString(self, pad=0): return num_s - def asFloat(self): - # return the float, with no suffix + def as_float(self): + """Return a float with no suffix + + example: "1½" is returned as "1.5" + """ if self.suffix == "½": if self.num is not None: return self.num + 0.5 @@ -108,8 +114,8 @@ def asFloat(self): return 0.5 return self.num - def asInt(self): - # return the int version of the float + def as_int(self): + """Returns the integer version of the float""" if self.num is None: return None return int(self.num) diff --git a/darkseid/utils.py b/darkseid/utils.py index dad1a12..86c4ba2 100644 --- a/darkseid/utils.py +++ b/darkseid/utils.py @@ -7,7 +7,7 @@ def get_recursive_filelist(pathlist): - """ Create a recursive list of comic files """ + """Takes a list of paths and return a list of comic archives""" filelist = [] for path in pathlist: path = pathlib.Path(path) @@ -22,35 +22,41 @@ def get_recursive_filelist(pathlist): return filelist -def listToString(l): +def list_to_string(list_of_strings): + """ + Function that takes a list of string and converts it to a string. + For example: ["apple", "banana", "cherry"] is changed to "apple; banana; cherry" + """ string = "" - if l is not None: - for item in l: + if list_of_strings is not None: + for item in list_of_strings: if len(string) > 0: string += "; " string += item return string -def removearticles(text): +def remove_articles(text): + """Takes a string and removes any articles in it.""" text = text.lower() articles = ["and", "a", "&", "issue", "the"] - newText = "" + new_text = "" for word in text.split(" "): if word not in articles: - newText += word + " " + new_text += word + " " - newText = newText[:-1] + new_text = new_text[:-1] # now get rid of some other junk - newText = newText.replace(":", "") - newText = newText.replace(",", "") - newText = newText.replace("-", " ") + new_text = new_text.replace(":", "") + new_text = new_text.replace(",", "") + new_text = new_text.replace("-", " ") - return newText + return new_text def unique_file(file_name): + """Takes a filename and if one already exist with that name returns a new filename""" counter = 1 # returns ('/path/file', '.ext') file_name_parts = os.path.splitext(file_name) diff --git a/tests/test_darkseid_comicarchive.py b/tests/test_darkseid_comicarchive.py index 0319ee0..189712d 100644 --- a/tests/test_darkseid_comicarchive.py +++ b/tests/test_darkseid_comicarchive.py @@ -48,56 +48,56 @@ def tearDown(self): def test_zip_file_exists(self): """ Test function that determines if a file is a zip file """ - res = self.comic_archive.isZip() + res = self.comic_archive.is_zip() self.assertTrue(res) def test_archive_number_of_pages(self): """ Test to determine number of pages in a comic archive """ - res = self.comic_archive.getNumberOfPages() + res = self.comic_archive.get_number_of_pages() self.assertEqual(res, 3) def 
test_archive_is_writable(self): """ Test to determine if a comic archive is writable """ - res = self.comic_archive.isWritable() + res = self.comic_archive.is_writable() self.assertTrue(res) def test_archive_is_writable_for_style(self): """ Test to determine writing style of comic tag """ - res = self.comic_archive.isWritableForStyle(MetaDataStyle.CIX) + res = self.comic_archive.is_writable_for_style(MetaDataStyle.CIX) self.assertTrue(res) def test_archive_test_metadata(self): """ Test to determine if a comic archive has metadata """ # verify archive has no metadata - res = self.comic_archive.hasMetadata(MetaDataStyle.CIX) + res = self.comic_archive.has_metadata(MetaDataStyle.CIX) self.assertFalse(res) # now let's test that we can write some - self.comic_archive.writeMetadata(self.meta_data, MetaDataStyle.CIX) - has_md = self.comic_archive.hasMetadata(MetaDataStyle.CIX) + self.comic_archive.write_metadata(self.meta_data, MetaDataStyle.CIX) + has_md = self.comic_archive.has_metadata(MetaDataStyle.CIX) self.assertTrue(has_md) # Verify what was written - new_md = self.comic_archive.readMetadata(MetaDataStyle.CIX) + new_md = self.comic_archive.read_metadata(MetaDataStyle.CIX) self.assertEqual(new_md.series, self.meta_data.series) self.assertEqual(new_md.issue, self.meta_data.issue) self.assertEqual(new_md.title, self.meta_data.title) self.assertEqual(new_md.notes, self.meta_data.notes) # now remove what was just written - self.comic_archive.removeMetadata(MetaDataStyle.CIX) - remove_md = self.comic_archive.hasMetadata(MetaDataStyle.CIX) + self.comic_archive.remove_metadata(MetaDataStyle.CIX) + remove_md = self.comic_archive.has_metadata(MetaDataStyle.CIX) self.assertFalse(remove_md) def test_archive_get_page(self): """ Test to set if a page from a comic archive can be retrieved """ # Get page 2 - img = self.comic_archive.getPage(1) + img = self.comic_archive.get_page(1) self.assertIsNotNone(img) def test_archive_metadata_from_filename(self): """ Test to get metadata from comic archives filename """ - test_md = self.comic_archive.metadataFromFilename() + test_md = self.comic_archive.metadata_from_filename() self.assertEqual(test_md.series, "Aquaman") self.assertEqual(test_md.issue, "1") self.assertEqual(test_md.year, "1994") @@ -105,9 +105,9 @@ def test_archive_metadata_from_filename(self): def test_archive_apply_file_info_to_metadata(self): """ Test to apply archive info to the generic metadata """ test_md = GenericMetadata() - self.comic_archive.applyArchiveInfoToMetadata(test_md) + self.comic_archive.apply_archive_info_to_metadata(test_md) # TODO: Need to test calculate page sizes - self.assertEqual(test_md.pageCount, 3) + self.assertEqual(test_md.page_count, 3) if __name__ == "__main__": diff --git a/tests/test_darkseid_comicinfoxml.py b/tests/test_darkseid_comicinfoxml.py index 392e859..1a8e054 100644 --- a/tests/test_darkseid_comicinfoxml.py +++ b/tests/test_darkseid_comicinfoxml.py @@ -15,24 +15,24 @@ def setUp(self): self.meta_data.issue = "1" self.meta_data.year = "1993" self.meta_data.day = "15" - self.meta_data.addCredit("Peter David", "Writer", primary=True) - self.meta_data.addCredit("Martin Egeland", "Penciller") - self.meta_data.addCredit("Martin Egeland", "Cover") - self.meta_data.addCredit("Kevin Dooley", "Editor") - self.meta_data.addCredit("Howard Shum", "Inker") - self.meta_data.addCredit("Tom McCraw", "Colorist") - self.meta_data.addCredit("Dan Nakrosis", "Letterer") + self.meta_data.add_credit("Peter David", "Writer", primary=True) + self.meta_data.add_credit("Martin 
Egeland", "Penciller") + self.meta_data.add_credit("Martin Egeland", "Cover") + self.meta_data.add_credit("Kevin Dooley", "Editor") + self.meta_data.add_credit("Howard Shum", "Inker") + self.meta_data.add_credit("Tom McCraw", "Colorist") + self.meta_data.add_credit("Dan Nakrosis", "Letterer") def test_metadata_from_xml(self): """ Simple test of creating the ComicInfo """ - res = ComicInfoXml().stringFromMetadata(self.meta_data) + res = ComicInfoXml().string_from_metadata(self.meta_data) # TODO: add more asserts to verify data. self.assertIsNotNone(res) def test_meta_write_to_file(self): """ Test of writing the metadata to a file """ tmp_file = tempfile.NamedTemporaryFile(suffix=".xml") - ComicInfoXml().writeToExternalFile(tmp_file.name, self.meta_data) + ComicInfoXml().write_to_external_file(tmp_file.name, self.meta_data) # Read the contents of the file just written. # TODO: Verify the data. res = open(tmp_file.name).read() @@ -43,9 +43,9 @@ def test_read_from_file(self): """ Test to read in the data from a file """ tmp_file = tempfile.NamedTemporaryFile(suffix=".xml") # Write metadata to file - ComicInfoXml().writeToExternalFile(tmp_file.name, self.meta_data) + ComicInfoXml().write_to_external_file(tmp_file.name, self.meta_data) # Read the metadat from the file - new_md = ComicInfoXml().readFromExternalFile(tmp_file.name) + new_md = ComicInfoXml().read_from_external_file(tmp_file.name) tmp_file.close() self.assertIsNotNone(new_md) diff --git a/tests/test_darkseid_filenameparser.py b/tests/test_darkseid_filenameparser.py index b0c5684..acd341a 100644 --- a/tests/test_darkseid_filenameparser.py +++ b/tests/test_darkseid_filenameparser.py @@ -16,8 +16,8 @@ def setUp(self): def test_special_format(self): """ Test for files with a special name like 'TPB' """ comic = "Aquaman TPB (1994)" - _, issue_start, _ = self.fnp.getIssueNumber(comic) - series, volume = self.fnp.getSeriesName(comic, issue_start) + _, issue_start, _ = self.fnp.get_issue_number(comic) + series, volume = self.fnp.get_series_name(comic, issue_start) self.assertEqual(issue_start, 0) self.assertEqual(series, "Aquaman") self.assertEqual(volume, "1994") @@ -25,42 +25,42 @@ def test_special_format(self): def test_get_issue_number(self): """ Test to get the issue number from the filename """ # Returns a tuple of issue number string, and start and end indexes in the filename - issue, issue_start, issue_end = self.fnp.getIssueNumber(self.comic) + issue, issue_start, issue_end = self.fnp.get_issue_number(self.comic) self.assertEqual(issue, "002") self.assertEqual(issue_start, 25) self.assertEqual(issue_end, 29) def test_get_year(self): """ Test to get the year from a filename """ - _, _, issue_end = self.fnp.getIssueNumber(self.comic) - year = self.fnp.getYear(self.comic, issue_end) + _, _, issue_end = self.fnp.get_issue_number(self.comic) + year = self.fnp.get_year(self.comic, issue_end) self.assertEqual(year, "2013") def test_get_series_name(self): """ Test to get the series name from a filename """ - _, issue_start, _ = self.fnp.getIssueNumber(self.comic) - series, volume = self.fnp.getSeriesName(self.comic, issue_start) + _, issue_start, _ = self.fnp.get_issue_number(self.comic) + series, volume = self.fnp.get_series_name(self.comic, issue_start) self.assertEqual(series, "Afterlife With Archie") self.assertEqual(volume, "1") def test_get_count(self): """ Test to get the total number of issues from the filename """ - _, _, issue_end = self.fnp.getIssueNumber(self.comic) - issue_count = self.fnp.getIssueCount(self.comic, issue_end) + 
_, _, issue_end = self.fnp.get_issue_number(self.comic) + issue_count = self.fnp.get_issue_count(self.comic, issue_end) self.assertEqual(issue_count, "8") def test_fix_spaces(self): """ Test of converting underscores to spaces in the filename """ - new_name = self.fnp.fixSpaces(self.comic) + new_name = self.fnp.fix_spaces(self.comic) self.assertNotEqual(new_name, "Afterlife With Archie") def test_get_remainder(self): """ Test the remainder function """ - _, issue_start, issue_end = self.fnp.getIssueNumber(self.comic) - year = self.fnp.getYear(self.comic, issue_end) - _, volume = self.fnp.getSeriesName(self.comic, issue_start) - count = self.fnp.getIssueCount(self.comic, issue_end) - remainder = self.fnp.getRemainder(self.comic, year, count, volume, issue_end) + _, issue_start, issue_end = self.fnp.get_issue_number(self.comic) + year = self.fnp.get_year(self.comic, issue_end) + _, volume = self.fnp.get_series_name(self.comic, issue_start) + count = self.fnp.get_issue_count(self.comic, issue_end) + remainder = self.fnp.get_remainder(self.comic, year, count, volume, issue_end) self.assertEqual(remainder, "(of 08)") def test_parse_filename(self): @@ -70,7 +70,7 @@ def test_parse_filename(self): test_filename = tmp_path + os.path.pathsep + tmp_file.name tmp_file.close() - self.fnp.parseFilename(test_filename) + self.fnp.parse_filename(test_filename) self.assertEqual(self.fnp.series, "Afterlife With Archie") self.assertEqual(self.fnp.volume, "1") self.assertEqual(self.fnp.issue, "2") diff --git a/tests/test_darkseid_genericmetadata.py b/tests/test_darkseid_genericmetadata.py index 37a98f7..bf7e292 100644 --- a/tests/test_darkseid_genericmetadata.py +++ b/tests/test_darkseid_genericmetadata.py @@ -9,13 +9,13 @@ def setUp(self): self.meta_data.series = "Aquaman" self.meta_data.issue = "0" self.meta_data.title = "A Crash of Symbols" - self.meta_data.isEmpty = False + self.meta_data.is_empty = False self.new_md = GenericMetadata() self.new_md.year = "1994" self.new_md.month = "10" self.new_md.day = "1" - self.meta_data.isEmpty = False + self.meta_data.is_empty = False def test_metadata_overlay(self): self.meta_data.overlay(self.new_md) @@ -34,9 +34,9 @@ def test_metadata_credits(self): {"person": "Martin Egeland", "role": "Cover"}, ] - self.meta_data.addCredit("Peter David", "Writer", primary=True) - self.meta_data.addCredit("Martin Egeland", "Penciller") - self.meta_data.addCredit("Martin Egeland", "Cover") + self.meta_data.add_credit("Peter David", "Writer", primary=True) + self.meta_data.add_credit("Martin Egeland", "Penciller") + self.meta_data.add_credit("Martin Egeland", "Cover") self.assertEqual(self.meta_data.credits, result) @@ -47,8 +47,8 @@ def test_metadata_credits_overlay(self): {"person": "Tom McCray", "role": "Colorist"}, ] - self.meta_data.addCredit("Peter David", "Writer") - self.meta_data.overlayCredits(new_credit) + self.meta_data.add_credit("Peter David", "Writer") + self.meta_data.overlay_credits(new_credit) self.assertEqual(self.meta_data.credits, result) diff --git a/tests/test_darkseid_issuestring.py b/tests/test_darkseid_issuestring.py index ba50bc4..9cddff5 100644 --- a/tests/test_darkseid_issuestring.py +++ b/tests/test_darkseid_issuestring.py @@ -5,43 +5,43 @@ class TestIssueString(TestCase): def test_issue_string_pad(self): - val = IssueString(int(1)).asString(pad=3) + val = IssueString(int(1)).as_string(pad=3) self.assertEqual(val, "001") def test_issue_float(self): - val = IssueString("1½").asFloat() + val = IssueString("1½").as_float() self.assertEqual(val, 1.5) def 
test_issue_float_half(self): - val = IssueString("½").asFloat() + val = IssueString("½").as_float() self.assertEqual(val, 0.5) def test_issue_verify_float(self): - val = IssueString("1.5").asFloat() + val = IssueString("1.5").as_float() self.assertEqual(val, 1.5) def test_issue_string_no_value_as_int(self): - val = IssueString("").asInt() + val = IssueString("").as_int() self.assertIsNone(val) def test_issue_int(self): - val = IssueString("1").asInt() + val = IssueString("1").as_int() self.assertEqual(val, 1) def test_issue_float_as_int(self): - val = IssueString("1.5").asInt() + val = IssueString("1.5").as_int() self.assertEqual(val, 1) def test_issue_string_monsters_unleashed(self): - val = IssueString("1.MU").asString(3) + val = IssueString("1.MU").as_string(3) self.assertEqual(val, "001.MU") def test_issue_string_minus_one(self): - val = IssueString("-1").asString(3) + val = IssueString("-1").as_string(3) self.assertEqual(val, "-001") def test_issue_string_none_value(self): - val = IssueString("Test").asString() + val = IssueString("Test").as_string() self.assertEqual(val, "Test") diff --git a/tests/test_darkseid_utils.py b/tests/test_darkseid_utils.py index 9f68414..8a995ca 100644 --- a/tests/test_darkseid_utils.py +++ b/tests/test_darkseid_utils.py @@ -23,14 +23,14 @@ def tearDown(self): def test_remove_articles(self): txt = "The Champions & Inhumans" - new_txt = utils.removearticles(txt) + new_txt = utils.remove_articles(txt) self.assertEqual(new_txt, "champions inhumans") def test_list_to_string(self): thislist = ["apple", "banana", "cherry"] expected_result = "apple; banana; cherry" - list_string = utils.listToString(thislist) + list_string = utils.list_to_string(thislist) self.assertEqual(list_string, expected_result) def test_unique_name(self):
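
Beyond the version bump to 0.6.0 and the new pylint dev dependency, the substance of this diff is a wholesale rename of the public API from camelCase to snake_case (readMetadata → read_metadata, asString → as_string, addCredit → add_credit, and so on), so callers need matching updates. The sketch below is illustrative only and not part of the patch: the archive path is hypothetical, and the ComicArchive class name is assumed from comicarchive.py and the updated tests, since its constructor is outside the hunks shown here.

```python
# Usage sketch for the renamed snake_case API (assumptions noted in comments).
from darkseid.comicarchive import ComicArchive, MetaDataStyle  # ComicArchive name assumed
from darkseid.filenameparser import FileNameParser
from darkseid.genericmetadata import GenericMetadata
from darkseid.issuestring import IssueString

# Filename parsing: parseFilename() is now parse_filename().
parser = FileNameParser()
parser.parse_filename("Aquaman #001 (of 08) (1994).cbz")  # hypothetical file
print(parser.series, parser.volume, parser.issue, parser.year, parser.issue_count)

# Issue numbers: asString()/asFloat()/asInt() are now as_string()/as_float()/as_int().
print(IssueString(parser.issue).as_string(pad=3))

# Metadata round trip against the ComicRack (CIX) style.
archive = ComicArchive("Aquaman #001 (of 08) (1994).cbz")  # hypothetical file
if archive.seems_to_be_a_comic_archive():                  # was seemsToBeAComicArchive()
    metadata = GenericMetadata()
    metadata.is_empty = False                               # was isEmpty
    metadata.series = parser.series
    metadata.issue = parser.issue
    metadata.add_credit("Peter David", "Writer", primary=True)  # was addCredit()
    archive.write_metadata(metadata, MetaDataStyle.CIX)         # was writeMetadata()
    print(archive.read_metadata(MetaDataStyle.CIX).series)      # was readMetadata()
```

The same rename applies to ComicInfoXml (string_from_metadata, write_to_external_file, read_from_external_file) and to utils (list_to_string, remove_articles), as exercised by the updated test modules above.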