diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 00000000..c3d0309e --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1 @@ +custom: ["https://PayPal.Me/calibreweb",] diff --git a/.gitignore b/.gitignore index 614e9936..14da8a03 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,7 @@ vendor/ # calibre-web *.db *.log +cps/cache .idea/ *.bak diff --git a/README.md b/README.md index 896d4535..267f21ef 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # About -Calibre-Web is a web app providing a clean interface for browsing, reading and downloading eBooks using an existing [Calibre](https://calibre-ebook.com) database. +Calibre-Web is a web app providing a clean interface for browsing, reading and downloading eBooks using a valid [Calibre](https://calibre-ebook.com) database. [![GitHub License](https://img.shields.io/github/license/janeczku/calibre-web?style=flat-square)](https://github.com/janeczku/calibre-web/blob/master/LICENSE) [![GitHub commit activity](https://img.shields.io/github/commit-activity/w/janeczku/calibre-web?logo=github&style=flat-square&label=commits)]() @@ -19,7 +19,7 @@ Calibre-Web is a web app providing a clean interface for browsing, reading and d - full graphical setup - User management with fine-grained per-user permissions - Admin interface -- User Interface in brazilian, czech, dutch, english, finnish, french, german, greek, hungarian, italian, japanese, khmer, polish, russian, simplified and traditional chinese, spanish, swedish, turkish, ukrainian +- User Interface in brazilian, czech, dutch, english, finnish, french, german, greek, hungarian, italian, japanese, khmer, korean, polish, russian, simplified and traditional chinese, spanish, swedish, turkish, ukrainian - OPDS feed for eBook reader apps - Filter and search by titles, authors, tags, series and language - Create a custom book collection (shelves) @@ -40,23 +40,20 @@ Calibre-Web is a web app providing a clean interface for browsing, reading 
and d ## Installation #### Installation via pip (recommended) -1. Install calibre web via pip with the command `pip install calibreweb` (Depending on your OS and or distro the command could also be `pip3`). -2. Optional features can also be installed via pip, please refer to [this page](https://github.com/janeczku/calibre-web/wiki/Dependencies-in-Calibre-Web-Linux-Windows) for details -3. Calibre-Web can be started afterwards by typing `cps` or `python3 -m cps` +1. To avoid problems with already installed python dependencies, it's recommended to create a virtual environment for Calibre-Web +2. Install Calibre-Web via pip with the command `pip install calibreweb` (Depending on your OS and or distro the command could also be `pip3`). +3. Optional features can also be installed via pip, please refer to [this page](https://github.com/janeczku/calibre-web/wiki/Dependencies-in-Calibre-Web-Linux-Windows) for details +4. Calibre-Web can be started afterwards by typing `cps` -#### Manual installation -1. Install dependencies by running `pip3 install --target vendor -r requirements.txt` (python3.x). Alternativly set up a python virtual environment. -2. Execute the command: `python3 cps.py` (or `nohup python3 cps.py` - recommended if you want to exit the terminal window) - -Issues with Ubuntu: -Please note that running the above install command can fail on some versions of Ubuntu, saying `"can't combine user with prefix"`. This is a [known bug](https://github.com/pypa/pip/issues/3826) and can be remedied by using the command `pip install --system --target vendor -r requirements.txt` instead. +In the Wiki there are also examples for: a [manual installation](https://github.com/janeczku/calibre-web/wiki/Manual-installation), [installation on Linux Mint](https://github.com/janeczku/calibre-web/wiki/How-To:Install-Calibre-Web-in-Linux-Mint-19-or-20), [installation on a Cloud Provider](https://github.com/janeczku/calibre-web/wiki/How-To:-Install-Calibre-Web-on-a-Cloud-Provider). 
## Quick start -Point your browser to `http://localhost:8083` or `http://localhost:8083/opds` for the OPDS catalog -Set `Location of Calibre database` to the path of the folder where your Calibre library (metadata.db) lives, push "submit" button\ -Optionally a Google Drive can be used to host the calibre library [-> Using Google Drive integration](https://github.com/janeczku/calibre-web/wiki/Configuration#using-google-drive-integration) -Go to Login page +Point your browser to `http://localhost:8083` or `http://localhost:8083/opds` for the OPDS catalog \ +Login with default admin login \ +Set `Location of Calibre database` to the path of the folder where your Calibre library (metadata.db) lives, push "submit" button \ +Optionally a Google Drive can be used to host the calibre library [-> Using Google Drive integration](https://github.com/janeczku/calibre-web/wiki/Configuration#using-google-drive-integration) \ +Afterwards you can configure your Calibre-Web instance ([Basic Configuration](https://github.com/janeczku/calibre-web/wiki/Configuration#basic-configuration) and [UI Configuration](https://github.com/janeczku/calibre-web/wiki/Configuration#ui-configuration) on admin page) #### Default admin login: *Username:* admin\ @@ -71,7 +68,7 @@ Optionally, to enable on-the-fly conversion from one ebook format to another whe [Download and install](https://calibre-ebook.com/download) the Calibre desktop program for your platform and enter the folder including program name (normally /opt/calibre/ebook-convert, or C:\Program Files\calibre\ebook-convert.exe) in the field "calibre's converter tool" on the setup page. -[Download](https://github.com/pgaskin/kepubify/releases/latest) Kepubify tool for your platform and place the binary starting with `kepubify` in Linux: `\opt\kepubify` Windows: `C:\Program Files\kepubify`. 
+[Download](https://github.com/pgaskin/kepubify/releases/latest) Kepubify tool for your platform and place the binary starting with `kepubify` in Linux: `/opt/kepubify` Windows: `C:\Program Files\kepubify`. ## Docker Images diff --git a/SECURITY.md b/SECURITY.md index bbaad7c4..1b93b5f9 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -10,23 +10,36 @@ To receive fixes for security vulnerabilities it is required to always upgrade t ## History -| Fixed in | Description |CVE number | -| ---------- |---------|---------| -| 3rd July 2018 | Guest access acts as a backdoor|| -| V 0.6.7 |Hardcoded secret key for sessions |CVE-2020-12627 | -| V 0.6.13|Calibre-Web Metadata cross site scripting |CVE-2021-25964| -| V 0.6.13|Name of Shelves are only visible to users who can access the corresponding shelf Thanks to @ibarrionuevo|| -| V 0.6.13|JavaScript could get executed in the description field. Thanks to @ranjit-git and Hagai Wechsler (WhiteSource)|| -| V 0.6.13|JavaScript could get executed in a custom column of type "comment" field || -| V 0.6.13|JavaScript could get executed after converting a book to another format with a title containing javascript code|| -| V 0.6.13|JavaScript could get executed after converting a book to another format with a username containing javascript code|| -| V 0.6.13|JavaScript could get executed in the description series, categories or publishers title|| -| V 0.6.13|JavaScript could get executed in the shelf title|| -| V 0.6.13|Login with the old session cookie after logout. Thanks to @ibarrionuevo|| -| V 0.6.14|CSRF was possible. Thanks to @mik317 and Hagai Wechsler (WhiteSource) |CVE-2021-25965| -| V 0.6.14|Cross-Site Scripting vulnerability on typeahead inputs. 
Thanks to @notdodo|| +| Fixed in | Description |CVE number | +|---------------|--------------------------------------------------------------------------------------------------------------------|---------| +| 3rd July 2018 | Guest access acts as a backdoor || +| V 0.6.7 | Hardcoded secret key for sessions |CVE-2020-12627 | +| V 0.6.13 | Calibre-Web Metadata cross site scripting |CVE-2021-25964| +| V 0.6.13 | Name of Shelves are only visible to users who can access the corresponding shelf Thanks to @ibarrionuevo || +| V 0.6.13 | JavaScript could get executed in the description field. Thanks to @ranjit-git and Hagai Wechsler (WhiteSource) || +| V 0.6.13 | JavaScript could get executed in a custom column of type "comment" field || +| V 0.6.13 | JavaScript could get executed after converting a book to another format with a title containing javascript code || +| V 0.6.13 | JavaScript could get executed after converting a book to another format with a username containing javascript code || +| V 0.6.13 | JavaScript could get executed in the description series, categories or publishers title || +| V 0.6.13 | JavaScript could get executed in the shelf title || +| V 0.6.13 | Login with the old session cookie after logout. Thanks to @ibarrionuevo || +| V 0.6.14 | CSRF was possible. Thanks to @mik317 and Hagai Wechsler (WhiteSource) |CVE-2021-25965| +| V 0.6.14 | Migrated some routes to POST-requests (CSRF protection). Thanks to @scara31 |CVE-2021-4164| +| V 0.6.15 | Fix for "javascript:" script links in identifier. Thanks to @scara31 |CVE-2021-4170| +| V 0.6.15 | Cross-Site Scripting vulnerability on uploaded cover file names. Thanks to @ibarrionuevo || +| V 0.6.15 | Creating public shelfs is now denied if user is missing the edit public shelf right. Thanks to @ibarrionuevo || +| V 0.6.15 | Changed error message in case of trying to delete a shelf unauthorized. Thanks to @ibarrionuevo || +| V 0.6.16 | JavaScript could get executed on authors page. 
Thanks to @alicaz |CVE-2022-0352| +| V 0.6.16 | Localhost can no longer be used to upload covers. Thanks to @scara31 |CVE-2022-0339| +| V 0.6.16 | Another case where public shelfs could be created without permission is prevented. Thanks to @nhiephon |CVE-2022-0273| +| V 0.6.16 | It's prevented to get the name of a private shelfs. Thanks to @nhiephon |CVE-2022-0405| +| V 0.6.17 | The SSRF Protection can no longer be bypassed via an HTTP redirect. Thanks to @416e6e61 |CVE-2022-0767| +| V 0.6.17 | The SSRF Protection can no longer be bypassed via 0.0.0.0 and it's ipv6 equivalent. Thanks to @r0hanSH |CVE-2022-0766| +| V 0.6.18 | Possible SQL Injection is prevented in user table Thanks to Iman Sharafaldin (Forward Security) || +| V 0.6.18 | The SSRF protection no longer can be bypassed by IPV6/IPV4 embedding. Thanks to @416e6e61 |CVE-2022-0939| +| V 0.6.18 | The SSRF protection no longer can be bypassed to connect to other servers in the local network. Thanks to @michaellrowley |CVE-2022-0990| -## Staement regarding Log4j (CVE-2021-44228 and related) +## Statement regarding Log4j (CVE-2021-44228 and related) Calibre-web is not affected by bugs related to Log4j. Calibre-Web is a python program, therefore not using Java, and not using the Java logging feature log4j. diff --git a/cps.py b/cps.py index 277da288..e4f9c520 100755 --- a/cps.py +++ b/cps.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- # This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) -# Copyright (C) 2012-2019 OzzieIsaacs +# Copyright (C) 2022 OzzieIsaacs # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -16,72 +16,19 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-try: - from gevent import monkey - monkey.patch_all() -except ImportError: - pass -import sys import os +import sys -# Insert local directories into path -sys.path.append(os.path.dirname(os.path.abspath(__file__))) -sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'vendor')) - - -from cps import create_app -from cps import web_server -from cps.opds import opds -from cps.web import web -from cps.jinjia import jinjia -from cps.about import about -from cps.shelf import shelf -from cps.admin import admi -from cps.gdrive import gdrive -from cps.editbooks import editbook -from cps.remotelogin import remotelogin -from cps.search_metadata import meta -from cps.error_handler import init_errorhandler - -try: - from cps.kobo import kobo, get_kobo_activated - from cps.kobo_auth import kobo_auth - kobo_available = get_kobo_activated() -except (ImportError, AttributeError): # Catch also error for not installed flask-WTF (missing csrf decorator) - kobo_available = False - -try: - from cps.oauth_bb import oauth - oauth_available = True -except ImportError: - oauth_available = False - - -def main(): - app = create_app() - - init_errorhandler() - - app.register_blueprint(web) - app.register_blueprint(opds) - app.register_blueprint(jinjia) - app.register_blueprint(about) - app.register_blueprint(shelf) - app.register_blueprint(admi) - app.register_blueprint(remotelogin) - app.register_blueprint(meta) - app.register_blueprint(gdrive) - app.register_blueprint(editbook) - if kobo_available: - app.register_blueprint(kobo) - app.register_blueprint(kobo_auth) - if oauth_available: - app.register_blueprint(oauth) - success = web_server.start() - sys.exit(0 if success else 1) +# Add local path to sys.path so we can import cps +path = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, path) +from cps.main import main if __name__ == '__main__': main() + + + diff --git a/cps/MyLoginManager.py b/cps/MyLoginManager.py index 7c916bd5..2f06be94 100644 --- 
a/cps/MyLoginManager.py +++ b/cps/MyLoginManager.py @@ -27,8 +27,9 @@ from flask import session class MyLoginManager(LoginManager): def _session_protection_failed(self): - sess = session._get_current_object() + _session = session._get_current_object() ident = self._session_identifier_generator() - if(sess and not (len(sess) == 1 and sess.get('csrf_token', None))) and ident != sess.get('_id', None): + if(_session and not (len(_session) == 1 + and _session.get('csrf_token', None))) and ident != _session.get('_id', None): return super(). _session_protection_failed() return False diff --git a/cps/__init__.py b/cps/__init__.py index 34ccf438..1ba1f778 100644 --- a/cps/__init__.py +++ b/cps/__init__.py @@ -25,24 +25,21 @@ import sys import os import mimetypes -from babel import Locale as LC -from babel import negotiate_locale -from babel.core import UnknownLocaleError -from flask import Flask, request, g +from flask import Flask from .MyLoginManager import MyLoginManager -from flask_babel import Babel from flask_principal import Principal -from . import config_sql, logger, cache_buster, cli, ub, db +from . import logger +from .cli import CliParameter +from .constants import CONFIG_DIR from .reverseproxy import ReverseProxied from .server import WebServer from .dep_check import dependency_check - -try: - import lxml - lxml_present = True -except ImportError: - lxml_present = False +from .updater import Updater +from .babel import babel +from . import config_sql +from . import cache_buster +from . 
import ub, db try: from flask_wtf.csrf import CSRFProtect @@ -50,6 +47,7 @@ try: except ImportError: wtf_present = False + mimetypes.init() mimetypes.add_type('application/xhtml+xml', '.xhtml') mimetypes.add_type('application/epub+zip', '.epub') @@ -71,6 +69,8 @@ mimetypes.add_type('application/ogg', '.oga') mimetypes.add_type('text/css', '.css') mimetypes.add_type('text/javascript; charset=UTF-8', '.js') +log = logger.create() + app = Flask(__name__) app.config.update( SESSION_COOKIE_HTTPONLY=True, @@ -79,61 +79,72 @@ app.config.update( WTF_CSRF_SSL_STRICT=False ) - lm = MyLoginManager() -lm.login_view = 'web.login' -lm.anonymous_user = ub.Anonymous -lm.session_protection = 'strong' + +config = config_sql._ConfigSQL() + +cli_param = CliParameter() if wtf_present: csrf = CSRFProtect() - csrf.init_app(app) else: csrf = None -ub.init_db(cli.settingspath) -# pylint: disable=no-member -config = config_sql.load_configuration(ub.session) +calibre_db = db.CalibreDB() web_server = WebServer() -babel = Babel() -_BABEL_TRANSLATIONS = set() +updater_thread = Updater() -log = logger.create() - - -from . 
import services - -db.CalibreDB.update_config(config) -db.CalibreDB.setup_db(config.config_calibre_dir, cli.settingspath) - - -calibre_db = db.CalibreDB() def create_app(): + lm.login_view = 'web.login' + lm.anonymous_user = ub.Anonymous + lm.session_protection = 'strong' + + if csrf: + csrf.init_app(app) + + cli_param.init() + + ub.init_db(cli_param.settings_path, cli_param.user_credentials) + + # pylint: disable=no-member + config_sql.load_configuration(config, ub.session, cli_param) + + db.CalibreDB.update_config(config) + db.CalibreDB.setup_db(config.config_calibre_dir, cli_param.settings_path) + calibre_db.init_db() + + updater_thread.init_updater(config, web_server) + # Perform dry run of updater and exit afterwards + if cli_param.dry_run: + updater_thread.dry_run() + sys.exit(0) + updater_thread.start() + if sys.version_info < (3, 0): log.info( - '*** Python2 is EOL since end of 2019, this version of Calibre-Web is no longer supporting Python2, please update your installation to Python3 ***') + '*** Python2 is EOL since end of 2019, this version of Calibre-Web is no longer supporting Python2, ' + 'please update your installation to Python3 ***') print( - '*** Python2 is EOL since end of 2019, this version of Calibre-Web is no longer supporting Python2, please update your installation to Python3 ***') + '*** Python2 is EOL since end of 2019, this version of Calibre-Web is no longer supporting Python2, ' + 'please update your installation to Python3 ***') web_server.stop(True) sys.exit(5) - if not lxml_present: - log.info('*** "lxml" is needed for calibre-web to run. Please install it using pip: "pip install lxml" ***') - print('*** "lxml" is needed for calibre-web to run. Please install it using pip: "pip install lxml" ***') - web_server.stop(True) - sys.exit(6) if not wtf_present: - log.info('*** "flask-WTF" is needed for calibre-web to run. Please install it using pip: "pip install flask-WTF" ***') - print('*** "flask-WTF" is needed for calibre-web to run. 
Please install it using pip: "pip install flask-WTF" ***') + log.info('*** "flask-WTF" is needed for calibre-web to run. ' + 'Please install it using pip: "pip install flask-WTF" ***') + print('*** "flask-WTF" is needed for calibre-web to run. ' + 'Please install it using pip: "pip install flask-WTF" ***') web_server.stop(True) sys.exit(7) for res in dependency_check() + dependency_check(True): - log.info('*** "{}" version does not fit the requirements. Should: {}, Found: {}, please consider installing required version ***' - .format(res['name'], - res['target'], - res['found'])) + log.info('*** "{}" version does not fit the requirements. ' + 'Should: {}, Found: {}, please consider installing required version ***' + .format(res['name'], + res['target'], + res['found'])) app.wsgi_app = ReverseProxied(app.wsgi_app) if os.environ.get('FLASK_DEBUG'): @@ -147,8 +158,8 @@ def create_app(): web_server.init_app(app, config) babel.init_app(app) - _BABEL_TRANSLATIONS.update(str(item) for item in babel.list_translations()) - _BABEL_TRANSLATIONS.add('en') + + from . 
import services if services.ldap: services.ldap.init_app(app, config) @@ -156,34 +167,12 @@ def create_app(): services.goodreads_support.connect(config.config_goodreads_api_key, config.config_goodreads_api_secret, config.config_use_goodreads) + config.store_calibre_uuid(calibre_db, db.Library_Id) + # Register scheduled tasks + from .schedule import register_scheduled_tasks, register_startup_tasks + register_scheduled_tasks(config.schedule_reconnect) + register_startup_tasks() return app -@babel.localeselector -def get_locale(): - # if a user is logged in, use the locale from the user settings - user = getattr(g, 'user', None) - if user is not None and hasattr(user, "locale"): - if user.name != 'Guest': # if the account is the guest account bypass the config lang settings - return user.locale - preferred = list() - if request.accept_languages: - for x in request.accept_languages.values(): - try: - preferred.append(str(LC.parse(x.replace('-', '_')))) - except (UnknownLocaleError, ValueError) as e: - log.debug('Could not parse locale "%s": %s', x, e) - - return negotiate_locale(preferred or ['en'], _BABEL_TRANSLATIONS) - - -@babel.timezoneselector -def get_timezone(): - user = getattr(g, 'user', None) - return user.timezone if user else None - - -from .updater import Updater -updater_thread = Updater() -updater_thread.start() diff --git a/cps/about.py b/cps/about.py index ba5a99af..1b68818d 100644 --- a/cps/about.py +++ b/cps/about.py @@ -25,46 +25,15 @@ import platform import sqlite3 from collections import OrderedDict -import babel, pytz, requests, sqlalchemy -import werkzeug, flask, flask_login, flask_principal, jinja2 +import werkzeug +import flask +import flask_login +import jinja2 from flask_babel import gettext as _ -try: - from flask_wtf import __version__ as flaskwtf_version -except ImportError: - flaskwtf_version = _(u'not installed') -from . import db, calibre_db, converter, uploader, server, isoLanguages, constants, gdriveutils, dep_check +from . 
import db, calibre_db, converter, uploader, constants, dep_check from .render_template import render_title_template -try: - from flask_login import __version__ as flask_loginVersion -except ImportError: - from flask_login.__about__ import __version__ as flask_loginVersion -try: - # pylint: disable=unused-import - import unidecode - # _() necessary to make babel aware of string for translation - unidecode_version = _(u'installed') -except ImportError: - unidecode_version = _(u'not installed') - -try: - from flask_dance import __version__ as flask_danceVersion -except ImportError: - flask_danceVersion = None - -try: - from greenlet import __version__ as greenlet_Version -except ImportError: - greenlet_Version = None - -try: - from scholarly import scholarly - scholarly_version = _(u'installed') -except ImportError: - scholarly_version = _(u'not installed') - -from . import services about = flask.Blueprint('about', __name__) @@ -74,59 +43,38 @@ opt = dep_check.load_dependencys(True) for i in (req + opt): ret[i[1]] = i[0] -if not ret: - _VERSIONS = OrderedDict( - Platform = '{0[0]} {0[2]} {0[3]} {0[4]} {0[5]}'.format(platform.uname()), - Python=sys.version, - Calibre_Web=constants.STABLE_VERSION['version'] + ' - ' - + constants.NIGHTLY_VERSION[0].replace('%','%%') + ' - ' - + constants.NIGHTLY_VERSION[1].replace('%','%%'), - WebServer=server.VERSION, - Flask=flask.__version__, - Flask_Login=flask_loginVersion, - Flask_Principal=flask_principal.__version__, - Flask_WTF=flaskwtf_version, - Werkzeug=werkzeug.__version__, - Babel=babel.__version__, - Jinja2=jinja2.__version__, - Requests=requests.__version__, - SqlAlchemy=sqlalchemy.__version__, - pySqlite=sqlite3.version, - SQLite=sqlite3.sqlite_version, - iso639=isoLanguages.__version__, - pytz=pytz.__version__, - Unidecode=unidecode_version, - Scholarly=scholarly_version, - Flask_SimpleLDAP=u'installed' if bool(services.ldap) else None, - python_LDAP=services.ldapVersion if bool(services.ldapVersion) else None, - 
Goodreads=u'installed' if bool(services.goodreads_support) else None, - jsonschema=services.SyncToken.__version__ if bool(services.SyncToken) else None, - flask_dance=flask_danceVersion, - greenlet=greenlet_Version - ) - _VERSIONS.update(gdriveutils.get_versions()) - _VERSIONS.update(uploader.get_versions(True)) +if constants.NIGHTLY_VERSION[0] == "$Format:%H$": + calibre_web_version = constants.STABLE_VERSION['version'] else: - _VERSIONS = OrderedDict( - Platform = '{0[0]} {0[2]} {0[3]} {0[4]} {0[5]}'.format(platform.uname()), - Python = sys.version, - Calibre_Web = constants.STABLE_VERSION['version'] + ' - ' - + constants.NIGHTLY_VERSION[0].replace('%', '%%') + ' - ' - + constants.NIGHTLY_VERSION[1].replace('%', '%%'), - Werkzeug = werkzeug.__version__, - Jinja2=jinja2.__version__, - pySqlite = sqlite3.version, - SQLite = sqlite3.sqlite_version, - ) - _VERSIONS.update(ret) - _VERSIONS.update(uploader.get_versions(False)) + calibre_web_version = (constants.STABLE_VERSION['version'] + ' - ' + + constants.NIGHTLY_VERSION[0].replace('%', '%%') + ' - ' + + constants.NIGHTLY_VERSION[1].replace('%', '%%')) + +if getattr(sys, 'frozen', False): + calibre_web_version += " - Exe-Version" +elif constants.HOME_CONFIG: + calibre_web_version += " - pyPi" + +_VERSIONS = OrderedDict( + Platform='{0[0]} {0[2]} {0[3]} {0[4]} {0[5]}'.format(platform.uname()), + Python=sys.version, + Calibre_Web=calibre_web_version, + Werkzeug=werkzeug.__version__, + Jinja2=jinja2.__version__, + pySqlite=sqlite3.version, + SQLite=sqlite3.sqlite_version, +) +_VERSIONS.update(ret) +_VERSIONS.update(uploader.get_versions()) + def collect_stats(): - _VERSIONS['ebook converter'] = _(converter.get_calibre_version()) - _VERSIONS['unrar'] = _(converter.get_unrar_version()) - _VERSIONS['kepubify'] = _(converter.get_kepubify_version()) + _VERSIONS['ebook converter'] = converter.get_calibre_version() + _VERSIONS['unrar'] = converter.get_unrar_version() + _VERSIONS['kepubify'] = converter.get_kepubify_version() 
return _VERSIONS + @about.route("/stats") @flask_login.login_required def stats(): diff --git a/cps/admin.py b/cps/admin.py index 04d9138f..76275922 100644 --- a/cps/admin.py +++ b/cps/admin.py @@ -24,33 +24,32 @@ import os import re import base64 import json -import time import operator -from datetime import datetime, timedelta +from datetime import datetime, timedelta, time +from functools import wraps + -from babel import Locale as LC -from babel.dates import format_datetime from flask import Blueprint, flash, redirect, url_for, abort, request, make_response, send_from_directory, g, Response from flask_login import login_required, current_user, logout_user, confirm_login from flask_babel import gettext as _ +from flask_babel import get_locale, format_time, format_datetime, format_timedelta from flask import session as flask_session from sqlalchemy import and_ from sqlalchemy.orm.attributes import flag_modified from sqlalchemy.exc import IntegrityError, OperationalError, InvalidRequestError from sqlalchemy.sql.expression import func, or_, text -from . import constants, logger, helper, services -from . import db, calibre_db, ub, web_server, get_locale, config, updater_thread, babel, gdriveutils, kobo_sync_status +from . import constants, logger, helper, services, cli_param +from . import db, calibre_db, ub, web_server, config, updater_thread, gdriveutils, \ + kobo_sync_status, schedule from .helper import check_valid_domain, send_test_mail, reset_password, generate_password_hash, check_email, \ valid_email, check_username from .gdriveutils import is_gdrive_ready, gdrive_support from .render_template import render_title_template, get_sidebar_config -from . import debug_info, _BABEL_TRANSLATIONS +from .services.worker import WorkerThread +from .babel import get_available_translations, get_available_locale, get_user_locale_language +from . 
import debug_info -try: - from functools import wraps -except ImportError: - pass # We're not using Python 3 log = logger.create() @@ -59,7 +58,9 @@ feature_support = { 'goodreads': bool(services.goodreads_support), 'kobo': bool(services.kobo), 'updater': constants.UPDATER_AVAILABLE, - 'gmail': bool(services.gmail) + 'gmail': bool(services.gmail), + 'scheduler': schedule.use_APScheduler, + 'gdrive': gdrive_support } try: @@ -78,7 +79,6 @@ except ImportError as err: oauth_check = {} -feature_support['gdrive'] = gdrive_support admi = Blueprint('admin', __name__) @@ -129,11 +129,11 @@ def admin_forbidden(): abort(403) -@admi.route("/shutdown") +@admi.route("/shutdown", methods=["POST"]) @login_required @admin_required def shutdown(): - task = int(request.args.get("parameter").strip()) + task = request.get_json().get('parameter', -1) showtext = {} if task in (0, 1): # valid commandos received # close all database connections @@ -158,6 +158,29 @@ def shutdown(): return json.dumps(showtext), 400 +# method is available without login and not protected by CSRF to make it easy reachable, is per default switched of +# needed for docker applications, as changes on metadata.db from host are not visible to application +@admi.route("/reconnect", methods=['GET']) +def reconnect(): + if cli_param.reconnect_enable: + calibre_db.reconnect_db(config, ub.app_DB_path) + return json.dumps({}) + else: + log.debug("'/reconnect' was accessed but is not enabled") + abort(404) + + +@admi.route("/ajax/updateThumbnails", methods=['POST']) +@admin_required +@login_required +def update_thumbnails(): + content = config.get_scheduled_task_settings() + if content['schedule_generate_book_covers']: + log.info("Update of Cover cache requested") + helper.update_thumbnail_cache() + return "" + + @admi.route("/admin/view") @login_required @admin_required @@ -176,17 +199,22 @@ def admin(): form_date -= timedelta(hours=int(commit[20:22]), minutes=int(commit[23:])) elif commit[19] == '-': form_date += 
timedelta(hours=int(commit[20:22]), minutes=int(commit[23:])) - commit = format_datetime(form_date - tz, format='short', locale=get_locale()) + commit = format_datetime(form_date - tz, format='short') else: commit = version['version'] - allUser = ub.session.query(ub.User).all() + all_user = ub.session.query(ub.User).all() email_settings = config.get_mail_settings() - kobo_support = feature_support['kobo'] and config.config_kobo_sync - return render_title_template("admin.html", allUser=allUser, email=email_settings, config=config, commit=commit, - feature_support=feature_support, kobo_support=kobo_support, + schedule_time = format_time(time(hour=config.schedule_start_time), format="short") + t = timedelta(hours=config.schedule_duration // 60, minutes=config.schedule_duration % 60) + schedule_duration = format_timedelta(t, threshold=.99) + + return render_title_template("admin.html", allUser=all_user, email=email_settings, config=config, commit=commit, + feature_support=feature_support, schedule_time=schedule_time, + schedule_duration=schedule_duration, title=_(u"Admin page"), page="admin") + @admi.route("/admin/dbconfig", methods=["GET", "POST"]) @login_required @admin_required @@ -227,30 +255,32 @@ def ajax_db_config(): def calibreweb_alive(): return "", 200 + @admi.route("/admin/viewconfig") @login_required @admin_required def view_configuration(): - read_column = calibre_db.session.query(db.Custom_Columns)\ - .filter(and_(db.Custom_Columns.datatype == 'bool', db.Custom_Columns.mark_for_delete == 0)).all() - restrict_columns = calibre_db.session.query(db.Custom_Columns)\ - .filter(and_(db.Custom_Columns.datatype == 'text', db.Custom_Columns.mark_for_delete == 0)).all() + read_column = calibre_db.session.query(db.CustomColumns)\ + .filter(and_(db.CustomColumns.datatype == 'bool', db.CustomColumns.mark_for_delete == 0)).all() + restrict_columns = calibre_db.session.query(db.CustomColumns)\ + .filter(and_(db.CustomColumns.datatype == 'text', 
db.CustomColumns.mark_for_delete == 0)).all() languages = calibre_db.speaking_language() - translations = [LC('en')] + babel.list_translations() + translations = get_available_locale() return render_title_template("config_view_edit.html", conf=config, readColumns=read_column, restrictColumns=restrict_columns, languages=languages, translations=translations, title=_(u"UI Configuration"), page="uiconfig") + @admi.route("/admin/usertable") @login_required @admin_required def edit_user_table(): visibility = current_user.view_settings.get('useredit', {}) languages = calibre_db.speaking_language() - translations = babel.list_translations() + [LC('en')] - allUser = ub.session.query(ub.User) + translations = get_available_locale() + all_user = ub.session.query(ub.User) tags = calibre_db.session.query(db.Tags)\ .join(db.books_tags_link)\ .join(db.Books)\ @@ -262,10 +292,10 @@ def edit_user_table(): else: custom_values = [] if not config.config_anonbrowse: - allUser = allUser.filter(ub.User.role.op('&')(constants.ROLE_ANONYMOUS) != constants.ROLE_ANONYMOUS) + all_user = all_user.filter(ub.User.role.op('&')(constants.ROLE_ANONYMOUS) != constants.ROLE_ANONYMOUS) kobo_support = feature_support['kobo'] and config.config_kobo_sync return render_title_template("user_table.html", - users=allUser.all(), + users=all_user.all(), tags=tags, custom_values=custom_values, translations=translations, @@ -286,10 +316,13 @@ def list_users(): limit = int(request.args.get("limit") or 10) search = request.args.get("search") sort = request.args.get("sort", "id") - order = request.args.get("order", "").lower() state = None if sort == "state": state = json.loads(request.args.get("state", "[]")) + else: + if sort not in ub.User.__table__.columns.keys(): + sort = "id" + order = request.args.get("order", "").lower() if sort != "state" and order: order = text(sort + " " + order) @@ -304,8 +337,8 @@ def list_users(): if search: all_user = all_user.filter(or_(func.lower(ub.User.name).ilike("%" + search + 
"%"), - func.lower(ub.User.kindle_mail).ilike("%" + search + "%"), - func.lower(ub.User.email).ilike("%" + search + "%"))) + func.lower(ub.User.kindle_mail).ilike("%" + search + "%"), + func.lower(ub.User.email).ilike("%" + search + "%"))) if state: users = calibre_db.get_checkbox_sorted(all_user.all(), state, off, limit, request.args.get("order", "").lower()) else: @@ -317,7 +350,7 @@ def list_users(): if user.default_language == "all": user.default = _("All") else: - user.default = LC.parse(user.default_language).get_language_name(get_locale()) + user.default = get_user_locale_language(user.default_language) table_entries = {'totalNotFiltered': total_count, 'total': filtered_count, "rows": users} js_list = json.dumps(table_entries, cls=db.AlchemyEncoder) @@ -325,12 +358,14 @@ def list_users(): response.headers["Content-Type"] = "application/json; charset=utf-8" return response + @admi.route("/ajax/deleteuser", methods=['POST']) @login_required @admin_required def delete_user(): user_ids = request.form.to_dict(flat=False) users = None + message = "" if "userid[]" in user_ids: users = ub.session.query(ub.User).filter(ub.User.id.in_(user_ids['userid[]'])).all() elif "userid" in user_ids: @@ -358,11 +393,12 @@ def delete_user(): success.extend(errors) return Response(json.dumps(success), mimetype='application/json') + @admi.route("/ajax/getlocale") @login_required @admin_required def table_get_locale(): - locale = babel.list_translations() + [LC('en')] + locale = get_available_locale() ret = list() current_locale = get_locale() for loc in locale: @@ -417,16 +453,16 @@ def edit_list_user(param): if user.name == "Guest": raise Exception(_("Guest Name can't be changed")) user.name = check_username(vals['value']) - elif param =='email': + elif param == 'email': user.email = check_email(vals['value']) - elif param =='kobo_only_shelves_sync': + elif param == 'kobo_only_shelves_sync': user.kobo_only_shelves_sync = int(vals['value'] == 'true') elif param == 'kindle_mail': 
user.kindle_mail = valid_email(vals['value']) if vals['value'] else "" elif param.endswith('role'): value = int(vals['field_index']) if user.name == "Guest" and value in \ - [constants.ROLE_ADMIN, constants.ROLE_PASSWD, constants.ROLE_EDIT_SHELFS]: + [constants.ROLE_ADMIN, constants.ROLE_PASSWD, constants.ROLE_EDIT_SHELFS]: raise Exception(_("Guest can't have this role")) # check for valid value, last on checks for power of 2 value if value > 0 and value <= constants.ROLE_VIEWER and (value & value-1 == 0 or value == 1): @@ -439,8 +475,8 @@ def edit_list_user(param): ub.User.id != user.id).count(): return Response( json.dumps([{'type': "danger", - 'message':_(u"No admin user remaining, can't remove admin role", - nick=user.name)}]), mimetype='application/json') + 'message': _(u"No admin user remaining, can't remove admin role", + nick=user.name)}]), mimetype='application/json') user.role &= ~value else: raise Exception(_("Value has to be true or false")) @@ -463,7 +499,7 @@ def edit_list_user(param): elif param == 'locale': if user.name == "Guest": raise Exception(_("Guest's Locale is determined automatically and can't be set")) - if vals['value'] in _BABEL_TRANSLATIONS: + if vals['value'] in get_available_translations(): user.locale = vals['value'] else: raise Exception(_("No Valid Locale Given")) @@ -481,7 +517,7 @@ def edit_list_user(param): else: return _("Parameter not found"), 400 except Exception as ex: - log.debug_or_exception(ex) + log.error_or_exception(ex) return str(ex), 400 ub.session_commit() return "" @@ -503,20 +539,6 @@ def update_table_settings(): return "Invalid request", 400 return "" -def check_valid_read_column(column): - if column != "0": - if not calibre_db.session.query(db.Custom_Columns).filter(db.Custom_Columns.id == column) \ - .filter(and_(db.Custom_Columns.datatype == 'bool', db.Custom_Columns.mark_for_delete == 0)).all(): - return False - return True - -def check_valid_restricted_column(column): - if column != "0": - if not 
calibre_db.session.query(db.Custom_Columns).filter(db.Custom_Columns.id == column) \ - .filter(and_(db.Custom_Columns.datatype == 'text', db.Custom_Columns.mark_for_delete == 0)).all(): - return False - return True - @admi.route("/admin/viewconfig", methods=["POST"]) @login_required @@ -548,7 +570,6 @@ def update_view_configuration(): _config_string(to_save, "config_default_language") _config_string(to_save, "config_default_locale") - config.config_default_role = constants.selected_roles(to_save) config.config_default_role &= ~constants.ROLE_ANONYMOUS @@ -585,13 +606,17 @@ def load_dialogtexts(element_id): elif element_id == "restrictions": texts["main"] = _('Are you sure you want to change the selected restrictions for the selected user(s)?') elif element_id == "sidebar_view": - texts["main"] = _('Are you sure you want to change the selected visibility restrictions for the selected user(s)?') + texts["main"] = _('Are you sure you want to change the selected visibility restrictions ' + 'for the selected user(s)?') elif element_id == "kobo_only_shelves_sync": texts["main"] = _('Are you sure you want to change shelf sync behavior for the selected user(s)?') elif element_id == "db_submit": texts["main"] = _('Are you sure you want to change Calibre library location?') + elif element_id == "admin_refresh_cover_cache": + texts["main"] = _('Calibre-Web will search for updated Covers and update Cover Thumbnails, this may take a while?') elif element_id == "btnfullsync": - texts["main"] = _("Are you sure you want delete Calibre-Web's sync database to force a full sync with your Kobo Reader?") + texts["main"] = _("Are you sure you want delete Calibre-Web's sync database " + "to force a full sync with your Kobo Reader?") return json.dumps(texts) @@ -719,44 +744,13 @@ def edit_restriction(res_type, user_id): ub.session_commit("Changed denied columns of user {} to {}".format(usr.name, usr.denied_column_value)) return "" - -def restriction_addition(element, list_func): - 
elementlist = list_func() - if elementlist == ['']: - elementlist = [] - if not element['add_element'] in elementlist: - elementlist += [element['add_element']] - return ','.join(elementlist) +@admi.route("/ajax/addrestriction/", methods=['POST']) +@login_required +@admin_required +def add_user_0_restriction(res_type): + return add_restriction(res_type, 0) -def restriction_deletion(element, list_func): - elementlist = list_func() - if element['Element'] in elementlist: - elementlist.remove(element['Element']) - return ','.join(elementlist) - - -def prepare_tags(user, action, tags_name, id_list): - if "tags" in tags_name: - tags = calibre_db.session.query(db.Tags).filter(db.Tags.id.in_(id_list)).all() - if not tags: - raise Exception(_("Tag not found")) - new_tags_list = [x.name for x in tags] - else: - tags = calibre_db.session.query(db.cc_classes[config.config_restricted_column])\ - .filter(db.cc_classes[config.config_restricted_column].id.in_(id_list)).all() - new_tags_list = [x.value for x in tags] - saved_tags_list = user.__dict__[tags_name].split(",") if len(user.__dict__[tags_name]) else [] - if action == "remove": - saved_tags_list = [x for x in saved_tags_list if x not in new_tags_list] - elif action == "add": - saved_tags_list.extend(x for x in new_tags_list if x not in saved_tags_list) - else: - raise Exception(_("Invalid Action")) - return ",".join(saved_tags_list) - - -@admi.route("/ajax/addrestriction/", defaults={"user_id": 0}, methods=['POST']) @admi.route("/ajax/addrestriction//", methods=['POST']) @login_required @admin_required @@ -803,7 +797,13 @@ def add_restriction(res_type, user_id): return "" -@admi.route("/ajax/deleterestriction/", defaults={"user_id": 0}, methods=['POST']) +@admi.route("/ajax/deleterestriction/", methods=['POST']) +@login_required +@admin_required +def delete_user_0_restriction(res_type): + return delete_restriction(res_type, 0) + + @admi.route("/ajax/deleterestriction//", methods=['POST']) @login_required @admin_required 
@@ -830,10 +830,10 @@ def delete_restriction(res_type, user_id): usr = current_user if element['id'].startswith('a'): usr.allowed_tags = restriction_deletion(element, usr.list_allowed_tags) - ub.session_commit("Deleted allowed tags of user {}: {}".format(usr.name, usr.list_allowed_tags)) + ub.session_commit("Deleted allowed tags of user {}: {}".format(usr.name, element['Element'])) elif element['id'].startswith('d'): usr.denied_tags = restriction_deletion(element, usr.list_denied_tags) - ub.session_commit("Deleted denied tags of user {}: {}".format(usr.name, usr.list_allowed_tags)) + ub.session_commit("Deleted denied tag of user {}: {}".format(usr.name, element['Element'])) elif res_type == 3: # Columns per user if isinstance(user_id, int): usr = ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first() @@ -842,12 +842,12 @@ def delete_restriction(res_type, user_id): if element['id'].startswith('a'): usr.allowed_column_value = restriction_deletion(element, usr.list_allowed_column_values) ub.session_commit("Deleted allowed columns of user {}: {}".format(usr.name, - usr.list_allowed_column_values)) + usr.list_allowed_column_values())) elif element['id'].startswith('d'): usr.denied_column_value = restriction_deletion(element, usr.list_denied_column_values) ub.session_commit("Deleted denied columns of user {}: {}".format(usr.name, - usr.list_denied_column_values)) + usr.list_denied_column_values())) return "" @@ -857,8 +857,8 @@ def delete_restriction(res_type, user_id): @admin_required def list_restriction(res_type, user_id): if res_type == 0: # Tags as template - restrict = [{'Element': x, 'type':_('Deny'), 'id': 'd'+str(i) } - for i,x in enumerate(config.list_denied_tags()) if x != ''] + restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd'+str(i)} + for i, x in enumerate(config.list_denied_tags()) if x != ''] allow = [{'Element': x, 'type': _('Allow'), 'id': 'a'+str(i)} for i, x in enumerate(config.list_allowed_tags()) if x != ''] json_dumps = restrict 
+ allow @@ -895,7 +895,8 @@ def list_restriction(res_type, user_id): response.headers["Content-Type"] = "application/json; charset=utf-8" return response -@admi.route("/ajax/fullsync") + +@admi.route("/ajax/fullsync", methods=["POST"]) @login_required def ajax_fullsync(): count = ub.session.query(ub.KoboSyncedBooks).filter(current_user.id == ub.KoboSyncedBooks.user_id).delete() @@ -911,6 +912,58 @@ def ajax_pathchooser(): return pathchooser() +def check_valid_read_column(column): + if column != "0": + if not calibre_db.session.query(db.CustomColumns).filter(db.CustomColumns.id == column) \ + .filter(and_(db.CustomColumns.datatype == 'bool', db.CustomColumns.mark_for_delete == 0)).all(): + return False + return True + + +def check_valid_restricted_column(column): + if column != "0": + if not calibre_db.session.query(db.CustomColumns).filter(db.CustomColumns.id == column) \ + .filter(and_(db.CustomColumns.datatype == 'text', db.CustomColumns.mark_for_delete == 0)).all(): + return False + return True + + +def restriction_addition(element, list_func): + elementlist = list_func() + if elementlist == ['']: + elementlist = [] + if not element['add_element'] in elementlist: + elementlist += [element['add_element']] + return ','.join(elementlist) + + +def restriction_deletion(element, list_func): + elementlist = list_func() + if element['Element'] in elementlist: + elementlist.remove(element['Element']) + return ','.join(elementlist) + + +def prepare_tags(user, action, tags_name, id_list): + if "tags" in tags_name: + tags = calibre_db.session.query(db.Tags).filter(db.Tags.id.in_(id_list)).all() + if not tags: + raise Exception(_("Tag not found")) + new_tags_list = [x.name for x in tags] + else: + tags = calibre_db.session.query(db.cc_classes[config.config_restricted_column])\ + .filter(db.cc_classes[config.config_restricted_column].id.in_(id_list)).all() + new_tags_list = [x.value for x in tags] + saved_tags_list = user.__dict__[tags_name].split(",") if 
len(user.__dict__[tags_name]) else [] + if action == "remove": + saved_tags_list = [x for x in saved_tags_list if x not in new_tags_list] + elif action == "add": + saved_tags_list.extend(x for x in new_tags_list if x not in saved_tags_list) + else: + raise Exception(_("Invalid Action")) + return ",".join(saved_tags_list) + + def pathchooser(): browse_for = "folder" folder_only = request.args.get('folder', False) == "true" @@ -1044,12 +1097,12 @@ def _configuration_oauth_helper(to_save): reboot_required = False for element in oauthblueprints: if to_save["config_" + str(element['id']) + "_oauth_client_id"] != element['oauth_client_id'] \ - or to_save["config_" + str(element['id']) + "_oauth_client_secret"] != element['oauth_client_secret']: + or to_save["config_" + str(element['id']) + "_oauth_client_secret"] != element['oauth_client_secret']: reboot_required = True element['oauth_client_id'] = to_save["config_" + str(element['id']) + "_oauth_client_id"] element['oauth_client_secret'] = to_save["config_" + str(element['id']) + "_oauth_client_secret"] if to_save["config_" + str(element['id']) + "_oauth_client_id"] \ - and to_save["config_" + str(element['id']) + "_oauth_client_secret"]: + and to_save["config_" + str(element['id']) + "_oauth_client_secret"]: active_oauths += 1 element["active"] = 1 else: @@ -1102,7 +1155,7 @@ def _configuration_ldap_helper(to_save): if not config.config_ldap_provider_url \ or not config.config_ldap_port \ or not config.config_ldap_dn \ - or not config.config_ldap_user_object: + or not config.config_ldap_user_object: return reboot_required, _configuration_result(_('Please Enter a LDAP Provider, ' 'Port, DN and User Object Identifier')) @@ -1154,15 +1207,431 @@ def simulatedbchange(): return Response(json.dumps({"change": db_change, "valid": db_valid}), mimetype='application/json') +@admi.route("/admin/user/new", methods=["GET", "POST"]) +@login_required +@admin_required +def new_user(): + content = ub.User() + languages = 
calibre_db.speaking_language() + translations = get_available_locale() + kobo_support = feature_support['kobo'] and config.config_kobo_sync + if request.method == "POST": + to_save = request.form.to_dict() + _handle_new_user(to_save, content, languages, translations, kobo_support) + else: + content.role = config.config_default_role + content.sidebar_view = config.config_default_show + content.locale = config.config_default_locale + content.default_language = config.config_default_language + return render_title_template("user_edit.html", new_user=1, content=content, + config=config, translations=translations, + languages=languages, title=_(u"Add new user"), page="newuser", + kobo_support=kobo_support, registered_oauth=oauth_check) + + +@admi.route("/admin/mailsettings") +@login_required +@admin_required +def edit_mailsettings(): + content = config.get_mail_settings() + return render_title_template("email_edit.html", content=content, title=_(u"Edit E-mail Server Settings"), + page="mailset", feature_support=feature_support) + + +@admi.route("/admin/mailsettings", methods=["POST"]) +@login_required +@admin_required +def update_mailsettings(): + to_save = request.form.to_dict() + _config_int(to_save, "mail_server_type") + if to_save.get("invalidate"): + config.mail_gmail_token = {} + try: + flag_modified(config, "mail_gmail_token") + except AttributeError: + pass + elif to_save.get("gmail"): + try: + config.mail_gmail_token = services.gmail.setup_gmail(config.mail_gmail_token) + flash(_(u"Gmail Account Verification Successful"), category="success") + except Exception as ex: + flash(str(ex), category="error") + log.error(ex) + return edit_mailsettings() + + else: + _config_string(to_save, "mail_server") + _config_int(to_save, "mail_port") + _config_int(to_save, "mail_use_ssl") + _config_string(to_save, "mail_login") + _config_string(to_save, "mail_password") + _config_string(to_save, "mail_from") + _config_int(to_save, "mail_size", lambda y: int(y)*1024*1024) + try: + 
config.save() + except (OperationalError, InvalidRequestError) as e: + ub.session.rollback() + log.error_or_exception("Settings Database error: {}".format(e)) + flash(_(u"Database error: %(error)s.", error=e.orig), category="error") + return edit_mailsettings() + + if to_save.get("test"): + if current_user.email: + result = send_test_mail(current_user.email, current_user.name) + if result is None: + flash(_(u"Test e-mail queued for sending to %(email)s, please check Tasks for result", + email=current_user.email), category="info") + else: + flash(_(u"There was an error sending the Test e-mail: %(res)s", res=result), category="error") + else: + flash(_(u"Please configure your e-mail address first..."), category="error") + else: + flash(_(u"E-mail server settings updated"), category="success") + + return edit_mailsettings() + + +@admi.route("/admin/scheduledtasks") +@login_required +@admin_required +def edit_scheduledtasks(): + content = config.get_scheduled_task_settings() + time_field = list() + duration_field = list() + + for n in range(24): + time_field.append((n , format_time(time(hour=n), format="short",))) + for n in range(5, 65, 5): + t = timedelta(hours=n // 60, minutes=n % 60) + duration_field.append((n, format_timedelta(t, threshold=.9))) + + return render_title_template("schedule_edit.html", + config=content, + starttime=time_field, + duration=duration_field, + title=_(u"Edit Scheduled Tasks Settings")) + + +@admi.route("/admin/scheduledtasks", methods=["POST"]) +@login_required +@admin_required +def update_scheduledtasks(): + error = False + to_save = request.form.to_dict() + if 0 <= int(to_save.get("schedule_start_time")) <= 23: + _config_int(to_save, "schedule_start_time") + else: + flash(_(u"Invalid start time for task specified"), category="error") + error = True + if 0 < int(to_save.get("schedule_duration")) <= 60: + _config_int(to_save, "schedule_duration") + else: + flash(_(u"Invalid duration for task specified"), category="error") + error = True + 
_config_checkbox(to_save, "schedule_generate_book_covers") + _config_checkbox(to_save, "schedule_generate_series_covers") + _config_checkbox(to_save, "schedule_reconnect") + + if not error: + try: + config.save() + flash(_(u"Scheduled tasks settings updated"), category="success") + + # Cancel any running tasks + schedule.end_scheduled_tasks() + + # Re-register tasks with new settings + schedule.register_scheduled_tasks(config.schedule_reconnect) + except IntegrityError: + ub.session.rollback() + log.error("An unknown error occurred while saving scheduled tasks settings") + flash(_(u"An unknown error occurred. Please try again later."), category="error") + except OperationalError: + ub.session.rollback() + log.error("Settings DB is not Writeable") + flash(_("Settings DB is not Writeable"), category="error") + + return edit_scheduledtasks() + + +@admi.route("/admin/user/", methods=["GET", "POST"]) +@login_required +@admin_required +def edit_user(user_id): + content = ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first() # type: ub.User + if not content or (not config.config_anonbrowse and content.name == "Guest"): + flash(_(u"User not found"), category="error") + return redirect(url_for('admin.admin')) + languages = calibre_db.speaking_language(return_all_languages=True) + translations = get_available_locale() + kobo_support = feature_support['kobo'] and config.config_kobo_sync + if request.method == "POST": + to_save = request.form.to_dict() + resp = _handle_edit_user(to_save, content, languages, translations, kobo_support) + if resp: + return resp + return render_title_template("user_edit.html", + translations=translations, + languages=languages, + new_user=0, + content=content, + config=config, + registered_oauth=oauth_check, + mail_configured=config.get_mail_server_configured(), + kobo_support=kobo_support, + title=_(u"Edit User %(nick)s", nick=content.name), + page="edituser") + + +@admi.route("/admin/resetpassword/", methods=["POST"]) 
+@login_required +@admin_required +def reset_user_password(user_id): + if current_user is not None and current_user.is_authenticated: + ret, message = reset_password(user_id) + if ret == 1: + log.debug(u"Password for user %s reset", message) + flash(_(u"Password for user %(user)s reset", user=message), category="success") + elif ret == 0: + log.error(u"An unknown error occurred. Please try again later.") + flash(_(u"An unknown error occurred. Please try again later."), category="error") + else: + log.error(u"Please configure the SMTP mail settings first...") + flash(_(u"Please configure the SMTP mail settings first..."), category="error") + return redirect(url_for('admin.admin')) + + +@admi.route("/admin/logfile") +@login_required +@admin_required +def view_logfile(): + logfiles = {0: logger.get_logfile(config.config_logfile), + 1: logger.get_accesslogfile(config.config_access_logfile)} + return render_title_template("logviewer.html", + title=_(u"Logfile viewer"), + accesslog_enable=config.config_access_log, + log_enable=bool(config.config_logfile != logger.LOG_TO_STDOUT), + logfiles=logfiles, + page="logfile") + + +@admi.route("/ajax/log/") +@login_required +@admin_required +def send_logfile(logtype): + if logtype == 1: + logfile = logger.get_accesslogfile(config.config_access_logfile) + return send_from_directory(os.path.dirname(logfile), + os.path.basename(logfile)) + if logtype == 0: + logfile = logger.get_logfile(config.config_logfile) + return send_from_directory(os.path.dirname(logfile), + os.path.basename(logfile)) + else: + return "" + + +@admi.route("/admin/logdownload/") +@login_required +@admin_required +def download_log(logtype): + if logtype == 0: + file_name = logger.get_logfile(config.config_logfile) + elif logtype == 1: + file_name = logger.get_accesslogfile(config.config_access_logfile) + else: + abort(404) + if logger.is_valid_logfile(file_name): + return debug_info.assemble_logfiles(file_name) + abort(404) + + +@admi.route("/admin/debug") 
+@login_required +@admin_required +def download_debug(): + return debug_info.send_debug() + + +@admi.route("/get_update_status", methods=['GET']) +@login_required +@admin_required +def get_update_status(): + if feature_support['updater']: + log.info(u"Update status requested") + return updater_thread.get_available_updates(request.method) + else: + return '' + + +@admi.route("/get_updater_status", methods=['GET', 'POST']) +@login_required +@admin_required +def get_updater_status(): + status = {} + if feature_support['updater']: + if request.method == "POST": + commit = request.form.to_dict() + if "start" in commit and commit['start'] == 'True': + txt = { + "1": _(u'Requesting update package'), + "2": _(u'Downloading update package'), + "3": _(u'Unzipping update package'), + "4": _(u'Replacing files'), + "5": _(u'Database connections are closed'), + "6": _(u'Stopping server'), + "7": _(u'Update finished, please press okay and reload page'), + "8": _(u'Update failed:') + u' ' + _(u'HTTP Error'), + "9": _(u'Update failed:') + u' ' + _(u'Connection error'), + "10": _(u'Update failed:') + u' ' + _(u'Timeout while establishing connection'), + "11": _(u'Update failed:') + u' ' + _(u'General error'), + "12": _(u'Update failed:') + u' ' + _(u'Update file could not be saved in temp dir'), + "13": _(u'Update failed:') + u' ' + _(u'Files could not be replaced during update') + } + status['text'] = txt + updater_thread.status = 0 + updater_thread.resume() + status['status'] = updater_thread.get_update_status() + elif request.method == "GET": + try: + status['status'] = updater_thread.get_update_status() + if status['status'] == -1: + status['status'] = 7 + except Exception: + status['status'] = 11 + return json.dumps(status) + return '' + + +def ldap_import_create_user(user, user_data): + user_login_field = extract_dynamic_field_from_filter(user, config.config_ldap_user_object) + + try: + username = user_data[user_login_field][0].decode('utf-8') + except KeyError as ex: + 
log.error("Failed to extract LDAP user: %s - %s", user, ex) + message = _(u'Failed to extract at least One LDAP User') + return 0, message + + # check for duplicate username + if ub.session.query(ub.User).filter(func.lower(ub.User.name) == username.lower()).first(): + # if ub.session.query(ub.User).filter(ub.User.name == username).first(): + log.warning("LDAP User %s Already in Database", user_data) + return 0, None + + kindlemail = '' + if 'mail' in user_data: + useremail = user_data['mail'][0].decode('utf-8') + if len(user_data['mail']) > 1: + kindlemail = user_data['mail'][1].decode('utf-8') + + else: + log.debug('No Mail Field Found in LDAP Response') + useremail = username + '@email.com' + + try: + # check for duplicate email + useremail = check_email(useremail) + except Exception as ex: + log.warning("LDAP Email Error: {}, {}".format(user_data, ex)) + return 0, None + content = ub.User() + content.name = username + content.password = '' # dummy password which will be replaced by ldap one + content.email = useremail + content.kindle_mail = kindlemail + content.default_language = config.config_default_language + content.locale = config.config_default_locale + content.role = config.config_default_role + content.sidebar_view = config.config_default_show + content.allowed_tags = config.config_allowed_tags + content.denied_tags = config.config_denied_tags + content.allowed_column_value = config.config_allowed_column_value + content.denied_column_value = config.config_denied_column_value + ub.session.add(content) + try: + ub.session.commit() + return 1, None # increase no of users + except Exception as ex: + log.warning("Failed to create LDAP user: %s - %s", user, ex) + ub.session.rollback() + message = _(u'Failed to Create at Least One LDAP User') + return 0, message + + +@admi.route('/import_ldap_users', methods=["POST"]) +@login_required +@admin_required +def import_ldap_users(): + showtext = {} + try: + new_users = 
services.ldap.get_group_members(config.config_ldap_group_name) + except (services.ldap.LDAPException, TypeError, AttributeError, KeyError) as e: + log.error_or_exception(e) + showtext['text'] = _(u'Error: %(ldaperror)s', ldaperror=e) + return json.dumps(showtext) + if not new_users: + log.debug('LDAP empty response') + showtext['text'] = _(u'Error: No user returned in response of LDAP server') + return json.dumps(showtext) + + imported = 0 + for username in new_users: + user = username.decode('utf-8') + if '=' in user: + # if member object field is empty take user object as filter + if config.config_ldap_member_user_object: + query_filter = config.config_ldap_member_user_object + else: + query_filter = config.config_ldap_user_object + try: + user_identifier = extract_user_identifier(user, query_filter) + except Exception as ex: + log.warning(ex) + continue + else: + user_identifier = user + query_filter = None + try: + user_data = services.ldap.get_object_details(user=user_identifier, query_filter=query_filter) + except AttributeError as ex: + log.error_or_exception(ex) + continue + if user_data: + user_count, message = ldap_import_create_user(user, user_data) + if message: + showtext['text'] = message + else: + imported += user_count + else: + log.warning("LDAP User: %s Not Found", user) + showtext['text'] = _(u'At Least One LDAP User Not Found in Database') + if not showtext: + showtext['text'] = _(u'{} User Successfully Imported'.format(imported)) + return json.dumps(showtext) + + +@admi.route("/ajax/canceltask", methods=['POST']) +@login_required +@admin_required +def cancel_task(): + task_id = request.get_json().get('task_id', None) + worker = WorkerThread.get_instance() + worker.end_task(task_id) + return "" + + def _db_simulate_change(): param = request.form.to_dict() - to_save = {} + to_save = dict() to_save['config_calibre_dir'] = re.sub(r'[\\/]metadata\.db$', '', param['config_calibre_dir'], flags=re.IGNORECASE).strip() - db_change = 
config.config_calibre_dir != to_save["config_calibre_dir"] and config.config_calibre_dir - db_valid = calibre_db.check_valid_db(to_save["config_calibre_dir"], ub.app_DB_path) + db_valid, db_change = calibre_db.check_valid_db(to_save["config_calibre_dir"], + ub.app_DB_path, + config.config_calibre_uuid) + db_change = bool(db_change and config.config_calibre_dir) return db_change, db_valid @@ -1175,15 +1644,16 @@ def _db_configuration_update_helper(): '', to_save['config_calibre_dir'], flags=re.IGNORECASE) + db_valid = False try: db_change, db_valid = _db_simulate_change() # gdrive_error drive setup gdrive_error = _configuration_gdrive_helper(to_save) - except (OperationalError, InvalidRequestError): + except (OperationalError, InvalidRequestError) as e: ub.session.rollback() - log.error("Settings DB is not Writeable") - _db_configuration_result(_("Settings DB is not Writeable"), gdrive_error) + log.error_or_exception("Settings Database error: {}".format(e)) + _db_configuration_result(_(u"Database error: %(error)s.", error=e.orig), gdrive_error) try: metadata_db = os.path.join(to_save['config_calibre_dir'], "metadata.db") if config.config_use_google_drive and is_gdrive_ready() and not os.path.exists(metadata_db): @@ -1192,20 +1662,25 @@ def _db_configuration_update_helper(): except Exception as ex: return _db_configuration_result('{}'.format(ex), gdrive_error) - if db_change or not db_valid or not config.db_configured: + if db_change or not db_valid or not config.db_configured \ + or config.config_calibre_dir != to_save["config_calibre_dir"]: if not calibre_db.setup_db(to_save['config_calibre_dir'], ub.app_DB_path): return _db_configuration_result(_('DB Location is not Valid, Please Enter Correct Path'), gdrive_error) + config.store_calibre_uuid(calibre_db, db.Library_Id) # if db changed -> delete shelfs, delete download books, delete read books, kobo sync... 
- ub.session.query(ub.Downloads).delete() - ub.session.query(ub.ArchivedBook).delete() - ub.session.query(ub.ReadBook).delete() - ub.session.query(ub.BookShelf).delete() - ub.session.query(ub.Bookmark).delete() - ub.session.query(ub.KoboReadingState).delete() - ub.session.query(ub.KoboStatistics).delete() - ub.session.query(ub.KoboSyncedBooks).delete() - ub.session_commit() + if db_change: + log.info("Calibre Database changed, all Calibre-Web info related to old Database gets deleted") + ub.session.query(ub.Downloads).delete() + ub.session.query(ub.ArchivedBook).delete() + ub.session.query(ub.ReadBook).delete() + ub.session.query(ub.BookShelf).delete() + ub.session.query(ub.Bookmark).delete() + ub.session.query(ub.KoboReadingState).delete() + ub.session.query(ub.KoboStatistics).delete() + ub.session.query(ub.KoboSyncedBooks).delete() + helper.delete_thumbnail_cache() + ub.session_commit() _config_string(to_save, "config_calibre_dir") calibre_db.update_config(config) if not os.access(os.path.join(config.config_calibre_dir, "metadata.db"), os.W_OK): @@ -1213,6 +1688,7 @@ def _db_configuration_update_helper(): config.save() return _db_configuration_result(None, gdrive_error) + def _configuration_update_helper(): reboot_required = False to_save = request.form.to_dict() @@ -1231,7 +1707,7 @@ def _configuration_update_helper(): _config_checkbox_int(to_save, "config_unicode_filename") # Reboot on config_anonbrowse with enabled ldap, as decoraters are changed in this case reboot_required |= (_config_checkbox_int(to_save, "config_anonbrowse") - and config.config_login_type == constants.LOGIN_LDAP) + and config.config_login_type == constants.LOGIN_LDAP) _config_checkbox_int(to_save, "config_public_reg") _config_checkbox_int(to_save, "config_register_email") reboot_required |= _config_checkbox_int(to_save, "config_kobo_sync") @@ -1291,10 +1767,10 @@ def _configuration_update_helper(): unrar_status = helper.check_unrar(config.config_rarfile_location) if unrar_status: return 
_configuration_result(unrar_status) - except (OperationalError, InvalidRequestError): + except (OperationalError, InvalidRequestError) as e: ub.session.rollback() - log.error("Settings DB is not Writeable") - _configuration_result(_("Settings DB is not Writeable")) + log.error_or_exception("Settings Database error: {}".format(e)) + _configuration_result(_(u"Database error: %(error)s.", error=e.orig)) config.save() if reboot_required: @@ -1302,6 +1778,7 @@ def _configuration_update_helper(): return _configuration_result(None, reboot_required) + def _configuration_result(error_flash=None, reboot=False): resp = {} if error_flash: @@ -1309,9 +1786,9 @@ def _configuration_result(error_flash=None, reboot=False): config.load() resp['result'] = [{'type': "danger", 'message': error_flash}] else: - resp['result'] = [{'type': "success", 'message':_(u"Calibre-Web configuration updated")}] + resp['result'] = [{'type': "success", 'message': _(u"Calibre-Web configuration updated")}] resp['reboot'] = reboot - resp['config_upload']= config.config_upload_formats + resp['config_upload'] = config.config_upload_formats return Response(json.dumps(resp), mimetype='application/json') @@ -1388,10 +1865,11 @@ def _handle_new_user(to_save, content, languages, translations, kobo_support): ub.session.rollback() log.error("Found an existing account for {} or {}".format(content.name, content.email)) flash(_("Found an existing account for this e-mail address or name."), category="error") - except OperationalError: + except OperationalError as e: ub.session.rollback() - log.error("Settings DB is not Writeable") - flash(_("Settings DB is not Writeable"), category="error") + log.error_or_exception("Settings Database error: {}".format(e)) + flash(_(u"Database error: %(error)s.", error=e.orig), category="error") + def _delete_user(content): if ub.session.query(ub.User).filter(ub.User.role.op('&')(constants.ROLE_ADMIN) == constants.ROLE_ADMIN, @@ -1404,16 +1882,25 @@ def _delete_user(content): for us 
in ub.session.query(ub.Shelf).filter(content.id == ub.Shelf.user_id): ub.session.query(ub.BookShelf).filter(us.id == ub.BookShelf.shelf).delete() ub.session.query(ub.Shelf).filter(content.id == ub.Shelf.user_id).delete() + ub.session.query(ub.Bookmark).filter(content.id == ub.Bookmark.user_id).delete() ub.session.query(ub.User).filter(ub.User.id == content.id).delete() + ub.session.query(ub.ArchivedBook).filter(ub.ArchivedBook.user_id == content.id).delete() + ub.session.query(ub.RemoteAuthToken).filter(ub.RemoteAuthToken.user_id == content.id).delete() + ub.session.query(ub.User_Sessions).filter(ub.User_Sessions.user_id == content.id).delete() + ub.session.query(ub.KoboSyncedBooks).filter(ub.KoboSyncedBooks.user_id == content.id).delete() + # delete KoboReadingState and all it's children + kobo_entries = ub.session.query(ub.KoboReadingState).filter(ub.KoboReadingState.user_id == content.id).all() + for kobo_entry in kobo_entries: + ub.session.delete(kobo_entry) ub.session_commit() - log.info(u"User {} deleted".format(content.name)) - return(_(u"User '%(nick)s' deleted", nick=content.name)) + log.info("User {} deleted".format(content.name)) + return _("User '%(nick)s' deleted", nick=content.name) else: - log.warning(_(u"Can't delete Guest User")) - raise Exception(_(u"Can't delete Guest User")) + log.warning(_("Can't delete Guest User")) + raise Exception(_("Can't delete Guest User")) else: - log.warning(u"No admin user remaining, can't delete user") - raise Exception(_(u"No admin user remaining, can't delete user")) + log.warning("No admin user remaining, can't delete user") + raise Exception(_("No admin user remaining, can't delete user")) def _handle_edit_user(to_save, content, languages, translations, kobo_support): @@ -1440,7 +1927,7 @@ def _handle_edit_user(to_save, content, languages, translations, kobo_support): content.role &= ~constants.ROLE_ANONYMOUS val = [int(k[5:]) for k in to_save if k.startswith('show_')] - sidebar = get_sidebar_config() + sidebar, 
__ = get_sidebar_config() for element in sidebar: value = element['visibility'] if value in val and not content.check_visibility(value): @@ -1495,354 +1982,13 @@ def _handle_edit_user(to_save, content, languages, translations, kobo_support): ub.session.rollback() log.error("An unknown error occurred while changing user: {}".format(str(ex))) flash(_(u"An unknown error occurred. Please try again later."), category="error") - except OperationalError: + except OperationalError as e: ub.session.rollback() - log.error("Settings DB is not Writeable") - flash(_("Settings DB is not Writeable"), category="error") + log.error_or_exception("Settings Database error: {}".format(e)) + flash(_(u"Database error: %(error)s.", error=e.orig), category="error") return "" -@admi.route("/admin/user/new", methods=["GET", "POST"]) -@login_required -@admin_required -def new_user(): - content = ub.User() - languages = calibre_db.speaking_language() - translations = [LC('en')] + babel.list_translations() - kobo_support = feature_support['kobo'] and config.config_kobo_sync - if request.method == "POST": - to_save = request.form.to_dict() - _handle_new_user(to_save, content, languages, translations, kobo_support) - else: - content.role = config.config_default_role - content.sidebar_view = config.config_default_show - content.locale = config.config_default_locale - content.default_language = config.config_default_language - return render_title_template("user_edit.html", new_user=1, content=content, - config=config, translations=translations, - languages=languages, title=_(u"Add new user"), page="newuser", - kobo_support=kobo_support, registered_oauth=oauth_check) - - -@admi.route("/admin/mailsettings") -@login_required -@admin_required -def edit_mailsettings(): - content = config.get_mail_settings() - return render_title_template("email_edit.html", content=content, title=_(u"Edit E-mail Server Settings"), - page="mailset", feature_support=feature_support) - - -@admi.route("/admin/mailsettings", 
methods=["POST"]) -@login_required -@admin_required -def update_mailsettings(): - to_save = request.form.to_dict() - _config_int(to_save, "mail_server_type") - if to_save.get("invalidate"): - config.mail_gmail_token = {} - try: - flag_modified(config, "mail_gmail_token") - except AttributeError: - pass - elif to_save.get("gmail"): - try: - config.mail_gmail_token = services.gmail.setup_gmail(config.mail_gmail_token) - flash(_(u"Gmail Account Verification Successful"), category="success") - except Exception as ex: - flash(str(ex), category="error") - log.error(ex) - return edit_mailsettings() - - else: - _config_string(to_save, "mail_server") - _config_int(to_save, "mail_port") - _config_int(to_save, "mail_use_ssl") - _config_string(to_save, "mail_login") - _config_string(to_save, "mail_password") - _config_string(to_save, "mail_from") - _config_int(to_save, "mail_size", lambda y: int(y)*1024*1024) - try: - config.save() - except (OperationalError, InvalidRequestError): - ub.session.rollback() - log.error("Settings DB is not Writeable") - flash(_("Settings DB is not Writeable"), category="error") - return edit_mailsettings() - - if to_save.get("test"): - if current_user.email: - result = send_test_mail(current_user.email, current_user.name) - if result is None: - flash(_(u"Test e-mail queued for sending to %(email)s, please check Tasks for result", - email=current_user.email), category="info") - else: - flash(_(u"There was an error sending the Test e-mail: %(res)s", res=result), category="error") - else: - flash(_(u"Please configure your e-mail address first..."), category="error") - else: - flash(_(u"E-mail server settings updated"), category="success") - - return edit_mailsettings() - - -@admi.route("/admin/user/", methods=["GET", "POST"]) -@login_required -@admin_required -def edit_user(user_id): - content = ub.session.query(ub.User).filter(ub.User.id == int(user_id)).first() # type: ub.User - if not content or (not config.config_anonbrowse and content.name == 
"Guest"): - flash(_(u"User not found"), category="error") - return redirect(url_for('admin.admin')) - languages = calibre_db.speaking_language(return_all_languages=True) - translations = babel.list_translations() + [LC('en')] - kobo_support = feature_support['kobo'] and config.config_kobo_sync - if request.method == "POST": - to_save = request.form.to_dict() - resp = _handle_edit_user(to_save, content, languages, translations, kobo_support) - if resp: - return resp - return render_title_template("user_edit.html", - translations=translations, - languages=languages, - new_user=0, - content=content, - config=config, - registered_oauth=oauth_check, - mail_configured=config.get_mail_server_configured(), - kobo_support=kobo_support, - title=_(u"Edit User %(nick)s", nick=content.name), - page="edituser") - - -@admi.route("/admin/resetpassword/") -@login_required -@admin_required -def reset_user_password(user_id): - if current_user is not None and current_user.is_authenticated: - ret, message = reset_password(user_id) - if ret == 1: - log.debug(u"Password for user %s reset", message) - flash(_(u"Password for user %(user)s reset", user=message), category="success") - elif ret == 0: - log.error(u"An unknown error occurred. Please try again later.") - flash(_(u"An unknown error occurred. 
Please try again later."), category="error") - else: - log.error(u"Please configure the SMTP mail settings first...") - flash(_(u"Please configure the SMTP mail settings first..."), category="error") - return redirect(url_for('admin.admin')) - - -@admi.route("/admin/logfile") -@login_required -@admin_required -def view_logfile(): - logfiles = {0: logger.get_logfile(config.config_logfile), - 1: logger.get_accesslogfile(config.config_access_logfile)} - return render_title_template("logviewer.html", - title=_(u"Logfile viewer"), - accesslog_enable=config.config_access_log, - log_enable=bool(config.config_logfile != logger.LOG_TO_STDOUT), - logfiles=logfiles, - page="logfile") - - -@admi.route("/ajax/log/") -@login_required -@admin_required -def send_logfile(logtype): - if logtype == 1: - logfile = logger.get_accesslogfile(config.config_access_logfile) - return send_from_directory(os.path.dirname(logfile), - os.path.basename(logfile)) - if logtype == 0: - logfile = logger.get_logfile(config.config_logfile) - return send_from_directory(os.path.dirname(logfile), - os.path.basename(logfile)) - else: - return "" - - -@admi.route("/admin/logdownload/") -@login_required -@admin_required -def download_log(logtype): - if logtype == 0: - file_name = logger.get_logfile(config.config_logfile) - elif logtype == 1: - file_name = logger.get_accesslogfile(config.config_access_logfile) - else: - abort(404) - if logger.is_valid_logfile(file_name): - return debug_info.assemble_logfiles(file_name) - abort(404) - - -@admi.route("/admin/debug") -@login_required -@admin_required -def download_debug(): - return debug_info.send_debug() - - -@admi.route("/get_update_status", methods=['GET']) -@login_required -@admin_required -def get_update_status(): - if feature_support['updater']: - log.info(u"Update status requested") - return updater_thread.get_available_updates(request.method, locale=get_locale()) - else: - return '' - - -@admi.route("/get_updater_status", methods=['GET', 'POST']) 
-@login_required -@admin_required -def get_updater_status(): - status = {} - if feature_support['updater']: - if request.method == "POST": - commit = request.form.to_dict() - if "start" in commit and commit['start'] == 'True': - text = { - "1": _(u'Requesting update package'), - "2": _(u'Downloading update package'), - "3": _(u'Unzipping update package'), - "4": _(u'Replacing files'), - "5": _(u'Database connections are closed'), - "6": _(u'Stopping server'), - "7": _(u'Update finished, please press okay and reload page'), - "8": _(u'Update failed:') + u' ' + _(u'HTTP Error'), - "9": _(u'Update failed:') + u' ' + _(u'Connection error'), - "10": _(u'Update failed:') + u' ' + _(u'Timeout while establishing connection'), - "11": _(u'Update failed:') + u' ' + _(u'General error'), - "12": _(u'Update failed:') + u' ' + _(u'Update file could not be saved in temp dir'), - "13": _(u'Update failed:') + u' ' + _(u'Files could not be replaced during update') - } - status['text'] = text - updater_thread.status = 0 - updater_thread.resume() - status['status'] = updater_thread.get_update_status() - elif request.method == "GET": - try: - status['status'] = updater_thread.get_update_status() - if status['status'] == -1: - status['status'] = 7 - except Exception: - status['status'] = 11 - return json.dumps(status) - return '' - - -def ldap_import_create_user(user, user_data): - user_login_field = extract_dynamic_field_from_filter(user, config.config_ldap_user_object) - - try: - username = user_data[user_login_field][0].decode('utf-8') - except KeyError as ex: - log.error("Failed to extract LDAP user: %s - %s", user, ex) - message = _(u'Failed to extract at least One LDAP User') - return 0, message - - # check for duplicate username - if ub.session.query(ub.User).filter(func.lower(ub.User.name) == username.lower()).first(): - # if ub.session.query(ub.User).filter(ub.User.name == username).first(): - log.warning("LDAP User %s Already in Database", user_data) - return 0, None - - 
kindlemail = '' - if 'mail' in user_data: - useremail = user_data['mail'][0].decode('utf-8') - if len(user_data['mail']) > 1: - kindlemail = user_data['mail'][1].decode('utf-8') - - else: - log.debug('No Mail Field Found in LDAP Response') - useremail = username + '@email.com' - - try: - # check for duplicate email - useremail = check_email(useremail) - except Exception as ex: - log.warning("LDAP Email Error: {}, {}".format(user_data, ex)) - return 0, None - content = ub.User() - content.name = username - content.password = '' # dummy password which will be replaced by ldap one - content.email = useremail - content.kindle_mail = kindlemail - content.default_language = config.config_default_language - content.locale = config.config_default_locale - content.role = config.config_default_role - content.sidebar_view = config.config_default_show - content.allowed_tags = config.config_allowed_tags - content.denied_tags = config.config_denied_tags - content.allowed_column_value = config.config_allowed_column_value - content.denied_column_value = config.config_denied_column_value - ub.session.add(content) - try: - ub.session.commit() - return 1, None # increase no of users - except Exception as ex: - log.warning("Failed to create LDAP user: %s - %s", user, ex) - ub.session.rollback() - message = _(u'Failed to Create at Least One LDAP User') - return 0, message - - -@admi.route('/import_ldap_users') -@login_required -@admin_required -def import_ldap_users(): - showtext = {} - try: - new_users = services.ldap.get_group_members(config.config_ldap_group_name) - except (services.ldap.LDAPException, TypeError, AttributeError, KeyError) as e: - log.debug_or_exception(e) - showtext['text'] = _(u'Error: %(ldaperror)s', ldaperror=e) - return json.dumps(showtext) - if not new_users: - log.debug('LDAP empty response') - showtext['text'] = _(u'Error: No user returned in response of LDAP server') - return json.dumps(showtext) - - imported = 0 - for username in new_users: - user = 
username.decode('utf-8') - if '=' in user: - # if member object field is empty take user object as filter - if config.config_ldap_member_user_object: - query_filter = config.config_ldap_member_user_object - else: - query_filter = config.config_ldap_user_object - try: - user_identifier = extract_user_identifier(user, query_filter) - except Exception as ex: - log.warning(ex) - continue - else: - user_identifier = user - query_filter = None - try: - user_data = services.ldap.get_object_details(user=user_identifier, query_filter=query_filter) - except AttributeError as ex: - log.debug_or_exception(ex) - continue - if user_data: - user_count, message = ldap_import_create_user(user, user_data) - if message: - showtext['text'] = message - else: - imported += user_count - else: - log.warning("LDAP User: %s Not Found", user) - showtext['text'] = _(u'At Least One LDAP User Not Found in Database') - if not showtext: - showtext['text'] = _(u'{} User Successfully Imported'.format(imported)) - return json.dumps(showtext) - - def extract_user_data_from_field(user, field): match = re.search(field + r"=([\.\d\s\w-]+)", user, re.IGNORECASE | re.UNICODE) if match: diff --git a/cps/babel.py b/cps/babel.py new file mode 100644 index 00000000..f5ecaf5a --- /dev/null +++ b/cps/babel.py @@ -0,0 +1,39 @@ +from babel import negotiate_locale +from flask_babel import Babel, Locale +from babel.core import UnknownLocaleError +from flask import request, g + +from . 
import logger + +log = logger.create() + +babel = Babel() + + +@babel.localeselector +def get_locale(): + # if a user is logged in, use the locale from the user settings + user = getattr(g, 'user', None) + if user is not None and hasattr(user, "locale"): + if user.name != 'Guest': # if the account is the guest account bypass the config lang settings + return user.locale + + preferred = list() + if request.accept_languages: + for x in request.accept_languages.values(): + try: + preferred.append(str(Locale.parse(x.replace('-', '_')))) + except (UnknownLocaleError, ValueError) as e: + log.debug('Could not parse locale "%s": %s', x, e) + + return negotiate_locale(preferred or ['en'], get_available_translations()) + + +def get_user_locale_language(user_language): + return Locale.parse(user_language).get_language_name(get_locale()) + +def get_available_locale(): + return [Locale('en')] + babel.list_translations() + +def get_available_translations(): + return set(str(item) for item in get_available_locale()) diff --git a/cps/cache_buster.py b/cps/cache_buster.py index 9619d605..ba19afd6 100644 --- a/cps/cache_buster.py +++ b/cps/cache_buster.py @@ -47,13 +47,16 @@ def init_cache_busting(app): for filename in filenames: # compute version component rooted_filename = os.path.join(dirpath, filename) - with open(rooted_filename, 'rb') as f: - file_hash = hashlib.md5(f.read()).hexdigest()[:7] # nosec + try: + with open(rooted_filename, 'rb') as f: + file_hash = hashlib.md5(f.read()).hexdigest()[:7] # nosec + # save version to tables + file_path = rooted_filename.replace(static_folder, "") + file_path = file_path.replace("\\", "/") # Convert Windows path to web path + hash_table[file_path] = file_hash + except PermissionError: + log.error("No permission to access {} file.".format(rooted_filename)) - # save version to tables - file_path = rooted_filename.replace(static_folder, "") - file_path = file_path.replace("\\", "/") # Convert Windows path to web path - 
hash_table[file_path] = file_hash log.debug('Finished computing cache-busting values') def bust_filename(filename): diff --git a/cps/cli.py b/cps/cli.py index 3685e8e2..cf4f36fb 100644 --- a/cps/cli.py +++ b/cps/cli.py @@ -24,82 +24,106 @@ import socket from .constants import CONFIG_DIR as _CONFIG_DIR from .constants import STABLE_VERSION as _STABLE_VERSION from .constants import NIGHTLY_VERSION as _NIGHTLY_VERSION - +from .constants import DEFAULT_SETTINGS_FILE, DEFAULT_GDRIVE_FILE def version_info(): if _NIGHTLY_VERSION[1].startswith('$Format'): return "Calibre-Web version: %s - unkown git-clone" % _STABLE_VERSION['version'] return "Calibre-Web version: %s -%s" % (_STABLE_VERSION['version'], _NIGHTLY_VERSION[1]) +class CliParameter(object): -parser = argparse.ArgumentParser(description='Calibre Web is a web app' - ' providing a interface for browsing, reading and downloading eBooks\n', prog='cps.py') -parser.add_argument('-p', metavar='path', help='path and name to settings db, e.g. /opt/cw.db') -parser.add_argument('-g', metavar='path', help='path and name to gdrive db, e.g. /opt/gd.db') -parser.add_argument('-c', metavar='path', - help='path and name to SSL certfile, e.g. /opt/test.cert, works only in combination with keyfile') -parser.add_argument('-k', metavar='path', - help='path and name to SSL keyfile, e.g. 
/opt/test.key, works only in combination with certfile') -parser.add_argument('-v', '--version', action='version', help='Shows version number and exits Calibre-web', - version=version_info()) -parser.add_argument('-i', metavar='ip-address', help='Server IP-Address to listen') -parser.add_argument('-s', metavar='user:pass', help='Sets specific username to new password') -parser.add_argument('-f', action='store_true', help='Flag is depreciated and will be removed in next version') -args = parser.parse_args() + def init(self): + self.arg_parser() -settingspath = args.p or os.path.join(_CONFIG_DIR, "app.db") -gdpath = args.g or os.path.join(_CONFIG_DIR, "gdrive.db") + def arg_parser(self): + parser = argparse.ArgumentParser(description='Calibre Web is a web app' + ' providing a interface for browsing, reading and downloading eBooks\n', + prog='cps.py') + parser.add_argument('-p', metavar='path', help='path and name to settings db, e.g. /opt/cw.db') + parser.add_argument('-g', metavar='path', help='path and name to gdrive db, e.g. /opt/gd.db') + parser.add_argument('-c', metavar='path', + help='path and name to SSL certfile, e.g. /opt/test.cert, works only in combination with keyfile') + parser.add_argument('-k', metavar='path', + help='path and name to SSL keyfile, e.g. 
/opt/test.key, works only in combination with certfile') + parser.add_argument('-v', '--version', action='version', help='Shows version number and exits Calibre-Web', + version=version_info()) + parser.add_argument('-i', metavar='ip-address', help='Server IP-Address to listen') + parser.add_argument('-s', metavar='user:pass', + help='Sets specific username to new password and exits Calibre-Web') + parser.add_argument('-f', action='store_true', help='Flag is depreciated and will be removed in next version') + parser.add_argument('-l', action='store_true', help='Allow loading covers from localhost') + parser.add_argument('-d', action='store_true', help='Dry run of updater to check file permissions in advance ' + 'and exits Calibre-Web') + parser.add_argument('-r', action='store_true', help='Enable public database reconnect route under /reconnect') + args = parser.parse_args() -# handle and check parameter for ssl encryption -certfilepath = None -keyfilepath = None -if args.c: - if os.path.isfile(args.c): - certfilepath = args.c - else: - print("Certfile path is invalid. Exiting...") - sys.exit(1) + self.settings_path = args.p or os.path.join(_CONFIG_DIR, DEFAULT_SETTINGS_FILE) + self.gd_path = args.g or os.path.join(_CONFIG_DIR, DEFAULT_GDRIVE_FILE) -if args.c == "": - certfilepath = "" + if os.path.isdir(self.settings_path): + self.settings_path = os.path.join(self.settings_path, DEFAULT_SETTINGS_FILE) -if args.k: - if os.path.isfile(args.k): - keyfilepath = args.k - else: - print("Keyfile path is invalid. Exiting...") - sys.exit(1) + if os.path.isdir(self.gd_path): + self.gd_path = os.path.join(self.gd_path, DEFAULT_GDRIVE_FILE) -if (args.k and not args.c) or (not args.k and args.c): - print("Certfile and Keyfile have to be used together. 
Exiting...") - sys.exit(1) -if args.k == "": - keyfilepath = "" - -# handle and check ip address argument -ip_address = args.i or None -if ip_address: - try: - # try to parse the given ip address with socket - if hasattr(socket, 'inet_pton'): - if ':' in ip_address: - socket.inet_pton(socket.AF_INET6, ip_address) + # handle and check parameter for ssl encryption + self.certfilepath = None + self.keyfilepath = None + if args.c: + if os.path.isfile(args.c): + self.certfilepath = args.c else: - socket.inet_pton(socket.AF_INET, ip_address) - else: - # on windows python < 3.4, inet_pton is not available - # inet_atom only handles IPv4 addresses - socket.inet_aton(ip_address) - except socket.error as err: - print(ip_address, ':', err) - sys.exit(1) + print("Certfile path is invalid. Exiting...") + sys.exit(1) -# handle and check user password argument -user_credentials = args.s or None -if user_credentials and ":" not in user_credentials: - print("No valid 'username:password' format") - sys.exit(3) + if args.c == "": + self.certfilepath = "" -if args.f: - print("Warning: -f flag is depreciated and will be removed in next version") + if args.k: + if os.path.isfile(args.k): + self.keyfilepath = args.k + else: + print("Keyfile path is invalid. Exiting...") + sys.exit(1) + + if (args.k and not args.c) or (not args.k and args.c): + print("Certfile and Keyfile have to be used together. 
Exiting...") + sys.exit(1) + + if args.k == "": + self.keyfilepath = "" + + # dry run updater + self.dry_run =args.d or None + # enable reconnect endpoint for docker database reconnect + self.reconnect_enable = args.r or os.environ.get("CALIBRE_RECONNECT", None) + # load covers from localhost + self.allow_localhost = args.l or os.environ.get("CALIBRE_LOCALHOST", None) + # handle and check ip address argument + self.ip_address = args.i or None + if self.ip_address: + try: + # try to parse the given ip address with socket + if hasattr(socket, 'inet_pton'): + if ':' in self.ip_address: + socket.inet_pton(socket.AF_INET6, self.ip_address) + else: + socket.inet_pton(socket.AF_INET, self.ip_address) + else: + # on windows python < 3.4, inet_pton is not available + # inet_atom only handles IPv4 addresses + socket.inet_aton(self.ip_address) + except socket.error as err: + print(self.ip_address, ':', err) + sys.exit(1) + + # handle and check user password argument + self.user_credentials = args.s or None + if self.user_credentials and ":" not in self.user_credentials: + print("No valid 'username:password' format") + sys.exit(3) + + if args.f: + print("Warning: -f flag is depreciated and will be removed in next version") diff --git a/cps/comic.py b/cps/comic.py index b094c60f..8f3a6f61 100644 --- a/cps/comic.py +++ b/cps/comic.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) -# Copyright (C) 2018 OzzieIsaacs +# Copyright (C) 2018-2022 OzzieIsaacs # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -18,19 +18,16 @@ import os -from . import logger, isoLanguages +from . 
import logger, isoLanguages, cover from .constants import BookMeta - -log = logger.create() - - try: from wand.image import Image use_IM = True except (ImportError, RuntimeError) as e: use_IM = False +log = logger.create() try: from comicapi.comicarchive import ComicArchive, MetaDataStyle @@ -51,37 +48,16 @@ except (ImportError, LookupError) as e: use_rarfile = False use_comic_meta = False -NO_JPEG_EXTENSIONS = ['.png', '.webp', '.bmp'] -COVER_EXTENSIONS = ['.png', '.webp', '.bmp', '.jpg', '.jpeg'] -def _cover_processing(tmp_file_name, img, extension): - tmp_cover_name = os.path.join(os.path.dirname(tmp_file_name), 'cover.jpg') - if use_IM: - # convert to jpg because calibre only supports jpg - if extension in NO_JPEG_EXTENSIONS: - with Image(filename=tmp_file_name) as imgc: - imgc.format = 'jpeg' - imgc.transform_colorspace('rgb') - imgc.save(tmp_cover_name) - return tmp_cover_name - - if not img: - return None - - with open(tmp_cover_name, 'wb') as f: - f.write(img) - return tmp_cover_name - - -def _extract_Cover_from_archive(original_file_extension, tmp_file_name, rarExecutable): - cover_data = None +def _extract_cover_from_archive(original_file_extension, tmp_file_name, rar_executable): + cover_data = extension = None if original_file_extension.upper() == '.CBZ': cf = zipfile.ZipFile(tmp_file_name) for name in cf.namelist(): ext = os.path.splitext(name) if len(ext) > 1: extension = ext[1].lower() - if extension in COVER_EXTENSIONS: + if extension in cover.COVER_EXTENSIONS: cover_data = cf.read(name) break elif original_file_extension.upper() == '.CBT': @@ -90,44 +66,44 @@ def _extract_Cover_from_archive(original_file_extension, tmp_file_name, rarExecu ext = os.path.splitext(name) if len(ext) > 1: extension = ext[1].lower() - if extension in COVER_EXTENSIONS: + if extension in cover.COVER_EXTENSIONS: cover_data = cf.extractfile(name).read() break elif original_file_extension.upper() == '.CBR' and use_rarfile: try: - rarfile.UNRAR_TOOL = rarExecutable + 
rarfile.UNRAR_TOOL = rar_executable cf = rarfile.RarFile(tmp_file_name) - for name in cf.getnames(): + for name in cf.namelist(): ext = os.path.splitext(name) if len(ext) > 1: extension = ext[1].lower() - if extension in COVER_EXTENSIONS: + if extension in cover.COVER_EXTENSIONS: cover_data = cf.read(name) break except Exception as ex: - log.debug('Rarfile failed with error: %s', ex) - return cover_data + log.debug('Rarfile failed with error: {}'.format(ex)) + return cover_data, extension -def _extractCover(tmp_file_name, original_file_extension, rarExecutable): +def _extract_cover(tmp_file_name, original_file_extension, rar_executable): cover_data = extension = None if use_comic_meta: - archive = ComicArchive(tmp_file_name, rar_exe_path=rarExecutable) + archive = ComicArchive(tmp_file_name, rar_exe_path=rar_executable) for index, name in enumerate(archive.getPageNameList()): ext = os.path.splitext(name) if len(ext) > 1: extension = ext[1].lower() - if extension in COVER_EXTENSIONS: + if extension in cover.COVER_EXTENSIONS: cover_data = archive.getPage(index) break else: - cover_data = _extract_Cover_from_archive(original_file_extension, tmp_file_name, rarExecutable) - return _cover_processing(tmp_file_name, cover_data, extension) + cover_data, extension = _extract_cover_from_archive(original_file_extension, tmp_file_name, rar_executable) + return cover.cover_processing(tmp_file_name, cover_data, extension) -def get_comic_info(tmp_file_path, original_file_name, original_file_extension, rarExecutable): +def get_comic_info(tmp_file_path, original_file_name, original_file_extension, rar_executable): if use_comic_meta: - archive = ComicArchive(tmp_file_path, rar_exe_path=rarExecutable) + archive = ComicArchive(tmp_file_path, rar_exe_path=rar_executable) if archive.seemsToBeAComicArchive(): if archive.hasMetadata(MetaDataStyle.CIX): style = MetaDataStyle.CIX @@ -137,34 +113,38 @@ def get_comic_info(tmp_file_path, original_file_name, original_file_extension, r style = 
None # if style is not None: - loadedMetadata = archive.readMetadata(style) + loaded_metadata = archive.readMetadata(style) - lang = loadedMetadata.language or "" - loadedMetadata.language = isoLanguages.get_lang3(lang) + lang = loaded_metadata.language or "" + loaded_metadata.language = isoLanguages.get_lang3(lang) return BookMeta( file_path=tmp_file_path, extension=original_file_extension, - title=loadedMetadata.title or original_file_name, + title=loaded_metadata.title or original_file_name, author=" & ".join([credit["person"] - for credit in loadedMetadata.credits if credit["role"] == "Writer"]) or u'Unknown', - cover=_extractCover(tmp_file_path, original_file_extension, rarExecutable), - description=loadedMetadata.comments or "", + for credit in loaded_metadata.credits if credit["role"] == "Writer"]) or 'Unknown', + cover=_extract_cover(tmp_file_path, original_file_extension, rar_executable), + description=loaded_metadata.comments or "", tags="", - series=loadedMetadata.series or "", - series_id=loadedMetadata.issue or "", - languages=loadedMetadata.language, - publisher="") + series=loaded_metadata.series or "", + series_id=loaded_metadata.issue or "", + languages=loaded_metadata.language, + publisher="", + pubdate="", + identifiers=[]) return BookMeta( file_path=tmp_file_path, extension=original_file_extension, title=original_file_name, author=u'Unknown', - cover=_extractCover(tmp_file_path, original_file_extension, rarExecutable), + cover=_extract_cover(tmp_file_path, original_file_extension, rar_executable), description="", tags="", series="", series_id="", languages="", - publisher="") + publisher="", + pubdate="", + identifiers=[]) diff --git a/cps/config_sql.py b/cps/config_sql.py index ebc4ca24..743b2ce7 100644 --- a/cps/config_sql.py +++ b/cps/config_sql.py @@ -29,7 +29,7 @@ try: except ImportError: from sqlalchemy.ext.declarative import declarative_base -from . import constants, cli, logger +from . 
import constants, logger log = logger.create() @@ -62,6 +62,7 @@ class _Settings(_Base): mail_gmail_token = Column(JSON, default={}) config_calibre_dir = Column(String) + config_calibre_uuid = Column(String) config_port = Column(Integer, default=constants.DEFAULT_PORT) config_external_port = Column(Integer, default=constants.DEFAULT_PORT) config_certfile = Column(String) @@ -133,13 +134,19 @@ class _Settings(_Base): config_calibre = Column(String) config_rarfile_location = Column(String, default=None) config_upload_formats = Column(String, default=','.join(constants.EXTENSIONS_UPLOAD)) - config_unicode_filename =Column(Boolean, default=False) + config_unicode_filename = Column(Boolean, default=False) config_updatechannel = Column(Integer, default=constants.UPDATE_STABLE) config_reverse_proxy_login_header_name = Column(String) config_allow_reverse_proxy_header_login = Column(Boolean, default=False) + schedule_start_time = Column(Integer, default=4) + schedule_duration = Column(Integer, default=10) + schedule_generate_book_covers = Column(Boolean, default=False) + schedule_generate_series_covers = Column(Boolean, default=False) + schedule_reconnect = Column(Boolean, default=False) + def __repr__(self): return self.__class__.__name__ @@ -147,12 +154,16 @@ class _Settings(_Base): # Class holds all application specific settings in calibre-web class _ConfigSQL(object): # pylint: disable=no-member - def __init__(self, session): + def __init__(self): + pass + + def init_config(self, session, cli): self._session = session self._settings = None self.db_configured = None self.config_calibre_dir = None self.load() + self.cli = cli change = False if self.config_converterpath == None: # pylint: disable=access-member-before-definition @@ -170,7 +181,6 @@ class _ConfigSQL(object): if change: self.save() - def _read_from_storage(self): if self._settings is None: log.debug("_ConfigSQL._read_from_storage") @@ -178,22 +188,21 @@ class _ConfigSQL(object): return self._settings def 
get_config_certfile(self): - if cli.certfilepath: - return cli.certfilepath - if cli.certfilepath == "": + if self.cli.certfilepath: + return self.cli.certfilepath + if self.cli.certfilepath == "": return None return self.config_certfile def get_config_keyfile(self): - if cli.keyfilepath: - return cli.keyfilepath - if cli.certfilepath == "": + if self.cli.keyfilepath: + return self.cli.keyfilepath + if self.cli.certfilepath == "": return None return self.config_keyfile - @staticmethod - def get_config_ipaddress(): - return cli.ip_address or "" + def get_config_ipaddress(self): + return self.cli.ip_address or "" def _has_role(self, role_flag): return constants.has_flag(self.config_default_role, role_flag) @@ -254,6 +263,8 @@ class _ConfigSQL(object): return bool((self.mail_server != constants.DEFAULT_MAIL_SERVER and self.mail_server_type == 0) or (self.mail_gmail_token != {} and self.mail_server_type == 1)) + def get_scheduled_task_settings(self): + return {k:v for k, v in self.__dict__.items() if k.startswith('schedule_')} def set_from_dictionary(self, dictionary, field, convertor=None, default=None, encode=None): """Possibly updates a field of this object. 
@@ -285,11 +296,10 @@ class _ConfigSQL(object): def toDict(self): storage = {} for k, v in self.__dict__.items(): - if k[0] != '_' and not k.endswith("password") and not k.endswith("secret"): + if k[0] != '_' and not k.endswith("password") and not k.endswith("secret") and not k == "cli": storage[k] = v return storage - def load(self): '''Load all configuration values from the underlying storage.''' s = self._read_from_storage() # type: _Settings @@ -304,9 +314,8 @@ class _ConfigSQL(object): have_metadata_db = bool(self.config_calibre_dir) if have_metadata_db: - if not self.config_use_google_drive: - db_file = os.path.join(self.config_calibre_dir, 'metadata.db') - have_metadata_db = os.path.isfile(db_file) + db_file = os.path.join(self.config_calibre_dir, 'metadata.db') + have_metadata_db = os.path.isfile(db_file) self.db_configured = have_metadata_db constants.EXTENSIONS_UPLOAD = [x.lstrip().rstrip().lower() for x in self.config_upload_formats.split(',')] if os.environ.get('FLASK_DEBUG'): @@ -351,6 +360,14 @@ class _ConfigSQL(object): # self.config_calibre_dir = None self.save() + def store_calibre_uuid(self, calibre_db, Library_table): + try: + calibre_uuid = calibre_db.session.query(Library_table).one_or_none() + if self.config_calibre_uuid != calibre_uuid.uuid: + self.config_calibre_uuid = calibre_uuid.uuid + self.save() + except AttributeError: + pass def _migrate_table(session, orm_class): changed = False @@ -403,6 +420,7 @@ def autodetect_calibre_binary(): return element return "" + def autodetect_unrar_binary(): if sys.platform == "win32": calibre_path = ["C:\\program files\\WinRar\\unRAR.exe", @@ -414,6 +432,7 @@ def autodetect_unrar_binary(): return element return "" + def autodetect_kepubify_binary(): if sys.platform == "win32": calibre_path = ["C:\\program files\\kepubify\\kepubify-windows-64Bit.exe", @@ -425,6 +444,7 @@ def autodetect_kepubify_binary(): return element return "" + def _migrate_database(session): # make sure the table is created, if it 
does not exist _Base.metadata.create_all(session.bind) @@ -432,26 +452,20 @@ def _migrate_database(session): _migrate_table(session, _Flask_Settings) -def load_configuration(session): +def load_configuration(conf, session, cli): _migrate_database(session) if not session.query(_Settings).count(): session.add(_Settings()) session.commit() - conf = _ConfigSQL(session) - # Migrate from global restrictions to user based restrictions - #if bool(conf.config_default_show & constants.MATURE_CONTENT) and conf.config_denied_tags == "": - # conf.config_denied_tags = conf.config_mature_content_tags - # conf.save() - # session.query(ub.User).filter(ub.User.mature_content != True). \ - # update({"denied_tags": conf.config_mature_content_tags}, synchronize_session=False) - # session.commit() - return conf + # conf = _ConfigSQL() + conf.init_config(session, cli) + # return conf -def get_flask_session_key(session): - flask_settings = session.query(_Flask_Settings).one_or_none() +def get_flask_session_key(_session): + flask_settings = _session.query(_Flask_Settings).one_or_none() if flask_settings == None: flask_settings = _Flask_Settings(os.urandom(32)) - session.add(flask_settings) - session.commit() + _session.add(flask_settings) + _session.commit() return flask_settings.flask_session_key diff --git a/cps/constants.py b/cps/constants.py index e37ad900..0f3b2530 100644 --- a/cps/constants.py +++ b/cps/constants.py @@ -21,28 +21,37 @@ import os from collections import namedtuple from sqlalchemy import __version__ as sql_version -sqlalchemy_version2 = ([int(x) for x in sql_version.split('.')] >= [2,0,0]) +sqlalchemy_version2 = ([int(x) for x in sql_version.split('.')] >= [2, 0, 0]) + +# APP_MODE - production, development, or test +APP_MODE = os.environ.get('APP_MODE', 'production') # if installed via pip this variable is set to true (empty file with name .HOMEDIR present) HOME_CONFIG = os.path.isfile(os.path.join(os.path.dirname(os.path.abspath(__file__)), '.HOMEDIR')) -#In 
executables updater is not available, so variable is set to False there +# In executables updater is not available, so variable is set to False there UPDATER_AVAILABLE = True # Base dir is parent of current file, necessary if called from different folder -BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),os.pardir)) +BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)) STATIC_DIR = os.path.join(BASE_DIR, 'cps', 'static') TEMPLATES_DIR = os.path.join(BASE_DIR, 'cps', 'templates') TRANSLATIONS_DIR = os.path.join(BASE_DIR, 'cps', 'translations') +# Cache dir - use CACHE_DIR environment variable, otherwise use the default directory: cps/cache +DEFAULT_CACHE_DIR = os.path.join(BASE_DIR, 'cps', 'cache') +CACHE_DIR = os.environ.get('CACHE_DIR', DEFAULT_CACHE_DIR) + if HOME_CONFIG: - home_dir = os.path.join(os.path.expanduser("~"),".calibre-web") + home_dir = os.path.join(os.path.expanduser("~"), ".calibre-web") if not os.path.exists(home_dir): os.makedirs(home_dir) CONFIG_DIR = os.environ.get('CALIBRE_DBPATH', home_dir) else: CONFIG_DIR = os.environ.get('CALIBRE_DBPATH', BASE_DIR) +DEFAULT_SETTINGS_FILE = "app.db" +DEFAULT_GDRIVE_FILE = "gdrive.db" ROLE_USER = 0 << 0 ROLE_ADMIN = 1 << 0 @@ -133,11 +142,14 @@ except ValueError: del env_CALIBRE_PORT -EXTENSIONS_AUDIO = {'mp3', 'mp4', 'ogg', 'opus', 'wav', 'flac', 'm4a', 'm4b'} -EXTENSIONS_CONVERT_FROM = ['pdf', 'epub', 'mobi', 'azw3', 'docx', 'rtf', 'fb2', 'lit', 'lrf', 'txt', 'htmlz', 'rtf', 'odt','cbz','cbr'] -EXTENSIONS_CONVERT_TO = ['pdf', 'epub', 'mobi', 'azw3', 'docx', 'rtf', 'fb2', 'lit', 'lrf', 'txt', 'htmlz', 'rtf', 'odt'] -EXTENSIONS_UPLOAD = {'txt', 'pdf', 'epub', 'kepub', 'mobi', 'azw', 'azw3', 'cbr', 'cbz', 'cbt', 'djvu', 'prc', 'doc', 'docx', - 'fb2', 'html', 'rtf', 'lit', 'odt', 'mp3', 'mp4', 'ogg', 'opus', 'wav', 'flac', 'm4a', 'm4b'} +EXTENSIONS_AUDIO = {'mp3', 'mp4', 'ogg', 'opus', 'wav', 'flac', 'm4a', 'm4b'} 
+EXTENSIONS_CONVERT_FROM = ['pdf', 'epub', 'mobi', 'azw3', 'docx', 'rtf', 'fb2', 'lit', 'lrf', + 'txt', 'htmlz', 'rtf', 'odt', 'cbz', 'cbr'] +EXTENSIONS_CONVERT_TO = ['pdf', 'epub', 'mobi', 'azw3', 'docx', 'rtf', 'fb2', + 'lit', 'lrf', 'txt', 'htmlz', 'rtf', 'odt'] +EXTENSIONS_UPLOAD = {'txt', 'pdf', 'epub', 'kepub', 'mobi', 'azw', 'azw3', 'cbr', 'cbz', 'cbt', 'djvu', + 'prc', 'doc', 'docx', 'fb2', 'html', 'rtf', 'lit', 'odt', 'mp3', 'mp4', 'ogg', + 'opus', 'wav', 'flac', 'm4a', 'm4b'} def has_flag(value, bit_flag): @@ -149,16 +161,29 @@ def selected_roles(dictionary): # :rtype: BookMeta BookMeta = namedtuple('BookMeta', 'file_path, extension, title, author, cover, description, tags, series, ' - 'series_id, languages, publisher') + 'series_id, languages, publisher, pubdate, identifiers') -STABLE_VERSION = {'version': '0.6.15 Beta'} +STABLE_VERSION = {'version': '0.6.19 Beta'} -NIGHTLY_VERSION = {} +NIGHTLY_VERSION = dict() NIGHTLY_VERSION[0] = '$Format:%H$' NIGHTLY_VERSION[1] = '$Format:%cI$' # NIGHTLY_VERSION[0] = 'bb7d2c6273ae4560e83950d36d64533343623a57' # NIGHTLY_VERSION[1] = '2018-09-09T10:13:08+02:00' +# CACHE +CACHE_TYPE_THUMBNAILS = 'thumbnails' + +# Thumbnail Types +THUMBNAIL_TYPE_COVER = 1 +THUMBNAIL_TYPE_SERIES = 2 +THUMBNAIL_TYPE_AUTHOR = 3 + +# Thumbnails Sizes +COVER_THUMBNAIL_ORIGINAL = 0 +COVER_THUMBNAIL_SMALL = 1 +COVER_THUMBNAIL_MEDIUM = 2 +COVER_THUMBNAIL_LARGE = 3 # clean-up the module namespace del sys, os, namedtuple diff --git a/cps/converter.py b/cps/converter.py index fcbabbfc..af2a6c09 100644 --- a/cps/converter.py +++ b/cps/converter.py @@ -18,7 +18,8 @@ import os import re -from flask_babel import gettext as _ + +from flask_babel import lazy_gettext as N_ from . 
import config, logger from .subproc_wrapper import process_wait @@ -26,10 +27,9 @@ from .subproc_wrapper import process_wait log = logger.create() -# _() necessary to make babel aware of string for translation -_NOT_CONFIGURED = _('not configured') -_NOT_INSTALLED = _('not installed') -_EXECUTION_ERROR = _('Execution permissions missing') +# strings getting translated when used +_NOT_INSTALLED = N_('not installed') +_EXECUTION_ERROR = N_('Execution permissions missing') def _get_command_version(path, pattern, argument=None): @@ -48,14 +48,16 @@ def _get_command_version(path, pattern, argument=None): def get_calibre_version(): - return _get_command_version(config.config_converterpath, r'ebook-convert.*\(calibre', '--version') \ - or _NOT_CONFIGURED + return _get_command_version(config.config_converterpath, r'ebook-convert.*\(calibre', '--version') def get_unrar_version(): - return _get_command_version(config.config_rarfile_location, r'UNRAR.*\d') or _NOT_CONFIGURED + unrar_version = _get_command_version(config.config_rarfile_location, r'UNRAR.*\d') + if unrar_version == "not installed": + unrar_version = _get_command_version(config.config_rarfile_location, r'unrar.*\d','-V') + return unrar_version def get_kepubify_version(): - return _get_command_version(config.config_kepubifypath, r'kepubify\s','--version') or _NOT_CONFIGURED + return _get_command_version(config.config_kepubifypath, r'kepubify\s','--version') diff --git a/cps/cover.py b/cps/cover.py new file mode 100644 index 00000000..5dd29534 --- /dev/null +++ b/cps/cover.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- + +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2022 OzzieIsaacs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os + +try: + from wand.image import Image + use_IM = True +except (ImportError, RuntimeError) as e: + use_IM = False + + +NO_JPEG_EXTENSIONS = ['.png', '.webp', '.bmp'] +COVER_EXTENSIONS = ['.png', '.webp', '.bmp', '.jpg', '.jpeg'] + + +def cover_processing(tmp_file_name, img, extension): + tmp_cover_name = os.path.join(os.path.dirname(tmp_file_name), 'cover.jpg') + if extension in NO_JPEG_EXTENSIONS: + if use_IM: + with Image(blob=img) as imgc: + imgc.format = 'jpeg' + imgc.transform_colorspace('rgb') + imgc.save(filename=tmp_cover_name) + return tmp_cover_name + else: + return None + if img: + with open(tmp_cover_name, 'wb') as f: + f.write(img) + return tmp_cover_name + else: + return None diff --git a/cps/db.py b/cps/db.py index 0359d3d2..f4dc4ccb 100644 --- a/cps/db.py +++ b/cps/db.py @@ -17,13 +17,15 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import sys import os import re import ast import json from datetime import datetime +from urllib.parse import quote +import unidecode +from sqlite3 import OperationalError as sqliteOperationalError from sqlalchemy import create_engine from sqlalchemy import Table, Column, ForeignKey, CheckConstraint from sqlalchemy import String, Integer, Boolean, TIMESTAMP, Float @@ -41,6 +43,7 @@ from sqlalchemy.sql.expression import and_, true, false, text, func, or_ from sqlalchemy.ext.associationproxy import association_proxy from flask_login import current_user from flask_babel import gettext as _ +from flask_babel import get_locale from flask import flash from . 
import logger, ub, isoLanguages @@ -48,11 +51,6 @@ from .pagination import Pagination from weakref import WeakSet -try: - import unidecode - use_unidecode = True -except ImportError: - use_unidecode = False log = logger.create() @@ -92,6 +90,12 @@ books_publishers_link = Table('books_publishers_link', Base.metadata, ) +class Library_Id(Base): + __tablename__ = 'library_id' + id = Column(Integer, primary_key=True) + uuid = Column(String, nullable=False) + + class Identifiers(Base): __tablename__ = 'identifiers' @@ -105,7 +109,7 @@ class Identifiers(Base): self.type = id_type self.book = book - def formatType(self): + def format_type(self): format_type = self.type.lower() if format_type == 'amazon': return u"Amazon" @@ -164,6 +168,8 @@ class Identifiers(Base): return u"https://portal.issn.org/resource/ISSN/{0}".format(self.val) elif format_type == "isfdb": return u"http://www.isfdb.org/cgi-bin/pl.cgi?{0}".format(self.val) + elif self.val.lower().startswith("javascript:"): + return quote(self.val) else: return u"{0}".format(self.val) @@ -172,11 +178,11 @@ class Comments(Base): __tablename__ = 'comments' id = Column(Integer, primary_key=True) + book = Column(Integer, ForeignKey('books.id'), nullable=False, unique=True) text = Column(String(collation='NOCASE'), nullable=False) - book = Column(Integer, ForeignKey('books.id'), nullable=False) - def __init__(self, text, book): - self.text = text + def __init__(self, comment, book): + self.text = comment self.book = book def get(self): @@ -358,7 +364,6 @@ class Books(Base): self.path = path self.has_cover = (has_cover != None) - def __repr__(self): return u"".format(self.title, self.sort, self.author_sort, self.timestamp, self.pubdate, self.series_index, @@ -366,10 +371,10 @@ class Books(Base): @property def atom_timestamp(self): - return (self.timestamp.strftime('%Y-%m-%dT%H:%M:%S+00:00') or '') + return self.timestamp.strftime('%Y-%m-%dT%H:%M:%S+00:00') or '' -class Custom_Columns(Base): +class CustomColumns(Base): 
__tablename__ = 'custom_columns' id = Column(Integer, primary_key=True) @@ -427,7 +432,7 @@ class AlchemyEncoder(json.JSONEncoder): return json.JSONEncoder.default(self, o) -class CalibreDB(): +class CalibreDB: _init = False engine = None config = None @@ -436,22 +441,27 @@ class CalibreDB(): # instances alive once they reach the end of their respective scopes instances = WeakSet() - def __init__(self, expire_on_commit=True): + def __init__(self, expire_on_commit=True, init=False): """ Initialize a new CalibreDB session """ self.session = None + if init: + self.init_db(expire_on_commit) + + + def init_db(self, expire_on_commit=True): if self._init: - self.initSession(expire_on_commit) + self.init_session(expire_on_commit) self.instances.add(self) - def initSession(self, expire_on_commit=True): + def init_session(self, expire_on_commit=True): self.session = self.session_factory() self.session.expire_on_commit = expire_on_commit self.update_title_sort(self.config) @classmethod - def setup_db_cc_classes(self, cc): + def setup_db_cc_classes(cls, cc): cc_ids = [] books_custom_column_links = {} for row in cc: @@ -522,25 +532,31 @@ class CalibreDB(): return cc_classes @classmethod - def check_valid_db(cls, config_calibre_dir, app_db_path): + def check_valid_db(cls, config_calibre_dir, app_db_path, config_calibre_uuid): if not config_calibre_dir: - return False + return False, False dbpath = os.path.join(config_calibre_dir, "metadata.db") if not os.path.exists(dbpath): - return False + return False, False try: check_engine = create_engine('sqlite://', - echo=False, - isolation_level="SERIALIZABLE", - connect_args={'check_same_thread': False}, - poolclass=StaticPool) + echo=False, + isolation_level="SERIALIZABLE", + connect_args={'check_same_thread': False}, + poolclass=StaticPool) with check_engine.begin() as connection: connection.execute(text("attach database '{}' as calibre;".format(dbpath))) connection.execute(text("attach database '{}' as 
app_settings;".format(app_db_path))) + local_session = scoped_session(sessionmaker()) + local_session.configure(bind=connection) + database_uuid = local_session().query(Library_Id).one_or_none() + # local_session.dispose() + check_engine.connect() + db_change = config_calibre_uuid != database_uuid.uuid except Exception: - return False - return True + return False, False + return True, db_change @classmethod def update_config(cls, config): @@ -582,13 +598,14 @@ class CalibreDB(): cc = conn.execute(text("SELECT id, datatype FROM custom_columns")) cls.setup_db_cc_classes(cc) except OperationalError as e: - log.debug_or_exception(e) + log.error_or_exception(e) + return False cls.session_factory = scoped_session(sessionmaker(autocommit=False, autoflush=True, bind=cls.engine)) for inst in cls.instances: - inst.initSession() + inst.init_session() cls._init = True return True @@ -611,8 +628,8 @@ class CalibreDB(): bd = (self.session.query(Books, read_column.value, ub.ArchivedBook.is_archived).select_from(Books) .join(read_column, read_column.book == book_id, isouter=True)) - except (KeyError, AttributeError): - log.error("Custom Column No.%d is not existing in calibre database", read_column) + except (KeyError, AttributeError, IndexError): + log.error("Custom Column No.{} is not existing in calibre database".format(read_column)) # Skip linking read column and return None instead of read status bd = self.session.query(Books, None, ub.ArchivedBook.is_archived) return (bd.filter(Books.id == book_id) @@ -629,12 +646,10 @@ class CalibreDB(): # Language and content filters for displaying in the UI def common_filters(self, allow_show_archived=False, return_all_languages=False): if not allow_show_archived: - archived_books = ( - ub.session.query(ub.ArchivedBook) - .filter(ub.ArchivedBook.user_id == int(current_user.id)) - .filter(ub.ArchivedBook.is_archived == True) - .all() - ) + archived_books = (ub.session.query(ub.ArchivedBook) + .filter(ub.ArchivedBook.user_id == 
int(current_user.id)) + .filter(ub.ArchivedBook.is_archived == True) + .all()) archived_book_ids = [archived_book.book_id for archived_book in archived_books] archived_filter = Books.id.notin_(archived_book_ids) else: @@ -653,16 +668,16 @@ class CalibreDB(): pos_cc_list = current_user.allowed_column_value.split(',') pos_content_cc_filter = true() if pos_cc_list == [''] else \ getattr(Books, 'custom_column_' + str(self.config.config_restricted_column)). \ - any(cc_classes[self.config.config_restricted_column].value.in_(pos_cc_list)) + any(cc_classes[self.config.config_restricted_column].value.in_(pos_cc_list)) neg_cc_list = current_user.denied_column_value.split(',') neg_content_cc_filter = false() if neg_cc_list == [''] else \ getattr(Books, 'custom_column_' + str(self.config.config_restricted_column)). \ - any(cc_classes[self.config.config_restricted_column].value.in_(neg_cc_list)) - except (KeyError, AttributeError): + any(cc_classes[self.config.config_restricted_column].value.in_(neg_cc_list)) + except (KeyError, AttributeError, IndexError): pos_content_cc_filter = false() neg_content_cc_filter = true() - log.error(u"Custom Column No.%d is not existing in calibre database", - self.config.config_restricted_column) + log.error("Custom Column No.{} is not existing in calibre database".format( + self.config.config_restricted_column)) flash(_("Custom Column No.%(column)d is not existing in calibre database", column=self.config.config_restricted_column), category="error") @@ -673,6 +688,25 @@ class CalibreDB(): return and_(lang_filter, pos_content_tags_filter, ~neg_content_tags_filter, pos_content_cc_filter, ~neg_content_cc_filter, archived_filter) + def generate_linked_query(self, config_read_column, database): + if not config_read_column: + query = (self.session.query(database, ub.ArchivedBook.is_archived, ub.ReadBook.read_status) + .select_from(Books) + .outerjoin(ub.ReadBook, + and_(ub.ReadBook.user_id == int(current_user.id), ub.ReadBook.book_id == Books.id))) + 
else: + try: + read_column = cc_classes[config_read_column] + query = (self.session.query(database, ub.ArchivedBook.is_archived, read_column.value) + .select_from(Books) + .outerjoin(read_column, read_column.book == Books.id)) + except (KeyError, AttributeError, IndexError): + log.error("Custom Column No.{} is not existing in calibre database".format(config_read_column)) + # Skip linking read column and return None instead of read status + query = self.session.query(database, None, ub.ArchivedBook.is_archived) + return query.outerjoin(ub.ArchivedBook, and_(Books.id == ub.ArchivedBook.book_id, + int(current_user.id) == ub.ArchivedBook.user_id)) + @staticmethod def get_checkbox_sorted(inputlist, state, offset, limit, order, combo=False): outcome = list() @@ -702,30 +736,14 @@ class CalibreDB(): join_archive_read, config_read_column, *join): pagesize = pagesize or self.config.config_books_per_page if current_user.show_detail_random(): - randm = self.session.query(Books) \ - .filter(self.common_filters(allow_show_archived)) \ - .order_by(func.random()) \ - .limit(self.config.config_random_books).all() + random_query = self.generate_linked_query(config_read_column, database) + randm = (random_query.filter(self.common_filters(allow_show_archived)) + .order_by(func.random()) + .limit(self.config.config_random_books).all()) else: randm = false() if join_archive_read: - if not config_read_column: - query = (self.session.query(database, ub.ReadBook.read_status, ub.ArchivedBook.is_archived) - .select_from(Books) - .outerjoin(ub.ReadBook, - and_(ub.ReadBook.user_id == int(current_user.id), ub.ReadBook.book_id == Books.id))) - else: - try: - read_column = cc_classes[config_read_column] - query = (self.session.query(database, read_column.value, ub.ArchivedBook.is_archived) - .select_from(Books) - .outerjoin(read_column, read_column.book == Books.id)) - except (KeyError, AttributeError): - log.error("Custom Column No.%d is not existing in calibre database", read_column) - # Skip 
linking read column and return None instead of read status - query =self.session.query(database, None, ub.ArchivedBook.is_archived) - query = query.outerjoin(ub.ArchivedBook, and_(Books.id == ub.ArchivedBook.book_id, - int(current_user.id) == ub.ArchivedBook.user_id)) + query = self.generate_linked_query(config_read_column, database) else: query = self.session.query(database) off = int(int(pagesize) * (page - 1)) @@ -754,13 +772,15 @@ class CalibreDB(): len(query.all())) entries = query.order_by(*order).offset(off).limit(pagesize).all() except Exception as ex: - log.debug_or_exception(ex) + log.error_or_exception(ex) # display authors in right order entries = self.order_authors(entries, True, join_archive_read) return entries, randm, pagination # Orders all Authors in the list according to authors sort def order_authors(self, entries, list_return=False, combined=False): + # entries_copy = copy.deepcopy(entries) + # entries_copy =[] for entry in entries: if combined: sort_authors = entry.Books.author_sort.split('&') @@ -770,25 +790,30 @@ class CalibreDB(): sort_authors = entry.author_sort.split('&') ids = [a.id for a in entry.authors] authors_ordered = list() - error = False + # error = False for auth in sort_authors: results = self.session.query(Authors).filter(Authors.sort == auth.lstrip().strip()).all() - # ToDo: How to handle not found authorname + # ToDo: How to handle not found author name if not len(results): - error = True + log.error("Author {} not found to display name in right order".format(auth.strip())) + # error = True break for r in results: if r.id in ids: authors_ordered.append(r) - if not error: + ids.remove(r.id) + for author_id in ids: + result = self.session.query(Authors).filter(Authors.id == author_id).first() + authors_ordered.append(result) + + if list_return: if combined: entry.Books.authors = authors_ordered else: - entry.authors = authors_ordered - if list_return: - return entries - else: - return authors_ordered + entry.ordered_authors = 
authors_ordered + else: + return authors_ordered + return entries def get_typeahead(self, database, query, replace=('', ''), tag_filter=true()): query = query or '' @@ -802,36 +827,21 @@ class CalibreDB(): def check_exists_book(self, authr, title): self.session.connection().connection.connection.create_function("lower", 1, lcase) q = list() - authorterms = re.split(r'\s*&\s*', authr) - for authorterm in authorterms: - q.append(Books.authors.any(func.lower(Authors.name).ilike("%" + authorterm + "%"))) + author_terms = re.split(r'\s*&\s*', authr) + for author_term in author_terms: + q.append(Books.authors.any(func.lower(Authors.name).ilike("%" + author_term + "%"))) return self.session.query(Books) \ .filter(and_(Books.authors.any(and_(*q)), func.lower(Books.title).ilike("%" + title + "%"))).first() - def search_query(self, term, config_read_column, *join): + def search_query(self, term, config, *join): term.strip().lower() self.session.connection().connection.connection.create_function("lower", 1, lcase) q = list() - authorterms = re.split("[, ]+", term) - for authorterm in authorterms: - q.append(Books.authors.any(func.lower(Authors.name).ilike("%" + authorterm + "%"))) - if not config_read_column: - query = (self.session.query(Books, ub.ArchivedBook.is_archived, ub.ReadBook).select_from(Books) - .outerjoin(ub.ReadBook, and_(Books.id == ub.ReadBook.book_id, - int(current_user.id) == ub.ReadBook.user_id))) - else: - try: - read_column = cc_classes[config_read_column] - query = (self.session.query(Books, ub.ArchivedBook.is_archived, read_column.value).select_from(Books) - .outerjoin(read_column, read_column.book == Books.id)) - except (KeyError, AttributeError): - log.error("Custom Column No.%d is not existing in calibre database", config_read_column) - # Skip linking read column - query = self.session.query(Books, ub.ArchivedBook.is_archived, None) - query = query.outerjoin(ub.ArchivedBook, and_(Books.id == ub.ArchivedBook.book_id, - int(current_user.id) == 
ub.ArchivedBook.user_id)) - + author_terms = re.split("[, ]+", term) + for author_term in author_terms: + q.append(Books.authors.any(func.lower(Authors.name).ilike("%" + author_term + "%"))) + query = self.generate_linked_query(config.config_read_column, Books) if len(join) == 6: query = query.outerjoin(join[0], join[1]).outerjoin(join[2]).outerjoin(join[3], join[4]).outerjoin(join[5]) if len(join) == 3: @@ -840,20 +850,42 @@ class CalibreDB(): query = query.outerjoin(join[0], join[1]) elif len(join) == 1: query = query.outerjoin(join[0]) - return query.filter(self.common_filters(True)).filter( - or_(Books.tags.any(func.lower(Tags.name).ilike("%" + term + "%")), - Books.series.any(func.lower(Series.name).ilike("%" + term + "%")), - Books.authors.any(and_(*q)), - Books.publishers.any(func.lower(Publishers.name).ilike("%" + term + "%")), - func.lower(Books.title).ilike("%" + term + "%") - )) + + cc = self.get_cc_columns(config, filter_config_custom_read=True) + filter_expression = [Books.tags.any(func.lower(Tags.name).ilike("%" + term + "%")), + Books.series.any(func.lower(Series.name).ilike("%" + term + "%")), + Books.authors.any(and_(*q)), + Books.publishers.any(func.lower(Publishers.name).ilike("%" + term + "%")), + func.lower(Books.title).ilike("%" + term + "%")] + for c in cc: + if c.datatype not in ["datetime", "rating", "bool", "int", "float"]: + filter_expression.append( + getattr(Books, + 'custom_column_' + str(c.id)).any( + func.lower(cc_classes[c.id].value).ilike("%" + term + "%"))) + return query.filter(self.common_filters(True)).filter(or_(*filter_expression)) + + def get_cc_columns(self, config, filter_config_custom_read=False): + tmp_cc = self.session.query(CustomColumns).filter(CustomColumns.datatype.notin_(cc_exceptions)).all() + cc = [] + r = None + if config.config_columns_to_ignore: + r = re.compile(config.config_columns_to_ignore) + + for col in tmp_cc: + if filter_config_custom_read and config.config_read_column and config.config_read_column == 
col.id: + continue + if r and r.match(col.name): + continue + cc.append(col) + + return cc # read search results from calibre-database and return it (function is used for feed and simple search - def get_search_results(self, term, offset=None, order=None, limit=None, allow_show_archived=False, - config_read_column=False, *join): + def get_search_results(self, term, config, offset=None, order=None, limit=None, *join): order = order[0] if order else [Books.sort] pagination = None - result = self.search_query(term, config_read_column, *join).order_by(*order).all() + result = self.search_query(term, config, *join).order_by(*order).all() result_count = len(result) if offset != None and limit != None: offset = int(offset) @@ -870,28 +902,38 @@ class CalibreDB(): # Creates for all stored languages a translated speaking name in the array for the UI def speaking_language(self, languages=None, return_all_languages=False, with_count=False, reverse_order=False): - from . import get_locale - if not languages: - if with_count: + if with_count: + if not languages: languages = self.session.query(Languages, func.count('books_languages_link.book'))\ .join(books_languages_link).join(Books)\ .filter(self.common_filters(return_all_languages=return_all_languages)) \ .group_by(text('books_languages_link.lang_code')).all() - for lang in languages: - lang[0].name = isoLanguages.get_language_name(get_locale(), lang[0].lang_code) - return sorted(languages, key=lambda x: x[0].name, reverse=reverse_order) - else: + tags = list() + for lang in languages: + tag = Category(isoLanguages.get_language_name(get_locale(), lang[0].lang_code), lang[0].lang_code) + tags.append([tag, lang[1]]) + # Append all books without language to list + if not return_all_languages: + no_lang_count = (self.session.query(Books) + .outerjoin(books_languages_link).outerjoin(Languages) + .filter(Languages.lang_code == None) + .filter(self.common_filters()) + .count()) + if no_lang_count: + tags.append([Category(_("None"), 
"none"), no_lang_count]) + return sorted(tags, key=lambda x: x[0].name.lower(), reverse=reverse_order) + else: + if not languages: languages = self.session.query(Languages) \ .join(books_languages_link) \ .join(Books) \ .filter(self.common_filters(return_all_languages=return_all_languages)) \ .group_by(text('books_languages_link.lang_code')).all() - for lang in languages: - lang.name = isoLanguages.get_language_name(get_locale(), lang.lang_code) + for lang in languages: + lang.name = isoLanguages.get_language_name(get_locale(), lang.lang_code) return sorted(languages, key=lambda x: x.name, reverse=reverse_order) - def update_title_sort(self, config, conn=None): # user defined sort function for calibre databases (Series, etc.) def _title_sort(title): @@ -904,7 +946,10 @@ class CalibreDB(): return title.strip() conn = conn or self.session.connection().connection.connection - conn.create_function("title_sort", 1, _title_sort) + try: + conn.create_function("title_sort", 1, _title_sort) + except sqliteOperationalError: + pass @classmethod def dispose(cls): @@ -949,6 +994,25 @@ def lcase(s): try: return unidecode.unidecode(s.lower()) except Exception as ex: - log = logger.create() - log.debug_or_exception(ex) + _log = logger.create() + _log.error_or_exception(ex) return s.lower() + + +class Category: + name = None + id = None + count = None + rating = None + + def __init__(self, name, cat_id, rating=None): + self.name = name + self.id = cat_id + self.rating = rating + self.count = 1 + +'''class Count: + count = None + + def __init__(self, count): + self.count = count''' diff --git a/cps/dep_check.py b/cps/dep_check.py index 12436d1d..929185c2 100644 --- a/cps/dep_check.py +++ b/cps/dep_check.py @@ -1,14 +1,16 @@ import os import re +import sys +import json from .constants import BASE_DIR try: - from importlib_metadata import version + from importlib.metadata import version importlib = True ImportNotFound = BaseException except ImportError: importlib = False - + version = 
None if not importlib: try: @@ -20,6 +22,13 @@ if not importlib: def load_dependencys(optional=False): deps = list() + if getattr(sys, 'frozen', False): + pip_installed = os.path.join(BASE_DIR, ".pip_installed") + if os.path.exists(pip_installed): + with open(pip_installed) as f: + exe_deps = json.loads("".join(f.readlines())) + else: + return deps if importlib or pkgresources: if optional: req_path = os.path.join(BASE_DIR, "optional-requirements.txt") @@ -31,11 +40,14 @@ def load_dependencys(optional=False): if not line.startswith('#') and not line == '\n' and not line.startswith('git'): res = re.match(r'(.*?)([<=>\s]+)([\d\.]+),?\s?([<=>\s]+)?([\d\.]+)?', line.strip()) try: - if importlib: - dep_version = version(res.group(1)) + if getattr(sys, 'frozen', False): + dep_version = exe_deps[res.group(1).lower().replace('_','-')] else: - dep_version = pkg_resources.get_distribution(res.group(1)).version - except ImportNotFound: + if importlib: + dep_version = version(res.group(1)) + else: + dep_version = pkg_resources.get_distribution(res.group(1)).version + except (ImportNotFound, KeyError): if optional: continue dep_version = "not installed" diff --git a/cps/editbooks.py b/cps/editbooks.py old mode 100644 new mode 100755 index 8bbe8208..d3615050 --- a/cps/editbooks.py +++ b/cps/editbooks.py @@ -25,42 +25,31 @@ from datetime import datetime import json from shutil import copyfile from uuid import uuid4 -from markupsafe import escape +from markupsafe import escape # dependency of flask +from functools import wraps + try: from lxml.html.clean import clean_html except ImportError: - pass - -# Improve this to check if scholarly is available in a global way, like other pythonic libraries -try: - from scholarly import scholarly - have_scholar = True -except ImportError: - have_scholar = False + clean_html = None from flask import Blueprint, request, flash, redirect, url_for, abort, Markup, Response from flask_babel import gettext as _ +from flask_babel import lazy_gettext 
as N_ +from flask_babel import get_locale from flask_login import current_user, login_required from sqlalchemy.exc import OperationalError, IntegrityError -from sqlite3 import OperationalError as sqliteOperationalError + from . import constants, logger, isoLanguages, gdriveutils, uploader, helper, kobo_sync_status -from . import config, get_locale, ub, db -from . import calibre_db +from . import config, ub, db, calibre_db from .services.worker import WorkerThread from .tasks.upload import TaskUpload from .render_template import render_title_template from .usermanagement import login_required_if_no_ano from .kobo_sync_status import change_archived_books -try: - from functools import wraps -except ImportError: - pass # We're not using Python 3 - - - -editbook = Blueprint('editbook', __name__) +editbook = Blueprint('edit-book', __name__) log = logger.create() @@ -73,6 +62,7 @@ def upload_required(f): return inner + def edit_required(f): @wraps(f) def inner(*args, **kwargs): @@ -82,177 +72,693 @@ def edit_required(f): return inner -def search_objects_remove(db_book_object, db_type, input_elements): - del_elements = [] - for c_elements in db_book_object: - found = False - if db_type == 'languages': - type_elements = c_elements.lang_code - elif db_type == 'custom': - type_elements = c_elements.value - else: - type_elements = c_elements.name - for inp_element in input_elements: - if inp_element.lower() == type_elements.lower(): - # if inp_element == type_elements: - found = True - break - # if the element was not found in the new list, add it to remove list - if not found: - del_elements.append(c_elements) - return del_elements - -def search_objects_add(db_book_object, db_type, input_elements): - add_elements = [] - for inp_element in input_elements: - found = False - for c_elements in db_book_object: - if db_type == 'languages': - type_elements = c_elements.lang_code - elif db_type == 'custom': - type_elements = c_elements.value - else: - type_elements = c_elements.name 
- if inp_element == type_elements: - found = True - break - if not found: - add_elements.append(inp_element) - return add_elements - - -def remove_objects(db_book_object, db_session, del_elements): - changed = False - if len(del_elements) > 0: - for del_element in del_elements: - db_book_object.remove(del_element) - changed = True - if len(del_element.books) == 0: - db_session.delete(del_element) - return changed - -def add_objects(db_book_object, db_object, db_session, db_type, add_elements): - changed = False - if db_type == 'languages': - db_filter = db_object.lang_code - elif db_type == 'custom': - db_filter = db_object.value - else: - db_filter = db_object.name - for add_element in add_elements: - # check if a element with that name exists - db_element = db_session.query(db_object).filter(db_filter == add_element).first() - # if no element is found add it - # if new_element is None: - if db_type == 'author': - new_element = db_object(add_element, helper.get_sorted_author(add_element.replace('|', ',')), "") - elif db_type == 'series': - new_element = db_object(add_element, add_element) - elif db_type == 'custom': - new_element = db_object(value=add_element) - elif db_type == 'publisher': - new_element = db_object(add_element, None) - else: # db_type should be tag or language - new_element = db_object(add_element) - if db_element is None: - changed = True - db_session.add(new_element) - db_book_object.append(new_element) - else: - db_element = create_objects_for_addition(db_element, add_element, db_type) - changed = True - # add element to book - changed = True - db_book_object.append(db_element) - return changed - - -def create_objects_for_addition(db_element, add_element, db_type): - if db_type == 'custom': - if db_element.value != add_element: - db_element.value = add_element - elif db_type == 'languages': - if db_element.lang_code != add_element: - db_element.lang_code = add_element - elif db_type == 'series': - if db_element.name != add_element: - 
db_element.name = add_element - db_element.sort = add_element - elif db_type == 'author': - if db_element.name != add_element: - db_element.name = add_element - db_element.sort = helper.get_sorted_author(add_element.replace('|', ',')) - elif db_type == 'publisher': - if db_element.name != add_element: - db_element.name = add_element - db_element.sort = None - elif db_element.name != add_element: - db_element.name = add_element - return db_element - - -# Modifies different Database objects, first check if elements if elements have to be deleted, -# because they are no longer used, than check if elements have to be added to database -def modify_database_object(input_elements, db_book_object, db_object, db_session, db_type): - # passing input_elements not as a list may lead to undesired results - if not isinstance(input_elements, list): - raise TypeError(str(input_elements) + " should be passed as a list") - input_elements = [x for x in input_elements if x != ''] - # we have all input element (authors, series, tags) names now - # 1. search for elements to remove - del_elements = search_objects_remove(db_book_object, db_type, input_elements) - # 2. search for elements that need to be added - add_elements = search_objects_add(db_book_object, db_type, input_elements) - # if there are elements to remove, we remove them now - changed = remove_objects(db_book_object, db_session, del_elements) - # if there are elements to add, we add them now! - if len(add_elements) > 0: - changed |= add_objects(db_book_object, db_object, db_session, db_type, add_elements) - return changed - - -def modify_identifiers(input_identifiers, db_identifiers, db_session): - """Modify Identifiers to match input information. - input_identifiers is a list of read-to-persist Identifiers objects. 
- db_identifiers is a list of already persisted list of Identifiers objects.""" - changed = False - error = False - input_dict = dict([(identifier.type.lower(), identifier) for identifier in input_identifiers]) - if len(input_identifiers) != len(input_dict): - error = True - db_dict = dict([(identifier.type.lower(), identifier) for identifier in db_identifiers ]) - # delete db identifiers not present in input or modify them with input val - for identifier_type, identifier in db_dict.items(): - if identifier_type not in input_dict.keys(): - db_session.delete(identifier) - changed = True - else: - input_identifier = input_dict[identifier_type] - identifier.type = input_identifier.type - identifier.val = input_identifier.val - # add input identifiers not present in db - for identifier_type, identifier in input_dict.items(): - if identifier_type not in db_dict.keys(): - db_session.add(identifier) - changed = True - return changed, error - -@editbook.route("/ajax/delete/") +@editbook.route("/ajax/delete/", methods=["POST"]) @login_required def delete_book_from_details(book_id): return Response(delete_book_from_table(book_id, "", True), mimetype='application/json') -@editbook.route("/delete/", defaults={'book_format': ""}) -@editbook.route("/delete//") +@editbook.route("/delete/", defaults={'book_format': ""}, methods=["POST"]) +@editbook.route("/delete//", methods=["POST"]) @login_required def delete_book_ajax(book_id, book_format): return delete_book_from_table(book_id, book_format, False) +@editbook.route("/admin/book/", methods=['GET']) +@login_required_if_no_ano +@edit_required +def show_edit_book(book_id): + return render_edit_book(book_id) + + +@editbook.route("/admin/book/", methods=['POST']) +@login_required_if_no_ano +@edit_required +def edit_book(book_id): + modify_date = False + edit_error = False + + # create the function for sorting... 
+ calibre_db.update_title_sort(config) + + book = calibre_db.get_filtered_book(book_id, allow_show_archived=True) + # Book not found + if not book: + flash(_(u"Oops! Selected book title is unavailable. File does not exist or is not accessible"), + category="error") + return redirect(url_for("web.index")) + + to_save = request.form.to_dict() + + try: + # Update folder of book on local disk + edited_books_id = None + title_author_error = None + # handle book title change + title_change = handle_title_on_edit(book, to_save["book_title"]) + # handle book author change + input_authors, author_change, renamed = handle_author_on_edit(book, to_save["author_name"]) + if author_change or title_change: + edited_books_id = book.id + modify_date = True + title_author_error = helper.update_dir_structure(edited_books_id, + config.config_calibre_dir, + input_authors[0], + renamed_author=renamed) + if title_author_error: + flash(title_author_error, category="error") + calibre_db.session.rollback() + book = calibre_db.get_filtered_book(book_id, allow_show_archived=True) + + # handle upload other formats from local disk + meta = upload_single_file(request, book, book_id) + # only merge metadata if file was uploaded and no error occurred (meta equals not false or none) + if meta: + merge_metadata(to_save, meta) + # handle upload covers from local disk + cover_upload_success = upload_cover(request, book) + if cover_upload_success: + book.has_cover = 1 + modify_date = True + + # upload new covers or new file formats to google drive + if config.config_use_google_drive: + gdriveutils.updateGdriveCalibreFromLocal() + + if to_save.get("cover_url", None): + if not current_user.role_upload(): + edit_error = True + flash(_(u"User has no rights to upload cover"), category="error") + if to_save["cover_url"].endswith('/static/generic_cover.jpg'): + book.has_cover = 0 + else: + result, error = helper.save_cover_from_url(to_save["cover_url"].strip(), book.path) + if result is True: + book.has_cover 
= 1 + modify_date = True + helper.replace_cover_thumbnail_cache(book.id) + else: + flash(error, category="error") + + # Add default series_index to book + modify_date |= edit_book_series_index(to_save["series_index"], book) + # Handle book comments/description + modify_date |= edit_book_comments(Markup(to_save['description']).unescape(), book) + # Handle identifiers + input_identifiers = identifier_list(to_save, book) + modification, warning = modify_identifiers(input_identifiers, book.identifiers, calibre_db.session) + if warning: + flash(_("Identifiers are not Case Sensitive, Overwriting Old Identifier"), category="warning") + modify_date |= modification + # Handle book tags + modify_date |= edit_book_tags(to_save['tags'], book) + # Handle book series + modify_date |= edit_book_series(to_save["series"], book) + # handle book publisher + modify_date |= edit_book_publisher(to_save['publisher'], book) + # handle book languages + try: + modify_date |= edit_book_languages(to_save['languages'], book) + except ValueError as e: + flash(str(e), category="error") + edit_error = True + # handle book ratings + modify_date |= edit_book_ratings(to_save, book) + # handle cc data + modify_date |= edit_all_cc_data(book_id, book, to_save) + + if to_save.get("pubdate", None): + try: + book.pubdate = datetime.strptime(to_save["pubdate"], "%Y-%m-%d") + except ValueError as e: + book.pubdate = db.Books.DEFAULT_PUBDATE + flash(str(e), category="error") + edit_error = True + else: + book.pubdate = db.Books.DEFAULT_PUBDATE + + if modify_date: + book.last_modified = datetime.utcnow() + kobo_sync_status.remove_synced_book(edited_books_id, all=True) + + calibre_db.session.merge(book) + calibre_db.session.commit() + if config.config_use_google_drive: + gdriveutils.updateGdriveCalibreFromLocal() + if meta is not False \ + and edit_error is not True \ + and title_author_error is not True \ + and cover_upload_success is not False: + flash(_("Metadata successfully updated"), category="success") 
+ if "detail_view" in to_save: + return redirect(url_for('web.show_book', book_id=book.id)) + else: + return render_edit_book(book_id) + except ValueError as e: + log.error_or_exception("Error: {}".format(e)) + calibre_db.session.rollback() + flash(str(e), category="error") + return redirect(url_for('web.show_book', book_id=book.id)) + except (OperationalError, IntegrityError) as e: + log.error_or_exception("Database error: {}".format(e)) + calibre_db.session.rollback() + flash(_(u"Database error: %(error)s.", error=e.orig), category="error") + return redirect(url_for('web.show_book', book_id=book.id)) + except Exception as ex: + log.error_or_exception(ex) + calibre_db.session.rollback() + flash(_("Error editing book: {}".format(ex)), category="error") + return redirect(url_for('web.show_book', book_id=book.id)) + + +@editbook.route("/upload", methods=["POST"]) +@login_required_if_no_ano +@upload_required +def upload(): + if not config.config_uploading: + abort(404) + if request.method == 'POST' and 'btn-upload' in request.files: + for requested_file in request.files.getlist("btn-upload"): + try: + modify_date = False + # create the function for sorting... 
+ calibre_db.update_title_sort(config) + calibre_db.session.connection().connection.connection.create_function('uuid4', 0, lambda: str(uuid4())) + + meta, error = file_handling_on_upload(requested_file) + if error: + return error + + db_book, input_authors, title_dir, renamed_authors = create_book_on_upload(modify_date, meta) + + # Comments need book id therefore only possible after flush + modify_date |= edit_book_comments(Markup(meta.description).unescape(), db_book) + + book_id = db_book.id + title = db_book.title + if config.config_use_google_drive: + helper.upload_new_file_gdrive(book_id, + input_authors[0], + renamed_authors, + title, + title_dir, + meta.file_path, + meta.extension.lower()) + else: + error = helper.update_dir_structure(book_id, + config.config_calibre_dir, + input_authors[0], + meta.file_path, + title_dir + meta.extension.lower(), + renamed_author=renamed_authors) + + move_coverfile(meta, db_book) + + # save data to database, reread data + calibre_db.session.commit() + + if config.config_use_google_drive: + gdriveutils.updateGdriveCalibreFromLocal() + if error: + flash(error, category="error") + link = '{}'.format(url_for('web.show_book', book_id=book_id), escape(title)) + upload_text = N_(u"File %(file)s uploaded", file=link) + WorkerThread.add(current_user.name, TaskUpload(upload_text, escape(title))) + helper.add_book_to_thumbnail_cache(book_id) + + if len(request.files.getlist("btn-upload")) < 2: + if current_user.role_edit() or current_user.role_admin(): + resp = {"location": url_for('edit-book.show_edit_book', book_id=book_id)} + return Response(json.dumps(resp), mimetype='application/json') + else: + resp = {"location": url_for('web.show_book', book_id=book_id)} + return Response(json.dumps(resp), mimetype='application/json') + except (OperationalError, IntegrityError) as e: + calibre_db.session.rollback() + log.error_or_exception("Database error: {}".format(e)) + flash(_(u"Database error: %(error)s.", error=e.orig), category="error") 
+ return Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json') + + +@editbook.route("/admin/book/convert/", methods=['POST']) +@login_required_if_no_ano +@edit_required +def convert_bookformat(book_id): + # check to see if we have form fields to work with - if not send user back + book_format_from = request.form.get('book_format_from', None) + book_format_to = request.form.get('book_format_to', None) + + if (book_format_from is None) or (book_format_to is None): + flash(_(u"Source or destination format for conversion missing"), category="error") + return redirect(url_for('edit-book.show_edit_book', book_id=book_id)) + + log.info('converting: book id: %s from: %s to: %s', book_id, book_format_from, book_format_to) + rtn = helper.convert_book_format(book_id, config.config_calibre_dir, book_format_from.upper(), + book_format_to.upper(), current_user.name) + + if rtn is None: + flash(_(u"Book successfully queued for converting to %(book_format)s", + book_format=book_format_to), + category="success") + else: + flash(_(u"There was an error converting this book: %(res)s", res=rtn), category="error") + return redirect(url_for('edit-book.show_edit_book', book_id=book_id)) + + +@editbook.route("/ajax/getcustomenum/") +@login_required +def table_get_custom_enum(c_id): + ret = list() + cc = (calibre_db.session.query(db.CustomColumns) + .filter(db.CustomColumns.id == c_id) + .filter(db.CustomColumns.datatype.notin_(db.cc_exceptions)).one_or_none()) + ret.append({'value': "", 'text': ""}) + for idx, en in enumerate(cc.get_display_dict()['enum_values']): + ret.append({'value': en, 'text': en}) + return json.dumps(ret) + + +@editbook.route("/ajax/editbooks/", methods=['POST']) +@login_required_if_no_ano +@edit_required +def edit_list_book(param): + vals = request.form.to_dict() + book = calibre_db.get_book(vals['pk']) + sort_param = "" + ret = "" + try: + if param == 'series_index': + edit_book_series_index(vals['value'], book) + ret = 
Response(json.dumps({'success': True, 'newValue': book.series_index}), mimetype='application/json') + elif param == 'tags': + edit_book_tags(vals['value'], book) + ret = Response(json.dumps({'success': True, 'newValue': ', '.join([tag.name for tag in book.tags])}), + mimetype='application/json') + elif param == 'series': + edit_book_series(vals['value'], book) + ret = Response(json.dumps({'success': True, 'newValue': ', '.join([serie.name for serie in book.series])}), + mimetype='application/json') + elif param == 'publishers': + edit_book_publisher(vals['value'], book) + ret = Response(json.dumps({'success': True, + 'newValue': ', '.join([publisher.name for publisher in book.publishers])}), + mimetype='application/json') + elif param == 'languages': + invalid = list() + edit_book_languages(vals['value'], book, invalid=invalid) + if invalid: + ret = Response(json.dumps({'success': False, + 'msg': 'Invalid languages in request: {}'.format(','.join(invalid))}), + mimetype='application/json') + else: + lang_names = list() + for lang in book.languages: + lang_names.append(isoLanguages.get_language_name(get_locale(), lang.lang_code)) + ret = Response(json.dumps({'success': True, 'newValue': ', '.join(lang_names)}), + mimetype='application/json') + elif param == 'author_sort': + book.author_sort = vals['value'] + ret = Response(json.dumps({'success': True, 'newValue': book.author_sort}), + mimetype='application/json') + elif param == 'title': + sort_param = book.sort + if handle_title_on_edit(book, vals.get('value', "")): + rename_error = helper.update_dir_structure(book.id, config.config_calibre_dir) + if not rename_error: + ret = Response(json.dumps({'success': True, 'newValue': book.title}), + mimetype='application/json') + else: + ret = Response(json.dumps({'success': False, + 'msg': rename_error}), + mimetype='application/json') + elif param == 'sort': + book.sort = vals['value'] + ret = Response(json.dumps({'success': True, 'newValue': book.sort}), + 
mimetype='application/json') + elif param == 'comments': + edit_book_comments(vals['value'], book) + ret = Response(json.dumps({'success': True, 'newValue': book.comments[0].text}), + mimetype='application/json') + elif param == 'authors': + input_authors, __, renamed = handle_author_on_edit(book, vals['value'], vals.get('checkA', None) == "true") + rename_error = helper.update_dir_structure(book.id, config.config_calibre_dir, input_authors[0], + renamed_author=renamed) + if not rename_error: + ret = Response(json.dumps({ + 'success': True, + 'newValue': ' & '.join([author.replace('|', ',') for author in input_authors])}), + mimetype='application/json') + else: + ret = Response(json.dumps({'success': False, + 'msg': rename_error}), + mimetype='application/json') + elif param == 'is_archived': + is_archived = change_archived_books(book.id, vals['value'] == "True", + message="Book {} archive bit set to: {}".format(book.id, vals['value'])) + if is_archived: + kobo_sync_status.remove_synced_book(book.id) + return "" + elif param == 'read_status': + ret = helper.edit_book_read_status(book.id, vals['value'] == "True") + if ret: + return ret, 400 + elif param.startswith("custom_column_"): + new_val = dict() + new_val[param] = vals['value'] + edit_single_cc_data(book.id, book, param[14:], new_val) + # ToDo: Very hacky find better solution + if vals['value'] in ["True", "False"]: + ret = "" + else: + ret = Response(json.dumps({'success': True, 'newValue': vals['value']}), + mimetype='application/json') + else: + return _("Parameter not found"), 400 + book.last_modified = datetime.utcnow() + + calibre_db.session.commit() + # revert change for sort if automatic fields link is deactivated + if param == 'title' and vals.get('checkT') == "false": + book.sort = sort_param + calibre_db.session.commit() + except (OperationalError, IntegrityError) as e: + calibre_db.session.rollback() + log.error_or_exception("Database error: {}".format(e)) + ret = Response(json.dumps({'success': 
False, + 'msg': 'Database error: {}'.format(e.orig)}), + mimetype='application/json') + return ret + + +@editbook.route("/ajax/sort_value//") +@login_required +def get_sorted_entry(field, bookid): + if field in ['title', 'authors', 'sort', 'author_sort']: + book = calibre_db.get_filtered_book(bookid) + if book: + if field == 'title': + return json.dumps({'sort': book.sort}) + elif field == 'authors': + return json.dumps({'author_sort': book.author_sort}) + if field == 'sort': + return json.dumps({'sort': book.title}) + if field == 'author_sort': + return json.dumps({'author_sort': book.author}) + return "" + + +@editbook.route("/ajax/simulatemerge", methods=['POST']) +@login_required +@edit_required +def simulate_merge_list_book(): + vals = request.get_json().get('Merge_books') + if vals: + to_book = calibre_db.get_book(vals[0]).title + vals.pop(0) + if to_book: + from_book = [] + for book_id in vals: + from_book.append(calibre_db.get_book(book_id).title) + return json.dumps({'to': to_book, 'from': from_book}) + return "" + + +@editbook.route("/ajax/mergebooks", methods=['POST']) +@login_required +@edit_required +def merge_list_book(): + vals = request.get_json().get('Merge_books') + to_file = list() + if vals: + # load all formats from target book + to_book = calibre_db.get_book(vals[0]) + vals.pop(0) + if to_book: + for file in to_book.data: + to_file.append(file.format) + to_name = helper.get_valid_filename(to_book.title, + chars=96) + ' - ' + helper.get_valid_filename(to_book.authors[0].name, + chars=96) + for book_id in vals: + from_book = calibre_db.get_book(book_id) + if from_book: + for element in from_book.data: + if element.format not in to_file: + # create new data entry with: book_id, book_format, uncompressed_size, name + filepath_new = os.path.normpath(os.path.join(config.config_calibre_dir, + to_book.path, + to_name + "." 
+ element.format.lower())) + filepath_old = os.path.normpath(os.path.join(config.config_calibre_dir, + from_book.path, + element.name + "." + element.format.lower())) + copyfile(filepath_old, filepath_new) + to_book.data.append(db.Data(to_book.id, + element.format, + element.uncompressed_size, + to_name)) + delete_book_from_table(from_book.id, "", True) + return json.dumps({'success': True}) + return "" + + +@editbook.route("/ajax/xchange", methods=['POST']) +@login_required +@edit_required +def table_xchange_author_title(): + vals = request.get_json().get('xchange') + edited_books_id = False + if vals: + for val in vals: + modify_date = False + book = calibre_db.get_book(val) + authors = book.title + book.authors = calibre_db.order_authors([book]) + author_names = [] + for authr in book.authors: + author_names.append(authr.name.replace('|', ',')) + + title_change = handle_title_on_edit(book, " ".join(author_names)) + input_authors, author_change, renamed = handle_author_on_edit(book, authors) + if author_change or title_change: + edited_books_id = book.id + modify_date = True + + if config.config_use_google_drive: + gdriveutils.updateGdriveCalibreFromLocal() + + if edited_books_id: + # toDo: Handle error + edit_error = helper.update_dir_structure(edited_books_id, config.config_calibre_dir, input_authors[0], + renamed_author=renamed) + if modify_date: + book.last_modified = datetime.utcnow() + try: + calibre_db.session.commit() + except (OperationalError, IntegrityError) as e: + calibre_db.session.rollback() + log.error_or_exception("Database error: %s", e) + return json.dumps({'success': False}) + + if config.config_use_google_drive: + gdriveutils.updateGdriveCalibreFromLocal() + return json.dumps({'success': True}) + return "" + + +def merge_metadata(to_save, meta): + if to_save.get('author_name', "") == _(u'Unknown'): + to_save['author_name'] = '' + if to_save.get('book_title', "") == _(u'Unknown'): + to_save['book_title'] = '' + for s_field, m_field in [ + 
('tags', 'tags'), ('author_name', 'author'), ('series', 'series'), + ('series_index', 'series_id'), ('languages', 'languages'), + ('book_title', 'title')]: + to_save[s_field] = to_save[s_field] or getattr(meta, m_field, '') + to_save["description"] = to_save["description"] or Markup( + getattr(meta, 'description', '')).unescape() + + +def identifier_list(to_save, book): + """Generate a list of Identifiers from form information""" + id_type_prefix = 'identifier-type-' + id_val_prefix = 'identifier-val-' + result = [] + for type_key, type_value in to_save.items(): + if not type_key.startswith(id_type_prefix): + continue + val_key = id_val_prefix + type_key[len(id_type_prefix):] + if val_key not in to_save.keys(): + continue + result.append(db.Identifiers(to_save[val_key], type_value, book.id)) + return result + + +def prepare_authors(authr): + # handle authors + input_authors = authr.split('&') + # handle_authors(input_authors) + input_authors = list(map(lambda it: it.strip().replace(',', '|'), input_authors)) + # Remove duplicates in authors list + input_authors = helper.uniq(input_authors) + + # we have all author names now + if input_authors == ['']: + input_authors = [_(u'Unknown')] # prevent empty Author + + renamed = list() + for in_aut in input_authors: + renamed_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == in_aut).first() + if renamed_author and in_aut != renamed_author.name: + renamed.append(renamed_author.name) + all_books = calibre_db.session.query(db.Books) \ + .filter(db.Books.authors.any(db.Authors.name == renamed_author.name)).all() + sorted_renamed_author = helper.get_sorted_author(renamed_author.name) + sorted_old_author = helper.get_sorted_author(in_aut) + for one_book in all_books: + one_book.author_sort = one_book.author_sort.replace(sorted_renamed_author, sorted_old_author) + return input_authors, renamed + + +def prepare_authors_on_upload(title, authr): + if title != _(u'Unknown') and authr != _(u'Unknown'): + entry = 
calibre_db.check_exists_book(authr, title) + if entry: + log.info("Uploaded book probably exists in library") + flash(_(u"Uploaded book probably exists in the library, consider to change before upload new: ") + + Markup(render_title_template('book_exists_flash.html', entry=entry)), category="warning") + + input_authors, renamed = prepare_authors(authr) + + sort_authors_list = list() + db_author = None + for inp in input_authors: + stored_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == inp).first() + if not stored_author: + if not db_author: + db_author = db.Authors(inp, helper.get_sorted_author(inp), "") + calibre_db.session.add(db_author) + calibre_db.session.commit() + sort_author = helper.get_sorted_author(inp) + else: + if not db_author: + db_author = stored_author + sort_author = stored_author.sort + sort_authors_list.append(sort_author) + sort_authors = ' & '.join(sort_authors_list) + return sort_authors, input_authors, db_author, renamed + + +def create_book_on_upload(modify_date, meta): + title = meta.title + authr = meta.author + sort_authors, input_authors, db_author, renamed_authors = prepare_authors_on_upload(title, authr) + + title_dir = helper.get_valid_filename(title, chars=96) + author_dir = helper.get_valid_filename(db_author.name, chars=96) + + # combine path and normalize path from Windows systems + path = os.path.join(author_dir, title_dir).replace('\\', '/') + + try: + pubdate = datetime.strptime(meta.pubdate[:10], "%Y-%m-%d") + except ValueError: + pubdate = datetime(101, 1, 1) + + # Calibre adds books with utc as timezone + db_book = db.Books(title, "", sort_authors, datetime.utcnow(), pubdate, + '1', datetime.utcnow(), path, meta.cover, db_author, [], "") + + modify_date |= modify_database_object(input_authors, db_book.authors, db.Authors, calibre_db.session, + 'author') + + # Add series_index to book + modify_date |= edit_book_series_index(meta.series_id, db_book) + + # add languages + invalid = [] + modify_date |= 
edit_book_languages(meta.languages, db_book, upload_mode=True, invalid=invalid) + if invalid: + for lang in invalid: + flash(_(u"'%(langname)s' is not a valid language", langname=lang), category="warning") + + # handle tags + modify_date |= edit_book_tags(meta.tags, db_book) + + # handle publisher + modify_date |= edit_book_publisher(meta.publisher, db_book) + + # handle series + modify_date |= edit_book_series(meta.series, db_book) + + # Add file to book + file_size = os.path.getsize(meta.file_path) + db_data = db.Data(db_book, meta.extension.upper()[1:], file_size, title_dir) + db_book.data.append(db_data) + calibre_db.session.add(db_book) + + # flush content, get db_book.id available + calibre_db.session.flush() + + # Handle identifiers now that db_book.id is available + identifier_list = [] + for type_key, type_value in meta.identifiers: + identifier_list.append(db.Identifiers(type_value, type_key, db_book.id)) + modification, warning = modify_identifiers(identifier_list, db_book.identifiers, calibre_db.session) + if warning: + flash(_("Identifiers are not Case Sensitive, Overwriting Old Identifier"), category="warning") + modify_date |= modification + + return db_book, input_authors, title_dir, renamed_authors + + +def file_handling_on_upload(requested_file): + # check if file extension is correct + if '.' 
in requested_file.filename: + file_ext = requested_file.filename.rsplit('.', 1)[-1].lower() + if file_ext not in constants.EXTENSIONS_UPLOAD and '' not in constants.EXTENSIONS_UPLOAD: + flash( + _("File extension '%(ext)s' is not allowed to be uploaded to this server", + ext=file_ext), category="error") + return None, Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json') + else: + flash(_('File to be uploaded must have an extension'), category="error") + return None, Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json') + + # extract metadata from file + try: + meta = uploader.upload(requested_file, config.config_rarfile_location) + except (IOError, OSError): + log.error("File %s could not saved to temp dir", requested_file.filename) + flash(_(u"File %(filename)s could not saved to temp dir", + filename=requested_file.filename), category="error") + return None, Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json') + return meta, None + + +def move_coverfile(meta, db_book): + # move cover to final directory, including book id + if meta.cover: + cover_file = meta.cover + else: + cover_file = os.path.join(constants.STATIC_DIR, 'generic_cover.jpg') + new_cover_path = os.path.join(config.config_calibre_dir, db_book.path) + try: + os.makedirs(new_cover_path, exist_ok=True) + copyfile(cover_file, os.path.join(new_cover_path, "cover.jpg")) + if meta.cover: + os.unlink(meta.cover) + except OSError as e: + log.error("Failed to move cover file %s: %s", new_cover_path, e) + flash(_(u"Failed to Move Cover File %(file)s: %(error)s", file=new_cover_path, + error=e), + category="error") + + def delete_whole_book(book_id, book): - # delete book from Shelfs, Downloads, Read list + # delete book from shelves, Downloads, Read list ub.session.query(ub.BookShelf).filter(ub.BookShelf.book_id == book_id).delete() ub.session.query(ub.ReadBook).filter(ub.ReadBook.book_id == book_id).delete() 
ub.delete_download(book_id) @@ -266,8 +772,8 @@ def delete_whole_book(book_id, book): modify_database_object([u''], book.languages, db.Languages, calibre_db.session, 'languages') modify_database_object([u''], book.publishers, db.Publishers, calibre_db.session, 'publishers') - cc = calibre_db.session.query(db.Custom_Columns). \ - filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all() + cc = calibre_db.session.query(db.CustomColumns). \ + filter(db.CustomColumns.datatype.notin_(db.cc_exceptions)).all() for c in cc: cc_string = "custom_column_" + str(c.id) if not c.is_multiple: @@ -297,18 +803,18 @@ def delete_whole_book(book_id, book): calibre_db.session.query(db.Books).filter(db.Books.id == book_id).delete() -def render_delete_book_result(book_format, jsonResponse, warning, book_id): +def render_delete_book_result(book_format, json_response, warning, book_id): if book_format: - if jsonResponse: - return json.dumps([warning, {"location": url_for("editbook.edit_book", book_id=book_id), + if json_response: + return json.dumps([warning, {"location": url_for("edit-book.show_edit_book", book_id=book_id), "type": "success", "format": book_format, "message": _('Book Format Successfully Deleted')}]) else: flash(_('Book Format Successfully Deleted'), category="success") - return redirect(url_for('editbook.edit_book', book_id=book_id)) + return redirect(url_for('edit-book.show_edit_book', book_id=book_id)) else: - if jsonResponse: + if json_response: return json.dumps([warning, {"location": url_for('web.index'), "type": "success", "format": book_format, @@ -318,7 +824,7 @@ def render_delete_book_result(book_format, jsonResponse, warning, book_id): return redirect(url_for('web.index')) -def delete_book_from_table(book_id, book_format, jsonResponse): +def delete_book_from_table(book_id, book_format, json_response): warning = {} if current_user.role_delete_books(): book = calibre_db.get_book(book_id) @@ -326,20 +832,20 @@ def delete_book_from_table(book_id, 
book_format, jsonResponse): try: result, error = helper.delete_book(book, config.config_calibre_dir, book_format=book_format.upper()) if not result: - if jsonResponse: - return json.dumps([{"location": url_for("editbook.edit_book", book_id=book_id), - "type": "danger", - "format": "", - "message": error}]) + if json_response: + return json.dumps([{"location": url_for("edit-book.show_edit_book", book_id=book_id), + "type": "danger", + "format": "", + "message": error}]) else: flash(error, category="error") - return redirect(url_for('editbook.edit_book', book_id=book_id)) + return redirect(url_for('edit-book.show_edit_book', book_id=book_id)) if error: - if jsonResponse: - warning = {"location": url_for("editbook.edit_book", book_id=book_id), - "type": "warning", - "format": "", - "message": error} + if json_response: + warning = {"location": url_for("edit-book.show_edit_book", book_id=book_id), + "type": "warning", + "format": "", + "message": error} else: flash(error, category="warning") if not book_format: @@ -347,30 +853,42 @@ def delete_book_from_table(book_id, book_format, jsonResponse): else: calibre_db.session.query(db.Data).filter(db.Data.book == book.id).\ filter(db.Data.format == book_format).delete() + if book_format.upper() in ['KEPUB', 'EPUB', 'EPUB3']: + kobo_sync_status.remove_synced_book(book.id, True) calibre_db.session.commit() except Exception as ex: - log.debug_or_exception(ex) + log.error_or_exception(ex) calibre_db.session.rollback() - if jsonResponse: - return json.dumps([{"location": url_for("editbook.edit_book", book_id=book_id), + if json_response: + return json.dumps([{"location": url_for("edit-book.show_edit_book", book_id=book_id), "type": "danger", "format": "", "message": ex}]) else: flash(str(ex), category="error") - return redirect(url_for('editbook.edit_book', book_id=book_id)) + return redirect(url_for('edit-book.show_edit_book', book_id=book_id)) else: # book not found log.error('Book with id "%s" could not be deleted: not found', 
book_id) - return render_delete_book_result(book_format, jsonResponse, warning, book_id) + return render_delete_book_result(book_format, json_response, warning, book_id) + message = _("You are missing permissions to delete books") + if json_response: + return json.dumps({"location": url_for("edit-book.show_edit_book", book_id=book_id), + "type": "danger", + "format": "", + "message": message}) + else: + flash(message, category="error") + return redirect(url_for('edit-book.show_edit_book', book_id=book_id)) def render_edit_book(book_id): - cc = calibre_db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all() + cc = calibre_db.session.query(db.CustomColumns).filter(db.CustomColumns.datatype.notin_(db.cc_exceptions)).all() book = calibre_db.get_filtered_book(book_id, allow_show_archived=True) if not book: - flash(_(u"Oops! Selected book title is unavailable. File does not exist or is not accessible"), category="error") + flash(_(u"Oops! Selected book title is unavailable. 
File does not exist or is not accessible"), + category="error") return redirect(url_for("web.index")) for lang in book.languages: @@ -382,10 +900,10 @@ def render_edit_book(book_id): for authr in book.authors: author_names.append(authr.name.replace('|', ',')) - # Option for showing convertbook button - valid_source_formats=list() + # Option for showing convert_book button + valid_source_formats = list() allowed_conversion_formats = list() - kepub_possible=None + kepub_possible = None if config.config_converterpath: for file in book.data: if file.format.lower() in constants.EXTENSIONS_CONVERT_FROM: @@ -412,18 +930,18 @@ def render_edit_book(book_id): def edit_book_ratings(to_save, book): changed = False - if to_save["rating"].strip(): + if to_save.get("rating", "").strip(): old_rating = False if len(book.ratings) > 0: old_rating = book.ratings[0].rating - ratingx2 = int(float(to_save["rating"]) * 2) - if ratingx2 != old_rating: + rating_x2 = int(float(to_save.get("rating", "")) * 2) + if rating_x2 != old_rating: changed = True - is_rating = calibre_db.session.query(db.Ratings).filter(db.Ratings.rating == ratingx2).first() + is_rating = calibre_db.session.query(db.Ratings).filter(db.Ratings.rating == rating_x2).first() if is_rating: book.ratings.append(is_rating) else: - new_rating = db.Ratings(rating=ratingx2) + new_rating = db.Ratings(rating=rating_x2) book.ratings.append(new_rating) if old_rating: book.ratings.remove(book.ratings[0]) @@ -433,6 +951,7 @@ def edit_book_ratings(to_save, book): changed = True return changed + def edit_book_tags(tags, book): input_tags = tags.split(',') input_tags = list(map(lambda it: it.strip(), input_tags)) @@ -449,48 +968,48 @@ def edit_book_series(series, book): def edit_book_series_index(series_index, book): # Add default series_index to book - modif_date = False + modify_date = False series_index = series_index or '1' if not series_index.replace('.', '', 1).isdigit(): flash(_("%(seriesindex)s is not a valid number, skipping", 
seriesindex=series_index), category="warning") return False if str(book.series_index) != series_index: book.series_index = series_index - modif_date = True - return modif_date + modify_date = True + return modify_date # Handle book comments/description def edit_book_comments(comments, book): - modif_date = False + modify_date = False if comments: comments = clean_html(comments) if len(book.comments): if book.comments[0].text != comments: book.comments[0].text = comments - modif_date = True + modify_date = True else: if comments: - book.comments.append(db.Comments(text=comments, book=book.id)) - modif_date = True - return modif_date + book.comments.append(db.Comments(comment=comments, book=book.id)) + modify_date = True + return modify_date -def edit_book_languages(languages, book, upload=False, invalid=None): +def edit_book_languages(languages, book, upload_mode=False, invalid=None): input_languages = languages.split(',') unknown_languages = [] - if not upload: + if not upload_mode: input_l = isoLanguages.get_language_codes(get_locale(), input_languages, unknown_languages) else: input_l = isoLanguages.get_valid_language_codes(get_locale(), input_languages, unknown_languages) - for l in unknown_languages: - log.error("'%s' is not a valid language", l) + for lang in unknown_languages: + log.error("'%s' is not a valid language", lang) if isinstance(invalid, list): - invalid.append(l) + invalid.append(lang) else: - raise ValueError(_(u"'%(langname)s' is not a valid language", langname=l)) + raise ValueError(_(u"'%(langname)s' is not a valid language", langname=lang)) # ToDo: Not working correct - if upload and len(input_l) == 1: + if upload_mode and len(input_l) == 1: # If the language of the file is excluded from the users view, it's not imported, to allow the user to view # the book it's language is set to the filter language if input_l[0] != current_user.filter_language() and current_user.filter_language() != "all": @@ -574,17 +1093,20 @@ def 
edit_cc_data_string(book, c, to_save, cc_db_value, cc_string): getattr(book, cc_string).append(new_cc) return changed, to_save + def edit_single_cc_data(book_id, book, column_id, to_save): - cc = (calibre_db.session.query(db.Custom_Columns) - .filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)) - .filter(db.Custom_Columns.id == column_id) + cc = (calibre_db.session.query(db.CustomColumns) + .filter(db.CustomColumns.datatype.notin_(db.cc_exceptions)) + .filter(db.CustomColumns.id == column_id) .all()) return edit_cc_data(book_id, book, to_save, cc) + def edit_all_cc_data(book_id, book, to_save): - cc = calibre_db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all() + cc = calibre_db.session.query(db.CustomColumns).filter(db.CustomColumns.datatype.notin_(db.cc_exceptions)).all() return edit_cc_data(book_id, book, to_save, cc) + def edit_cc_data(book_id, book, to_save, cc): changed = False for c in cc: @@ -617,23 +1139,27 @@ def edit_cc_data(book_id, book, to_save, cc): 'custom') return changed -def upload_single_file(request, book, book_id): + +# returns None if no file is uploaded +# returns False if an error occurs, in all other cases the ebook metadata is returned +def upload_single_file(file_request, book, book_id): # Check and handle Uploaded file - if 'btn-upload-format' in request.files: - requested_file = request.files['btn-upload-format'] + requested_file = file_request.files.get('btn-upload-format', None) + if requested_file: # check for empty request if requested_file.filename != '': if not current_user.role_upload(): - abort(403) + flash(_(u"User has no rights to upload additional file formats"), category="error") + return False if '.' 
in requested_file.filename: file_ext = requested_file.filename.rsplit('.', 1)[-1].lower() if file_ext not in constants.EXTENSIONS_UPLOAD and '' not in constants.EXTENSIONS_UPLOAD: flash(_("File extension '%(ext)s' is not allowed to be uploaded to this server", ext=file_ext), category="error") - return redirect(url_for('web.show_book', book_id=book.id)) + return False else: flash(_('File to be uploaded must have an extension'), category="error") - return redirect(url_for('web.show_book', book_id=book.id)) + return False file_name = book.path.rsplit('/', 1)[-1] filepath = os.path.normpath(os.path.join(config.config_calibre_dir, book.path)) @@ -645,12 +1171,12 @@ def upload_single_file(request, book, book_id): os.makedirs(filepath) except OSError: flash(_(u"Failed to create path %(path)s (Permission denied).", path=filepath), category="error") - return redirect(url_for('web.show_book', book_id=book.id)) + return False try: requested_file.save(saved_filename) except OSError: flash(_(u"Failed to store file %(file)s.", file=saved_filename), category="error") - return redirect(url_for('web.show_book', book_id=book.id)) + return False file_size = os.path.getsize(saved_filename) is_format = calibre_db.get_book_format(book_id, file_ext.upper()) @@ -666,29 +1192,32 @@ def upload_single_file(request, book, book_id): calibre_db.update_title_sort(config) except (OperationalError, IntegrityError) as e: calibre_db.session.rollback() - log.error('Database error: %s', e) - flash(_(u"Database error: %(error)s.", error=e), category="error") - return redirect(url_for('web.show_book', book_id=book.id)) + log.error_or_exception("Database error: {}".format(e)) + flash(_(u"Database error: %(error)s.", error=e.orig), category="error") + return False # return redirect(url_for('web.show_book', book_id=book.id)) # Queue uploader info link = '{}'.format(url_for('web.show_book', book_id=book.id), escape(book.title)) - uploadText=_(u"File format %(ext)s added to %(book)s", ext=file_ext.upper(), 
book=link) - WorkerThread.add(current_user.name, TaskUpload(uploadText)) + upload_text = N_(u"File format %(ext)s added to %(book)s", ext=file_ext.upper(), book=link) + WorkerThread.add(current_user.name, TaskUpload(upload_text, escape(book.title))) return uploader.process( saved_filename, *os.path.splitext(requested_file.filename), rarExecutable=config.config_rarfile_location) + return None -def upload_cover(request, book): - if 'btn-upload-cover' in request.files: - requested_file = request.files['btn-upload-cover'] +def upload_cover(cover_request, book): + requested_file = cover_request.files.get('btn-upload-cover', None) + if requested_file: # check for empty request if requested_file.filename != '': if not current_user.role_upload(): - abort(403) + flash(_(u"User has no rights to upload cover"), category="error") + return False ret, message = helper.save_cover(requested_file, book.path) if ret is True: + helper.replace_cover_thumbnail_cache(book.id) return True else: flash(message, category="error") @@ -709,26 +1238,7 @@ def handle_title_on_edit(book, book_title): def handle_author_on_edit(book, author_name, update_stored=True): # handle author(s) - # renamed = False - input_authors = author_name.split('&') - input_authors = list(map(lambda it: it.strip().replace(',', '|'), input_authors)) - # Remove duplicates in authors list - input_authors = helper.uniq(input_authors) - # we have all author names now - if input_authors == ['']: - input_authors = [_(u'Unknown')] # prevent empty Author - - renamed = list() - for in_aut in input_authors: - renamed_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == in_aut).first() - if renamed_author and in_aut != renamed_author.name: - renamed.append(renamed_author.name) - all_books = calibre_db.session.query(db.Books) \ - .filter(db.Books.authors.any(db.Authors.name == renamed_author.name)).all() - sorted_renamed_author = helper.get_sorted_author(renamed_author.name) - sorted_old_author = 
helper.get_sorted_author(in_aut) - for one_book in all_books: - one_book.author_sort = one_book.author_sort.replace(sorted_renamed_author, sorted_old_author) + input_authors, renamed = prepare_authors(author_name) change = modify_database_object(input_authors, book.authors, db.Authors, calibre_db.session, 'author') @@ -749,594 +1259,156 @@ def handle_author_on_edit(book, author_name, update_stored=True): return input_authors, change, renamed -@editbook.route("/admin/book/", methods=['GET', 'POST']) -@login_required_if_no_ano -@edit_required -def edit_book(book_id): - modif_date = False +def search_objects_remove(db_book_object, db_type, input_elements): + del_elements = [] + for c_elements in db_book_object: + found = False + if db_type == 'languages': + type_elements = c_elements.lang_code + elif db_type == 'custom': + type_elements = c_elements.value + else: + type_elements = c_elements.name + for inp_element in input_elements: + if inp_element.lower() == type_elements.lower(): + found = True + break + # if the element was not found in the new list, add it to remove list + if not found: + del_elements.append(c_elements) + return del_elements - # create the function for sorting... - try: - calibre_db.update_title_sort(config) - except sqliteOperationalError as e: - log.debug_or_exception(e) - calibre_db.session.rollback() - # Show form - if request.method != 'POST': - return render_edit_book(book_id) - - book = calibre_db.get_filtered_book(book_id, allow_show_archived=True) - - # Book not found - if not book: - flash(_(u"Oops! Selected book title is unavailable. 
File does not exist or is not accessible"), category="error") - return redirect(url_for("web.index")) - - meta = upload_single_file(request, book, book_id) - if upload_cover(request, book) is True: - book.has_cover = 1 - modif_date = True - try: - to_save = request.form.to_dict() - merge_metadata(to_save, meta) - # Update book - edited_books_id = None - - # handle book title - title_change = handle_title_on_edit(book, to_save["book_title"]) - - input_authors, authorchange, renamed = handle_author_on_edit(book, to_save["author_name"]) - if authorchange or title_change: - edited_books_id = book.id - modif_date = True - - if config.config_use_google_drive: - gdriveutils.updateGdriveCalibreFromLocal() - - error = False - if edited_books_id: - error = helper.update_dir_structure(edited_books_id, config.config_calibre_dir, input_authors[0], - renamed_author=renamed) - - if not error: - if "cover_url" in to_save: - if to_save["cover_url"]: - if not current_user.role_upload(): - return "", (403) - if to_save["cover_url"].endswith('/static/generic_cover.jpg'): - book.has_cover = 0 - else: - result, error = helper.save_cover_from_url(to_save["cover_url"], book.path) - if result is True: - book.has_cover = 1 - modif_date = True - else: - flash(error, category="error") - - # Add default series_index to book - modif_date |= edit_book_series_index(to_save["series_index"], book) - # Handle book comments/description - modif_date |= edit_book_comments(Markup(to_save['description']).unescape(), book) - # Handle identifiers - input_identifiers = identifier_list(to_save, book) - modification, warning = modify_identifiers(input_identifiers, book.identifiers, calibre_db.session) - if warning: - flash(_("Identifiers are not Case Sensitive, Overwriting Old Identifier"), category="warning") - modif_date |= modification - # Handle book tags - modif_date |= edit_book_tags(to_save['tags'], book) - # Handle book series - modif_date |= edit_book_series(to_save["series"], book) - # handle book 
publisher - modif_date |= edit_book_publisher(to_save['publisher'], book) - # handle book languages - modif_date |= edit_book_languages(to_save['languages'], book) - # handle book ratings - modif_date |= edit_book_ratings(to_save, book) - # handle cc data - modif_date |= edit_all_cc_data(book_id, book, to_save) - - if to_save["pubdate"]: - try: - book.pubdate = datetime.strptime(to_save["pubdate"], "%Y-%m-%d") - except ValueError: - book.pubdate = db.Books.DEFAULT_PUBDATE +def search_objects_add(db_book_object, db_type, input_elements): + add_elements = [] + for inp_element in input_elements: + found = False + for c_elements in db_book_object: + if db_type == 'languages': + type_elements = c_elements.lang_code + elif db_type == 'custom': + type_elements = c_elements.value else: - book.pubdate = db.Books.DEFAULT_PUBDATE - - if modif_date: - book.last_modified = datetime.utcnow() - kobo_sync_status.remove_synced_book(edited_books_id) - - calibre_db.session.merge(book) - calibre_db.session.commit() - if config.config_use_google_drive: - gdriveutils.updateGdriveCalibreFromLocal() - if "detail_view" in to_save: - return redirect(url_for('web.show_book', book_id=book.id)) - else: - flash(_("Metadata successfully updated"), category="success") - return render_edit_book(book_id) - else: - calibre_db.session.rollback() - flash(error, category="error") - return render_edit_book(book_id) - except ValueError as e: - calibre_db.session.rollback() - flash(str(e), category="error") - return redirect(url_for('web.show_book', book_id=book.id)) - except Exception as ex: - log.debug_or_exception(ex) - calibre_db.session.rollback() - flash(_("Error editing book, please check logfile for details"), category="error") - return redirect(url_for('web.show_book', book_id=book.id)) + type_elements = c_elements.name + if inp_element == type_elements: + found = True + break + if not found: + add_elements.append(inp_element) + return add_elements -def merge_metadata(to_save, meta): - if 
to_save['author_name'] == _(u'Unknown'): - to_save['author_name'] = '' - if to_save['book_title'] == _(u'Unknown'): - to_save['book_title'] = '' - for s_field, m_field in [ - ('tags', 'tags'), ('author_name', 'author'), ('series', 'series'), - ('series_index', 'series_id'), ('languages', 'languages'), - ('book_title', 'title')]: - to_save[s_field] = to_save[s_field] or getattr(meta, m_field, '') - to_save["description"] = to_save["description"] or Markup( - getattr(meta, 'description', '')).unescape() +def remove_objects(db_book_object, db_session, del_elements): + changed = False + if len(del_elements) > 0: + for del_element in del_elements: + db_book_object.remove(del_element) + changed = True + if len(del_element.books) == 0: + db_session.delete(del_element) + return changed -def identifier_list(to_save, book): - """Generate a list of Identifiers from form information""" - id_type_prefix = 'identifier-type-' - id_val_prefix = 'identifier-val-' - result = [] - for type_key, type_value in to_save.items(): - if not type_key.startswith(id_type_prefix): - continue - val_key = id_val_prefix + type_key[len(id_type_prefix):] - if val_key not in to_save.keys(): - continue - result.append(db.Identifiers(to_save[val_key], type_value, book.id)) - return result - - -def prepare_authors_on_upload(title, authr): - if title != _(u'Unknown') and authr != _(u'Unknown'): - entry = calibre_db.check_exists_book(authr, title) - if entry: - log.info("Uploaded book probably exists in library") - flash(_(u"Uploaded book probably exists in the library, consider to change before upload new: ") - + Markup(render_title_template('book_exists_flash.html', entry=entry)), category="warning") - - # handle authors - input_authors = authr.split('&') - # handle_authors(input_authors) - input_authors = list(map(lambda it: it.strip().replace(',', '|'), input_authors)) - # Remove duplicates in authors list - input_authors = helper.uniq(input_authors) - - # we have all author names now - if 
input_authors == ['']: - input_authors = [_(u'Unknown')] # prevent empty Author - - renamed = list() - for in_aut in input_authors: - renamed_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == in_aut).first() - if renamed_author and in_aut != renamed_author.name: - renamed.append(renamed_author.name) - all_books = calibre_db.session.query(db.Books) \ - .filter(db.Books.authors.any(db.Authors.name == renamed_author.name)).all() - sorted_renamed_author = helper.get_sorted_author(renamed_author.name) - sorted_old_author = helper.get_sorted_author(in_aut) - for one_book in all_books: - one_book.author_sort = one_book.author_sort.replace(sorted_renamed_author, sorted_old_author) - - sort_authors_list = list() - db_author = None - for inp in input_authors: - stored_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == inp).first() - if not stored_author: - if not db_author: - db_author = db.Authors(inp, helper.get_sorted_author(inp), "") - calibre_db.session.add(db_author) - calibre_db.session.commit() - sort_author = helper.get_sorted_author(inp) - else: - if not db_author: - db_author = stored_author - sort_author = stored_author.sort - sort_authors_list.append(sort_author) - sort_authors = ' & '.join(sort_authors_list) - return sort_authors, input_authors, db_author, renamed - - -def create_book_on_upload(modif_date, meta): - title = meta.title - authr = meta.author - sort_authors, input_authors, db_author, renamed_authors = prepare_authors_on_upload(title, authr) - - title_dir = helper.get_valid_filename(title) - author_dir = helper.get_valid_filename(db_author.name) - - # combine path and normalize path from windows systems - path = os.path.join(author_dir, title_dir).replace('\\', '/') - - # Calibre adds books with utc as timezone - db_book = db.Books(title, "", sort_authors, datetime.utcnow(), datetime(101, 1, 1), - '1', datetime.utcnow(), path, meta.cover, db_author, [], "") - - modif_date |= 
modify_database_object(input_authors, db_book.authors, db.Authors, calibre_db.session, - 'author') - - # Add series_index to book - modif_date |= edit_book_series_index(meta.series_id, db_book) - - # add languages - invalid=[] - modif_date |= edit_book_languages(meta.languages, db_book, upload=True, invalid=invalid) - if invalid: - for l in invalid: - flash(_(u"'%(langname)s' is not a valid language", langname=l), category="warning") - - # handle tags - modif_date |= edit_book_tags(meta.tags, db_book) - - # handle publisher - modif_date |= edit_book_publisher(meta.publisher, db_book) - - # handle series - modif_date |= edit_book_series(meta.series, db_book) - - # Add file to book - file_size = os.path.getsize(meta.file_path) - db_data = db.Data(db_book, meta.extension.upper()[1:], file_size, title_dir) - db_book.data.append(db_data) - calibre_db.session.add(db_book) - - # flush content, get db_book.id available - calibre_db.session.flush() - return db_book, input_authors, title_dir, renamed_authors - -def file_handling_on_upload(requested_file): - # check if file extension is correct - if '.' 
in requested_file.filename: - file_ext = requested_file.filename.rsplit('.', 1)[-1].lower() - if file_ext not in constants.EXTENSIONS_UPLOAD and '' not in constants.EXTENSIONS_UPLOAD: - flash( - _("File extension '%(ext)s' is not allowed to be uploaded to this server", - ext=file_ext), category="error") - return None, Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json') +def add_objects(db_book_object, db_object, db_session, db_type, add_elements): + changed = False + if db_type == 'languages': + db_filter = db_object.lang_code + elif db_type == 'custom': + db_filter = db_object.value else: - flash(_('File to be uploaded must have an extension'), category="error") - return None, Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json') - - # extract metadata from file - try: - meta = uploader.upload(requested_file, config.config_rarfile_location) - except (IOError, OSError): - log.error("File %s could not saved to temp dir", requested_file.filename) - flash(_(u"File %(filename)s could not saved to temp dir", - filename=requested_file.filename), category="error") - return None, Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json') - return meta, None - - -def move_coverfile(meta, db_book): - # move cover to final directory, including book id - if meta.cover: - coverfile = meta.cover - else: - coverfile = os.path.join(constants.STATIC_DIR, 'generic_cover.jpg') - new_coverpath = os.path.join(config.config_calibre_dir, db_book.path, "cover.jpg") - try: - copyfile(coverfile, new_coverpath) - if meta.cover: - os.unlink(meta.cover) - except OSError as e: - log.error("Failed to move cover file %s: %s", new_coverpath, e) - flash(_(u"Failed to Move Cover File %(file)s: %(error)s", file=new_coverpath, - error=e), - category="error") - - -@editbook.route("/upload", methods=["GET", "POST"]) -@login_required_if_no_ano -@upload_required -def upload(): - if not config.config_uploading: - 
abort(404) - if request.method == 'POST' and 'btn-upload' in request.files: - for requested_file in request.files.getlist("btn-upload"): - try: - modif_date = False - # create the function for sorting... - calibre_db.update_title_sort(config) - calibre_db.session.connection().connection.connection.create_function('uuid4', 0, lambda: str(uuid4())) - - meta, error = file_handling_on_upload(requested_file) - if error: - return error - - db_book, input_authors, title_dir, renamed_authors = create_book_on_upload(modif_date, meta) - - # Comments needs book id therefore only possible after flush - modif_date |= edit_book_comments(Markup(meta.description).unescape(), db_book) - - book_id = db_book.id - title = db_book.title - - error = helper.update_dir_structure_file(book_id, - config.config_calibre_dir, - input_authors[0], - meta.file_path, - title_dir + meta.extension.lower(), - renamed_author=renamed_authors) - - move_coverfile(meta, db_book) - - # save data to database, reread data - calibre_db.session.commit() - - if config.config_use_google_drive: - gdriveutils.updateGdriveCalibreFromLocal() - if error: - flash(error, category="error") - link = '{}'.format(url_for('web.show_book', book_id=book_id), escape(title)) - uploadText = _(u"File %(file)s uploaded", file=link) - WorkerThread.add(current_user.name, TaskUpload(uploadText)) - - if len(request.files.getlist("btn-upload")) < 2: - if current_user.role_edit() or current_user.role_admin(): - resp = {"location": url_for('editbook.edit_book', book_id=book_id)} - return Response(json.dumps(resp), mimetype='application/json') - else: - resp = {"location": url_for('web.show_book', book_id=book_id)} - return Response(json.dumps(resp), mimetype='application/json') - except (OperationalError, IntegrityError) as e: - calibre_db.session.rollback() - log.error("Database error: %s", e) - flash(_(u"Database error: %(error)s.", error=e), category="error") - return Response(json.dumps({"location": url_for("web.index")}), 
mimetype='application/json') - - -@editbook.route("/admin/book/convert/", methods=['POST']) -@login_required_if_no_ano -@edit_required -def convert_bookformat(book_id): - # check to see if we have form fields to work with - if not send user back - book_format_from = request.form.get('book_format_from', None) - book_format_to = request.form.get('book_format_to', None) - - if (book_format_from is None) or (book_format_to is None): - flash(_(u"Source or destination format for conversion missing"), category="error") - return redirect(url_for('editbook.edit_book', book_id=book_id)) - - log.info('converting: book id: %s from: %s to: %s', book_id, book_format_from, book_format_to) - rtn = helper.convert_book_format(book_id, config.config_calibre_dir, book_format_from.upper(), - book_format_to.upper(), current_user.name) - - if rtn is None: - flash(_(u"Book successfully queued for converting to %(book_format)s", - book_format=book_format_to), - category="success") - else: - flash(_(u"There was an error converting this book: %(res)s", res=rtn), category="error") - return redirect(url_for('editbook.edit_book', book_id=book_id)) - -@editbook.route("/ajax/getcustomenum/") -@login_required -def table_get_custom_enum(c_id): - ret = list() - cc = (calibre_db.session.query(db.Custom_Columns) - .filter(db.Custom_Columns.id == c_id) - .filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).one_or_none()) - ret.append({'value': "", 'text': ""}) - for idx, en in enumerate(cc.get_display_dict()['enum_values']): - ret.append({'value': en, 'text': en}) - return json.dumps(ret) - - -@editbook.route("/ajax/editbooks/", methods=['POST']) -@login_required_if_no_ano -@edit_required -def edit_list_book(param): - vals = request.form.to_dict() - book = calibre_db.get_book(vals['pk']) - ret = "" - if param =='series_index': - edit_book_series_index(vals['value'], book) - ret = Response(json.dumps({'success': True, 'newValue': book.series_index}), mimetype='application/json') - elif param 
=='tags': - edit_book_tags(vals['value'], book) - ret = Response(json.dumps({'success': True, 'newValue': ', '.join([tag.name for tag in book.tags])}), - mimetype='application/json') - elif param =='series': - edit_book_series(vals['value'], book) - ret = Response(json.dumps({'success': True, 'newValue': ', '.join([serie.name for serie in book.series])}), - mimetype='application/json') - elif param =='publishers': - edit_book_publisher(vals['value'], book) - ret = Response(json.dumps({'success': True, - 'newValue': ', '.join([publisher.name for publisher in book.publishers])}), - mimetype='application/json') - elif param =='languages': - invalid = list() - edit_book_languages(vals['value'], book, invalid=invalid) - if invalid: - ret = Response(json.dumps({'success': False, - 'msg': 'Invalid languages in request: {}'.format(','.join(invalid))}), - mimetype='application/json') + db_filter = db_object.name + for add_element in add_elements: + # check if an element with that name exists + db_element = db_session.query(db_object).filter(db_filter == add_element).first() + # if no element is found add it + if db_type == 'author': + new_element = db_object(add_element, helper.get_sorted_author(add_element.replace('|', ',')), "") + elif db_type == 'series': + new_element = db_object(add_element, add_element) + elif db_type == 'custom': + new_element = db_object(value=add_element) + elif db_type == 'publisher': + new_element = db_object(add_element, None) + else: # db_type should be tag or language + new_element = db_object(add_element) + if db_element is None: + changed = True + db_session.add(new_element) + db_book_object.append(new_element) else: - lang_names = list() - for lang in book.languages: - lang_names.append(isoLanguages.get_language_name(get_locale(), lang.lang_code)) - ret = Response(json.dumps({'success': True, 'newValue': ', '.join(lang_names)}), - mimetype='application/json') - elif param =='author_sort': - book.author_sort = vals['value'] - ret = 
Response(json.dumps({'success': True, 'newValue': book.author_sort}), - mimetype='application/json') - elif param == 'title': - sort = book.sort - handle_title_on_edit(book, vals.get('value', "")) - helper.update_dir_structure(book.id, config.config_calibre_dir) - ret = Response(json.dumps({'success': True, 'newValue': book.title}), - mimetype='application/json') - elif param =='sort': - book.sort = vals['value'] - ret = Response(json.dumps({'success': True, 'newValue': book.sort}), - mimetype='application/json') - elif param =='comments': - edit_book_comments(vals['value'], book) - ret = Response(json.dumps({'success': True, 'newValue': book.comments[0].text}), - mimetype='application/json') - elif param =='authors': - input_authors, __, renamed = handle_author_on_edit(book, vals['value'], vals.get('checkA', None) == "true") - helper.update_dir_structure(book.id, config.config_calibre_dir, input_authors[0], renamed_author=renamed) - ret = Response(json.dumps({'success': True, - 'newValue': ' & '.join([author.replace('|',',') for author in input_authors])}), - mimetype='application/json') - elif param =='is_archived': - change_archived_books(book.id, vals['value']=="True") - ret = "" - elif param =='read_status': - # ToDo save - ret = Response(json.dumps({'success': True, 'newValue': vals['value']}), - mimetype='application/json') - elif param.startswith("custom_column_"): - new_val = dict() - new_val[param] = vals['value'] - edit_single_cc_data(book.id, book, param[14:], new_val) - # ToDo: Very hacky find better solution - if vals['value'] in ["True", "False"]: - ret = "" + db_element = create_objects_for_addition(db_element, add_element, db_type) + # add element to book + changed = True + db_book_object.append(db_element) + return changed + + +def create_objects_for_addition(db_element, add_element, db_type): + if db_type == 'custom': + if db_element.value != add_element: + db_element.value = add_element + elif db_type == 'languages': + if db_element.lang_code != 
add_element: + db_element.lang_code = add_element + elif db_type == 'series': + if db_element.name != add_element: + db_element.name = add_element + db_element.sort = add_element + elif db_type == 'author': + if db_element.name != add_element: + db_element.name = add_element + db_element.sort = helper.get_sorted_author(add_element.replace('|', ',')) + elif db_type == 'publisher': + if db_element.name != add_element: + db_element.name = add_element + db_element.sort = None + elif db_element.name != add_element: + db_element.name = add_element + return db_element + + +# Modifies different Database objects, first check if elements have to be deleted, +# because they are no longer used, than check if elements have to be added to database +def modify_database_object(input_elements, db_book_object, db_object, db_session, db_type): + # passing input_elements not as a list may lead to undesired results + if not isinstance(input_elements, list): + raise TypeError(str(input_elements) + " should be passed as a list") + input_elements = [x for x in input_elements if x != ''] + # we have all input element (authors, series, tags) names now + # 1. search for elements to remove + del_elements = search_objects_remove(db_book_object, db_type, input_elements) + # 2. search for elements that need to be added + add_elements = search_objects_add(db_book_object, db_type, input_elements) + # if there are elements to remove, we remove them now + changed = remove_objects(db_book_object, db_session, del_elements) + # if there are elements to add, we add them now! + if len(add_elements) > 0: + changed |= add_objects(db_book_object, db_object, db_session, db_type, add_elements) + return changed + + +def modify_identifiers(input_identifiers, db_identifiers, db_session): + """Modify Identifiers to match input information. + input_identifiers is a list of read-to-persist Identifiers objects. 
+ db_identifiers is a list of already persisted list of Identifiers objects.""" + changed = False + error = False + input_dict = dict([(identifier.type.lower(), identifier) for identifier in input_identifiers]) + if len(input_identifiers) != len(input_dict): + error = True + db_dict = dict([(identifier.type.lower(), identifier) for identifier in db_identifiers]) + # delete db identifiers not present in input or modify them with input val + for identifier_type, identifier in db_dict.items(): + if identifier_type not in input_dict.keys(): + db_session.delete(identifier) + changed = True else: - ret = Response(json.dumps({'success': True, 'newValue': vals['value']}), - mimetype='application/json') - else: - return _("Parameter not found"), 400 - book.last_modified = datetime.utcnow() - try: - calibre_db.session.commit() - # revert change for sort if automatic fields link is deactivated - if param == 'title' and vals.get('checkT') == "false": - book.sort = sort - calibre_db.session.commit() - except (OperationalError, IntegrityError) as e: - calibre_db.session.rollback() - log.error("Database error: %s", e) - return ret - - -@editbook.route("/ajax/sort_value//") -@login_required -def get_sorted_entry(field, bookid): - if field in ['title', 'authors', 'sort', 'author_sort']: - book = calibre_db.get_filtered_book(bookid) - if book: - if field == 'title': - return json.dumps({'sort': book.sort}) - elif field == 'authors': - return json.dumps({'author_sort': book.author_sort}) - if field == 'sort': - return json.dumps({'sort': book.title}) - if field == 'author_sort': - return json.dumps({'author_sort': book.author}) - return "" - - -@editbook.route("/ajax/simulatemerge", methods=['POST']) -@login_required -@edit_required -def simulate_merge_list_book(): - vals = request.get_json().get('Merge_books') - if vals: - to_book = calibre_db.get_book(vals[0]).title - vals.pop(0) - if to_book: - for book_id in vals: - from_book = [] - 
from_book.append(calibre_db.get_book(book_id).title) - return json.dumps({'to': to_book, 'from': from_book}) - return "" - - -@editbook.route("/ajax/mergebooks", methods=['POST']) -@login_required -@edit_required -def merge_list_book(): - vals = request.get_json().get('Merge_books') - to_file = list() - if vals: - # load all formats from target book - to_book = calibre_db.get_book(vals[0]) - vals.pop(0) - if to_book: - for file in to_book.data: - to_file.append(file.format) - to_name = helper.get_valid_filename(to_book.title) + ' - ' + \ - helper.get_valid_filename(to_book.authors[0].name) - for book_id in vals: - from_book = calibre_db.get_book(book_id) - if from_book: - for element in from_book.data: - if element.format not in to_file: - # create new data entry with: book_id, book_format, uncompressed_size, name - filepath_new = os.path.normpath(os.path.join(config.config_calibre_dir, - to_book.path, - to_name + "." + element.format.lower())) - filepath_old = os.path.normpath(os.path.join(config.config_calibre_dir, - from_book.path, - element.name + "." 
+ element.format.lower())) - copyfile(filepath_old, filepath_new) - to_book.data.append(db.Data(to_book.id, - element.format, - element.uncompressed_size, - to_name)) - delete_book_from_table(from_book.id,"", True) - return json.dumps({'success': True}) - return "" - - -@editbook.route("/ajax/xchange", methods=['POST']) -@login_required -@edit_required -def table_xchange_author_title(): - vals = request.get_json().get('xchange') - if vals: - for val in vals: - modif_date = False - book = calibre_db.get_book(val) - authors = book.title - book.authors = calibre_db.order_authors([book]) - author_names = [] - for authr in book.authors: - author_names.append(authr.name.replace('|', ',')) - - title_change = handle_title_on_edit(book, " ".join(author_names)) - input_authors, authorchange, renamed = handle_author_on_edit(book, authors) - if authorchange or title_change: - edited_books_id = book.id - modif_date = True - - if config.config_use_google_drive: - gdriveutils.updateGdriveCalibreFromLocal() - - if edited_books_id: - helper.update_dir_structure(edited_books_id, config.config_calibre_dir, input_authors[0], - renamed_author=renamed) - if modif_date: - book.last_modified = datetime.utcnow() - try: - calibre_db.session.commit() - except (OperationalError, IntegrityError) as e: - calibre_db.session.rollback() - log.error("Database error: %s", e) - return json.dumps({'success': False}) - - if config.config_use_google_drive: - gdriveutils.updateGdriveCalibreFromLocal() - return json.dumps({'success': True}) - return "" + input_identifier = input_dict[identifier_type] + identifier.type = input_identifier.type + identifier.val = input_identifier.val + # add input identifiers not present in db + for identifier_type, identifier in input_dict.items(): + if identifier_type not in db_dict.keys(): + db_session.add(identifier) + changed = True + return changed, error diff --git a/cps/epub.py b/cps/epub.py index aae6120b..be6cd65d 100644 --- a/cps/epub.py +++ b/cps/epub.py @@ 
-20,25 +20,27 @@ import os import zipfile from lxml import etree -from . import isoLanguages +from . import isoLanguages, cover from . import config from .helper import split_authors from .constants import BookMeta - -def extractCover(zipFile, coverFile, coverpath, tmp_file_name): - if coverFile is None: +def _extract_cover(zip_file, cover_file, cover_path, tmp_file_name): + if cover_file is None: return None else: - zipCoverPath = os.path.join(coverpath, coverFile).replace('\\', '/') - cf = zipFile.read(zipCoverPath) + cf = extension = None + zip_cover_path = os.path.join(cover_path, cover_file).replace('\\', '/') + prefix = os.path.splitext(tmp_file_name)[0] - tmp_cover_name = prefix + '.' + os.path.basename(zipCoverPath) - image = open(tmp_cover_name, 'wb') - image.write(cf) - image.close() - return tmp_cover_name + tmp_cover_name = prefix + '.' + os.path.basename(zip_cover_path) + ext = os.path.splitext(tmp_cover_name) + if len(ext) > 1: + extension = ext[1].lower() + if extension in cover.COVER_EXTENSIONS: + cf = zip_file.read(zip_cover_path) + return cover.cover_processing(tmp_file_name, cf, extension) def get_epub_layout(book, book_data): ns = { @@ -72,35 +74,43 @@ def get_epub_info(tmp_file_path, original_file_name, original_file_extension): 'dc': 'http://purl.org/dc/elements/1.1/' } - epubZip = zipfile.ZipFile(tmp_file_path) + epub_zip = zipfile.ZipFile(tmp_file_path) - txt = epubZip.read('META-INF/container.xml') + txt = epub_zip.read('META-INF/container.xml') tree = etree.fromstring(txt) - cfname = tree.xpath('n:rootfiles/n:rootfile/@full-path', namespaces=ns)[0] - cf = epubZip.read(cfname) + cf_name = tree.xpath('n:rootfiles/n:rootfile/@full-path', namespaces=ns)[0] + cf = epub_zip.read(cf_name) tree = etree.fromstring(cf) - coverpath = os.path.dirname(cfname) + cover_path = os.path.dirname(cf_name) p = tree.xpath('/pkg:package/pkg:metadata', namespaces=ns)[0] epub_metadata = {} - for s in ['title', 'description', 'creator', 'language', 'subject']: + 
for s in ['title', 'description', 'creator', 'language', 'subject', 'publisher', 'date']: tmp = p.xpath('dc:%s/text()' % s, namespaces=ns) if len(tmp) > 0: if s == 'creator': epub_metadata[s] = ' & '.join(split_authors(tmp)) elif s == 'subject': epub_metadata[s] = ', '.join(tmp) + elif s == 'date': + epub_metadata[s] = tmp[0][:10] else: epub_metadata[s] = tmp[0] else: - epub_metadata[s] = u'Unknown' + epub_metadata[s] = 'Unknown' - if epub_metadata['subject'] == u'Unknown': + if epub_metadata['subject'] == 'Unknown': epub_metadata['subject'] = '' + if epub_metadata['publisher'] == u'Unknown': + epub_metadata['publisher'] = '' + + if epub_metadata['date'] == u'Unknown': + epub_metadata['date'] = '' + if epub_metadata['description'] == u'Unknown': description = tree.xpath("//*[local-name() = 'description']/text()") if len(description) > 0: @@ -111,9 +121,17 @@ def get_epub_info(tmp_file_path, original_file_name, original_file_extension): lang = epub_metadata['language'].split('-', 1)[0].lower() epub_metadata['language'] = isoLanguages.get_lang3(lang) - epub_metadata = parse_epbub_series(ns, tree, epub_metadata) + epub_metadata = parse_epub_series(ns, tree, epub_metadata) - coverfile = parse_ebpub_cover(ns, tree, epubZip, coverpath, tmp_file_path) + cover_file = parse_epub_cover(ns, tree, epub_zip, cover_path, tmp_file_path) + + identifiers = [] + for node in p.xpath('dc:identifier', namespaces=ns): + identifier_name=node.attrib.values()[-1]; + identifier_value=node.text; + if identifier_name in ('uuid','calibre'): + continue; + identifiers.append( [identifier_name, identifier_value] ) if not epub_metadata['title']: title = original_file_name @@ -125,45 +143,57 @@ def get_epub_info(tmp_file_path, original_file_name, original_file_extension): extension=original_file_extension, title=title.encode('utf-8').decode('utf-8'), author=epub_metadata['creator'].encode('utf-8').decode('utf-8'), - cover=coverfile, + cover=cover_file, description=epub_metadata['description'], 
tags=epub_metadata['subject'].encode('utf-8').decode('utf-8'), series=epub_metadata['series'].encode('utf-8').decode('utf-8'), series_id=epub_metadata['series_id'].encode('utf-8').decode('utf-8'), languages=epub_metadata['language'], - publisher="") + publisher=epub_metadata['publisher'].encode('utf-8').decode('utf-8'), + pubdate=epub_metadata['date'], + identifiers=identifiers) -def parse_ebpub_cover(ns, tree, epubZip, coverpath, tmp_file_path): - coversection = tree.xpath("/pkg:package/pkg:manifest/pkg:item[@id='cover-image']/@href", namespaces=ns) - coverfile = None - if len(coversection) > 0: - coverfile = extractCover(epubZip, coversection[0], coverpath, tmp_file_path) - else: + +def parse_epub_cover(ns, tree, epub_zip, cover_path, tmp_file_path): + cover_section = tree.xpath("/pkg:package/pkg:manifest/pkg:item[@id='cover-image']/@href", namespaces=ns) + cover_file = None + # if len(cover_section) > 0: + for cs in cover_section: + cover_file = _extract_cover(epub_zip, cs, cover_path, tmp_file_path) + if cover_file: + break + if not cover_file: meta_cover = tree.xpath("/pkg:package/pkg:metadata/pkg:meta[@name='cover']/@content", namespaces=ns) if len(meta_cover) > 0: - coversection = tree.xpath("/pkg:package/pkg:manifest/pkg:item[@id='"+meta_cover[0]+"']/@href", namespaces=ns) + cover_section = tree.xpath( + "/pkg:package/pkg:manifest/pkg:item[@id='"+meta_cover[0]+"']/@href", namespaces=ns) + if not cover_section: + cover_section = tree.xpath( + "/pkg:package/pkg:manifest/pkg:item[@properties='" + meta_cover[0] + "']/@href", namespaces=ns) else: - coversection = tree.xpath("/pkg:package/pkg:guide/pkg:reference/@href", namespaces=ns) - if len(coversection) > 0: - filetype = coversection[0].rsplit('.', 1)[-1] + cover_section = tree.xpath("/pkg:package/pkg:guide/pkg:reference/@href", namespaces=ns) + for cs in cover_section: + filetype = cs.rsplit('.', 1)[-1] if filetype == "xhtml" or filetype == "html": # if cover is (x)html format - markup = 
epubZip.read(os.path.join(coverpath, coversection[0])) - markupTree = etree.fromstring(markup) + markup = epub_zip.read(os.path.join(cover_path, cs)) + markup_tree = etree.fromstring(markup) # no matter xhtml or html with no namespace - imgsrc = markupTree.xpath("//*[local-name() = 'img']/@src") + img_src = markup_tree.xpath("//*[local-name() = 'img']/@src") # Alternative image source - if not len(imgsrc): - imgsrc = markupTree.xpath("//attribute::*[contains(local-name(), 'href')]") - if len(imgsrc): - # imgsrc maybe startwith "../"" so fullpath join then relpath to cwd - filename = os.path.relpath(os.path.join(os.path.dirname(os.path.join(coverpath, coversection[0])), - imgsrc[0])) - coverfile = extractCover(epubZip, filename, "", tmp_file_path) + if not len(img_src): + img_src = markup_tree.xpath("//attribute::*[contains(local-name(), 'href')]") + if len(img_src): + # img_src maybe start with "../"" so fullpath join then relpath to cwd + filename = os.path.relpath(os.path.join(os.path.dirname(os.path.join(cover_path, cover_section[0])), + img_src[0])) + cover_file = _extract_cover(epub_zip, filename, "", tmp_file_path) else: - coverfile = extractCover(epubZip, coversection[0], coverpath, tmp_file_path) - return coverfile + cover_file = _extract_cover(epub_zip, cs, cover_path, tmp_file_path) + if cover_file: break + return cover_file -def parse_epbub_series(ns, tree, epub_metadata): + +def parse_epub_series(ns, tree, epub_metadata): series = tree.xpath("/pkg:package/pkg:metadata/pkg:meta[@name='calibre:series']/@content", namespaces=ns) if len(series) > 0: epub_metadata['series'] = series[0] diff --git a/cps/error_handler.py b/cps/error_handler.py index 37b7500e..7c003bdb 100644 --- a/cps/error_handler.py +++ b/cps/error_handler.py @@ -17,6 +17,7 @@ # along with this program. If not, see . 
import traceback + from flask import render_template from werkzeug.exceptions import default_exceptions try: @@ -42,8 +43,9 @@ def error_http(error): def internal_error(error): return render_template('http_error.html', - error_code="Internal Server Error", - error_name=str(error), + error_code="500 Internal Server Error", + error_name='The server encountered an internal error and was unable to complete your ' + 'request. There is an error in the application.', issue=True, unconfigured=False, error_stack=traceback.format_exc().split("\n"), diff --git a/cps/fb2.py b/cps/fb2.py index 21586736..c4b89fd6 100644 --- a/cps/fb2.py +++ b/cps/fb2.py @@ -77,4 +77,6 @@ def get_fb2_info(tmp_file_path, original_file_extension): series="", series_id="", languages="", - publisher="") + publisher="", + pubdate="", + identifiers=[]) diff --git a/cps/fs.py b/cps/fs.py new file mode 100644 index 00000000..996499c3 --- /dev/null +++ b/cps/fs.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- + +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2020 mmonkey +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from . 
import logger +from .constants import CACHE_DIR +from os import makedirs, remove +from os.path import isdir, isfile, join +from shutil import rmtree + + +class FileSystem: + _instance = None + _cache_dir = CACHE_DIR + + def __new__(cls): + if cls._instance is None: + cls._instance = super(FileSystem, cls).__new__(cls) + cls.log = logger.create() + return cls._instance + + def get_cache_dir(self, cache_type=None): + if not isdir(self._cache_dir): + try: + makedirs(self._cache_dir) + except OSError: + self.log.info(f'Failed to create path {self._cache_dir} (Permission denied).') + raise + + path = join(self._cache_dir, cache_type) + if cache_type and not isdir(path): + try: + makedirs(path) + except OSError: + self.log.info(f'Failed to create path {path} (Permission denied).') + raise + + return path if cache_type else self._cache_dir + + def get_cache_file_dir(self, filename, cache_type=None): + path = join(self.get_cache_dir(cache_type), filename[:2]) + if not isdir(path): + try: + makedirs(path) + except OSError: + self.log.info(f'Failed to create path {path} (Permission denied).') + raise + + return path + + def get_cache_file_path(self, filename, cache_type=None): + return join(self.get_cache_file_dir(filename, cache_type), filename) if filename else None + + def get_cache_file_exists(self, filename, cache_type=None): + path = self.get_cache_file_path(filename, cache_type) + return isfile(path) + + def delete_cache_dir(self, cache_type=None): + if not cache_type and isdir(self._cache_dir): + try: + rmtree(self._cache_dir) + except OSError: + self.log.info(f'Failed to delete path {self._cache_dir} (Permission denied).') + raise + + path = join(self._cache_dir, cache_type) + if cache_type and isdir(path): + try: + rmtree(path) + except OSError: + self.log.info(f'Failed to delete path {path} (Permission denied).') + raise + + def delete_cache_file(self, filename, cache_type=None): + path = self.get_cache_file_path(filename, cache_type) + if isfile(path): + try: + 
remove(path) + except OSError: + self.log.info(f'Failed to delete path {path} (Permission denied).') + raise diff --git a/cps/gdrive.py b/cps/gdrive.py index 6ca73ca9..60e3d47b 100644 --- a/cps/gdrive.py +++ b/cps/gdrive.py @@ -109,7 +109,7 @@ def revoke_watch_gdrive(): try: gdriveutils.stopChannel(gdriveutils.Gdrive.Instance().drive, last_watch_response['id'], last_watch_response['resourceId']) - except HttpError: + except (HttpError, AttributeError): pass config.config_google_drive_watch_changes_response = {} config.save() @@ -152,7 +152,7 @@ try: move(os.path.join(tmp_dir, "tmp_metadata.db"), dbpath) calibre_db.reconnect_db(config, ub.app_DB_path) except Exception as ex: - log.debug_or_exception(ex) + log.error_or_exception(ex) return '' except AttributeError: pass diff --git a/cps/gdriveutils.py b/cps/gdriveutils.py index 878c1f9f..e2e0a536 100644 --- a/cps/gdriveutils.py +++ b/cps/gdriveutils.py @@ -32,13 +32,9 @@ try: from sqlalchemy.orm import declarative_base except ImportError: from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.exc import OperationalError, InvalidRequestError +from sqlalchemy.exc import OperationalError, InvalidRequestError, IntegrityError from sqlalchemy.sql.expression import text -try: - from six import __version__ as six_version -except ImportError: - six_version = "not installed" try: from httplib2 import __version__ as httplib2_version except ImportError: @@ -56,16 +52,18 @@ try: from pydrive2.auth import GoogleAuth from pydrive2.drive import GoogleDrive from pydrive2.auth import RefreshError + from pydrive2.files import ApiRequestError except ImportError as err: try: from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from pydrive.auth import RefreshError + from pydrive.files import ApiRequestError except ImportError as err: importError = err gdrive_support = False -from . import logger, cli, config +from . 
import logger, cli_param, config from .constants import CONFIG_DIR as _CONFIG_DIR @@ -79,7 +77,7 @@ if gdrive_support: if not logger.is_debug_enabled(): logger.get('googleapiclient.discovery').setLevel(logger.logging.ERROR) else: - log.debug("Cannot import pydrive,httplib2, using gdrive will not work: %s", importError) + log.debug("Cannot import pydrive, httplib2, using gdrive will not work: {}".format(importError)) class Singleton: @@ -139,11 +137,12 @@ class Gdrive: def __init__(self): self.drive = getDrive(gauth=Gauth.Instance().auth) + def is_gdrive_ready(): return os.path.exists(SETTINGS_YAML) and os.path.exists(CREDENTIALS) -engine = create_engine('sqlite:///{0}'.format(cli.gdpath), echo=False) +engine = create_engine('sqlite:///{0}'.format(cli_param.gd_path), echo=False) Base = declarative_base() # Open session for database connection @@ -191,10 +190,11 @@ def migrate(): session.execute('ALTER TABLE gdrive_ids2 RENAME to gdrive_ids') break -if not os.path.exists(cli.gdpath): +if not os.path.exists(cli_param.gd_path): try: Base.metadata.create_all(engine) - except Exception: + except Exception as ex: + log.error("Error connect to database: {} - {}".format(cli_param.gd_path, ex)) raise migrate() @@ -210,9 +210,9 @@ def getDrive(drive=None, gauth=None): try: gauth.Refresh() except RefreshError as e: - log.error("Google Drive error: %s", e) + log.error("Google Drive error: {}".format(e)) except Exception as ex: - log.debug_or_exception(ex) + log.error_or_exception(ex) else: # Initialize the saved creds gauth.Authorize() @@ -222,7 +222,7 @@ def getDrive(drive=None, gauth=None): try: drive.auth.Refresh() except RefreshError as e: - log.error("Google Drive error: %s", e) + log.error("Google Drive error: {}".format(e)) return drive def listRootFolders(): @@ -231,7 +231,7 @@ def listRootFolders(): folder = "'root' in parents and mimeType = 'application/vnd.google-apps.folder' and trashed = false" fileList = drive.ListFile({'q': folder}).GetList() except 
(ServerNotFoundError, ssl.SSLError, RefreshError) as e: - log.info("GDrive Error %s" % e) + log.info("GDrive Error {}".format(e)) fileList = [] return fileList @@ -269,8 +269,7 @@ def getEbooksFolderId(drive=None): try: session.commit() except OperationalError as ex: - log.error("gdrive.db DB is not Writeable") - log.debug('Database error: %s', ex) + log.error_or_exception('Database error: {}'.format(ex)) session.rollback() return gDriveId.gdrive_id @@ -286,6 +285,7 @@ def getFile(pathId, fileName, drive): def getFolderId(path, drive): # drive = getDrive(drive) + currentFolderId = None try: currentFolderId = getEbooksFolderId(drive) sqlCheckPath = path if path[-1] == '/' else path + '/' @@ -318,10 +318,14 @@ def getFolderId(path, drive): session.commit() else: currentFolderId = storedPathName.gdrive_id - except OperationalError as ex: - log.error("gdrive.db DB is not Writeable") - log.debug('Database error: %s', ex) + except (OperationalError, IntegrityError) as ex: + log.error_or_exception('Database error: {}'.format(ex)) session.rollback() + except ApiRequestError as ex: + log.error('{} {}'.format(ex.error['message'], path)) + session.rollback() + except RefreshError as ex: + log.error(ex) return currentFolderId @@ -355,16 +359,27 @@ def moveGdriveFolderRemote(origin_file, target_folder): children = drive.auth.service.children().list(folderId=previous_parents).execute() gFileTargetDir = getFileFromEbooksFolder(None, target_folder) if not gFileTargetDir: - # Folder is not existing, create, and move folder gFileTargetDir = drive.CreateFile( {'title': target_folder, 'parents': [{"kind": "drive#fileLink", 'id': getEbooksFolderId()}], "mimeType": "application/vnd.google-apps.folder"}) gFileTargetDir.Upload() - # Move the file to the new folder - drive.auth.service.files().update(fileId=origin_file['id'], - addParents=gFileTargetDir['id'], - removeParents=previous_parents, - fields='id, parents').execute() + # Move the file to the new folder + 
drive.auth.service.files().update(fileId=origin_file['id'], + addParents=gFileTargetDir['id'], + removeParents=previous_parents, + fields='id, parents').execute() + + elif gFileTargetDir['title'] != target_folder: + # Folder is not existing, create, and move folder + drive.auth.service.files().patch(fileId=origin_file['id'], + body={'title': target_folder}, + fields='title').execute() + else: + # Move the file to the new folder + drive.auth.service.files().update(fileId=origin_file['id'], + addParents=gFileTargetDir['id'], + removeParents=previous_parents, + fields='id, parents').execute() # if previous_parents has no children anymore, delete original fileparent if len(children['items']) == 1: deleteDatabaseEntry(previous_parents) @@ -412,24 +427,24 @@ def uploadFileToEbooksFolder(destFile, f): splitDir = destFile.split('/') for i, x in enumerate(splitDir): if i == len(splitDir)-1: - existingFiles = drive.ListFile({'q': "title = '%s' and '%s' in parents and trashed = false" % + existing_Files = drive.ListFile({'q': "title = '%s' and '%s' in parents and trashed = false" % (x.replace("'", r"\'"), parent['id'])}).GetList() - if len(existingFiles) > 0: - driveFile = existingFiles[0] + if len(existing_Files) > 0: + driveFile = existing_Files[0] else: driveFile = drive.CreateFile({'title': x, 'parents': [{"kind": "drive#fileLink", 'id': parent['id']}], }) driveFile.SetContentFile(f) driveFile.Upload() else: - existingFolder = drive.ListFile({'q': "title = '%s' and '%s' in parents and trashed = false" % + existing_Folder = drive.ListFile({'q': "title = '%s' and '%s' in parents and trashed = false" % (x.replace("'", r"\'"), parent['id'])}).GetList() - if len(existingFolder) == 0: + if len(existing_Folder) == 0: parent = drive.CreateFile({'title': x, 'parents': [{"kind": "drive#fileLink", 'id': parent['id']}], "mimeType": "application/vnd.google-apps.folder"}) parent.Upload() else: - parent = existingFolder[0] + parent = existing_Folder[0] def watchChange(drive, channel_id, 
channel_type, channel_address, @@ -528,8 +543,8 @@ def deleteDatabaseOnChange(): session.commit() except (OperationalError, InvalidRequestError) as ex: session.rollback() - log.debug('Database error: %s', ex) - log.error(u"GDrive DB is not Writeable") + log.error_or_exception('Database error: {}'.format(ex)) + session.rollback() def updateGdriveCalibreFromLocal(): @@ -547,8 +562,7 @@ def updateDatabaseOnEdit(ID,newPath): try: session.commit() except OperationalError as ex: - log.error("gdrive.db DB is not Writeable") - log.debug('Database error: %s', ex) + log.error_or_exception('Database error: {}'.format(ex)) session.rollback() @@ -558,8 +572,7 @@ def deleteDatabaseEntry(ID): try: session.commit() except OperationalError as ex: - log.error("gdrive.db DB is not Writeable") - log.debug('Database error: %s', ex) + log.error_or_exception('Database error: {}'.format(ex)) session.rollback() @@ -580,8 +593,7 @@ def get_cover_via_gdrive(cover_path): try: session.commit() except OperationalError as ex: - log.error("gdrive.db DB is not Writeable") - log.debug('Database error: %s', ex) + log.error_or_exception('Database error: {}'.format(ex)) session.rollback() return df.metadata.get('webContentLink') else: @@ -603,7 +615,7 @@ def do_gdrive_download(df, headers, convert_encoding=False): def stream(convert_encoding): for byte in s: - headers = {"Range": 'bytes=%s-%s' % (byte[0], byte[1])} + headers = {"Range": 'bytes={}-{}'.format(byte[0], byte[1])} resp, content = df.auth.Get_Http_Object().request(download_url, headers=headers) if resp.status == 206: if convert_encoding: @@ -611,7 +623,7 @@ def do_gdrive_download(df, headers, convert_encoding=False): content = content.decode(result['encoding']).encode('utf-8') yield content else: - log.warning('An error occurred: %s', resp) + log.warning('An error occurred: {}'.format(resp)) return return Response(stream_with_context(stream(convert_encoding)), headers=headers) @@ -668,8 +680,3 @@ def get_error_text(client_secrets=None): 
return 'Callback url (redirect url) is missing in client_secrets.json' if client_secrets: client_secrets.update(filedata['web']) - - -def get_versions(): - return {'six': six_version, - 'httplib2': httplib2_version} diff --git a/cps/gevent_wsgi.py b/cps/gevent_wsgi.py new file mode 100644 index 00000000..b044f31b --- /dev/null +++ b/cps/gevent_wsgi.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- + +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2022 OzzieIsaacs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +from gevent.pywsgi import WSGIHandler + +class MyWSGIHandler(WSGIHandler): + def get_environ(self): + env = super().get_environ() + path, __ = self.path.split('?', 1) if '?' in self.path else (self.path, '') + env['RAW_URI'] = path + return env + + diff --git a/cps/helper.py b/cps/helper.py index 21dab9a3..ed11e1c0 100644 --- a/cps/helper.py +++ b/cps/helper.py @@ -17,47 +17,47 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-import sys import os import io +import sys import mimetypes import re import shutil -import time -import unicodedata +import socket from datetime import datetime, timedelta from tempfile import gettempdir - import requests -from babel.dates import format_datetime -from babel.units import format_unit +import unidecode + from flask import send_from_directory, make_response, redirect, abort, url_for from flask_babel import gettext as _ +from flask_babel import lazy_gettext as N_ from flask_login import current_user -from sqlalchemy.sql.expression import true, false, and_, text, func +from sqlalchemy.sql.expression import true, false, and_, or_, text, func +from sqlalchemy.exc import InvalidRequestError, OperationalError from werkzeug.datastructures import Headers from werkzeug.security import generate_password_hash from markupsafe import escape +from urllib.parse import quote try: - from urllib.parse import quote + import advocate + from advocate.exceptions import UnacceptableAddressException + use_advocate = True except ImportError: - from urllib import quote + use_advocate = False + advocate = requests + UnacceptableAddressException = MissingSchema = BaseException -try: - import unidecode - use_unidecode = True -except ImportError: - use_unidecode = False - -from . import calibre_db +from . import calibre_db, cli_param from .tasks.convert import TaskConvert -from . import logger, config, get_locale, db, ub, kobo_sync_status +from . import logger, config, db, ub, fs from . 
import gdriveutils as gd -from .constants import STATIC_DIR as _STATIC_DIR +from .constants import STATIC_DIR as _STATIC_DIR, CACHE_TYPE_THUMBNAILS, THUMBNAIL_TYPE_COVER, THUMBNAIL_TYPE_SERIES from .subproc_wrapper import process_wait -from .services.worker import WorkerThread, STAT_WAITING, STAT_FAIL, STAT_STARTED, STAT_FINISH_SUCCESS +from .services.worker import WorkerThread from .tasks.mail import TaskEmail +from .tasks.thumbnail import TaskClearCoverThumbnailCache, TaskGenerateCoverThumbnails log = logger.create() @@ -72,10 +72,10 @@ except (ImportError, RuntimeError) as e: # Convert existing book entry to new format -def convert_book_format(book_id, calibrepath, old_book_format, new_book_format, user_id, kindle_mail=None): +def convert_book_format(book_id, calibre_path, old_book_format, new_book_format, user_id, kindle_mail=None): book = calibre_db.get_book(book_id) data = calibre_db.get_book_format(book.id, old_book_format) - file_path = os.path.join(calibrepath, book.path, data.name) + file_path = os.path.join(calibre_path, book.path, data.name) if not data: error_message = _(u"%(format)s format not found for book id: %(book)d", format=old_book_format, book=book_id) log.error("convert_book_format: %s", error_message) @@ -108,9 +108,10 @@ def convert_book_format(book_id, calibrepath, old_book_format, new_book_format, return None +# Texts are not lazy translated as they are supposed to get send out as is def send_test_mail(kindle_mail, user_name): WorkerThread.add(user_name, TaskEmail(_(u'Calibre-Web test e-mail'), None, None, - config.get_mail_settings(), kindle_mail, _(u"Test e-mail"), + config.get_mail_settings(), kindle_mail, N_(u"Test e-mail"), _(u'This e-mail has been sent via Calibre-Web.'))) return @@ -132,27 +133,27 @@ def send_registration_mail(e_mail, user_name, default_password, resend=False): attachment=None, settings=config.get_mail_settings(), recipient=e_mail, - taskMessage=_(u"Registration e-mail for user: %(name)s", name=user_name), + 
task_message=N_(u"Registration e-mail for user: %(name)s", name=user_name), text=txt )) return def check_send_to_kindle_with_converter(formats): - bookformats = list() + book_formats = list() if 'EPUB' in formats and 'MOBI' not in formats: - bookformats.append({'format': 'Mobi', - 'convert': 1, - 'text': _('Convert %(orig)s to %(format)s and send to Kindle', - orig='Epub', - format='Mobi')}) - if 'AZW3' in formats and not 'MOBI' in formats: - bookformats.append({'format': 'Mobi', - 'convert': 2, - 'text': _('Convert %(orig)s to %(format)s and send to Kindle', - orig='Azw3', - format='Mobi')}) - return bookformats + book_formats.append({'format': 'Mobi', + 'convert': 1, + 'text': _('Convert %(orig)s to %(format)s and send to Kindle', + orig='Epub', + format='Mobi')}) + if 'AZW3' in formats and 'MOBI' not in formats: + book_formats.append({'format': 'Mobi', + 'convert': 2, + 'text': _('Convert %(orig)s to %(format)s and send to Kindle', + orig='Azw3', + format='Mobi')}) + return book_formats def check_send_to_kindle(entry): @@ -160,26 +161,26 @@ def check_send_to_kindle(entry): returns all available book formats for sending to Kindle """ formats = list() - bookformats = list() + book_formats = list() if len(entry.data): for ele in iter(entry.data): if ele.uncompressed_size < config.mail_size: formats.append(ele.format) if 'MOBI' in formats: - bookformats.append({'format': 'Mobi', - 'convert': 0, - 'text': _('Send %(format)s to Kindle', format='Mobi')}) + book_formats.append({'format': 'Mobi', + 'convert': 0, + 'text': _('Send %(format)s to Kindle', format='Mobi')}) if 'PDF' in formats: - bookformats.append({'format': 'Pdf', - 'convert': 0, - 'text': _('Send %(format)s to Kindle', format='Pdf')}) + book_formats.append({'format': 'Pdf', + 'convert': 0, + 'text': _('Send %(format)s to Kindle', format='Pdf')}) if 'AZW' in formats: - bookformats.append({'format': 'Azw', - 'convert': 0, - 'text': _('Send %(format)s to Kindle', format='Azw')}) + 
book_formats.append({'format': 'Azw', + 'convert': 0, + 'text': _('Send %(format)s to Kindle', format='Azw')}) if config.config_converterpath: - bookformats.extend(check_send_to_kindle_with_converter(formats)) - return bookformats + book_formats.extend(check_send_to_kindle_with_converter(formats)) + return book_formats else: log.error(u'Cannot find book entry %d', entry.id) return None @@ -188,13 +189,13 @@ def check_send_to_kindle(entry): # Check if a reader is existing for any of the book formats, if not, return empty list, otherwise return # list with supported formats def check_read_formats(entry): - EXTENSIONS_READER = {'TXT', 'PDF', 'EPUB', 'CBZ', 'CBT', 'CBR', 'DJVU'} - bookformats = list() + extensions_reader = {'TXT', 'PDF', 'EPUB', 'CBZ', 'CBT', 'CBR', 'DJVU'} + book_formats = list() if len(entry.data): for ele in iter(entry.data): - if ele.format.upper() in EXTENSIONS_READER: - bookformats.append(ele.format.lower()) - return bookformats + if ele.format.upper() in extensions_reader: + book_formats.append(ele.format.lower()) + return book_formats # Files are processed in the following order/priority: @@ -216,15 +217,15 @@ def send_mail(book_id, book_format, convert, kindle_mail, calibrepath, user_id): if entry.format.upper() == book_format.upper(): converted_file_name = entry.name + '.' + book_format.lower() link = '{}'.format(url_for('web.show_book', book_id=book_id), escape(book.title)) - EmailText = _(u"%(book)s send to Kindle", book=link) + email_text = N_(u"%(book)s send to Kindle", book=link) WorkerThread.add(user_id, TaskEmail(_(u"Send to Kindle"), book.path, converted_file_name, config.get_mail_settings(), kindle_mail, - EmailText, _(u'This e-mail has been sent via Calibre-Web.'))) + email_text, _(u'This e-mail has been sent via Calibre-Web.'))) return return _(u"The requested file could not be read. 
Maybe wrong permissions?") -def get_valid_filename(value, replace_whitespace=True): +def get_valid_filename(value, replace_whitespace=True, chars=128): """ Returns the given string converted to a string that can be used for a clean filename. Limits num characters to 128 max. @@ -232,21 +233,17 @@ def get_valid_filename(value, replace_whitespace=True): if value[-1:] == u'.': value = value[:-1]+u'_' value = value.replace("/", "_").replace(":", "_").strip('\0') - if use_unidecode: - if config.config_unicode_filename: - value = (unidecode.unidecode(value)) - else: - value = value.replace(u'§', u'SS') - value = value.replace(u'ß', u'ss') - value = unicodedata.normalize('NFKD', value) - re_slugify = re.compile(r'[\W\s-]', re.UNICODE) - value = re_slugify.sub('', value) + if config.config_unicode_filename: + value = (unidecode.unidecode(value)) if replace_whitespace: # *+:\"/<>? are replaced by _ value = re.sub(r'[*+:\\\"/<>?]+', u'_', value, flags=re.U) # pipe has to be replaced with comma value = re.sub(r'[|]+', u',', value, flags=re.U) - value = value[:128].strip() + + filename_encoding_for_length = 'utf-16' if sys.platform == "win32" or sys.platform == "darwin" else 'utf-8' + value = value.encode(filename_encoding_for_length)[:chars].decode('utf-8', errors='ignore').strip() + if not value: raise ValueError("Filename cannot be empty") return value @@ -269,6 +266,7 @@ def split_authors(values): def get_sorted_author(value): + value2 = None try: if ',' not in value: regexes = [r"^(JR|SR)\.?$", r"^I{1,3}\.?$", r"^IV\.?$"] @@ -294,9 +292,59 @@ def get_sorted_author(value): return value2 -# Deletes a book fro the local filestorage, returns True if deleting is successfull, otherwise false +def edit_book_read_status(book_id, read_status=None): + if not config.config_read_column: + book = ub.session.query(ub.ReadBook).filter(and_(ub.ReadBook.user_id == int(current_user.id), + ub.ReadBook.book_id == book_id)).first() + if book: + if read_status is None: + if book.read_status == 
ub.ReadBook.STATUS_FINISHED: + book.read_status = ub.ReadBook.STATUS_UNREAD + else: + book.read_status = ub.ReadBook.STATUS_FINISHED + else: + book.read_status = ub.ReadBook.STATUS_FINISHED if read_status else ub.ReadBook.STATUS_UNREAD + else: + read_book = ub.ReadBook(user_id=current_user.id, book_id=book_id) + read_book.read_status = ub.ReadBook.STATUS_FINISHED + book = read_book + if not book.kobo_reading_state: + kobo_reading_state = ub.KoboReadingState(user_id=current_user.id, book_id=book_id) + kobo_reading_state.current_bookmark = ub.KoboBookmark() + kobo_reading_state.statistics = ub.KoboStatistics() + book.kobo_reading_state = kobo_reading_state + ub.session.merge(book) + ub.session_commit("Book {} readbit toggled".format(book_id)) + else: + try: + calibre_db.update_title_sort(config) + book = calibre_db.get_filtered_book(book_id) + read_status = getattr(book, 'custom_column_' + str(config.config_read_column)) + if len(read_status): + if read_status is None: + read_status[0].value = not read_status[0].value + else: + read_status[0].value = read_status is True + calibre_db.session.commit() + else: + cc_class = db.cc_classes[config.config_read_column] + new_cc = cc_class(value=read_status or 1, book=book_id) + calibre_db.session.add(new_cc) + calibre_db.session.commit() + except (KeyError, AttributeError, IndexError): + log.error( + "Custom Column No.{} is not existing in calibre database".format(config.config_read_column)) + return "Custom Column No.{} is not existing in calibre database".format(config.config_read_column) + except (OperationalError, InvalidRequestError) as ex: + calibre_db.session.rollback() + log.error(u"Read status could not set: {}".format(ex)) + return _("Read status could not set: {}".format(ex.orig)) + return "" + + +# Deletes a book from the local filestorage, returns True if deleting is successful, otherwise false def delete_book_file(book, calibrepath, book_format=None): - # check that path is 2 elements deep, check that target 
path has no subfolders + # check that path is 2 elements deep, check that target path has no sub folders if book.path.count('/') == 1: path = os.path.join(calibrepath, book.path) if book_format: @@ -317,15 +365,15 @@ def delete_book_file(book, calibrepath, book_format=None): id=book.id, path=book.path) shutil.rmtree(path) - except (IOError, OSError) as e: - log.error("Deleting book %s failed: %s", book.id, e) - return False, _("Deleting book %(id)s failed: %(message)s", id=book.id, message=e) + except (IOError, OSError) as ex: + log.error("Deleting book %s failed: %s", book.id, ex) + return False, _("Deleting book %(id)s failed: %(message)s", id=book.id, message=ex) authorpath = os.path.join(calibrepath, os.path.split(book.path)[0]) if not os.listdir(authorpath): try: shutil.rmtree(authorpath) - except (IOError, OSError) as e: - log.error("Deleting authorpath for book %s failed: %s", book.id, e) + except (IOError, OSError) as ex: + log.error("Deleting authorpath for book %s failed: %s", book.id, ex) return True, None log.error("Deleting book %s from database only, book path in database not valid: %s", @@ -335,8 +383,8 @@ def delete_book_file(book, calibrepath, book_format=None): path=book.path) -def clean_author_database(renamed_author, calibrepath, local_book=None): - valid_filename_authors = [get_valid_filename(r) for r in renamed_author] +def clean_author_database(renamed_author, calibre_path="", local_book=None, gdrive=None): + valid_filename_authors = [get_valid_filename(r, chars=96) for r in renamed_author] for r in renamed_author: if local_book: all_books = [local_book] @@ -347,183 +395,191 @@ def clean_author_database(renamed_author, calibrepath, local_book=None): book_author_path = book.path.split('/')[0] if book_author_path in valid_filename_authors or local_book: new_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == r).first() - all_new_authordir = get_valid_filename(new_author.name) + all_new_authordir = 
get_valid_filename(new_author.name, chars=96) all_titledir = book.path.split('/')[1] - all_new_path = os.path.join(calibrepath, all_new_authordir, all_titledir) - all_new_name = get_valid_filename(book.title) + ' - ' + all_new_authordir + all_new_path = os.path.join(calibre_path, all_new_authordir, all_titledir) + all_new_name = get_valid_filename(book.title, chars=42) + ' - ' \ + + get_valid_filename(new_author.name, chars=42) # change location in database to new author/title path book.path = os.path.join(all_new_authordir, all_titledir).replace('\\', '/') for file_format in book.data: - shutil.move(os.path.normcase( - os.path.join(all_new_path, file_format.name + '.' + file_format.format.lower())), - os.path.normcase(os.path.join(all_new_path, all_new_name + '.' + file_format.format.lower()))) + if not gdrive: + shutil.move(os.path.normcase(os.path.join(all_new_path, + file_format.name + '.' + file_format.format.lower())), + os.path.normcase(os.path.join(all_new_path, + all_new_name + '.' + file_format.format.lower()))) + else: + g_file = gd.getFileFromEbooksFolder(all_new_path, + file_format.name + '.' + file_format.format.lower()) + if g_file: + gd.moveGdriveFileRemote(g_file, all_new_name + u'.' + file_format.format.lower()) + gd.updateDatabaseOnEdit(g_file['id'], all_new_name + u'.' + file_format.format.lower()) + else: + log.error("File {} not found on gdrive" + .format(all_new_path, file_format.name + '.' + file_format.format.lower())) file_format.name = all_new_name -# was muss gemacht werden: -# Die Autorennamen müssen separiert werden und von dupletten bereinigt werden. 
-# Es muss geprüft werden: -# - ob es die alten Autoren mit dem letzten Buch verknüpft waren, dann müssen sie gelöscht werden -# - ob es neue Autoren sind, dann müssen sie angelegt werden -> macht modify_database_object -# - ob es bestehende Autoren sind welche umbenannt wurden -> Groß Kleinschreibung, dann muss: -# für jedes Buch und jeder Autor welcher umbenannt wurde: -# - Autorensortierung angepasst werden -# - Pfad im Buch angepasst werden -# - Dateiname in Datatabelle angepasst werden, sowie die Dateien umbenannt werden -# - Dateipfade Autor umbenannt werden -# die letzten Punkte treffen auch zu wenn es sich um einen normalen Autoränderungsvorgang handelt kann man also generell -# behandeln +def rename_all_authors(first_author, renamed_author, calibre_path="", localbook=None, gdrive=False): + # Create new_author_dir from parameter or from database + # Create new title_dir from database and add id + if first_author: + new_authordir = get_valid_filename(first_author, chars=96) + for r in renamed_author: + new_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == r).first() + old_author_dir = get_valid_filename(r, chars=96) + new_author_rename_dir = get_valid_filename(new_author.name, chars=96) + if gdrive: + g_file = gd.getFileFromEbooksFolder(None, old_author_dir) + if g_file: + gd.moveGdriveFolderRemote(g_file, new_author_rename_dir) + else: + if os.path.isdir(os.path.join(calibre_path, old_author_dir)): + try: + old_author_path = os.path.join(calibre_path, old_author_dir) + new_author_path = os.path.join(calibre_path, new_author_rename_dir) + shutil.move(os.path.normcase(old_author_path), os.path.normcase(new_author_path)) + except OSError as ex: + log.error("Rename author from: %s to %s: %s", old_author_path, new_author_path, ex) + log.debug(ex, exc_info=True) + return _("Rename author from: '%(src)s' to '%(dest)s' failed with error: %(error)s", + src=old_author_path, dest=new_author_path, error=str(ex)) + else: + new_authordir = 
get_valid_filename(localbook.authors[0].name, chars=96) + return new_authordir + # Moves files in file storage during author/title rename, or from temp dir to file storage -def update_dir_structure_file(book_id, calibrepath, first_author, orignal_filepath, db_filename, renamed_author): +def update_dir_structure_file(book_id, calibre_path, first_author, original_filepath, db_filename, renamed_author): # get book database entry from id, if original path overwrite source with original_filepath - localbook = calibre_db.get_book(book_id) - if orignal_filepath: - path = orignal_filepath + local_book = calibre_db.get_book(book_id) + if original_filepath: + path = original_filepath else: - path = os.path.join(calibrepath, localbook.path) + path = os.path.join(calibre_path, local_book.path) - # Create (current) authordir and titledir from database - authordir = localbook.path.split('/')[0] - titledir = localbook.path.split('/')[1] + # Create (current) author_dir and title_dir from database + author_dir = local_book.path.split('/')[0] + title_dir = local_book.path.split('/')[1] - # Create new_authordir from parameter or from database - # Create new titledir from database and add id + # Create new_author_dir from parameter or from database + # Create new title_dir from database and add id + new_author_dir = rename_all_authors(first_author, renamed_author, calibre_path, local_book) if first_author: - new_authordir = get_valid_filename(first_author) - for r in renamed_author: - new_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == r).first() - old_author_dir = get_valid_filename(r) - new_author_rename_dir = get_valid_filename(new_author.name) - if os.path.isdir(os.path.join(calibrepath, old_author_dir)): - try: - old_author_path = os.path.join(calibrepath, old_author_dir) - new_author_path = os.path.join(calibrepath, new_author_rename_dir) - shutil.move(os.path.normcase(old_author_path), os.path.normcase(new_author_path)) - except (OSError) as ex: - 
log.error("Rename author from: %s to %s: %s", old_author_path, new_author_path, ex) - log.debug(ex, exc_info=True) - return _("Rename author from: '%(src)s' to '%(dest)s' failed with error: %(error)s", - src=old_author_path, dest=new_author_path, error=str(ex)) - else: - new_authordir = get_valid_filename(localbook.authors[0].name) - new_titledir = get_valid_filename(localbook.title) + " (" + str(book_id) + ")" + if first_author.lower() in [r.lower() for r in renamed_author]: + if os.path.isdir(os.path.join(calibre_path, new_author_dir)): + path = os.path.join(calibre_path, new_author_dir, title_dir) + + new_title_dir = get_valid_filename(local_book.title, chars=96) + " (" + str(book_id) + ")" + + if title_dir != new_title_dir or author_dir != new_author_dir or original_filepath: + error = move_files_on_change(calibre_path, + new_author_dir, + new_title_dir, + local_book, + db_filename, + original_filepath, + path) + if error: + return error + + # Rename all files from old names to new names + return rename_files_on_change(first_author, renamed_author, local_book, original_filepath, path, calibre_path) + + +def upload_new_file_gdrive(book_id, first_author, renamed_author, title, title_dir, original_filepath, filename_ext): + book = calibre_db.get_book(book_id) + file_name = get_valid_filename(title, chars=42) + ' - ' + \ + get_valid_filename(first_author, chars=42) + filename_ext + rename_all_authors(first_author, renamed_author, gdrive=True) + gdrive_path = os.path.join(get_valid_filename(first_author, chars=96), + title_dir + " (" + str(book_id) + ")") + book.path = gdrive_path.replace("\\", "/") + gd.uploadFileToEbooksFolder(os.path.join(gdrive_path, file_name).replace("\\", "/"), original_filepath) + return rename_files_on_change(first_author, renamed_author, local_book=book, gdrive=True) - if titledir != new_titledir or authordir != new_authordir or orignal_filepath: - new_path = os.path.join(calibrepath, new_authordir, new_titledir) - new_name = 
get_valid_filename(localbook.title) + ' - ' + new_authordir - try: - if orignal_filepath: - if not os.path.isdir(new_path): - os.makedirs(new_path) - shutil.move(os.path.normcase(path), os.path.normcase(os.path.join(new_path, db_filename))) - log.debug("Moving title: %s to %s/%s", path, new_path, new_name) - else: - # Check new path is not valid path - if not os.path.exists(new_path): - # move original path to new path - log.debug("Moving title: %s to %s", path, new_path) - shutil.move(os.path.normcase(path), os.path.normcase(new_path)) - else: # path is valid copy only files to new location (merge) - log.info("Moving title: %s into existing: %s", path, new_path) - # Take all files and subfolder from old path (strange command) - for dir_name, __, file_list in os.walk(path): - for file in file_list: - shutil.move(os.path.normcase(os.path.join(dir_name, file)), - os.path.normcase(os.path.join(new_path + dir_name[len(path):], file))) - # change location in database to new author/title path - localbook.path = os.path.join(new_authordir, new_titledir).replace('\\','/') - except (OSError) as ex: - log.error("Rename title from: %s to %s: %s", path, new_path, ex) - log.debug(ex, exc_info=True) - return _("Rename title from: '%(src)s' to '%(dest)s' failed with error: %(error)s", - src=path, dest=new_path, error=str(ex)) - # Rename all files from old names to new names - try: - clean_author_database(renamed_author, calibrepath) - if first_author not in renamed_author: - clean_author_database([first_author], calibrepath, localbook) - if not renamed_author and not orignal_filepath and len(os.listdir(os.path.dirname(path))) == 0: - shutil.rmtree(os.path.dirname(path)) - except (OSError, FileNotFoundError) as ex: - log.error("Error in rename file in path %s", ex) - log.debug(ex, exc_info=True) - return _("Error in rename file in path: %(error)s", error=str(ex)) - return False def update_dir_structure_gdrive(book_id, first_author, renamed_author): - error = False book = 
calibre_db.get_book(book_id) - path = book.path authordir = book.path.split('/')[0] - if first_author: - new_authordir = get_valid_filename(first_author) - for r in renamed_author: - # Todo: Rename all authors on gdrive - new_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == r).first() - old_author_dir = get_valid_filename(r) - new_author_rename_dir = get_valid_filename(new_author.name) - '''if os.path.isdir(os.path.join(calibrepath, old_author_dir)): - try: - old_author_path = os.path.join(calibrepath, old_author_dir) - new_author_path = os.path.join(calibrepath, new_author_rename_dir) - shutil.move(os.path.normcase(old_author_path), os.path.normcase(new_author_path)) - except (OSError) as ex: - log.error("Rename author from: %s to %s: %s", old_author_path, new_author_path, ex) - log.debug(ex, exc_info=True) - return _("Rename author from: '%(src)s' to '%(dest)s' failed with error: %(error)s", - src=old_author_path, dest=new_author_path, error=str(ex))''' - else: - new_authordir = get_valid_filename(book.authors[0].name) - titledir = book.path.split('/')[1] - new_titledir = get_valid_filename(book.title) + u" (" + str(book_id) + u")" + new_authordir = rename_all_authors(first_author, renamed_author, gdrive=True) + new_titledir = get_valid_filename(book.title, chars=96) + u" (" + str(book_id) + u")" if titledir != new_titledir: - gFile = gd.getFileFromEbooksFolder(os.path.dirname(book.path), titledir) - if gFile: - gFile['title'] = new_titledir - gFile.Upload() + g_file = gd.getFileFromEbooksFolder(os.path.dirname(book.path), titledir) + if g_file: + gd.moveGdriveFileRemote(g_file, new_titledir) book.path = book.path.split('/')[0] + u'/' + new_titledir - path = book.path - gd.updateDatabaseOnEdit(gFile['id'], book.path) # only child folder affected + gd.updateDatabaseOnEdit(g_file['id'], book.path) # only child folder affected else: - error = _(u'File %(file)s not found on Google Drive', file=book.path) # file not found + return _(u'File 
%(file)s not found on Google Drive', file=book.path) # file not found - if authordir != new_authordir: - gFile = gd.getFileFromEbooksFolder(os.path.dirname(book.path), new_titledir) - if gFile: - gd.moveGdriveFolderRemote(gFile, new_authordir) + if authordir != new_authordir and authordir not in renamed_author: + g_file = gd.getFileFromEbooksFolder(os.path.dirname(book.path), new_titledir) + if g_file: + gd.moveGdriveFolderRemote(g_file, new_authordir) book.path = new_authordir + u'/' + book.path.split('/')[1] - path = book.path - gd.updateDatabaseOnEdit(gFile['id'], book.path) + gd.updateDatabaseOnEdit(g_file['id'], book.path) else: - error = _(u'File %(file)s not found on Google Drive', file=authordir) # file not found - # Todo: Rename all authors on gdrive - # Rename all files from old names to new names - ''' + return _(u'File %(file)s not found on Google Drive', file=authordir) # file not found + + # change location in database to new author/title path + book.path = os.path.join(new_authordir, new_titledir).replace('\\', '/') + return rename_files_on_change(first_author, renamed_author, book, gdrive=True) + + +def move_files_on_change(calibre_path, new_authordir, new_titledir, localbook, db_filename, original_filepath, path): + new_path = os.path.join(calibre_path, new_authordir, new_titledir) + new_name = get_valid_filename(localbook.title, chars=96) + ' - ' + new_authordir try: - clean_author_database(renamed_author, calibrepath) - if first_author not in renamed_author: - clean_author_database([first_author], calibrepath, localbook) - if not renamed_author and not orignal_filepath and len(os.listdir(os.path.dirname(path))) == 0: + if original_filepath: + if not os.path.isdir(new_path): + os.makedirs(new_path) + shutil.move(os.path.normcase(original_filepath), os.path.normcase(os.path.join(new_path, db_filename))) + log.debug("Moving title: %s to %s/%s", original_filepath, new_path, new_name) + else: + # Check new path is not valid path + if not 
os.path.exists(new_path): + # move original path to new path + log.debug("Moving title: %s to %s", path, new_path) + shutil.move(os.path.normcase(path), os.path.normcase(new_path)) + else: # path is valid copy only files to new location (merge) + log.info("Moving title: %s into existing: %s", path, new_path) + # Take all files and subfolder from old path (strange command) + for dir_name, __, file_list in os.walk(path): + for file in file_list: + shutil.move(os.path.normcase(os.path.join(dir_name, file)), + os.path.normcase(os.path.join(new_path + dir_name[len(path):], file))) + # change location in database to new author/title path + localbook.path = os.path.join(new_authordir, new_titledir).replace('\\', '/') + except OSError as ex: + log.error_or_exception("Rename title from {} to {} failed with error: {}".format(path, new_path, ex)) + return _("Rename title from: '%(src)s' to '%(dest)s' failed with error: %(error)s", + src=path, dest=new_path, error=str(ex)) + return False + + +def rename_files_on_change(first_author, + renamed_author, + local_book, + original_filepath="", + path="", + calibre_path="", + gdrive=False): + # Rename all files from old names to new names + try: + clean_author_database(renamed_author, calibre_path, gdrive=gdrive) + if first_author and first_author not in renamed_author: + clean_author_database([first_author], calibre_path, local_book, gdrive) + if not gdrive and not renamed_author and not original_filepath and len(os.listdir(os.path.dirname(path))) == 0: shutil.rmtree(os.path.dirname(path)) except (OSError, FileNotFoundError) as ex: - log.error("Error in rename file in path %s", ex) - log.debug(ex, exc_info=True) - return _("Error in rename file in path: %(error)s", error=str(ex))''' - if authordir != new_authordir or titledir != new_titledir: - new_name = get_valid_filename(book.title) + u' - ' + get_valid_filename(new_authordir) - for file_format in book.data: - gFile = gd.getFileFromEbooksFolder(path, file_format.name + u'.' 
+ file_format.format.lower()) - if not gFile: - error = _(u'File %(file)s not found on Google Drive', file=file_format.name) # file not found - break - gd.moveGdriveFileRemote(gFile, new_name + u'.' + file_format.format.lower()) - file_format.name = new_name - return error + log.error_or_exception("Error in rename file in path {}".format(ex)) + return _("Error in rename file in path: {}".format(str(ex))) + return False def delete_book_gdrive(book, book_format): @@ -533,12 +589,12 @@ def delete_book_gdrive(book, book_format): for entry in book.data: if entry.format.upper() == book_format: name = entry.name + '.' + book_format - gFile = gd.getFileFromEbooksFolder(book.path, name) + g_file = gd.getFileFromEbooksFolder(book.path, name) else: - gFile = gd.getFileFromEbooksFolder(os.path.dirname(book.path), book.path.split('/')[1]) - if gFile: - gd.deleteDatabaseEntry(gFile['id']) - gFile.Trash() + g_file = gd.getFileFromEbooksFolder(os.path.dirname(book.path), book.path.split('/')[1]) + if g_file: + gd.deleteDatabaseEntry(g_file['id']) + g_file.Trash() else: error = _(u'Book path %(path)s not found on Google Drive', path=book.path) # file not found @@ -570,12 +626,13 @@ def generate_random_password(): def uniq(inpt): output = [] - inpt = [ " ".join(inp.split()) for inp in inpt] + inpt = [" ".join(inp.split()) for inp in inpt] for x in inpt: if x not in output: output.append(x) return output + def check_email(email): email = valid_email(email) if ub.session.query(ub.User).filter(func.lower(ub.User.email) == email.lower()).first(): @@ -588,7 +645,7 @@ def check_username(username): username = username.strip() if ub.session.query(ub.User).filter(func.lower(ub.User.name) == username.lower()).scalar(): log.error(u"This username is already taken") - raise Exception (_(u"This username is already taken")) + raise Exception(_(u"This username is already taken")) return username @@ -605,22 +662,25 @@ def valid_email(email): def update_dir_structure(book_id, - calibrepath, - 
first_author=None, - orignal_filepath=None, - db_filename=None, - renamed_author=False): + calibre_path, + first_author=None, # change author of book to this author + original_filepath=None, + db_filename=None, + renamed_author=None): + renamed_author = renamed_author or [] if config.config_use_google_drive: return update_dir_structure_gdrive(book_id, first_author, renamed_author) else: return update_dir_structure_file(book_id, - calibrepath, + calibre_path, first_author, - orignal_filepath, + original_filepath, db_filename, renamed_author) def delete_book(book, calibrepath, book_format): + if not book_format: + clear_cover_thumbnail_cache(book.id) if config.config_use_google_drive: return delete_book_gdrive(book, book_format) else: @@ -629,24 +689,38 @@ def delete_book(book, calibrepath, book_format): def get_cover_on_failure(use_generic_cover): if use_generic_cover: - return send_from_directory(_STATIC_DIR, "generic_cover.jpg") - else: - return None + try: + return send_from_directory(_STATIC_DIR, "generic_cover.jpg") + except PermissionError: + log.error("No permission to access generic_cover.jpg file.") + abort(403) + abort(404) -def get_book_cover(book_id): +def get_book_cover(book_id, resolution=None): book = calibre_db.get_filtered_book(book_id, allow_show_archived=True) - return get_book_cover_internal(book, use_generic_cover_on_failure=True) + return get_book_cover_internal(book, use_generic_cover_on_failure=True, resolution=resolution) -def get_book_cover_with_uuid(book_uuid, - use_generic_cover_on_failure=True): +# Called only by kobo sync -> cover not found should be answered with 404 and not with default cover +def get_book_cover_with_uuid(book_uuid, resolution=None): book = calibre_db.get_book_by_uuid(book_uuid) - return get_book_cover_internal(book, use_generic_cover_on_failure) + return get_book_cover_internal(book, use_generic_cover_on_failure=False, resolution=resolution) -def get_book_cover_internal(book, 
use_generic_cover_on_failure): +def get_book_cover_internal(book, use_generic_cover_on_failure, resolution=None): if book and book.has_cover: + + # Send the book cover thumbnail if it exists in cache + if resolution: + thumbnail = get_book_cover_thumbnail(book, resolution) + if thumbnail: + cache = fs.FileSystem() + if cache.get_cache_file_exists(thumbnail.filename, CACHE_TYPE_THUMBNAILS): + return send_from_directory(cache.get_cache_file_dir(thumbnail.filename, CACHE_TYPE_THUMBNAILS), + thumbnail.filename) + + # Send the book cover from Google Drive if configured if config.config_use_google_drive: try: if not gd.is_gdrive_ready(): @@ -655,11 +729,13 @@ def get_book_cover_internal(book, use_generic_cover_on_failure): if path: return redirect(path) else: - log.error('%s/cover.jpg not found on Google Drive', book.path) + log.error('{}/cover.jpg not found on Google Drive'.format(book.path)) return get_cover_on_failure(use_generic_cover_on_failure) except Exception as ex: - log.debug_or_exception(ex) + log.error_or_exception(ex) return get_cover_on_failure(use_generic_cover_on_failure) + + # Send the book cover from the Calibre directory else: cover_file_path = os.path.join(config.config_calibre_dir, book.path) if os.path.isfile(os.path.join(cover_file_path, "cover.jpg")): @@ -670,20 +746,82 @@ def get_book_cover_internal(book, use_generic_cover_on_failure): return get_cover_on_failure(use_generic_cover_on_failure) +def get_book_cover_thumbnail(book, resolution): + if book and book.has_cover: + return ub.session \ + .query(ub.Thumbnail) \ + .filter(ub.Thumbnail.type == THUMBNAIL_TYPE_COVER) \ + .filter(ub.Thumbnail.entity_id == book.id) \ + .filter(ub.Thumbnail.resolution == resolution) \ + .filter(or_(ub.Thumbnail.expiration.is_(None), ub.Thumbnail.expiration > datetime.utcnow())) \ + .first() + + +def get_series_thumbnail_on_failure(series_id, resolution): + book = calibre_db.session \ + .query(db.Books) \ + .join(db.books_series_link) \ + .join(db.Series) \ + 
.filter(db.Series.id == series_id) \ + .filter(db.Books.has_cover == 1) \ + .first() + + return get_book_cover_internal(book, use_generic_cover_on_failure=True, resolution=resolution) + + +def get_series_cover_thumbnail(series_id, resolution=None): + return get_series_cover_internal(series_id, resolution) + + +def get_series_cover_internal(series_id, resolution=None): + # Send the series thumbnail if it exists in cache + if resolution: + thumbnail = get_series_thumbnail(series_id, resolution) + if thumbnail: + cache = fs.FileSystem() + if cache.get_cache_file_exists(thumbnail.filename, CACHE_TYPE_THUMBNAILS): + return send_from_directory(cache.get_cache_file_dir(thumbnail.filename, CACHE_TYPE_THUMBNAILS), + thumbnail.filename) + + return get_series_thumbnail_on_failure(series_id, resolution) + + +def get_series_thumbnail(series_id, resolution): + return ub.session \ + .query(ub.Thumbnail) \ + .filter(ub.Thumbnail.type == THUMBNAIL_TYPE_SERIES) \ + .filter(ub.Thumbnail.entity_id == series_id) \ + .filter(ub.Thumbnail.resolution == resolution) \ + .filter(or_(ub.Thumbnail.expiration.is_(None), ub.Thumbnail.expiration > datetime.utcnow())) \ + .first() + + # saves book cover from url def save_cover_from_url(url, book_path): try: - img = requests.get(url, timeout=(10, 200)) # ToDo: Error Handling + if cli_param.allow_localhost: + img = requests.get(url, timeout=(10, 200), allow_redirects=False) # ToDo: Error Handling + elif use_advocate: + img = advocate.get(url, timeout=(10, 200), allow_redirects=False) # ToDo: Error Handling + else: + log.error("python module advocate is not installed but is needed") + return False, _("Python module 'advocate' is not installed but is needed for cover downloads") img.raise_for_status() return save_cover(img, book_path) - except (requests.exceptions.HTTPError, + except (socket.gaierror, + requests.exceptions.HTTPError, + requests.exceptions.InvalidURL, requests.exceptions.ConnectionError, requests.exceptions.Timeout) as ex: - 
log.info(u'Cover Download Error %s', ex) + # "Invalid host" can be the result of a redirect response + log.error(u'Cover Download Error %s', ex) return False, _("Error Downloading Cover") except MissingDelegateError as ex: log.info(u'File Format Error %s', ex) return False, _("Cover Format Error") + except UnacceptableAddressException as e: + log.error("Localhost or local network was accessed for cover upload") + return False, _("You are not allowed to access localhost or the local network for cover uploads") def save_cover_from_filestorage(filepath, saved_filename, img): @@ -718,24 +856,23 @@ def save_cover(img, book_path): content_type = img.headers.get('content-type') if use_IM: - if content_type not in ('image/jpeg', 'image/png', 'image/webp', 'image/bmp'): + if content_type not in ('image/jpeg', 'image/jpg', 'image/png', 'image/webp', 'image/bmp'): log.error("Only jpg/jpeg/png/webp/bmp files are supported as coverfile") return False, _("Only jpg/jpeg/png/webp/bmp files are supported as coverfile") # convert to jpg because calibre only supports jpg - if content_type != 'image/jpg': - try: - if hasattr(img, 'stream'): - imgc = Image(blob=img.stream) - else: - imgc = Image(blob=io.BytesIO(img.content)) - imgc.format = 'jpeg' - imgc.transform_colorspace("rgb") - img = imgc - except (BlobError, MissingDelegateError): - log.error("Invalid cover file content") - return False, _("Invalid cover file content") + try: + if hasattr(img, 'stream'): + imgc = Image(blob=img.stream) + else: + imgc = Image(blob=io.BytesIO(img.content)) + imgc.format = 'jpeg' + imgc.transform_colorspace("rgb") + img = imgc + except (BlobError, MissingDelegateError): + log.error("Invalid cover file content") + return False, _("Invalid cover file content") else: - if content_type not in 'image/jpeg': + if content_type not in ['image/jpeg', 'image/jpg']: log.error("Only jpg/jpeg files are supported as coverfile") return False, _("Only jpg/jpeg files are supported as coverfile") @@ -746,7 +883,7 @@ 
def save_cover(img, book_path): os.mkdir(tmp_dir) ret, message = save_cover_from_filestorage(tmp_dir, "uploaded_cover.jpg", img) if ret is True: - gd.uploadFileToEbooksFolder(os.path.join(book_path, 'cover.jpg').replace("\\","/"), + gd.uploadFileToEbooksFolder(os.path.join(book_path, 'cover.jpg').replace("\\", "/"), os.path.join(tmp_dir, "uploaded_cover.jpg")) log.info("Cover is saved on Google Drive") return True, None @@ -758,9 +895,9 @@ def save_cover(img, book_path): def do_download_file(book, book_format, client, data, headers): if config.config_use_google_drive: - startTime = time.time() + # startTime = time.time() df = gd.getFileFromEbooksFolder(book.path, data.name + "." + book_format) - log.debug('%s', time.time() - startTime) + # log.debug('%s', time.time() - startTime) if df: return gd.do_gdrive_download(df, headers) else: @@ -784,22 +921,22 @@ def do_download_file(book, book_format, client, data, headers): ################################## -def check_unrar(unrarLocation): - if not unrarLocation: +def check_unrar(unrar_location): + if not unrar_location: return - if not os.path.exists(unrarLocation): + if not os.path.exists(unrar_location): return _('Unrar binary file not found') try: - unrarLocation = [unrarLocation] - value = process_wait(unrarLocation, pattern='UNRAR (.*) freeware') + unrar_location = [unrar_location] + value = process_wait(unrar_location, pattern='UNRAR (.*) freeware') if value: version = value.group(1) log.debug("unrar version %s", version) except (OSError, UnicodeDecodeError) as err: - log.debug_or_exception(err) + log.error_or_exception(err) return _('Error excecuting UnRar') @@ -818,54 +955,6 @@ def json_serial(obj): raise TypeError("Type %s not serializable" % type(obj)) -# helper function for displaying the runtime of tasks -def format_runtime(runtime): - retVal = "" - if runtime.days: - retVal = format_unit(runtime.days, 'duration-day', length="long", locale=get_locale()) + ', ' - mins, seconds = divmod(runtime.seconds, 60) - 
hours, minutes = divmod(mins, 60) - # ToDo: locale.number_symbols._data['timeSeparator'] -> localize time separator ? - if hours: - retVal += '{:d}:{:02d}:{:02d}s'.format(hours, minutes, seconds) - elif minutes: - retVal += '{:2d}:{:02d}s'.format(minutes, seconds) - else: - retVal += '{:2d}s'.format(seconds) - return retVal - - -# helper function to apply localize status information in tasklist entries -def render_task_status(tasklist): - renderedtasklist = list() - for __, user, __, task in tasklist: - if user == current_user.name or current_user.role_admin(): - ret = {} - if task.start_time: - ret['starttime'] = format_datetime(task.start_time, format='short', locale=get_locale()) - ret['runtime'] = format_runtime(task.runtime) - - # localize the task status - if isinstance(task.stat, int): - if task.stat == STAT_WAITING: - ret['status'] = _(u'Waiting') - elif task.stat == STAT_FAIL: - ret['status'] = _(u'Failed') - elif task.stat == STAT_STARTED: - ret['status'] = _(u'Started') - elif task.stat == STAT_FINISH_SUCCESS: - ret['status'] = _(u'Finished') - else: - ret['status'] = _(u'Unknown Status') - - ret['taskMessage'] = "{}: {}".format(_(task.name), task.message) - ret['progress'] = "{} %".format(int(task.progress * 100)) - ret['user'] = escape(user) # prevent xss - renderedtasklist.append(ret) - - return renderedtasklist - - def tags_filters(): negtags_list = current_user.list_denied_tags() postags_list = current_user.list_allowed_tags() @@ -888,27 +977,10 @@ def check_valid_domain(domain_text): return not len(result) -def get_cc_columns(filter_config_custom_read=False): - tmpcc = calibre_db.session.query(db.Custom_Columns)\ - .filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all() - cc = [] - r = None - if config.config_columns_to_ignore: - r = re.compile(config.config_columns_to_ignore) - - for col in tmpcc: - if filter_config_custom_read and config.config_read_column and config.config_read_column == col.id: - continue - if r and 
r.match(col.name): - continue - cc.append(col) - - return cc - - def get_download_link(book_id, book_format, client): book_format = book_format.split(".")[0] book = calibre_db.get_filtered_book(book_id, allow_show_archived=True) + data1= "" if book: data1 = calibre_db.get_book_format(book.id, book_format.upper()) else: @@ -929,3 +1001,28 @@ def get_download_link(book_id, book_format, client): return do_download_file(book, book_format, client, data1, headers) else: abort(404) + + +def clear_cover_thumbnail_cache(book_id): + if config.schedule_generate_book_covers: + WorkerThread.add(None, TaskClearCoverThumbnailCache(book_id), hidden=True) + + +def replace_cover_thumbnail_cache(book_id): + if config.schedule_generate_book_covers: + WorkerThread.add(None, TaskClearCoverThumbnailCache(book_id), hidden=True) + WorkerThread.add(None, TaskGenerateCoverThumbnails(book_id), hidden=True) + + +def delete_thumbnail_cache(): + WorkerThread.add(None, TaskClearCoverThumbnailCache(-1)) + + +def add_book_to_thumbnail_cache(book_id): + if config.schedule_generate_book_covers: + WorkerThread.add(None, TaskGenerateCoverThumbnails(book_id), hidden=True) + + +def update_thumbnail_cache(): + if config.schedule_generate_book_covers: + WorkerThread.add(None, TaskGenerateCoverThumbnails()) diff --git a/cps/isoLanguages.py b/cps/isoLanguages.py index 50447aca..31e3dade 100644 --- a/cps/isoLanguages.py +++ b/cps/isoLanguages.py @@ -49,7 +49,7 @@ except ImportError: def get_language_names(locale): - return _LANGUAGE_NAMES.get(locale) + return _LANGUAGE_NAMES.get(str(locale)) def get_language_name(locale, lang_code): diff --git a/cps/iso_language_names.py b/cps/iso_language_names.py index c6267ffd..f86e5612 100644 --- a/cps/iso_language_names.py +++ b/cps/iso_language_names.py @@ -102,6 +102,7 @@ LANGUAGE_NAMES = { "div": "Dhivehi", "doi": "Dogri (macrolanguage)", "dsb": "Sorbian; Lower", + "dse": "holandský znakový jazyk", "dua": "dualština", "dum": "Dutch; Middle (ca. 
1050-1350)", "dyu": "djula", @@ -526,6 +527,7 @@ LANGUAGE_NAMES = { "div": "Dhivehi", "doi": "Dogri (Makrosprache)", "dsb": "Sorbisch; Nieder", + "dse": "Niederländische Zeichensprache", "dua": "Duala", "dum": "Niederländisch; Mittel (ca. 1050-1350)", "dyu": "Dyula", @@ -945,6 +947,7 @@ LANGUAGE_NAMES = { "dgr": "Dogrib", "dua": "Duala", "nld": "Ολλανδικά", + "dse": "Ολλανδική νοηματική γλώσσα", "dyu": "Dyula", "dzo": "Dzongkha", "efi": "Efik", @@ -1329,6 +1332,7 @@ LANGUAGE_NAMES = { "div": "Dhivehi", "doi": "Dogri (macrolengua)", "dsb": "Bajo sorabo", + "dse": "Lengua de signos neerlandesa", "dua": "Duala", "dum": "Neerlandés medio (ca. 1050-1350)", "dyu": "Diula", @@ -1753,6 +1757,7 @@ LANGUAGE_NAMES = { "div": "Dhivehi", "doi": "Dogri (macrolanguage)", "dsb": "alasorbi", + "dse": "Dutch Sign Language", "dua": "duala", "dum": "Dutch; Middle (ca. 1050-1350)", "dyu": "dyula", @@ -2177,6 +2182,7 @@ LANGUAGE_NAMES = { "div": "dhivehi", "doi": "dogri (macrolangue)", "dsb": "bas-sorbien", + "dse": "langue des signes néerlandaise", "dua": "duala", "dum": "néerlandais moyen (environ 1050-1350)", "dyu": "dioula", @@ -2601,6 +2607,7 @@ LANGUAGE_NAMES = { "div": "Dhivehi", "doi": "Dogri (macrolanguage)", "dsb": "Sorbian; Lower", + "dse": "Dutch Sign Language", "dua": "duala", "dum": "Dutch; Middle (ca. 1050-1350)", "dyu": "djula", @@ -3025,6 +3032,7 @@ LANGUAGE_NAMES = { "div": "Dhivehi", "doi": "Dogri (macrolingua)", "dsb": "Lusaziano inferiore", + "dse": "Olandense (linguaggio dei segni)", "dua": "Duala", "dum": "Olandese medio (ca. 1050-1350)", "dyu": "Diula", @@ -3449,6 +3457,7 @@ LANGUAGE_NAMES = { "div": "Dhivehi", "doi": "Dogri (macrolanguage)", "dsb": "Sorbian; Lower", + "dse": "Dutch Sign Language", "dua": "ドゥアラ語", "dum": "Dutch; Middle (ca. 1050-1350)", "dyu": "デュラ語", @@ -3873,6 +3882,7 @@ LANGUAGE_NAMES = { "div": "Dhivehi", "doi": "Dogri (macrolanguage)", "dsb": "Sorbian; Lower", + "dse": "Dutch Sign Language", "dua": "Duala", "dum": "Dutch; Middle (ca. 
1050-1350)", "dyu": "Dyula", @@ -4207,6 +4217,384 @@ LANGUAGE_NAMES = { "zxx": "No linguistic content", "zza": "Zaza" }, + "ko": { + "abk": "압하스어", + "ace": "아체어", + "ach": "아촐리어", + "ada": "Adangme", + "ady": "Adyghe", + "aar": "아파르어", + "afh": "Afrihili", + "afr": "아프리칸스어", + "ain": "Ainu (Japan)", + "aka": "Akan", + "akk": "Akkadian", + "sqi": "Albanian", + "ale": "Aleut", + "amh": "Amharic", + "anp": "Angika", + "ara": "아라비아어", + "arg": "Aragonese", + "arp": "Arapaho", + "arw": "Arawak", + "hye": "아르메니아어", + "asm": "Assamese", + "ast": "Asturian", + "ava": "Avaric", + "ave": "아베스타어", + "awa": "Awadhi", + "aym": "Aymara", + "aze": "Azerbaijani", + "ban": "발리 문자", + "bal": "Baluchi", + "bam": "Bambara", + "bas": "Basa (Cameroon)", + "bak": "Bashkir", + "eus": "바스크어", + "bej": "Beja", + "bel": "벨로루시어", + "bem": "Bemba (Zambia)", + "ben": "벵골 문자", + "bit": "Berinomo", + "bho": "Bhojpuri", + "bik": "Bikol", + "byn": "Bilin", + "bin": "Bini", + "bis": "Bislama", + "zbl": "Blissymbols", + "bos": "Bosnian", + "bra": "Braj", + "bre": "Breton", + "bug": "부기 문자", + "bul": "불가리아어", + "bua": "Buriat", + "mya": "Burmese", + "cad": "Caddo", + "cat": "카탈로니아어", + "ceb": "Cebuano", + "chg": "Chagatai", + "cha": "Chamorro", + "che": "Chechen", + "chr": "체로키 문자", + "chy": "Cheyenne", + "chb": "Chibcha", + "zho": "중국어", + "chn": "Chinook jargon", + "chp": "Chipewyan", + "cho": "Choctaw", + "cht": "Cholón", + "chk": "Chuukese", + "chv": "Chuvash", + "cop": "콥트어", + "cor": "Cornish", + "cos": "Corsican", + "cre": "Cree", + "mus": "Creek", + "hrv": "크로아티아어", + "ces": "체크어", + "dak": "Dakota", + "dan": "덴마크어", + "dar": "Dargwa", + "del": "Delaware", + "div": "Dhivehi", + "din": "Dinka", + "doi": "Dogri (macrolanguage)", + "dgr": "Dogrib", + "dua": "Duala", + "nld": "네덜란드어", + "dse": "Dutch Sign Language", + "dyu": "Dyula", + "dzo": "Dzongkha", + "efi": "Efik", + "egy": "Egyptian (Ancient)", + "eka": "Ekajuk", + "elx": "Elamite", + "eng": "영어", + "enu": "Enu", + "myv": "Erzya", + "epo": 
"에스페란토어", + "est": "에스토니아어", + "ewe": "Ewe", + "ewo": "Ewondo", + "fan": "Fang (Equatorial Guinea)", + "fat": "Fanti", + "fao": "페로스어", + "fij": "Fijian", + "fil": "Filipino", + "fin": "핀란드어", + "fon": "Fon", + "fra": "프랑스어", + "fur": "Friulian", + "ful": "Fulah", + "gaa": "Ga", + "glg": "Galician", + "lug": "Ganda", + "gay": "Gayo", + "gba": "Gbaya (Central African Republic)", + "hmj": "Ge", + "gez": "Geez", + "kat": "그루지야어", + "deu": "독일어", + "gil": "Gilbertese", + "gon": "Gondi", + "gor": "Gorontalo", + "got": "고트어", + "grb": "Grebo", + "grn": "Guarani", + "guj": "구자라트 문자", + "gwi": "Gwichʼin", + "hai": "Haida", + "hau": "Hausa", + "haw": "Hawaiian", + "heb": "헤브루어", + "her": "Herero", + "hil": "Hiligaynon", + "hin": "Hindi", + "hmo": "Hiri Motu", + "hit": "Hittite", + "hmn": "Hmong", + "hun": "헝가리어", + "hup": "Hupa", + "iba": "Iban", + "isl": "아이슬란드어", + "ido": "Ido", + "ibo": "Igbo", + "ilo": "Iloko", + "ind": "인도네시아어", + "inh": "Ingush", + "ina": "Interlingua (International Auxiliary Language Association)", + "ile": "Interlingue", + "iku": "Inuktitut", + "ipk": "Inupiaq", + "gle": "아일랜드어", + "ita": "이탈리아어", + "jpn": "일본어", + "jav": "Javanese", + "jrb": "Judeo-Arabic", + "jpr": "Judeo-Persian", + "kbd": "Kabardian", + "kab": "Kabyle", + "kac": "Kachin", + "kal": "Kalaallisut", + "xal": "Kalmyk", + "kam": "Kamba (Kenya)", + "kan": " 칸나다 문자", + "kau": "Kanuri", + "kaa": "Kara-Kalpak", + "krc": "Karachay-Balkar", + "krl": "Karelian", + "kas": "Kashmiri", + "csb": "Kashubian", + "kaw": "Kawi", + "kaz": "Kazakh", + "kha": "Khasi", + "kho": "Khotanese", + "kik": "Kikuyu", + "kmb": "Kimbundu", + "kin": "Kinyarwanda", + "kir": "Kirghiz", + "tlh": "Klingon", + "kom": "Komi", + "kon": "Kongo", + "kok": "Konkani (macrolanguage)", + "kor": "한국어", + "kos": "Kosraean", + "kpe": "Kpelle", + "kua": "Kuanyama", + "kum": "Kumyk", + "kur": "Kurdish", + "kru": "Kurukh", + "kut": "Kutenai", + "lad": "Ladino", + "lah": "Lahnda", + "lam": "Lamba", + "lao": "라오 문자", + "lat": "Latin", 
+ "lav": "라트비아어", + "lez": "Lezghian", + "lim": "Limburgan", + "lin": "Lingala", + "lit": "리투아니아어", + "jbo": "Lojban", + "loz": "Lozi", + "lub": "Luba-Katanga", + "lua": "Luba-Lulua", + "lui": "Luiseno", + "smj": "Lule Sami", + "lun": "Lunda", + "luo": "Luo (Kenya and Tanzania)", + "lus": "Lushai", + "ltz": "Luxembourgish", + "mkd": "마케도니아어", + "mad": "Madurese", + "mag": "Magahi", + "mai": "Maithili", + "mak": "Makasar", + "mlg": "Malagasy", + "msa": "Malay (macrolanguage)", + "mal": "말라얄람 문자", + "mlt": "Maltese", + "mnc": "Manchu", + "mdr": "Mandar", + "man": "Mandingo", + "mni": "Manipuri", + "glv": "Manx", + "mri": "Maori", + "arn": "Mapudungun", + "mar": "Marathi", + "chm": "Mari (Russia)", + "mah": "Marshallese", + "mwr": "Marwari", + "mas": "Masai", + "men": "Mende (Sierra Leone)", + "mic": "Mi'kmaq", + "min": "Minangkabau", + "mwl": "Mirandese", + "moh": "Mohawk", + "mdf": "Moksha", + "lol": "Mongo", + "mon": "몽골 문자", + "mos": "Mossi", + "mul": "Multiple languages", + "nqo": "응코 문자", + "nau": "나우루어", + "nav": "나바호어", + "ndo": "Ndonga", + "nap": "Neapolitan", + "nia": "Nias", + "niu": "Niuean", + "zxx": "No linguistic content", + "nog": "Nogai", + "nor": "노르웨이어", + "nob": "Norwegian Bokmål", + "nno": "Norwegian Nynorsk", + "nym": "Nyamwezi", + "nya": "Nyanja", + "nyn": "Nyankole", + "nyo": "Nyoro", + "nzi": "Nzima", + "oci": "Occitan (post 1500)", + "oji": "Ojibwa", + "orm": "Oromo", + "osa": "Osage", + "oss": "Ossetian", + "pal": "Pahlavi", + "pau": "Palauan", + "pli": "Pali", + "pam": "Pampanga", + "pag": "Pangasinan", + "pan": "Panjabi", + "pap": "Papiamento", + "fas": "Persian", + "phn": " 페니키아 문자", + "pon": "Pohnpeian", + "pol": "폴란드어", + "por": "포르투갈어", + "pus": "Pashto", + "que": "Quechua", + "raj": "Rajasthani", + "rap": "Rapanui", + "ron": "루마니아어", + "roh": "Romansh", + "rom": "Romany", + "run": "Rundi", + "rus": "러시아어", + "smo": "Samoan", + "sad": "Sandawe", + "sag": "Sango", + "san": "Sanskrit", + "sat": "Santali", + "srd": "Sardinian", + "sas": 
"Sasak", + "sco": "Scots", + "sel": "Selkup", + "srp": "세르비아어", + "srr": "Serer", + "shn": "Shan", + "sna": "Shona", + "scn": "Sicilian", + "sid": "Sidamo", + "bla": "Siksika", + "snd": "Sindhi", + "sin": "싱할라 문자", + "den": "Slave (Athapascan)", + "slk": "슬로바키아어", + "slv": "슬로베니아어", + "sog": "Sogdian", + "som": "Somali", + "snk": "Soninke", + "spa": "스페인어", + "srn": "Sranan Tongo", + "suk": "Sukuma", + "sux": "Sumerian", + "sun": "Sundanese", + "sus": "Susu", + "swa": "Swahili (macrolanguage)", + "ssw": "Swati", + "swe": "스웨덴어", + "syr": "시리아 문자", + "tgl": "타갈로그 문자", + "tah": "Tahitian", + "tgk": "Tajik", + "tmh": "Tamashek", + "tam": "타밀 문자", + "tat": "Tatar", + "tel": "텔루구 문자", + "ter": "Tereno", + "tet": "Tetum", + "tha": "태국어", + "bod": "티베트 문자", + "tig": "Tigre", + "tir": "Tigrinya", + "tem": "Timne", + "tiv": "Tiv", + "tli": "Tlingit", + "tpi": "Tok Pisin", + "tkl": "Tokelau", + "tog": "Tonga (Nyasa)", + "ton": "Tonga (Tonga Islands)", + "tsi": "Tsimshian", + "tso": "Tsonga", + "tsn": "Tswana", + "tum": "Tumbuka", + "tur": "터키어", + "tuk": "Turkmen", + "tvl": "Tuvalu", + "tyv": "Tuvinian", + "twi": "Twi", + "udm": "Udmurt", + "uga": "우가리트 문자", + "uig": "Uighur", + "ukr": "Ukrainian", + "umb": "Umbundu", + "mis": "Uncoded languages", + "und": "Undetermined", + "urd": "Urdu", + "uzb": "Uzbek", + "vai": "Vai", + "ven": "Venda", + "vie": "베트남어", + "vol": "Volapük", + "vot": "Votic", + "wln": "Walloon", + "war": "Waray (Philippines)", + "was": "Washo", + "cym": "Welsh", + "wal": "Wolaytta", + "wol": "Wolof", + "xho": "Xhosa", + "sah": "Yakut", + "yao": "Yao", + "yap": "Yapese", + "yid": "Yiddish", + "yor": "Yoruba", + "zap": "Zapotec", + "zza": "Zaza", + "zen": "Zenaga", + "zha": "Zhuang", + "zul": "Zulu", + "zun": "Zuni" + }, "nl": { "aar": "Afar; Hamitisch", "abk": "Abchazisch", @@ -4297,6 +4685,7 @@ LANGUAGE_NAMES = { "div": "Divehi", "doi": "Dogri", "dsb": "Sorbisch; lager", + "dse": "Nederlandse gebarentaal", "dua": "Duala", "dum": "Nederlands; middel (ca. 
1050-1350)", "dyu": "Dyula", @@ -4721,6 +5110,7 @@ LANGUAGE_NAMES = { "div": "malediwski; divehi", "doi": "dogri (makrojęzyk)", "dsb": "dolnołużycki", + "dse": "holenderski język migowy", "dua": "duala", "dum": "holenderski średniowieczny (ok. 1050-1350)", "dyu": "diula", @@ -5140,6 +5530,7 @@ LANGUAGE_NAMES = { "dgr": "Dogrib", "dua": "Duala", "nld": "Holandês", + "dse": "Língua gestual holandesa", "dyu": "Dyula", "dzo": "Dzongkha", "efi": "Efik", @@ -5522,6 +5913,7 @@ LANGUAGE_NAMES = { "div": "Dhivehi", "doi": "Dogri (macrolanguage)", "dsb": "Sorbian; Lower", + "dse": "Dutch Sign Language", "dua": "Дуала", "dum": "Dutch; Middle (ca. 1050-1350)", "dyu": "Диула (Дьюла)", @@ -5946,6 +6338,7 @@ LANGUAGE_NAMES = { "div": "Divehi", "doi": "Dogri (macrolanguage)", "dsb": "Sorbian; nedre", + "dse": "Nederländskt teckenspråk", "dua": "Duala", "dum": "Hollänska; medeltida (ca. 1050-1350)", "dyu": "Dyula", @@ -6365,6 +6758,7 @@ LANGUAGE_NAMES = { "dgr": "Dogrib (Kanada)", "dua": "Duala (Afrika)", "nld": "Flâmanca (Hollanda dili)", + "dse": "Hollandalı İşaret Dili", "dyu": "Dyula (Burkina Faso; Mali)", "dzo": "Dzongkha (Butan)", "efi": "Efik (Afrika)", @@ -6747,6 +7141,7 @@ LANGUAGE_NAMES = { "div": "мальдивська", "doi": "догрі (макромова)", "dsb": "нижньолужицька", + "dse": "голландська мова жестів", "dua": "дуала", "dum": "середньовічна голландська (бл. 1050-1350)", "dyu": "діула", @@ -7171,6 +7566,7 @@ LANGUAGE_NAMES = { "div": "迪维希语", "doi": "多格拉语", "dsb": "索布语(下)", + "dse": "荷兰手语", "dua": "杜亚拉语", "dum": "荷兰语(中古,约 1050-1350)", "dyu": "迪尤拉语", @@ -7590,6 +7986,7 @@ LANGUAGE_NAMES = { "dgr": "Dogrib", "dua": "Duala", "nld": "荷蘭文", + "dse": "Dutch Sign Language", "dyu": "Dyula", "dzo": "Dzongkha", "efi": "Efik", @@ -7973,6 +8370,7 @@ LANGUAGE_NAMES = { "div": "Dhivehi", "doi": "Dogri (macrolanguage)", "dsb": "Sorbian; Lower", + "dse": "Dutch Sign Language", "dua": "Duala", "dum": "Dutch; Middle (ca. 
1050-1350)", "dyu": "Dyula", diff --git a/cps/jinjia.py b/cps/jinjia.py index 06e99141..e42c650c 100644 --- a/cps/jinjia.py +++ b/cps/jinjia.py @@ -22,17 +22,17 @@ # custom jinja filters +from markupsafe import escape import datetime import mimetypes from uuid import uuid4 -from babel.dates import format_date +# from babel.dates import format_date from flask import Blueprint, request, url_for -from flask_babel import get_locale +from flask_babel import format_date from flask_login import current_user -from markupsafe import escape -from . import logger +from . import constants, logger jinjia = Blueprint('jinjia', __name__) log = logger.create() @@ -77,7 +77,7 @@ def mimetype_filter(val): @jinjia.app_template_filter('formatdate') def formatdate_filter(val): try: - return format_date(val, format='medium', locale=get_locale()) + return format_date(val, format='medium') except AttributeError as e: log.error('Babel error: %s, Current user locale: %s, Current User: %s', e, current_user.locale, @@ -128,12 +128,55 @@ def formatseriesindex_filter(series_index): return series_index return 0 + @jinjia.app_template_filter('escapedlink') def escapedlink_filter(url, text): return "{}".format(url, escape(text)) + @jinjia.app_template_filter('uuidfilter') def uuidfilter(var): return uuid4() +@jinjia.app_template_filter('cache_timestamp') +def cache_timestamp(rolling_period='month'): + if rolling_period == 'day': + return str(int(datetime.datetime.today().replace(hour=1, minute=1).timestamp())) + elif rolling_period == 'year': + return str(int(datetime.datetime.today().replace(day=1).timestamp())) + else: + return str(int(datetime.datetime.today().replace(month=1, day=1).timestamp())) + + +@jinjia.app_template_filter('last_modified') +def book_last_modified(book): + return str(int(book.last_modified.timestamp())) + + +@jinjia.app_template_filter('get_cover_srcset') +def get_cover_srcset(book): + srcset = list() + resolutions = { + constants.COVER_THUMBNAIL_SMALL: 'sm', + 
constants.COVER_THUMBNAIL_MEDIUM: 'md', + constants.COVER_THUMBNAIL_LARGE: 'lg' + } + for resolution, shortname in resolutions.items(): + url = url_for('web.get_cover', book_id=book.id, resolution=shortname, c=book_last_modified(book)) + srcset.append(f'{url} {resolution}x') + return ', '.join(srcset) + + +@jinjia.app_template_filter('get_series_srcset') +def get_series_srcset(series): + srcset = list() + resolutions = { + constants.COVER_THUMBNAIL_SMALL: 'sm', + constants.COVER_THUMBNAIL_MEDIUM: 'md', + constants.COVER_THUMBNAIL_LARGE: 'lg' + } + for resolution, shortname in resolutions.items(): + url = url_for('web.get_series_cover', series_id=series.id, resolution=shortname, c=cache_timestamp()) + srcset.append(f'{url} {resolution}x') + return ', '.join(srcset) diff --git a/cps/kobo.py b/cps/kobo.py index 0412ae17..5f14b5cc 100644 --- a/cps/kobo.py +++ b/cps/kobo.py @@ -23,11 +23,7 @@ import os import uuid from time import gmtime, strftime import json - -try: - from urllib import unquote -except ImportError: - from urllib.parse import unquote +from urllib.parse import unquote from flask import ( Blueprint,
import config, logger, kobo_auth, db, calibre_db, helper, shelf as shelf_lib, ub, csrf, kobo_sync_status from .epub import get_epub_layout -from .constants import sqlalchemy_version2 +from .constants import sqlalchemy_version2, COVER_THUMBNAIL_SMALL from .helper import get_download_link from .services import SyncToken as SyncToken from .web import download_required @@ -153,8 +149,8 @@ def HandleSyncRequest(): sync_token.books_last_created = datetime.datetime.min sync_token.reading_state_last_modified = datetime.datetime.min - new_books_last_modified = sync_token.books_last_modified # needed for sync selected shelfs only - new_books_last_created = sync_token.books_last_created # needed to distinguish between new and changed entitlement + new_books_last_modified = sync_token.books_last_modified # needed for sync selected shelfs only + new_books_last_created = sync_token.books_last_created # needed to distinguish between new and changed entitlement new_reading_state_last_modified = sync_token.reading_state_last_modified new_archived_last_modified = datetime.datetime.min @@ -178,21 +174,20 @@ def HandleSyncRequest(): ub.BookShelf.date_added, ub.ArchivedBook.is_archived) changed_entries = (changed_entries - .join(db.Data).outerjoin(ub.ArchivedBook, db.Books.id == ub.ArchivedBook.book_id) - .join(ub.KoboSyncedBooks, ub.KoboSyncedBooks.book_id == db.Books.id, isouter=True) - .filter(or_(ub.KoboSyncedBooks.user_id != current_user.id, - ub.KoboSyncedBooks.book_id == None)) - .filter(ub.BookShelf.date_added > sync_token.books_last_modified) - .filter(db.Data.format.in_(KOBO_FORMATS)) - .filter(calibre_db.common_filters(allow_show_archived=True)) - .order_by(db.Books.id) - .order_by(ub.ArchivedBook.last_modified) - .join(ub.BookShelf, db.Books.id == ub.BookShelf.book_id) - .join(ub.Shelf) - .filter(ub.Shelf.user_id == current_user.id) - .filter(ub.Shelf.kobo_sync) - .distinct() - ) + .join(db.Data).outerjoin(ub.ArchivedBook, and_(db.Books.id == ub.ArchivedBook.book_id, + 
ub.ArchivedBook.user_id == current_user.id)) + .filter(db.Books.id.notin_(calibre_db.session.query(ub.KoboSyncedBooks.book_id) + .filter(ub.KoboSyncedBooks.user_id == current_user.id))) + .filter(ub.BookShelf.date_added > sync_token.books_last_modified) + .filter(db.Data.format.in_(KOBO_FORMATS)) + .filter(calibre_db.common_filters(allow_show_archived=True)) + .order_by(db.Books.id) + .order_by(ub.ArchivedBook.last_modified) + .join(ub.BookShelf, db.Books.id == ub.BookShelf.book_id) + .join(ub.Shelf) + .filter(ub.Shelf.user_id == current_user.id) + .filter(ub.Shelf.kobo_sync) + .distinct()) else: if sqlalchemy_version2: changed_entries = select(db.Books, ub.ArchivedBook.last_modified, ub.ArchivedBook.is_archived) @@ -201,16 +196,14 @@ def HandleSyncRequest(): ub.ArchivedBook.last_modified, ub.ArchivedBook.is_archived) changed_entries = (changed_entries - .join(db.Data).outerjoin(ub.ArchivedBook, db.Books.id == ub.ArchivedBook.book_id) - .join(ub.KoboSyncedBooks, ub.KoboSyncedBooks.book_id == db.Books.id, isouter=True) - .filter(or_(ub.KoboSyncedBooks.user_id != current_user.id, - ub.KoboSyncedBooks.book_id == None)) - .filter(calibre_db.common_filters()) - .filter(db.Data.format.in_(KOBO_FORMATS)) - .order_by(db.Books.last_modified) - .order_by(db.Books.id) - ) - + .join(db.Data).outerjoin(ub.ArchivedBook, and_(db.Books.id == ub.ArchivedBook.book_id, + ub.ArchivedBook.user_id == current_user.id)) + .filter(db.Books.id.notin_(calibre_db.session.query(ub.KoboSyncedBooks.book_id) + .filter(ub.KoboSyncedBooks.user_id == current_user.id))) + .filter(calibre_db.common_filters(allow_show_archived=True)) + .filter(db.Data.format.in_(KOBO_FORMATS)) + .order_by(db.Books.last_modified) + .order_by(db.Books.id)) reading_states_in_new_entitlements = [] if sqlalchemy_version2: @@ -220,7 +213,7 @@ def HandleSyncRequest(): log.debug("Books to Sync: {}".format(len(books.all()))) for book in books: formats = [data.format for data in book.Books.data] - if not 'KEPUB' in formats and 
config.config_kepubifypath and 'EPUB' in formats: + if 'KEPUB' not in formats and config.config_kepubifypath and 'EPUB' in formats: helper.convert_book_format(book.Books.id, config.config_calibre_dir, 'EPUB', 'KEPUB', current_user.name) kobo_reading_state = get_or_create_reading_state(book.Books.id) @@ -262,10 +255,12 @@ def HandleSyncRequest(): if sqlalchemy_version2: max_change = calibre_db.session.execute(changed_entries .filter(ub.ArchivedBook.is_archived) + .filter(ub.ArchivedBook.user_id == current_user.id) .order_by(func.datetime(ub.ArchivedBook.last_modified).desc()))\ .columns(db.Books).first() else: - max_change = changed_entries.from_self().filter(ub.ArchivedBook.is_archived) \ + max_change = changed_entries.from_self().filter(ub.ArchivedBook.is_archived)\ + .filter(ub.ArchivedBook.user_id == current_user.id) \ .order_by(func.datetime(ub.ArchivedBook.last_modified).desc()).first() max_change = max_change.last_modified if max_change else new_archived_last_modified @@ -300,7 +295,8 @@ def HandleSyncRequest(): changed_reading_states = changed_reading_states.filter( and_(ub.KoboReadingState.user_id == current_user.id, - ub.KoboReadingState.book_id.notin_(reading_states_in_new_entitlements))) + ub.KoboReadingState.book_id.notin_(reading_states_in_new_entitlements)))\ + .order_by(ub.KoboReadingState.last_modified) cont_sync |= bool(changed_reading_states.count() > SYNC_ITEM_LIMIT) for kobo_reading_state in changed_reading_states.limit(SYNC_ITEM_LIMIT).all(): book = calibre_db.session.query(db.Books).filter(db.Books.id == kobo_reading_state.book_id).one_or_none() @@ -326,7 +322,7 @@ def HandleSyncRequest(): def generate_sync_response(sync_token, sync_results, set_cont=False): extra_headers = {} - if config.config_kobo_proxy: + if config.config_kobo_proxy and not set_cont: # Merge in sync results from the official Kobo store. 
try: store_response = make_request_to_kobo_store(sync_token) @@ -344,7 +340,7 @@ def generate_sync_response(sync_token, sync_results, set_cont=False): extra_headers["x-kobo-sync"] = "continue" sync_token.to_headers(extra_headers) - log.debug("Kobo Sync Content: {}".format(sync_results)) + # log.debug("Kobo Sync Content: {}".format(sync_results)) # jsonify decodes the unicode string different to what kobo expects response = make_response(json.dumps(sync_results), extra_headers) response.headers["Content-Type"] = "application/json; charset=utf-8" @@ -427,9 +423,9 @@ def get_author(book): author_list = [] autor_roles = [] for author in book.authors: - autor_roles.append({"Name":author.name}) #.encode('unicode-escape').decode('latin-1') + autor_roles.append({"Name": author.name}) author_list.append(author.name) - return {"ContributorRoles": autor_roles, "Contributors":author_list} + return {"ContributorRoles": autor_roles, "Contributors": author_list} def get_publisher(book): @@ -443,6 +439,7 @@ def get_series(book): return None return book.series[0].name + def get_seriesindex(book): return book.series_index or 1 @@ -489,7 +486,7 @@ def get_metadata(book): "Language": "en", "PhoneticPronunciations": {}, "PublicationDate": convert_to_kobo_timestamp_string(book.pubdate), - "Publisher": {"Imprint": "", "Name": get_publisher(book),}, + "Publisher": {"Imprint": "", "Name": get_publisher(book), }, "RevisionId": book_uuid, "Title": book.title, "WorkId": book_uuid, @@ -508,6 +505,7 @@ def get_metadata(book): return metadata + @csrf.exempt @kobo.route("/v1/library/tags", methods=["POST", "DELETE"]) @requires_kobo_auth @@ -556,11 +554,9 @@ def HandleTagUpdate(tag_id): else: abort(404, description="Collection isn't known to CalibreWeb") - if not shelf_lib.check_shelf_edit_permissions(shelf): - abort(401, description="User is unauthaurized to edit shelf.") - if request.method == "DELETE": - shelf_lib.delete_shelf_helper(shelf) + if not shelf_lib.delete_shelf_helper(shelf): + 
abort(401, description="Error deleting Shelf") else: name = None try: @@ -678,11 +674,8 @@ def HandleTagRemoveItem(tag_id): # Note: Public shelves that aren't owned by the user aren't supported. def sync_shelves(sync_token, sync_results, only_kobo_shelves=False): new_tags_last_modified = sync_token.tags_last_modified - - for shelf in ub.session.query(ub.ShelfArchive).filter( - func.datetime(ub.ShelfArchive.last_modified) > sync_token.tags_last_modified, - ub.ShelfArchive.user_id == current_user.id - ): + # transmit all archived shelfs independent of last sync (why should this matter?) + for shelf in ub.session.query(ub.ShelfArchive).filter(ub.ShelfArchive.user_id == current_user.id): new_tags_last_modified = max(shelf.last_modified, new_tags_last_modified) sync_results.append({ "DeletedTag": { @@ -695,7 +688,6 @@ def sync_shelves(sync_token, sync_results, only_kobo_shelves=False): ub.session.delete(shelf) ub.session_commit() - extra_filters = [] if only_kobo_shelves: for shelf in ub.session.query(ub.Shelf).filter( @@ -728,7 +720,6 @@ def sync_shelves(sync_token, sync_results, only_kobo_shelves=False): *extra_filters ).distinct().order_by(func.datetime(ub.Shelf.last_modified).asc()) - for shelf in shelflist: if not shelf_lib.check_shelf_view_permissions(shelf): continue @@ -774,6 +765,7 @@ def create_kobo_tag(shelf): ) return {"Tag": tag} + @csrf.exempt @kobo.route("/v1/library//state", methods=["GET", "PUT"]) @requires_kobo_auth @@ -818,7 +810,7 @@ def HandleStateRequest(book_uuid): book_read = kobo_reading_state.book_read_link new_book_read_status = get_ub_read_status(request_status_info["Status"]) if new_book_read_status == ub.ReadBook.STATUS_IN_PROGRESS \ - and new_book_read_status != book_read.read_status: + and new_book_read_status != book_read.read_status: book_read.times_started_reading += 1 book_read.last_time_started_reading = datetime.datetime.utcnow() book_read.read_status = new_book_read_status @@ -858,7 +850,7 @@ def 
get_ub_read_status(kobo_read_status): def get_or_create_reading_state(book_id): book_read = ub.session.query(ub.ReadBook).filter(ub.ReadBook.book_id == book_id, - ub.ReadBook.user_id == current_user.id).one_or_none() + ub.ReadBook.user_id == int(current_user.id)).one_or_none() if not book_read: book_read = ub.ReadBook(user_id=current_user.id, book_id=book_id) if not book_read.kobo_reading_state: @@ -922,13 +914,12 @@ def get_current_bookmark_response(current_bookmark): } return resp + @kobo.route("/////image.jpg", defaults={'Quality': ""}) @kobo.route("//////image.jpg") @requires_kobo_auth -def HandleCoverImageRequest(book_uuid, width, height,Quality, isGreyscale): - book_cover = helper.get_book_cover_with_uuid( - book_uuid, use_generic_cover_on_failure=False - ) +def HandleCoverImageRequest(book_uuid, width, height, Quality, isGreyscale): + book_cover = helper.get_book_cover_with_uuid(book_uuid, resolution=COVER_THUMBNAIL_SMALL) if not book_cover: if config.config_kobo_proxy: log.debug("Cover for unknown book: %s proxied to kobo" % book_uuid) @@ -1001,8 +992,8 @@ def handle_getests(): if config.config_kobo_proxy: return redirect_or_proxy_request() else: - testkey = request.headers.get("X-Kobo-userkey","") - return make_response(jsonify({"Result": "Success", "TestKey":testkey, "Tests": {}})) + testkey = request.headers.get("X-Kobo-userkey", "") + return make_response(jsonify({"Result": "Success", "TestKey": testkey, "Tests": {}})) @csrf.exempt @@ -1032,7 +1023,7 @@ def make_calibre_web_auth_response(): content = request.get_json() AccessToken = base64.b64encode(os.urandom(24)).decode('utf-8') RefreshToken = base64.b64encode(os.urandom(24)).decode('utf-8') - return make_response( + return make_response( jsonify( { "AccessToken": AccessToken, @@ -1170,14 +1161,16 @@ def NATIVE_KOBO_RESOURCES(): "eula_page": "https://www.kobo.com/termsofuse?style=onestore", "exchange_auth": "https://storeapi.kobo.com/v1/auth/exchange", "external_book": 
"https://storeapi.kobo.com/v1/products/books/external/{Ids}", - "facebook_sso_page": "https://authorize.kobo.com/signin/provider/Facebook/login?returnUrl=http://store.kobobooks.com/", + "facebook_sso_page": + "https://authorize.kobo.com/signin/provider/Facebook/login?returnUrl=http://store.kobobooks.com/", "featured_list": "https://storeapi.kobo.com/v1/products/featured/{FeaturedListId}", "featured_lists": "https://storeapi.kobo.com/v1/products/featured", "free_books_page": { "EN": "https://www.kobo.com/{region}/{language}/p/free-ebooks", "FR": "https://www.kobo.com/{region}/{language}/p/livres-gratuits", "IT": "https://www.kobo.com/{region}/{language}/p/libri-gratuiti", - "NL": "https://www.kobo.com/{region}/{language}/List/bekijk-het-overzicht-van-gratis-ebooks/QpkkVWnUw8sxmgjSlCbJRg", + "NL": "https://www.kobo.com/{region}/{language}/" + "List/bekijk-het-overzicht-van-gratis-ebooks/QpkkVWnUw8sxmgjSlCbJRg", "PT": "https://www.kobo.com/{region}/{language}/p/livros-gratis", }, "fte_feedback": "https://storeapi.kobo.com/v1/products/ftefeedback", @@ -1202,7 +1195,8 @@ def NATIVE_KOBO_RESOURCES(): "library_stack": "https://storeapi.kobo.com/v1/user/library/stacks/{LibraryItemId}", "library_sync": "https://storeapi.kobo.com/v1/library/sync", "love_dashboard_page": "https://store.kobobooks.com/{culture}/kobosuperpoints", - "love_points_redemption_page": "https://store.kobobooks.com/{culture}/KoboSuperPointsRedemption?productId={ProductId}", + "love_points_redemption_page": + "https://store.kobobooks.com/{culture}/KoboSuperPointsRedemption?productId={ProductId}", "magazine_landing_page": "https://store.kobobooks.com/emagazines", "notifications_registration_issue": "https://storeapi.kobo.com/v1/notifications/registration", "oauth_host": "https://oauth.kobo.com", @@ -1218,7 +1212,8 @@ def NATIVE_KOBO_RESOURCES(): "product_recommendations": "https://storeapi.kobo.com/v1/products/{ProductId}/recommendations", "product_reviews": 
"https://storeapi.kobo.com/v1/products/{ProductIds}/reviews", "products": "https://storeapi.kobo.com/v1/products", - "provider_external_sign_in_page": "https://authorize.kobo.com/ExternalSignIn/{providerName}?returnUrl=http://store.kobobooks.com/", + "provider_external_sign_in_page": + "https://authorize.kobo.com/ExternalSignIn/{providerName}?returnUrl=http://store.kobobooks.com/", "purchase_buy": "https://www.kobo.com/checkout/createpurchase/", "purchase_buy_templated": "https://www.kobo.com/{culture}/checkout/createpurchase/{ProductId}", "quickbuy_checkout": "https://storeapi.kobo.com/v1/store/quickbuy/{PurchaseId}/checkout", diff --git a/cps/kobo_auth.py b/cps/kobo_auth.py index a51095c8..9865b993 100644 --- a/cps/kobo_auth.py +++ b/cps/kobo_auth.py @@ -62,6 +62,7 @@ particular calls to non-Kobo specific endpoints such as the CalibreWeb book down from binascii import hexlify from datetime import datetime from os import urandom +from functools import wraps from flask import g, Blueprint, url_for, abort, request from flask_login import login_user, current_user, login_required @@ -70,20 +71,61 @@ from flask_babel import gettext as _ from . 
import logger, config, calibre_db, db, helper, ub, lm from .render_template import render_title_template -try: - from functools import wraps -except ImportError: - pass # We're not using Python 3 - - log = logger.create() +kobo_auth = Blueprint("kobo_auth", __name__, url_prefix="/kobo_auth") -def register_url_value_preprocessor(kobo): - @kobo.url_value_preprocessor - # pylint: disable=unused-variable - def pop_auth_token(__, values): - g.auth_token = values.pop("auth_token") + +@kobo_auth.route("/generate_auth_token/") +@login_required +def generate_auth_token(user_id): + warning = False + host_list = request.host.rsplit(':') + if len(host_list) == 1: + host = ':'.join(host_list) + else: + host = ':'.join(host_list[0:-1]) + if host.startswith('127.') or host.lower() == 'localhost' or host.startswith('[::ffff:7f') or host == "[::1]": + warning = _('Please access Calibre-Web from non localhost to get valid api_endpoint for kobo device') + + # Generate auth token if none is existing for this user + auth_token = ub.session.query(ub.RemoteAuthToken).filter( + ub.RemoteAuthToken.user_id == user_id + ).filter(ub.RemoteAuthToken.token_type==1).first() + + if not auth_token: + auth_token = ub.RemoteAuthToken() + auth_token.user_id = user_id + auth_token.expiration = datetime.max + auth_token.auth_token = (hexlify(urandom(16))).decode("utf-8") + auth_token.token_type = 1 + + ub.session.add(auth_token) + ub.session_commit() + + books = calibre_db.session.query(db.Books).join(db.Data).all() + + for book in books: + formats = [data.format for data in book.data] + if not 'KEPUB' in formats and config.config_kepubifypath and 'EPUB' in formats: + helper.convert_book_format(book.id, config.config_calibre_dir, 'EPUB', 'KEPUB', current_user.name) + + return render_title_template( + "generate_kobo_auth_url.html", + title=_(u"Kobo Setup"), + auth_token=auth_token.auth_token, + warning = warning + ) + + +@kobo_auth.route("/deleteauthtoken/", methods=["POST"]) +@login_required +def 
delete_auth_token(user_id): + # Invalidate any previously generated Kobo Auth token for this user + ub.session.query(ub.RemoteAuthToken).filter(ub.RemoteAuthToken.user_id == user_id)\ + .filter(ub.RemoteAuthToken.token_type==1).delete() + + return ub.session_commit() def disable_failed_auth_redirect_for_blueprint(bp): @@ -97,6 +139,13 @@ def get_auth_token(): return None +def register_url_value_preprocessor(kobo): + @kobo.url_value_preprocessor + # pylint: disable=unused-variable + def pop_auth_token(__, values): + g.auth_token = values.pop("auth_token") + + def requires_kobo_auth(f): @wraps(f) def inner(*args, **kwargs): @@ -114,64 +163,3 @@ def requires_kobo_auth(f): log.debug("Received Kobo request without a recognizable auth token.") return abort(401) return inner - - -kobo_auth = Blueprint("kobo_auth", __name__, url_prefix="/kobo_auth") - - -@kobo_auth.route("/generate_auth_token/") -@login_required -def generate_auth_token(user_id): - host_list = request.host.rsplit(':') - if len(host_list) == 1: - host = ':'.join(host_list) - else: - host = ':'.join(host_list[0:-1]) - if host.startswith('127.') or host.lower() == 'localhost' or host.startswith('[::ffff:7f'): - warning = _('PLease access calibre-web from non localhost to get valid api_endpoint for kobo device') - return render_title_template( - "generate_kobo_auth_url.html", - title=_(u"Kobo Setup"), - warning = warning - ) - else: - # Invalidate any prevously generated Kobo Auth token for this user. 
- auth_token = ub.session.query(ub.RemoteAuthToken).filter( - ub.RemoteAuthToken.user_id == user_id - ).filter(ub.RemoteAuthToken.token_type==1).first() - - if not auth_token: - auth_token = ub.RemoteAuthToken() - auth_token.user_id = user_id - auth_token.expiration = datetime.max - auth_token.auth_token = (hexlify(urandom(16))).decode("utf-8") - auth_token.token_type = 1 - - ub.session.add(auth_token) - ub.session_commit() - - books = calibre_db.session.query(db.Books).join(db.Data).all() - - for book in books: - formats = [data.format for data in book.data] - if not 'KEPUB' in formats and config.config_kepubifypath and 'EPUB' in formats: - helper.convert_book_format(book.id, config.config_calibre_dir, 'EPUB', 'KEPUB', current_user.name) - - return render_title_template( - "generate_kobo_auth_url.html", - title=_(u"Kobo Setup"), - kobo_auth_url=url_for( - "kobo.TopLevelEndpoint", auth_token=auth_token.auth_token, _external=True - ), - warning = False - ) - - -@kobo_auth.route("/deleteauthtoken/") -@login_required -def delete_auth_token(user_id): - # Invalidate any prevously generated Kobo Auth token for this user. - ub.session.query(ub.RemoteAuthToken).filter(ub.RemoteAuthToken.user_id == user_id)\ - .filter(ub.RemoteAuthToken.token_type==1).delete() - - return ub.session_commit() diff --git a/cps/kobo_sync_status.py b/cps/kobo_sync_status.py index 580ac8ea..7a201861 100644 --- a/cps/kobo_sync_status.py +++ b/cps/kobo_sync_status.py @@ -20,7 +20,8 @@ from flask_login import current_user from . 
import ub import datetime -from sqlalchemy.sql.expression import or_, and_ +from sqlalchemy.sql.expression import or_, and_, true +from sqlalchemy import exc # Add the current book id to kobo_synced_books table for current user, if entry is already present, # do nothing (safety precaution) @@ -36,10 +37,18 @@ def add_synced_books(book_id): # Select all entries of current book in kobo_synced_books table, which are from current user and delete them -def remove_synced_book(book_id): - ub.session.query(ub.KoboSyncedBooks).filter(ub.KoboSyncedBooks.book_id == book_id) \ - .filter(ub.KoboSyncedBooks.user_id == current_user.id).delete() - ub.session_commit() +def remove_synced_book(book_id, all=False, session=None): + if not all: + user = ub.KoboSyncedBooks.user_id == current_user.id + else: + user = true() + if not session: + ub.session.query(ub.KoboSyncedBooks).filter(ub.KoboSyncedBooks.book_id == book_id).filter(user).delete() + ub.session_commit() + else: + session.query(ub.KoboSyncedBooks).filter(ub.KoboSyncedBooks.book_id == book_id).filter(user).delete() + ub.session_commit(_session=session) + def change_archived_books(book_id, state=None, message=None): @@ -56,7 +65,7 @@ def change_archived_books(book_id, state=None, message=None): return archived_book.is_archived -# select all books which are synced by the current user and do not belong to a synced shelf and them to archive +# select all books which are synced by the current user and do not belong to a synced shelf and set them to archive # select all shelves from current user which are synced and do not belong to the "only sync" shelves def update_on_sync_shelfs(user_id): books_to_archive = (ub.session.query(ub.KoboSyncedBooks) @@ -71,6 +80,7 @@ def update_on_sync_shelfs(user_id): .filter(ub.KoboSyncedBooks.user_id == user_id).delete() ub.session_commit() + # Search all shelf which are currently not synced shelves_to_archive = ub.session.query(ub.Shelf).filter(ub.Shelf.user_id == user_id).filter( 
ub.Shelf.kobo_sync == 0).all() for a in shelves_to_archive: diff --git a/cps/logger.py b/cps/logger.py index 5847188b..fcc25c27 100644 --- a/cps/logger.py +++ b/cps/logger.py @@ -42,20 +42,15 @@ logging.addLevelName(logging.CRITICAL, "CRIT") class _Logger(logging.Logger): - def debug_or_exception(self, message, *args, **kwargs): + def error_or_exception(self, message, stacklevel=2, *args, **kwargs): if sys.version_info > (3, 7): if is_debug_enabled(): - self.exception(message, stacklevel=2, *args, **kwargs) + self.exception(message, stacklevel=stacklevel, *args, **kwargs) else: - self.error(message, stacklevel=2, *args, **kwargs) - elif sys.version_info > (3, 0): - if is_debug_enabled(): - self.exception(message, stack_info=True, *args, **kwargs) - else: - self.error(message, *args, **kwargs) + self.error(message, stacklevel=stacklevel, *args, **kwargs) else: if is_debug_enabled(): - self.exception(message, *args, **kwargs) + self.exception(message, stack_info=True, *args, **kwargs) else: self.error(message, *args, **kwargs) diff --git a/cps/main.py b/cps/main.py new file mode 100644 index 00000000..d3591c06 --- /dev/null +++ b/cps/main.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2012-2022 OzzieIsaacs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import sys + +from . 
import create_app +from .jinjia import jinjia +from .remotelogin import remotelogin + +def main(): + app = create_app() + + from .web import web + from .opds import opds + from .admin import admi + from .gdrive import gdrive + from .editbooks import editbook + from .about import about + from .search import search + from .search_metadata import meta + from .shelf import shelf + from .tasks_status import tasks + from .error_handler import init_errorhandler + try: + from .kobo import kobo, get_kobo_activated + from .kobo_auth import kobo_auth + kobo_available = get_kobo_activated() + except (ImportError, AttributeError): # Catch also error for not installed flask-WTF (missing csrf decorator) + kobo_available = False + + try: + from .oauth_bb import oauth + oauth_available = True + except ImportError: + oauth_available = False + + from . import web_server + init_errorhandler() + + app.register_blueprint(search) + app.register_blueprint(tasks) + app.register_blueprint(web) + app.register_blueprint(opds) + app.register_blueprint(jinjia) + app.register_blueprint(about) + app.register_blueprint(shelf) + app.register_blueprint(admi) + app.register_blueprint(remotelogin) + app.register_blueprint(meta) + app.register_blueprint(gdrive) + app.register_blueprint(editbook) + if kobo_available: + app.register_blueprint(kobo) + app.register_blueprint(kobo_auth) + if oauth_available: + app.register_blueprint(oauth) + success = web_server.start() + sys.exit(0 if success else 1) diff --git a/cps/metadata_provider/amazon.py b/cps/metadata_provider/amazon.py new file mode 100644 index 00000000..da3aed79 --- /dev/null +++ b/cps/metadata_provider/amazon.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- + +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2022 quarz12 +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, 
either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import concurrent.futures +import requests +from bs4 import BeautifulSoup as BS # requirement +from typing import List, Optional + +try: + import cchardet #optional for better speed +except ImportError: + pass +from cps import logger +from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata +import cps.logger as logger + +#from time import time +from operator import itemgetter +log = logger.create() + +log = logger.create() + + +class Amazon(Metadata): + __name__ = "Amazon" + __id__ = "amazon" + headers = {'upgrade-insecure-requests': '1', + 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36', + 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', + 'sec-gpc': '1', + 'sec-fetch-site': 'none', + 'sec-fetch-mode': 'navigate', + 'sec-fetch-user': '?1', + 'sec-fetch-dest': 'document', + 'accept-encoding': 'gzip, deflate, br', + 'accept-language': 'en-US,en;q=0.9'} + session = requests.Session() + session.headers=headers + + def search( + self, query: str, generic_cover: str = "", locale: str = "en" + ) -> Optional[List[MetaRecord]]: + #timer=time() + def inner(link, index) -> [dict, int]: + with self.session as session: + try: + r = session.get(f"https://www.amazon.com/{link}") + r.raise_for_status() + except Exception as ex: + log.warning(ex) + return + long_soup = BS(r.text, "lxml") #~4sec :/ + soup2 = long_soup.find("div", attrs={"cel_widget_id": 
"dpx-books-ppd_csm_instrumentation_wrapper"}) + if soup2 is None: + return + try: + match = MetaRecord( + title = "", + authors = "", + source=MetaSourceInfo( + id=self.__id__, + description="Amazon Books", + link="https://amazon.com/" + ), + url = f"https://www.amazon.com{link}", + #the more searches the slower, these are too hard to find in reasonable time or might not even exist + publisher= "", # very unreliable + publishedDate= "", # very unreliable + id = None, # ? + tags = [] # dont exist on amazon + ) + + try: + match.description = "\n".join( + soup2.find("div", attrs={"data-feature-name": "bookDescription"}).stripped_strings)\ + .replace("\xa0"," ")[:-9].strip().strip("\n") + except (AttributeError, TypeError): + return None # if there is no description it is not a book and therefore should be ignored + try: + match.title = soup2.find("span", attrs={"id": "productTitle"}).text + except (AttributeError, TypeError): + match.title = "" + try: + match.authors = [next( + filter(lambda i: i != " " and i != "\n" and not i.startswith("{"), + x.findAll(text=True))).strip() + for x in soup2.findAll("span", attrs={"class": "author"})] + except (AttributeError, TypeError, StopIteration): + match.authors = "" + try: + match.rating = int( + soup2.find("span", class_="a-icon-alt").text.split(" ")[0].split(".")[ + 0]) # first number in string + except (AttributeError, ValueError): + match.rating = 0 + try: + match.cover = soup2.find("img", attrs={"class": "a-dynamic-image frontImage"})["src"] + except (AttributeError, TypeError): + match.cover = "" + return match, index + except Exception as e: + log.error_or_exception(e) + return + + val = list() + if self.active: + try: + results = self.session.get( + f"https://www.amazon.com/s?k={query.replace(' ', '+')}&i=digital-text&sprefix={query.replace(' ', '+')}" + f"%2Cdigital-text&ref=nb_sb_noss", + headers=self.headers) + results.raise_for_status() + except requests.exceptions.HTTPError as e: + log.error_or_exception(e) + 
return None + except Exception as e: + log.warning(e) + return None + soup = BS(results.text, 'html.parser') + links_list = [next(filter(lambda i: "digital-text" in i["href"], x.findAll("a")))["href"] for x in + soup.findAll("div", attrs={"data-component-type": "s-search-result"})] + with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: + fut = {executor.submit(inner, link, index) for index, link in enumerate(links_list[:5])} + val = list(map(lambda x : x.result() ,concurrent.futures.as_completed(fut))) + result = list(filter(lambda x: x, val)) + return [x[0] for x in sorted(result, key=itemgetter(1))] #sort by amazons listing order for best relevance diff --git a/cps/metadata_provider/comicvine.py b/cps/metadata_provider/comicvine.py index 8f496608..b4d8d34c 100644 --- a/cps/metadata_provider/comicvine.py +++ b/cps/metadata_provider/comicvine.py @@ -17,49 +17,76 @@ # along with this program. If not, see . # ComicVine api document: https://comicvine.gamespot.com/api/documentation +from typing import Dict, List, Optional +from urllib.parse import quote import requests -from cps.services.Metadata import Metadata +from cps import logger +from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata + +log = logger.create() class ComicVine(Metadata): __name__ = "ComicVine" __id__ = "comicvine" + DESCRIPTION = "ComicVine Books" + META_URL = "https://comicvine.gamespot.com/" + API_KEY = "57558043c53943d5d1e96a9ad425b0eb85532ee6" + BASE_URL = ( + f"https://comicvine.gamespot.com/api/search?api_key={API_KEY}" + f"&resources=issue&query=" + ) + QUERY_PARAMS = "&sort=name:desc&format=json" + HEADERS = {"User-Agent": "Not Evil Browser"} - def search(self, query, __): + def search( + self, query: str, generic_cover: str = "", locale: str = "en" + ) -> Optional[List[MetaRecord]]: val = list() - apikey = "57558043c53943d5d1e96a9ad425b0eb85532ee6" if self.active: - headers = { - 'User-Agent': 'Not Evil Browser' - } - - result = 
requests.get("https://comicvine.gamespot.com/api/search?api_key=" - + apikey + "&resources=issue&query=" + query + "&sort=name:desc&format=json", headers=headers) - for r in result.json()['results']: - seriesTitle = r['volume'].get('name', "") - if r.get('store_date'): - dateFomers = r.get('store_date') - else: - dateFomers = r.get('date_added') - v = dict() - v['id'] = r['id'] - v['title'] = seriesTitle + " #" + r.get('issue_number', "0") + " - " + ( r.get('name', "") or "") - v['authors'] = r.get('authors', []) - v['description'] = r.get('description', "") - v['publisher'] = "" - v['publishedDate'] = dateFomers - v['tags'] = ["Comics", seriesTitle] - v['rating'] = 0 - v['series'] = seriesTitle - v['cover'] = r['image'].get('original_url') - v['source'] = { - "id": self.__id__, - "description": "ComicVine Books", - "link": "https://comicvine.gamespot.com/" - } - v['url'] = r.get('site_detail_url', "") - val.append(v) + title_tokens = list(self.get_title_tokens(query, strip_joiners=False)) + if title_tokens: + tokens = [quote(t.encode("utf-8")) for t in title_tokens] + query = "%20".join(tokens) + try: + result = requests.get( + f"{ComicVine.BASE_URL}{query}{ComicVine.QUERY_PARAMS}", + headers=ComicVine.HEADERS, + ) + result.raise_for_status() + except Exception as e: + log.warning(e) + return None + for result in result.json()["results"]: + match = self._parse_search_result( + result=result, generic_cover=generic_cover, locale=locale + ) + val.append(match) return val - + def _parse_search_result( + self, result: Dict, generic_cover: str, locale: str + ) -> MetaRecord: + series = result["volume"].get("name", "") + series_index = result.get("issue_number", 0) + issue_name = result.get("name", "") + match = MetaRecord( + id=result["id"], + title=f"{series}#{series_index} - {issue_name}", + authors=result.get("authors", []), + url=result.get("site_detail_url", ""), + source=MetaSourceInfo( + id=self.__id__, + description=ComicVine.DESCRIPTION, + 
link=ComicVine.META_URL, + ), + series=series, + ) + match.cover = result["image"].get("original_url", generic_cover) + match.description = result.get("description", "") + match.publishedDate = result.get("store_date", result.get("date_added")) + match.series_index = series_index + match.tags = ["Comics", series] + match.identifiers = {"comicvine": match.id} + return match diff --git a/cps/metadata_provider/douban.py b/cps/metadata_provider/douban.py new file mode 100644 index 00000000..ee21f587 --- /dev/null +++ b/cps/metadata_provider/douban.py @@ -0,0 +1,206 @@ +# -*- coding: utf-8 -*- + +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2022 xlivevil +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+import re +from concurrent import futures +from typing import List, Optional + +import requests +from html2text import HTML2Text +from lxml import etree + +from cps import logger +from cps.services.Metadata import Metadata, MetaRecord, MetaSourceInfo + +log = logger.create() + + +def html2text(html: str) -> str: + + h2t = HTML2Text() + h2t.body_width = 0 + h2t.single_line_break = True + h2t.emphasis_mark = "*" + return h2t.handle(html) + + +class Douban(Metadata): + __name__ = "豆瓣" + __id__ = "douban" + DESCRIPTION = "豆瓣" + META_URL = "https://book.douban.com/" + SEARCH_URL = "https://www.douban.com/j/search" + + ID_PATTERN = re.compile(r"sid: (?P\d+),") + AUTHORS_PATTERN = re.compile(r"作者|译者") + PUBLISHER_PATTERN = re.compile(r"出版社") + SUBTITLE_PATTERN = re.compile(r"副标题") + PUBLISHED_DATE_PATTERN = re.compile(r"出版年") + SERIES_PATTERN = re.compile(r"丛书") + IDENTIFIERS_PATTERN = re.compile(r"ISBN|统一书号") + + TITTLE_XPATH = "//span[@property='v:itemreviewed']" + COVER_XPATH = "//a[@class='nbg']" + INFO_XPATH = "//*[@id='info']//span[@class='pl']" + TAGS_XPATH = "//a[contains(@class, 'tag')]" + DESCRIPTION_XPATH = "//div[@id='link-report']//div[@class='intro']" + RATING_XPATH = "//div[@class='rating_self clearfix']/strong" + + session = requests.Session() + session.headers = { + 'user-agent': + 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edg/98.0.1108.56', + } + + def search( + self, query: str, generic_cover: str = "", locale: str = "en" + ) -> Optional[List[MetaRecord]]: + if self.active: + log.debug(f"starting search {query} on douban") + if title_tokens := list( + self.get_title_tokens(query, strip_joiners=False) + ): + query = "+".join(title_tokens) + + try: + r = self.session.get( + self.SEARCH_URL, params={"cat": 1001, "q": query} + ) + r.raise_for_status() + + except Exception as e: + log.warning(e) + return None + + results = r.json() + if results["total"] == 0: + return [] + + 
book_id_list = [ + self.ID_PATTERN.search(item).group("id") + for item in results["items"][:10] if self.ID_PATTERN.search(item) + ] + + with futures.ThreadPoolExecutor(max_workers=5) as executor: + + fut = [ + executor.submit(self._parse_single_book, book_id, generic_cover) + for book_id in book_id_list + ] + + val = [ + future.result() + for future in futures.as_completed(fut) if future.result() + ] + + return val + + def _parse_single_book( + self, id: str, generic_cover: str = "" + ) -> Optional[MetaRecord]: + url = f"https://book.douban.com/subject/{id}/" + + try: + r = self.session.get(url) + r.raise_for_status() + except Exception as e: + log.warning(e) + return None + + match = MetaRecord( + id=id, + title="", + authors=[], + url=url, + source=MetaSourceInfo( + id=self.__id__, + description=self.DESCRIPTION, + link=self.META_URL, + ), + ) + + html = etree.HTML(r.content.decode("utf8")) + + match.title = html.xpath(self.TITTLE_XPATH)[0].text + match.cover = html.xpath(self.COVER_XPATH)[0].attrib["href"] or generic_cover + try: + rating_num = float(html.xpath(self.RATING_XPATH)[0].text.strip()) + except Exception: + rating_num = 0 + match.rating = int(-1 * rating_num // 2 * -1) if rating_num else 0 + + tag_elements = html.xpath(self.TAGS_XPATH) + if len(tag_elements): + match.tags = [tag_element.text for tag_element in tag_elements] + + description_element = html.xpath(self.DESCRIPTION_XPATH) + if len(description_element): + match.description = html2text(etree.tostring( + description_element[-1], encoding="utf8").decode("utf8")) + + info = html.xpath(self.INFO_XPATH) + + for element in info: + text = element.text + if self.AUTHORS_PATTERN.search(text): + next = element.getnext() + while next is not None and next.tag != "br": + match.authors.append(next.text) + next = next.getnext() + elif self.PUBLISHER_PATTERN.search(text): + match.publisher = element.tail.strip() + elif self.SUBTITLE_PATTERN.search(text): + match.title = f'{match.title}:' + 
element.tail.strip() + elif self.PUBLISHED_DATE_PATTERN.search(text): + match.publishedDate = self._clean_date(element.tail.strip()) + elif self.SUBTITLE_PATTERN.search(text): + match.series = element.getnext().text + elif i_type := self.IDENTIFIERS_PATTERN.search(text): + match.identifiers[i_type.group()] = element.tail.strip() + + return match + + + def _clean_date(self, date: str) -> str: + """ + Clean up the date string to be in the format YYYY-MM-DD + + Examples of possible patterns: + '2014-7-16', '1988年4月', '1995-04', '2021-8', '2020-12-1', '1996年', + '1972', '2004/11/01', '1959年3月北京第1版第1印' + """ + year = date[:4] + moon = "01" + day = "01" + + if len(date) > 5: + digit = [] + ls = [] + for i in range(5, len(date)): + if date[i].isdigit(): + digit.append(date[i]) + elif digit: + ls.append("".join(digit) if len(digit)==2 else f"0{digit[0]}") + digit = [] + if digit: + ls.append("".join(digit) if len(digit)==2 else f"0{digit[0]}") + + moon = ls[0] + if len(ls)>1: + day = ls[1] + + return f"{year}-{moon}-{day}" diff --git a/cps/metadata_provider/google.py b/cps/metadata_provider/google.py index f3d02d8e..98fadd37 100644 --- a/cps/metadata_provider/google.py +++ b/cps/metadata_provider/google.py @@ -17,39 +17,101 @@ # along with this program. If not, see . 
# Google Books api document: https://developers.google.com/books/docs/v1/using - +from typing import Dict, List, Optional +from urllib.parse import quote import requests -from cps.services.Metadata import Metadata + +from cps import logger +from cps.isoLanguages import get_lang3, get_language_name +from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata + +log = logger.create() + class Google(Metadata): __name__ = "Google" __id__ = "google" + DESCRIPTION = "Google Books" + META_URL = "https://books.google.com/" + BOOK_URL = "https://books.google.com/books?id=" + SEARCH_URL = "https://www.googleapis.com/books/v1/volumes?q=" + ISBN_TYPE = "ISBN_13" - def search(self, query, __): + def search( + self, query: str, generic_cover: str = "", locale: str = "en" + ) -> Optional[List[MetaRecord]]: + val = list() if self.active: - val = list() - result = requests.get("https://www.googleapis.com/books/v1/volumes?q="+query.replace(" ","+")) - for r in result.json()['items']: - v = dict() - v['id'] = r['id'] - v['title'] = r['volumeInfo']['title'] - v['authors'] = r['volumeInfo'].get('authors', []) - v['description'] = r['volumeInfo'].get('description', "") - v['publisher'] = r['volumeInfo'].get('publisher', "") - v['publishedDate'] = r['volumeInfo'].get('publishedDate', "") - v['tags'] = r['volumeInfo'].get('categories', []) - v['rating'] = r['volumeInfo'].get('averageRating', 0) - if r['volumeInfo'].get('imageLinks'): - v['cover'] = r['volumeInfo']['imageLinks']['thumbnail'].replace("http://", "https://") - else: - v['cover'] = "/../../../static/generic_cover.jpg" - v['source'] = { - "id": self.__id__, - "description": "Google Books", - "link": "https://books.google.com/"} - v['url'] = "https://books.google.com/books?id=" + r['id'] - val.append(v) - return val + title_tokens = list(self.get_title_tokens(query, strip_joiners=False)) + if title_tokens: + tokens = [quote(t.encode("utf-8")) for t in title_tokens] + query = "+".join(tokens) + try: + results = 
requests.get(Google.SEARCH_URL + query) + results.raise_for_status() + except Exception as e: + log.warning(e) + return None + for result in results.json().get("items", []): + val.append( + self._parse_search_result( + result=result, generic_cover=generic_cover, locale=locale + ) + ) + return val + def _parse_search_result( + self, result: Dict, generic_cover: str, locale: str + ) -> MetaRecord: + match = MetaRecord( + id=result["id"], + title=result["volumeInfo"]["title"], + authors=result["volumeInfo"].get("authors", []), + url=Google.BOOK_URL + result["id"], + source=MetaSourceInfo( + id=self.__id__, + description=Google.DESCRIPTION, + link=Google.META_URL, + ), + ) + + match.cover = self._parse_cover(result=result, generic_cover=generic_cover) + match.description = result["volumeInfo"].get("description", "") + match.languages = self._parse_languages(result=result, locale=locale) + match.publisher = result["volumeInfo"].get("publisher", "") + match.publishedDate = result["volumeInfo"].get("publishedDate", "") + match.rating = result["volumeInfo"].get("averageRating", 0) + match.series, match.series_index = "", 1 + match.tags = result["volumeInfo"].get("categories", []) + + match.identifiers = {"google": match.id} + match = self._parse_isbn(result=result, match=match) + return match + + @staticmethod + def _parse_isbn(result: Dict, match: MetaRecord) -> MetaRecord: + identifiers = result["volumeInfo"].get("industryIdentifiers", []) + for identifier in identifiers: + if identifier.get("type") == Google.ISBN_TYPE: + match.identifiers["isbn"] = identifier.get("identifier") + break + return match + + @staticmethod + def _parse_cover(result: Dict, generic_cover: str) -> str: + if result["volumeInfo"].get("imageLinks"): + cover_url = result["volumeInfo"]["imageLinks"]["thumbnail"] + return cover_url.replace("http://", "https://") + return generic_cover + + @staticmethod + def _parse_languages(result: Dict, locale: str) -> List[str]: + language_iso2 = 
result["volumeInfo"].get("language", "") + languages = ( + [get_language_name(locale, get_lang3(language_iso2))] + if language_iso2 + else [] + ) + return languages diff --git a/cps/metadata_provider/lubimyczytac.py b/cps/metadata_provider/lubimyczytac.py new file mode 100644 index 00000000..e4abe9db --- /dev/null +++ b/cps/metadata_provider/lubimyczytac.py @@ -0,0 +1,350 @@ +# -*- coding: utf-8 -*- +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2021 OzzieIsaacs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+import datetime +import json +import re +from multiprocessing.pool import ThreadPool +from typing import List, Optional, Tuple, Union +from urllib.parse import quote + +import requests +from dateutil import parser +from html2text import HTML2Text +from lxml.html import HtmlElement, fromstring, tostring +from markdown2 import Markdown + +from cps import logger +from cps.isoLanguages import get_language_name +from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata + +log = logger.create() + +SYMBOLS_TO_TRANSLATE = ( + "öÖüÜóÓőŐúÚéÉáÁűŰíÍąĄćĆęĘłŁńŃóÓśŚźŹżŻ", + "oOuUoOoOuUeEaAuUiIaAcCeElLnNoOsSzZzZ", +) +SYMBOL_TRANSLATION_MAP = dict( + [(ord(a), ord(b)) for (a, b) in zip(*SYMBOLS_TO_TRANSLATE)] +) + + +def get_int_or_float(value: str) -> Union[int, float]: + number_as_float = float(value) + number_as_int = int(number_as_float) + return number_as_int if number_as_float == number_as_int else number_as_float + + +def strip_accents(s: Optional[str]) -> Optional[str]: + return s.translate(SYMBOL_TRANSLATION_MAP) if s is not None else s + + +def sanitize_comments_html(html: str) -> str: + text = html2text(html) + md = Markdown() + html = md.convert(text) + return html + + +def html2text(html: str) -> str: + # replace tags with as becomes emphasis in html2text + if isinstance(html, bytes): + html = html.decode("utf-8") + html = re.sub( + r"<\s*(?P/?)\s*[uU]\b(?P[^>]*)>", + r"<\gspan\g>", + html, + ) + h2t = HTML2Text() + h2t.body_width = 0 + h2t.single_line_break = True + h2t.emphasis_mark = "*" + return h2t.handle(html) + + +class LubimyCzytac(Metadata): + __name__ = "LubimyCzytac.pl" + __id__ = "lubimyczytac" + + BASE_URL = "https://lubimyczytac.pl" + + BOOK_SEARCH_RESULT_XPATH = ( + "*//div[@class='listSearch']//div[@class='authorAllBooks__single']" + ) + SINGLE_BOOK_RESULT_XPATH = ".//div[contains(@class,'authorAllBooks__singleText')]" + TITLE_PATH = "/div/a[contains(@class,'authorAllBooks__singleTextTitle')]" + TITLE_TEXT_PATH = f"{TITLE_PATH}//text()" + 
URL_PATH = f"{TITLE_PATH}/@href" + AUTHORS_PATH = "/div/a[contains(@href,'autor')]//text()" + + SIBLINGS = "/following-sibling::dd" + + CONTAINER = "//section[@class='container book']" + PUBLISHER = f"{CONTAINER}//dt[contains(text(),'Wydawnictwo:')]{SIBLINGS}/a/text()" + LANGUAGES = f"{CONTAINER}//dt[contains(text(),'Język:')]{SIBLINGS}/text()" + DESCRIPTION = f"{CONTAINER}//div[@class='collapse-content']" + SERIES = f"{CONTAINER}//span/a[contains(@href,'/cykl/')]/text()" + + DETAILS = "//div[@id='book-details']" + PUBLISH_DATE = "//dt[contains(@title,'Data pierwszego wydania" + FIRST_PUBLISH_DATE = f"{DETAILS}{PUBLISH_DATE} oryginalnego')]{SIBLINGS}[1]/text()" + FIRST_PUBLISH_DATE_PL = f"{DETAILS}{PUBLISH_DATE} polskiego')]{SIBLINGS}[1]/text()" + TAGS = "//nav[@aria-label='breadcrumb']//a[contains(@href,'/ksiazki/k/')]/text()" + + RATING = "//meta[@property='books:rating:value']/@content" + COVER = "//meta[@property='og:image']/@content" + ISBN = "//meta[@property='books:isbn']/@content" + META_TITLE = "//meta[@property='og:description']/@content" + + SUMMARY = "//script[@type='application/ld+json']//text()" + + def search( + self, query: str, generic_cover: str = "", locale: str = "en" + ) -> Optional[List[MetaRecord]]: + if self.active: + try: + result = requests.get(self._prepare_query(title=query)) + result.raise_for_status() + except Exception as e: + log.warning(e) + return None + root = fromstring(result.text) + lc_parser = LubimyCzytacParser(root=root, metadata=self) + matches = lc_parser.parse_search_results() + if matches: + with ThreadPool(processes=10) as pool: + final_matches = pool.starmap( + lc_parser.parse_single_book, + [(match, generic_cover, locale) for match in matches], + ) + return final_matches + return matches + + def _prepare_query(self, title: str) -> str: + query = "" + characters_to_remove = "\?()\/" + pattern = "[" + characters_to_remove + "]" + title = re.sub(pattern, "", title) + title = title.replace("_", " ") + if '"' in title or 
",," in title: + title = title.split('"')[0].split(",,")[0] + + if "/" in title: + title_tokens = [ + token for token in title.lower().split(" ") if len(token) > 1 + ] + else: + title_tokens = list(self.get_title_tokens(title, strip_joiners=False)) + if title_tokens: + tokens = [quote(t.encode("utf-8")) for t in title_tokens] + query = query + "%20".join(tokens) + if not query: + return "" + return f"{LubimyCzytac.BASE_URL}/szukaj/ksiazki?phrase={query}" + + +class LubimyCzytacParser: + PAGES_TEMPLATE = "

Książka ma {0} stron(y).

" + PUBLISH_DATE_TEMPLATE = "

Data pierwszego wydania: {0}

" + PUBLISH_DATE_PL_TEMPLATE = ( + "

Data pierwszego wydania w Polsce: {0}

" + ) + + def __init__(self, root: HtmlElement, metadata: Metadata) -> None: + self.root = root + self.metadata = metadata + + def parse_search_results(self) -> List[MetaRecord]: + matches = [] + results = self.root.xpath(LubimyCzytac.BOOK_SEARCH_RESULT_XPATH) + for result in results: + title = self._parse_xpath_node( + root=result, + xpath=f"{LubimyCzytac.SINGLE_BOOK_RESULT_XPATH}" + f"{LubimyCzytac.TITLE_TEXT_PATH}", + ) + + book_url = self._parse_xpath_node( + root=result, + xpath=f"{LubimyCzytac.SINGLE_BOOK_RESULT_XPATH}" + f"{LubimyCzytac.URL_PATH}", + ) + authors = self._parse_xpath_node( + root=result, + xpath=f"{LubimyCzytac.SINGLE_BOOK_RESULT_XPATH}" + f"{LubimyCzytac.AUTHORS_PATH}", + take_first=False, + ) + if not all([title, book_url, authors]): + continue + matches.append( + MetaRecord( + id=book_url.replace(f"/ksiazka/", "").split("/")[0], + title=title, + authors=[strip_accents(author) for author in authors], + url=LubimyCzytac.BASE_URL + book_url, + source=MetaSourceInfo( + id=self.metadata.__id__, + description=self.metadata.__name__, + link=LubimyCzytac.BASE_URL, + ), + ) + ) + return matches + + def parse_single_book( + self, match: MetaRecord, generic_cover: str, locale: str + ) -> MetaRecord: + try: + response = requests.get(match.url) + response.raise_for_status() + except Exception as e: + log.warning(e) + return None + self.root = fromstring(response.text) + match.cover = self._parse_cover(generic_cover=generic_cover) + match.description = self._parse_description() + match.languages = self._parse_languages(locale=locale) + match.publisher = self._parse_publisher() + match.publishedDate = self._parse_from_summary(attribute_name="datePublished") + match.rating = self._parse_rating() + match.series, match.series_index = self._parse_series() + match.tags = self._parse_tags() + match.identifiers = { + "isbn": self._parse_isbn(), + "lubimyczytac": match.id, + } + return match + + def _parse_xpath_node( + self, + xpath: str, + root: HtmlElement = 
None, + take_first: bool = True, + strip_element: bool = True, + ) -> Optional[Union[str, List[str]]]: + root = root if root is not None else self.root + node = root.xpath(xpath) + if not node: + return None + return ( + (node[0].strip() if strip_element else node[0]) + if take_first + else [x.strip() for x in node] + ) + + def _parse_cover(self, generic_cover) -> Optional[str]: + return ( + self._parse_xpath_node(xpath=LubimyCzytac.COVER, take_first=True) + or generic_cover + ) + + def _parse_publisher(self) -> Optional[str]: + return self._parse_xpath_node(xpath=LubimyCzytac.PUBLISHER, take_first=True) + + def _parse_languages(self, locale: str) -> List[str]: + languages = list() + lang = self._parse_xpath_node(xpath=LubimyCzytac.LANGUAGES, take_first=True) + if lang: + if "polski" in lang: + languages.append("pol") + if "angielski" in lang: + languages.append("eng") + return [get_language_name(locale, language) for language in languages] + + def _parse_series(self) -> Tuple[Optional[str], Optional[Union[float, int]]]: + series_index = 0 + series = self._parse_xpath_node(xpath=LubimyCzytac.SERIES, take_first=True) + if series: + if "tom " in series: + series_name, series_info = series.split(" (tom ", 1) + series_info = series_info.replace(" ", "").replace(")", "") + # Check if book is not a bundle, i.e. 
chapter 1-3 + if "-" in series_info: + series_info = series_info.split("-", 1)[0] + if series_info.replace(".", "").isdigit() is True: + series_index = get_int_or_float(series_info) + return series_name, series_index + return None, None + + def _parse_tags(self) -> List[str]: + tags = self._parse_xpath_node(xpath=LubimyCzytac.TAGS, take_first=False) + return [ + strip_accents(w.replace(", itd.", " itd.")) + for w in tags + if isinstance(w, str) + ] + + def _parse_from_summary(self, attribute_name: str) -> Optional[str]: + value = None + summary_text = self._parse_xpath_node(xpath=LubimyCzytac.SUMMARY) + if summary_text: + data = json.loads(summary_text) + value = data.get(attribute_name) + return value.strip() if value is not None else value + + def _parse_rating(self) -> Optional[str]: + rating = self._parse_xpath_node(xpath=LubimyCzytac.RATING) + return round(float(rating.replace(",", ".")) / 2) if rating else rating + + def _parse_date(self, xpath="first_publish") -> Optional[datetime.datetime]: + options = { + "first_publish": LubimyCzytac.FIRST_PUBLISH_DATE, + "first_publish_pl": LubimyCzytac.FIRST_PUBLISH_DATE_PL, + } + date = self._parse_xpath_node(xpath=options.get(xpath)) + return parser.parse(date) if date else None + + def _parse_isbn(self) -> Optional[str]: + return self._parse_xpath_node(xpath=LubimyCzytac.ISBN) + + def _parse_description(self) -> str: + description = "" + description_node = self._parse_xpath_node( + xpath=LubimyCzytac.DESCRIPTION, strip_element=False + ) + if description_node is not None: + for source in self.root.xpath('//p[@class="source"]'): + source.getparent().remove(source) + description = tostring(description_node, method="html") + description = sanitize_comments_html(description) + + else: + description_node = self._parse_xpath_node(xpath=LubimyCzytac.META_TITLE) + if description_node is not None: + description = description_node + description = sanitize_comments_html(description) + description = 
self._add_extra_info_to_description(description=description) + return description + + def _add_extra_info_to_description(self, description: str) -> str: + pages = self._parse_from_summary(attribute_name="numberOfPages") + if pages: + description += LubimyCzytacParser.PAGES_TEMPLATE.format(pages) + + first_publish_date = self._parse_date() + if first_publish_date: + description += LubimyCzytacParser.PUBLISH_DATE_TEMPLATE.format( + first_publish_date.strftime("%d.%m.%Y") + ) + + first_publish_date_pl = self._parse_date(xpath="first_publish_pl") + if first_publish_date_pl: + description += LubimyCzytacParser.PUBLISH_DATE_PL_TEMPLATE.format( + first_publish_date_pl.strftime("%d.%m.%Y") + ) + + return description diff --git a/cps/metadata_provider/scholar.py b/cps/metadata_provider/scholar.py index 6e13c768..7feb0ee9 100644 --- a/cps/metadata_provider/scholar.py +++ b/cps/metadata_provider/scholar.py @@ -15,47 +15,67 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
+import itertools +from typing import Dict, List, Optional +from urllib.parse import quote, unquote -from scholarly import scholarly +try: + from fake_useragent.errors import FakeUserAgentError +except (ImportError): + FakeUserAgentError = BaseException +try: + from scholarly import scholarly +except FakeUserAgentError: + raise ImportError("No module named 'scholarly'") -from cps.services.Metadata import Metadata +from cps import logger +from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata + +log = logger.create() class scholar(Metadata): __name__ = "Google Scholar" __id__ = "googlescholar" + META_URL = "https://scholar.google.com/" - def search(self, query, generic_cover=""): + def search( + self, query: str, generic_cover: str = "", locale: str = "en" + ) -> Optional[List[MetaRecord]]: val = list() if self.active: - scholar_gen = scholarly.search_pubs(' '.join(query.split('+'))) - i = 0 - for publication in scholar_gen: - v = dict() - v['id'] = "1234" # publication['bib'].get('title') - v['title'] = publication['bib'].get('title') - v['authors'] = publication['bib'].get('author', []) - v['description'] = publication['bib'].get('abstract', "") - v['publisher'] = publication['bib'].get('venue', "") - if publication['bib'].get('pub_year'): - v['publishedDate'] = publication['bib'].get('pub_year')+"-01-01" - else: - v['publishedDate'] = "" - v['tags'] = "" - v['ratings'] = 0 - v['series'] = "" - v['cover'] = generic_cover - v['url'] = publication.get('pub_url') or publication.get('eprint_url') or "", - v['source'] = { - "id": self.__id__, - "description": "Google Scholar", - "link": "https://scholar.google.com/" - } - val.append(v) - i += 1 - if (i >= 10): - break + title_tokens = list(self.get_title_tokens(query, strip_joiners=False)) + if title_tokens: + tokens = [quote(t.encode("utf-8")) for t in title_tokens] + query = " ".join(tokens) + try: + scholar_gen = itertools.islice(scholarly.search_pubs(query), 10) + except Exception as e: + 
log.warning(e) + return None + for result in scholar_gen: + match = self._parse_search_result( + result=result, generic_cover="", locale=locale + ) + val.append(match) return val + def _parse_search_result( + self, result: Dict, generic_cover: str, locale: str + ) -> MetaRecord: + match = MetaRecord( + id=result.get("pub_url", result.get("eprint_url", "")), + title=result["bib"].get("title"), + authors=result["bib"].get("author", []), + url=result.get("pub_url", result.get("eprint_url", "")), + source=MetaSourceInfo( + id=self.__id__, description=self.__name__, link=scholar.META_URL + ), + ) - + match.cover = result.get("image", {}).get("original_url", generic_cover) + match.description = unquote(result["bib"].get("abstract", "")) + match.publisher = result["bib"].get("venue", "") + match.publishedDate = result["bib"].get("pub_year") + "-01-01" + match.identifiers = {"scholar": match.id} + return match diff --git a/cps/oauth.py b/cps/oauth.py index f8e5c1fd..0caa61ec 100644 --- a/cps/oauth.py +++ b/cps/oauth.py @@ -19,18 +19,12 @@ from flask import session try: - from flask_dance.consumer.backend.sqla import SQLAlchemyBackend, first, _get_real_user + from flask_dance.consumer.storage.sqla import SQLAlchemyStorage as SQLAlchemyBackend + from flask_dance.consumer.storage.sqla import first, _get_real_user from sqlalchemy.orm.exc import NoResultFound - backend_resultcode = False # prevent storing values with this resultcode + backend_resultcode = True # prevent storing values with this resultcode except ImportError: - # fails on flask-dance >1.3, due to renaming - try: - from flask_dance.consumer.storage.sqla import SQLAlchemyStorage as SQLAlchemyBackend - from flask_dance.consumer.storage.sqla import first, _get_real_user - from sqlalchemy.orm.exc import NoResultFound - backend_resultcode = True # prevent storing values with this resultcode - except ImportError: - pass + pass class OAuthBackend(SQLAlchemyBackend): diff --git a/cps/oauth_bb.py b/cps/oauth_bb.py index 
d9efd41e..d9a60c0e 100644 --- a/cps/oauth_bb.py +++ b/cps/oauth_bb.py @@ -149,7 +149,7 @@ def bind_oauth_or_register(provider_id, provider_user_id, redirect_url, provider log.info("Link to {} Succeeded".format(provider_name)) return redirect(url_for('web.profile')) except Exception as ex: - log.debug_or_exception(ex) + log.error_or_exception(ex) ub.session.rollback() else: flash(_(u"Login failed, No User Linked With OAuth Account"), category="error") @@ -197,7 +197,7 @@ def unlink_oauth(provider): flash(_(u"Unlink to %(oauth)s Succeeded", oauth=oauth_check[provider]), category="success") log.info("Unlink to {} Succeeded".format(oauth_check[provider])) except Exception as ex: - log.debug_or_exception(ex) + log.error_or_exception(ex) ub.session.rollback() flash(_(u"Unlink to %(oauth)s Failed", oauth=oauth_check[provider]), category="error") except NoResultFound: diff --git a/cps/opds.py b/cps/opds.py index 0b0a8923..60dbd551 100644 --- a/cps/opds.py +++ b/cps/opds.py @@ -26,10 +26,12 @@ from functools import wraps from flask import Blueprint, request, render_template, Response, g, make_response, abort from flask_login import current_user +from flask_babel import get_locale from sqlalchemy.sql.expression import func, text, or_, and_, true +from sqlalchemy.exc import InvalidRequestError, OperationalError from werkzeug.security import check_password_hash -from tornado.httputil import HTTPServerRequest -from . import constants, logger, config, db, calibre_db, ub, services, get_locale, isoLanguages + +from . 
import constants, logger, config, db, calibre_db, ub, services, isoLanguages from .helper import get_download_link, get_book_cover from .pagination import Pagination from .web import render_read_books @@ -54,20 +56,6 @@ def requires_basic_auth_if_no_ano(f): return decorated -class FeedObject: - def __init__(self, rating_id, rating_name): - self.rating_id = rating_id - self.rating_name = rating_name - - @property - def id(self): - return self.rating_id - - @property - def name(self): - return self.rating_name - - @opds.route("/opds/") @opds.route("/opds") @requires_basic_auth_if_no_ano @@ -86,7 +74,7 @@ def feed_osd(): @requires_basic_auth_if_no_ano def feed_cc_search(query): # Handle strange query from Libera Reader with + instead of spaces - plus_query = unquote_plus(request.base_url.split('/opds/search/')[1]).strip() + plus_query = unquote_plus(request.environ['RAW_URI'].split('/opds/search/')[1]).strip() return feed_search(plus_query) @@ -99,26 +87,7 @@ def feed_normal_search(): @opds.route("/opds/books") @requires_basic_auth_if_no_ano def feed_booksindex(): - shift = 0 - off = int(request.args.get("offset") or 0) - entries = calibre_db.session.query(func.upper(func.substr(db.Books.sort, 1, 1)).label('id'))\ - .filter(calibre_db.common_filters()).group_by(func.upper(func.substr(db.Books.sort, 1, 1))).all() - - elements = [] - if off == 0: - elements.append({'id': "00", 'name':_("All")}) - shift = 1 - for entry in entries[ - off + shift - 1: - int(off + int(config.config_books_per_page) - shift)]: - elements.append({'id': entry.id, 'name': entry.id}) - - pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page, - len(entries) + 1) - return render_xml_template('feed.xml', - letterelements=elements, - folder='opds.feed_letter_books', - pagination=pagination) + return render_element_index(db.Books.sort, None, 'opds.feed_letter_books') @opds.route("/opds/books/letter/") @@ -129,7 +98,8 @@ def 
feed_letter_books(book_id): entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, letter, - [db.Books.sort]) + [db.Books.sort], + True, config.config_read_column) return render_xml_template('feed.xml', entries=entries, pagination=pagination) @@ -139,15 +109,16 @@ def feed_letter_books(book_id): def feed_new(): off = request.args.get("offset") or 0 entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, - db.Books, True, [db.Books.timestamp.desc()]) + db.Books, True, [db.Books.timestamp.desc()], + True, config.config_read_column) return render_xml_template('feed.xml', entries=entries, pagination=pagination) @opds.route("/opds/discover") @requires_basic_auth_if_no_ano def feed_discover(): - entries = calibre_db.session.query(db.Books).filter(calibre_db.common_filters()).order_by(func.random())\ - .limit(config.config_books_per_page) + query = calibre_db.generate_linked_query(config.config_read_column, db.Books) + entries = query.filter(calibre_db.common_filters()).order_by(func.random()).limit(config.config_books_per_page) pagination = Pagination(1, config.config_books_per_page, int(config.config_books_per_page)) return render_xml_template('feed.xml', entries=entries, pagination=pagination) @@ -158,7 +129,8 @@ def feed_best_rated(): off = request.args.get("offset") or 0 entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, db.Books.ratings.any(db.Ratings.rating > 9), - [db.Books.timestamp.desc()]) + [db.Books.timestamp.desc()], + True, config.config_read_column) return render_xml_template('feed.xml', entries=entries, pagination=pagination) @@ -171,43 +143,23 @@ def feed_hot(): hot_books = all_books.offset(off).limit(config.config_books_per_page) entries = list() for book in hot_books: - downloadBook = calibre_db.get_book(book.Downloads.book_id) - if downloadBook: - entries.append( - 
calibre_db.get_filtered_book(book.Downloads.book_id) - ) + query = calibre_db.generate_linked_query(config.config_read_column, db.Books) + download_book = query.filter(calibre_db.common_filters()).filter( + book.Downloads.book_id == db.Books.id).first() + if download_book: + entries.append(download_book) else: ub.delete_download(book.Downloads.book_id) - numBooks = entries.__len__() + num_books = entries.__len__() pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), - config.config_books_per_page, numBooks) + config.config_books_per_page, num_books) return render_xml_template('feed.xml', entries=entries, pagination=pagination) @opds.route("/opds/author") @requires_basic_auth_if_no_ano def feed_authorindex(): - shift = 0 - off = int(request.args.get("offset") or 0) - entries = calibre_db.session.query(func.upper(func.substr(db.Authors.sort, 1, 1)).label('id'))\ - .join(db.books_authors_link).join(db.Books).filter(calibre_db.common_filters())\ - .group_by(func.upper(func.substr(db.Authors.sort, 1, 1))).all() - - elements = [] - if off == 0: - elements.append({'id': "00", 'name':_("All")}) - shift = 1 - for entry in entries[ - off + shift - 1: - int(off + int(config.config_books_per_page) - shift)]: - elements.append({'id': entry.id, 'name': entry.id}) - - pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page, - len(entries) + 1) - return render_xml_template('feed.xml', - letterelements=elements, - folder='opds.feed_letter_author', - pagination=pagination) + return render_element_index(db.Authors.sort, db.books_authors_link, 'opds.feed_letter_author') @opds.route("/opds/author/letter/") @@ -228,12 +180,7 @@ def feed_letter_author(book_id): @opds.route("/opds/author/") @requires_basic_auth_if_no_ano def feed_author(book_id): - off = request.args.get("offset") or 0 - entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, - db.Books, - 
db.Books.authors.any(db.Authors.id == book_id), - [db.Books.timestamp.desc()]) - return render_xml_template('feed.xml', entries=entries, pagination=pagination) + return render_xml_dataset(db.Authors, book_id) @opds.route("/opds/publisher") @@ -254,37 +201,14 @@ def feed_publisherindex(): @opds.route("/opds/publisher/") @requires_basic_auth_if_no_ano def feed_publisher(book_id): - off = request.args.get("offset") or 0 - entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, - db.Books, - db.Books.publishers.any(db.Publishers.id == book_id), - [db.Books.timestamp.desc()]) - return render_xml_template('feed.xml', entries=entries, pagination=pagination) + return render_xml_dataset(db.Publishers, book_id) @opds.route("/opds/category") @requires_basic_auth_if_no_ano def feed_categoryindex(): - shift = 0 - off = int(request.args.get("offset") or 0) - entries = calibre_db.session.query(func.upper(func.substr(db.Tags.name, 1, 1)).label('id'))\ - .join(db.books_tags_link).join(db.Books).filter(calibre_db.common_filters())\ - .group_by(func.upper(func.substr(db.Tags.name, 1, 1))).all() - elements = [] - if off == 0: - elements.append({'id': "00", 'name':_("All")}) - shift = 1 - for entry in entries[ - off + shift - 1: - int(off + int(config.config_books_per_page) - shift)]: - elements.append({'id': entry.id, 'name': entry.id}) + return render_element_index(db.Tags.name, db.books_tags_link, 'opds.feed_letter_category') - pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page, - len(entries) + 1) - return render_xml_template('feed.xml', - letterelements=elements, - folder='opds.feed_letter_category', - pagination=pagination) @opds.route("/opds/category/letter/") @requires_basic_auth_if_no_ano @@ -306,36 +230,14 @@ def feed_letter_category(book_id): @opds.route("/opds/category/") @requires_basic_auth_if_no_ano def feed_category(book_id): - off = request.args.get("offset") or 
0 - entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, - db.Books, - db.Books.tags.any(db.Tags.id == book_id), - [db.Books.timestamp.desc()]) - return render_xml_template('feed.xml', entries=entries, pagination=pagination) + return render_xml_dataset(db.Tags, book_id) @opds.route("/opds/series") @requires_basic_auth_if_no_ano def feed_seriesindex(): - shift = 0 - off = int(request.args.get("offset") or 0) - entries = calibre_db.session.query(func.upper(func.substr(db.Series.sort, 1, 1)).label('id'))\ - .join(db.books_series_link).join(db.Books).filter(calibre_db.common_filters())\ - .group_by(func.upper(func.substr(db.Series.sort, 1, 1))).all() - elements = [] - if off == 0: - elements.append({'id': "00", 'name':_("All")}) - shift = 1 - for entry in entries[ - off + shift - 1: - int(off + int(config.config_books_per_page) - shift)]: - elements.append({'id': entry.id, 'name': entry.id}) - pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page, - len(entries) + 1) - return render_xml_template('feed.xml', - letterelements=elements, - folder='opds.feed_letter_series', - pagination=pagination) + return render_element_index(db.Series.sort, db.books_series_link, 'opds.feed_letter_series') + @opds.route("/opds/series/letter/") @requires_basic_auth_if_no_ano @@ -361,7 +263,8 @@ def feed_series(book_id): entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, db.Books.series.any(db.Series.id == book_id), - [db.Books.series_index]) + [db.Books.series_index], + True, config.config_read_column) return render_xml_template('feed.xml', entries=entries, pagination=pagination) @@ -370,7 +273,7 @@ def feed_series(book_id): def feed_ratingindex(): off = request.args.get("offset") or 0 entries = calibre_db.session.query(db.Ratings, func.count('books_ratings_link.book').label('count'), - (db.Ratings.rating / 
2).label('name')) \ + (db.Ratings.rating / 2).label('name')) \ .join(db.books_ratings_link)\ .join(db.Books)\ .filter(calibre_db.common_filters()) \ @@ -388,12 +291,7 @@ def feed_ratingindex(): @opds.route("/opds/ratings/") @requires_basic_auth_if_no_ano def feed_ratings(book_id): - off = request.args.get("offset") or 0 - entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, - db.Books, - db.Books.ratings.any(db.Ratings.id == book_id), - [db.Books.timestamp.desc()]) - return render_xml_template('feed.xml', entries=entries, pagination=pagination) + return render_xml_dataset(db.Ratings, book_id) @opds.route("/opds/formats") @@ -420,7 +318,8 @@ def feed_format(book_id): entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, db.Books.data.any(db.Data.format == book_id.upper()), - [db.Books.timestamp.desc()]) + [db.Books.timestamp.desc()], + True, config.config_read_column) return render_xml_template('feed.xml', entries=entries, pagination=pagination) @@ -432,17 +331,9 @@ def feed_languagesindex(): if current_user.filter_language() == u"all": languages = calibre_db.speaking_language() else: - #try: - # cur_l = LC.parse(current_user.filter_language()) - #except UnknownLocaleError: - # cur_l = None languages = calibre_db.session.query(db.Languages).filter( db.Languages.lang_code == current_user.filter_language()).all() languages[0].name = isoLanguages.get_language_name(get_locale(), languages[0].lang_code) - #if cur_l: - # languages[0].name = cur_l.get_language_name(get_locale()) - #else: - # languages[0].name = _(isoLanguages.get(part3=languages[0].lang_code).name) pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page, len(languages)) return render_xml_template('feed.xml', listelements=languages, folder='opds.feed_languages', pagination=pagination) @@ -455,7 +346,8 @@ def feed_languages(book_id): 
entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, db.Books.languages.any(db.Languages.id == book_id), - [db.Books.timestamp.desc()]) + [db.Books.timestamp.desc()], + True, config.config_read_column) return render_xml_template('feed.xml', entries=entries, pagination=pagination) @@ -485,13 +377,25 @@ def feed_shelf(book_id): result = list() # user is allowed to access shelf if shelf: - books_in_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == book_id).order_by( - ub.BookShelf.order.asc()).all() - for book in books_in_shelf: - cur_book = calibre_db.get_book(book.book_id) - result.append(cur_book) - pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page, - len(result)) + result, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), + config.config_books_per_page, + db.Books, + ub.BookShelf.shelf == shelf.id, + [ub.BookShelf.order.asc()], + True, config.config_read_column, + ub.BookShelf, ub.BookShelf.book_id == db.Books.id) + # delete shelf entries where book is not existent anymore, can happen if book is deleted outside calibre-web + wrong_entries = calibre_db.session.query(ub.BookShelf) \ + .join(db.Books, ub.BookShelf.book_id == db.Books.id, isouter=True) \ + .filter(db.Books.id == None).all() + for entry in wrong_entries: + log.info('Not existing book {} in {} deleted'.format(entry.book_id, shelf)) + try: + ub.session.query(ub.BookShelf).filter(ub.BookShelf.book_id == entry.book_id).delete() + ub.session.commit() + except (OperationalError, InvalidRequestError) as e: + ub.session.rollback() + log.error_or_exception("Settings Database error: {}".format(e)) return render_xml_template('feed.xml', entries=result, pagination=pagination) @@ -499,7 +403,7 @@ def feed_shelf(book_id): @requires_basic_auth_if_no_ano def opds_download_link(book_id, book_format): # I gave up with this: With 
enabled ldap login, the user doesn't get logged in, therefore it's always guest - # workaround, loading the user from the request and checking it's download rights here + # workaround, loading the user from the request and checking its download rights here # in case of anonymous browsing user is None user = load_user_from_request(request) or current_user if not user.role_download(): @@ -525,47 +429,6 @@ def get_metadata_calibre_companion(uuid, library): return "" -def feed_search(term): - if term: - entries, __, ___ = calibre_db.get_search_results(term, config_read_column=config.config_read_column) - entries_count = len(entries) if len(entries) > 0 else 1 - pagination = Pagination(1, entries_count, entries_count) - return render_xml_template('feed.xml', searchterm=term, entries=entries, pagination=pagination) - else: - return render_xml_template('feed.xml', searchterm="") - - -def check_auth(username, password): - try: - username = username.encode('windows-1252') - except UnicodeEncodeError: - username = username.encode('utf-8') - user = ub.session.query(ub.User).filter(func.lower(ub.User.name) == - username.decode('utf-8').lower()).first() - if bool(user and check_password_hash(str(user.password), password)): - return True - else: - ip_Address = request.headers.get('X-Forwarded-For', request.remote_addr) - log.warning('OPDS Login failed for user "%s" IP-address: %s', username.decode('utf-8'), ip_Address) - return False - - -def authenticate(): - return Response( - 'Could not verify your access level for that URL.\n' - 'You have to login with proper credentials', 401, - {'WWW-Authenticate': 'Basic realm="Login Required"'}) - - -def render_xml_template(*args, **kwargs): - # ToDo: return time in current timezone similar to %z - currtime = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S+00:00") - xml = render_template(current_time=currtime, instance=config.config_calibre_web_title, *args, **kwargs) - response = make_response(xml) - response.headers["Content-Type"] 
= "application/atom+xml; charset=utf-8" - return response - - @opds.route("/opds/thumb_240_240/") @opds.route("/opds/cover_240_240/") @opds.route("/opds/cover_90_90/") @@ -589,3 +452,92 @@ def feed_unread_books(): off = request.args.get("offset") or 0 result, pagination = render_read_books(int(off) / (int(config.config_books_per_page)) + 1, False, True) return render_xml_template('feed.xml', entries=result, pagination=pagination) + + +class FeedObject: + def __init__(self, rating_id, rating_name): + self.rating_id = rating_id + self.rating_name = rating_name + + @property + def id(self): + return self.rating_id + + @property + def name(self): + return self.rating_name + + +def feed_search(term): + if term: + entries, __, ___ = calibre_db.get_search_results(term, config=config) + entries_count = len(entries) if len(entries) > 0 else 1 + pagination = Pagination(1, entries_count, entries_count) + return render_xml_template('feed.xml', searchterm=term, entries=entries, pagination=pagination) + else: + return render_xml_template('feed.xml', searchterm="") + + +def check_auth(username, password): + try: + username = username.encode('windows-1252') + except UnicodeEncodeError: + username = username.encode('utf-8') + user = ub.session.query(ub.User).filter(func.lower(ub.User.name) == + username.decode('utf-8').lower()).first() + if bool(user and check_password_hash(str(user.password), password)): + return True + else: + ip_address = request.headers.get('X-Forwarded-For', request.remote_addr) + log.warning('OPDS Login failed for user "%s" IP-address: %s', username.decode('utf-8'), ip_address) + return False + + +def authenticate(): + return Response( + 'Could not verify your access level for that URL.\n' + 'You have to login with proper credentials', 401, + {'WWW-Authenticate': 'Basic realm="Login Required"'}) + + +def render_xml_template(*args, **kwargs): + # ToDo: return time in current timezone similar to %z + currtime = 
datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S+00:00") + xml = render_template(current_time=currtime, instance=config.config_calibre_web_title, *args, **kwargs) + response = make_response(xml) + response.headers["Content-Type"] = "application/atom+xml; charset=utf-8" + return response + + +def render_xml_dataset(data_table, book_id): + off = request.args.get("offset") or 0 + entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, + db.Books, + getattr(db.Books, data_table.__tablename__).any(data_table.id == book_id), + [db.Books.timestamp.desc()], + True, config.config_read_column) + return render_xml_template('feed.xml', entries=entries, pagination=pagination) + + +def render_element_index(database_column, linked_table, folder): + shift = 0 + off = int(request.args.get("offset") or 0) + entries = calibre_db.session.query(func.upper(func.substr(database_column, 1, 1)).label('id'), None, None) + # query = calibre_db.generate_linked_query(config.config_read_column, db.Books) + if linked_table is not None: + entries = entries.join(linked_table).join(db.Books) + entries = entries.filter(calibre_db.common_filters()).group_by(func.upper(func.substr(database_column, 1, 1))).all() + elements = [] + if off == 0: + elements.append({'id': "00", 'name': _("All")}) + shift = 1 + for entry in entries[ + off + shift - 1: + int(off + int(config.config_books_per_page) - shift)]: + elements.append({'id': entry.id, 'name': entry.id}) + pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page, + len(entries) + 1) + return render_xml_template('feed.xml', + letterelements=elements, + folder=folder, + pagination=pagination) diff --git a/cps/pagination.py b/cps/pagination.py index 7a9bfb70..bda9f4c8 100644 --- a/cps/pagination.py +++ b/cps/pagination.py @@ -57,10 +57,10 @@ class Pagination(object): def has_next(self): return self.page < self.pages - # right_edge: last right_edges 
count of all pages are shown as number, means, if 10 pages are paginated -> 9,10 shwn - # left_edge: first left_edges count of all pages are shown as number -> 1,2 shwn - # left_current: left_current count below current page are shown as number, means if current page 5 -> 3,4 shwn - # left_current: right_current count above current page are shown as number, means if current page 5 -> 6,7 shwn + # right_edge: last right_edges count of all pages are shown as number, means, if 10 pages are paginated -> 9,10 shown + # left_edge: first left_edges count of all pages are shown as number -> 1,2 shown + # left_current: left_current count below current page are shown as number, means if current page 5 -> 3,4 shown + # left_current: right_current count above current page are shown as number, means if current page 5 -> 6,7 shown def iter_pages(self, left_edge=2, left_current=2, right_current=4, right_edge=2): last = 0 diff --git a/cps/redirect.py b/cps/redirect.py index d491b353..9382a205 100644 --- a/cps/redirect.py +++ b/cps/redirect.py @@ -27,10 +27,7 @@ # http://flask.pocoo.org/snippets/62/ -try: - from urllib.parse import urlparse, urljoin -except ImportError: - from urlparse import urlparse, urljoin +from urllib.parse import urlparse, urljoin from flask import request, url_for, redirect diff --git a/cps/remotelogin.py b/cps/remotelogin.py index a9994f09..ea613c29 100644 --- a/cps/remotelogin.py +++ b/cps/remotelogin.py @@ -22,6 +22,7 @@ import json from datetime import datetime +from functools import wraps from flask import Blueprint, request, make_response, abort, url_for, flash, redirect from flask_login import login_required, current_user, login_user @@ -31,10 +32,6 @@ from sqlalchemy.sql.expression import true from . 
import config, logger, ub from .render_template import render_title_template -try: - from functools import wraps -except ImportError: - pass # We're not using Python 3 remotelogin = Blueprint('remotelogin', __name__) log = logger.create() diff --git a/cps/render_template.py b/cps/render_template.py index 7cd341ea..0750a9c4 100644 --- a/cps/render_template.py +++ b/cps/render_template.py @@ -16,13 +16,12 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from flask import render_template +from flask import render_template, g, abort, request from flask_babel import gettext as _ -from flask import g from werkzeug.local import LocalProxy from flask_login import current_user -from . import config, constants, ub, logger, db, calibre_db +from . import config, constants, logger from .ub import User @@ -30,6 +29,8 @@ log = logger.create() def get_sidebar_config(kwargs=None): kwargs = kwargs or [] + simple = bool([e for e in ['kindle', 'tolino', "kobo", "bookeen"] + if (e in request.headers.get('User-Agent', "").lower())]) if 'content' in kwargs: content = kwargs['content'] content = isinstance(content, (User, LocalProxy)) and not content.role_anonymous() @@ -93,14 +94,14 @@ def get_sidebar_config(kwargs=None): {"glyph": "glyphicon-trash", "text": _('Archived Books'), "link": 'web.books_list', "id": "archived", "visibility": constants.SIDEBAR_ARCHIVED, 'public': (not g.user.is_anonymous), "page": "archived", "show_text": _('Show archived books'), "config_show": content}) - sidebar.append( - {"glyph": "glyphicon-th-list", "text": _('Books List'), "link": 'web.books_table', "id": "list", - "visibility": constants.SIDEBAR_LIST, 'public': (not g.user.is_anonymous), "page": "list", - "show_text": _('Show Books List'), "config_show": content}) + if not simple: + sidebar.append( + {"glyph": "glyphicon-th-list", "text": _('Books List'), "link": 'web.books_table', "id": "list", + "visibility": constants.SIDEBAR_LIST, 
'public': (not g.user.is_anonymous), "page": "list", + "show_text": _('Show Books List'), "config_show": content}) + return sidebar, simple - return sidebar - -def get_readbooks_ids(): +'''def get_readbooks_ids(): if not config.config_read_column: readBooks = ub.session.query(ub.ReadBook).filter(ub.ReadBook.user_id == int(current_user.id))\ .filter(ub.ReadBook.read_status == ub.ReadBook.STATUS_FINISHED).all() @@ -110,13 +111,17 @@ def get_readbooks_ids(): readBooks = calibre_db.session.query(db.cc_classes[config.config_read_column])\ .filter(db.cc_classes[config.config_read_column].value == True).all() return frozenset([x.book for x in readBooks]) - except (KeyError, AttributeError): - log.error("Custom Column No.%d is not existing in calibre database", config.config_read_column) - return [] + except (KeyError, AttributeError, IndexError): + log.error("Custom Column No.{} is not existing in calibre database".format(config.config_read_column)) + return []''' # Returns the template for rendering and includes the instance name def render_title_template(*args, **kwargs): - sidebar = get_sidebar_config(kwargs) - return render_template(instance=config.config_calibre_web_title, sidebar=sidebar, - accept=constants.EXTENSIONS_UPLOAD, read_book_ids=get_readbooks_ids(), - *args, **kwargs) + sidebar, simple = get_sidebar_config(kwargs) + try: + return render_template(instance=config.config_calibre_web_title, sidebar=sidebar, simple=simple, + accept=constants.EXTENSIONS_UPLOAD, + *args, **kwargs) + except PermissionError: + log.error("No permission to access {} file.".format(args[0])) + abort(403) diff --git a/cps/schedule.py b/cps/schedule.py new file mode 100644 index 00000000..faadfb7e --- /dev/null +++ b/cps/schedule.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- + +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2020 mmonkey +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of 
the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import datetime + +from . import config, constants +from .services.background_scheduler import BackgroundScheduler, use_APScheduler +from .tasks.database import TaskReconnectDatabase +from .tasks.thumbnail import TaskGenerateCoverThumbnails, TaskGenerateSeriesThumbnails, TaskClearCoverThumbnailCache +from .services.worker import WorkerThread + + +def get_scheduled_tasks(reconnect=True): + tasks = list() + # config.schedule_reconnect or + # Reconnect Calibre database (metadata.db) + if reconnect: + tasks.append([lambda: TaskReconnectDatabase(), 'reconnect', False]) + + # Generate all missing book cover thumbnails + if config.schedule_generate_book_covers: + tasks.append([lambda: TaskClearCoverThumbnailCache(0), 'delete superfluous book covers', True]) + tasks.append([lambda: TaskGenerateCoverThumbnails(), 'generate book covers', False]) + + # Generate all missing series thumbnails + if config.schedule_generate_series_covers: + tasks.append([lambda: TaskGenerateSeriesThumbnails(), 'generate book covers', False]) + + return tasks + + +def end_scheduled_tasks(): + worker = WorkerThread.get_instance() + for __, __, __, task, __ in worker.tasks: + if task.scheduled and task.is_cancellable: + worker.end_task(task.id) + + +def register_scheduled_tasks(reconnect=True): + scheduler = BackgroundScheduler() + + if scheduler: + # Remove all existing jobs + scheduler.remove_all_jobs() + + start = config.schedule_start_time + duration = config.schedule_duration + + # 
Register scheduled tasks + scheduler.schedule_tasks(tasks=get_scheduled_tasks(reconnect), trigger='cron', hour=start) + end_time = calclulate_end_time(start, duration) + scheduler.schedule(func=end_scheduled_tasks, trigger='cron', name="end scheduled task", hour=end_time.hour, + minute=end_time.minute) + + # Kick-off tasks, if they should currently be running + if should_task_be_running(start, duration): + scheduler.schedule_tasks_immediately(tasks=get_scheduled_tasks(reconnect)) + + +def register_startup_tasks(): + scheduler = BackgroundScheduler() + + if scheduler: + start = config.schedule_start_time + duration = config.schedule_duration + + # Run scheduled tasks immediately for development and testing + # Ignore tasks that should currently be running, as these will be added when registering scheduled tasks + if constants.APP_MODE in ['development', 'test'] and not should_task_be_running(start, duration): + scheduler.schedule_tasks_immediately(tasks=get_scheduled_tasks(False)) + + +def should_task_be_running(start, duration): + now = datetime.datetime.now() + start_time = datetime.datetime.now().replace(hour=start, minute=0, second=0, microsecond=0) + end_time = start_time + datetime.timedelta(hours=duration // 60, minutes=duration % 60) + return start_time < now < end_time + +def calclulate_end_time(start, duration): + start_time = datetime.datetime.now().replace(hour=start, minute=0) + return start_time + datetime.timedelta(hours=duration // 60, minutes=duration % 60) + diff --git a/cps/search.py b/cps/search.py new file mode 100644 index 00000000..602881bf --- /dev/null +++ b/cps/search.py @@ -0,0 +1,418 @@ +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2022 OzzieIsaacs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any 
later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import json +from datetime import datetime + +from flask import Blueprint, request, redirect, url_for, flash +from flask import session as flask_session +from flask_login import current_user +from flask_babel import format_date +from flask_babel import gettext as _ +from sqlalchemy.sql.expression import func, not_, and_, or_, text +from sqlalchemy.sql.functions import coalesce + +from . import logger, db, calibre_db, config, ub +from .usermanagement import login_required_if_no_ano +from .render_template import render_title_template +from .pagination import Pagination + +search = Blueprint('search', __name__) + +log = logger.create() + + +@search.route("/search", methods=["GET"]) +@login_required_if_no_ano +def simple_search(): + term = request.args.get("query") + if term: + return redirect(url_for('web.books_list', data="search", sort_param='stored', query=term.strip())) + else: + return render_title_template('search.html', + searchterm="", + result_count=0, + title=_(u"Search"), + page="search") + + +@search.route("/advsearch", methods=['POST']) +@login_required_if_no_ano +def advanced_search(): + values = dict(request.form) + params = ['include_tag', 'exclude_tag', 'include_serie', 'exclude_serie', 'include_shelf', 'exclude_shelf', + 'include_language', 'exclude_language', 'include_extension', 'exclude_extension'] + for param in params: + values[param] = list(request.form.getlist(param)) + flask_session['query'] = json.dumps(values) + return redirect(url_for('web.books_list', data="advsearch", sort_param='stored', query="")) + + +@search.route("/advsearch", methods=['GET']) 
+@login_required_if_no_ano +def advanced_search_form(): + # Build custom columns names + cc = calibre_db.get_cc_columns(config, filter_config_custom_read=True) + return render_prepare_search_form(cc) + + +def adv_search_custom_columns(cc, term, q): + for c in cc: + if c.datatype == "datetime": + custom_start = term.get('custom_column_' + str(c.id) + '_start') + custom_end = term.get('custom_column_' + str(c.id) + '_end') + if custom_start: + q = q.filter(getattr(db.Books, 'custom_column_' + str(c.id)).any( + func.datetime(db.cc_classes[c.id].value) >= func.datetime(custom_start))) + if custom_end: + q = q.filter(getattr(db.Books, 'custom_column_' + str(c.id)).any( + func.datetime(db.cc_classes[c.id].value) <= func.datetime(custom_end))) + else: + custom_query = term.get('custom_column_' + str(c.id)) + if custom_query != '' and custom_query is not None: + if c.datatype == 'bool': + q = q.filter(getattr(db.Books, 'custom_column_' + str(c.id)).any( + db.cc_classes[c.id].value == (custom_query == "True"))) + elif c.datatype == 'int' or c.datatype == 'float': + q = q.filter(getattr(db.Books, 'custom_column_' + str(c.id)).any( + db.cc_classes[c.id].value == custom_query)) + elif c.datatype == 'rating': + q = q.filter(getattr(db.Books, 'custom_column_' + str(c.id)).any( + db.cc_classes[c.id].value == int(float(custom_query) * 2))) + else: + q = q.filter(getattr(db.Books, 'custom_column_' + str(c.id)).any( + func.lower(db.cc_classes[c.id].value).ilike("%" + custom_query + "%"))) + return q + + +def adv_search_language(q, include_languages_inputs, exclude_languages_inputs): + if current_user.filter_language() != "all": + q = q.filter(db.Books.languages.any(db.Languages.lang_code == current_user.filter_language())) + else: + for language in include_languages_inputs: + q = q.filter(db.Books.languages.any(db.Languages.id == language)) + for language in exclude_languages_inputs: + q = q.filter(not_(db.Books.series.any(db.Languages.id == language))) + return q + + +def 
adv_search_ratings(q, rating_high, rating_low): + if rating_high: + rating_high = int(rating_high) * 2 + q = q.filter(db.Books.ratings.any(db.Ratings.rating <= rating_high)) + if rating_low: + rating_low = int(rating_low) * 2 + q = q.filter(db.Books.ratings.any(db.Ratings.rating >= rating_low)) + return q + + +def adv_search_read_status(q, read_status): + if read_status: + if config.config_read_column: + try: + if read_status == "True": + q = q.join(db.cc_classes[config.config_read_column], isouter=True) \ + .filter(db.cc_classes[config.config_read_column].value == True) + else: + q = q.join(db.cc_classes[config.config_read_column], isouter=True) \ + .filter(coalesce(db.cc_classes[config.config_read_column].value, False) != True) + except (KeyError, AttributeError): + log.error(u"Custom Column No.%d is not existing in calibre database", config.config_read_column) + flash(_("Custom Column No.%(column)d is not existing in calibre database", + column=config.config_read_column), + category="error") + return q + else: + if read_status == "True": + q = q.join(ub.ReadBook, db.Books.id == ub.ReadBook.book_id, isouter=True) \ + .filter(ub.ReadBook.user_id == int(current_user.id), + ub.ReadBook.read_status == ub.ReadBook.STATUS_FINISHED) + else: + q = q.join(ub.ReadBook, db.Books.id == ub.ReadBook.book_id, isouter=True) \ + .filter(ub.ReadBook.user_id == int(current_user.id), + coalesce(ub.ReadBook.read_status, 0) != ub.ReadBook.STATUS_FINISHED) + return q + + +def adv_search_extension(q, include_extension_inputs, exclude_extension_inputs): + for extension in include_extension_inputs: + q = q.filter(db.Books.data.any(db.Data.format == extension)) + for extension in exclude_extension_inputs: + q = q.filter(not_(db.Books.data.any(db.Data.format == extension))) + return q + + +def adv_search_tag(q, include_tag_inputs, exclude_tag_inputs): + for tag in include_tag_inputs: + q = q.filter(db.Books.tags.any(db.Tags.id == tag)) + for tag in exclude_tag_inputs: + q = 
q.filter(not_(db.Books.tags.any(db.Tags.id == tag))) + return q + + +def adv_search_serie(q, include_series_inputs, exclude_series_inputs): + for serie in include_series_inputs: + q = q.filter(db.Books.series.any(db.Series.id == serie)) + for serie in exclude_series_inputs: + q = q.filter(not_(db.Books.series.any(db.Series.id == serie))) + return q + +def adv_search_shelf(q, include_shelf_inputs, exclude_shelf_inputs): + q = q.outerjoin(ub.BookShelf, db.Books.id == ub.BookShelf.book_id)\ + .filter(or_(ub.BookShelf.shelf == None, ub.BookShelf.shelf.notin_(exclude_shelf_inputs))) + if len(include_shelf_inputs) > 0: + q = q.filter(ub.BookShelf.shelf.in_(include_shelf_inputs)) + return q + +def extend_search_term(searchterm, + author_name, + book_title, + publisher, + pub_start, + pub_end, + tags, + rating_high, + rating_low, + read_status, + ): + searchterm.extend((author_name.replace('|', ','), book_title, publisher)) + if pub_start: + try: + searchterm.extend([_(u"Published after ") + + format_date(datetime.strptime(pub_start, "%Y-%m-%d"), + format='medium')]) + except ValueError: + pub_start = u"" + if pub_end: + try: + searchterm.extend([_(u"Published before ") + + format_date(datetime.strptime(pub_end, "%Y-%m-%d"), + format='medium')]) + except ValueError: + pub_end = u"" + elements = {'tag': db.Tags, 'serie':db.Series, 'shelf':ub.Shelf} + for key, db_element in elements.items(): + tag_names = calibre_db.session.query(db_element).filter(db_element.id.in_(tags['include_' + key])).all() + searchterm.extend(tag.name for tag in tag_names) + tag_names = calibre_db.session.query(db_element).filter(db_element.id.in_(tags['exclude_' + key])).all() + searchterm.extend(tag.name for tag in tag_names) + language_names = calibre_db.session.query(db.Languages). 
\ + filter(db.Languages.id.in_(tags['include_language'])).all() + if language_names: + language_names = calibre_db.speaking_language(language_names) + searchterm.extend(language.name for language in language_names) + language_names = calibre_db.session.query(db.Languages). \ + filter(db.Languages.id.in_(tags['exclude_language'])).all() + if language_names: + language_names = calibre_db.speaking_language(language_names) + searchterm.extend(language.name for language in language_names) + if rating_high: + searchterm.extend([_(u"Rating <= %(rating)s", rating=rating_high)]) + if rating_low: + searchterm.extend([_(u"Rating >= %(rating)s", rating=rating_low)]) + if read_status: + searchterm.extend([_(u"Read Status = %(status)s", status=read_status)]) + searchterm.extend(ext for ext in tags['include_extension']) + searchterm.extend(ext for ext in tags['exclude_extension']) + # handle custom columns + searchterm = " + ".join(filter(None, searchterm)) + return searchterm, pub_start, pub_end + + +def render_adv_search_results(term, offset=None, order=None, limit=None): + sort = order[0] if order else [db.Books.sort] + pagination = None + + cc = calibre_db.get_cc_columns(config, filter_config_custom_read=True) + calibre_db.session.connection().connection.connection.create_function("lower", 1, db.lcase) + if not config.config_read_column: + query = (calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, ub.ReadBook).select_from(db.Books) + .outerjoin(ub.ReadBook, and_(db.Books.id == ub.ReadBook.book_id, + int(current_user.id) == ub.ReadBook.user_id))) + else: + try: + read_column = cc[config.config_read_column] + query = (calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, read_column.value) + .select_from(db.Books) + .outerjoin(read_column, read_column.book == db.Books.id)) + except (KeyError, AttributeError): + log.error("Custom Column No.%d is not existing in calibre database", config.config_read_column) + # Skip linking read column + query = 
calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, None) + query = query.outerjoin(ub.ArchivedBook, and_(db.Books.id == ub.ArchivedBook.book_id, + int(current_user.id) == ub.ArchivedBook.user_id)) + + q = query.outerjoin(db.books_series_link, db.Books.id == db.books_series_link.c.book)\ + .outerjoin(db.Series)\ + .filter(calibre_db.common_filters(True)) + + # parse multi selects to a complete dict + tags = dict() + elements = ['tag', 'serie', 'shelf', 'language', 'extension'] + for element in elements: + tags['include_' + element] = term.get('include_' + element) + tags['exclude_' + element] = term.get('exclude_' + element) + + author_name = term.get("author_name") + book_title = term.get("book_title") + publisher = term.get("publisher") + pub_start = term.get("publishstart") + pub_end = term.get("publishend") + rating_low = term.get("ratinghigh") + rating_high = term.get("ratinglow") + description = term.get("comment") + read_status = term.get("read_status") + if author_name: + author_name = author_name.strip().lower().replace(',', '|') + if book_title: + book_title = book_title.strip().lower() + if publisher: + publisher = publisher.strip().lower() + + search_term = [] + cc_present = False + for c in cc: + if c.datatype == "datetime": + column_start = term.get('custom_column_' + str(c.id) + '_start') + column_end = term.get('custom_column_' + str(c.id) + '_end') + if column_start: + search_term.extend([u"{} >= {}".format(c.name, + format_date(datetime.strptime(column_start, "%Y-%m-%d").date(), + format='medium') + )]) + cc_present = True + if column_end: + search_term.extend([u"{} <= {}".format(c.name, + format_date(datetime.strptime(column_end, "%Y-%m-%d").date(), + format='medium') + )]) + cc_present = True + elif term.get('custom_column_' + str(c.id)): + search_term.extend([(u"{}: {}".format(c.name, term.get('custom_column_' + str(c.id))))]) + cc_present = True + + if any(tags.values()) or author_name or book_title or publisher or pub_start or 
pub_end or rating_low \ + or rating_high or description or cc_present or read_status: + search_term, pub_start, pub_end = extend_search_term(search_term, + author_name, + book_title, + publisher, + pub_start, + pub_end, + tags, + rating_high, + rating_low, + read_status) + if author_name: + q = q.filter(db.Books.authors.any(func.lower(db.Authors.name).ilike("%" + author_name + "%"))) + if book_title: + q = q.filter(func.lower(db.Books.title).ilike("%" + book_title + "%")) + if pub_start: + q = q.filter(func.datetime(db.Books.pubdate) > func.datetime(pub_start)) + if pub_end: + q = q.filter(func.datetime(db.Books.pubdate) < func.datetime(pub_end)) + q = adv_search_read_status(q, read_status) + if publisher: + q = q.filter(db.Books.publishers.any(func.lower(db.Publishers.name).ilike("%" + publisher + "%"))) + q = adv_search_tag(q, tags['include_tag'], tags['exclude_tag']) + q = adv_search_serie(q, tags['include_serie'], tags['exclude_serie']) + q = adv_search_shelf(q, tags['include_shelf'], tags['exclude_shelf']) + q = adv_search_extension(q, tags['include_extension'], tags['exclude_extension']) + q = adv_search_language(q, tags['include_language'], tags['exclude_language']) + q = adv_search_ratings(q, rating_high, rating_low) + + if description: + q = q.filter(db.Books.comments.any(func.lower(db.Comments.text).ilike("%" + description + "%"))) + + # search custom columns + try: + q = adv_search_custom_columns(cc, term, q) + except AttributeError as ex: + log.debug_or_exception(ex) + flash(_("Error on search for custom columns, please restart Calibre-Web"), category="error") + + q = q.order_by(*sort).all() + flask_session['query'] = json.dumps(term) + ub.store_combo_ids(q) + result_count = len(q) + if offset is not None and limit is not None: + offset = int(offset) + limit_all = offset + int(limit) + pagination = Pagination((offset / (int(limit)) + 1), limit, result_count) + else: + offset = 0 + limit_all = result_count + entries = 
calibre_db.order_authors(q[offset:limit_all], list_return=True, combined=True) + return render_title_template('search.html', + adv_searchterm=search_term, + pagination=pagination, + entries=entries, + result_count=result_count, + title=_(u"Advanced Search"), page="advsearch", + order=order[1]) + + +def render_prepare_search_form(cc): + # prepare data for search-form + tags = calibre_db.session.query(db.Tags)\ + .join(db.books_tags_link)\ + .join(db.Books)\ + .filter(calibre_db.common_filters()) \ + .group_by(text('books_tags_link.tag'))\ + .order_by(db.Tags.name).all() + series = calibre_db.session.query(db.Series)\ + .join(db.books_series_link)\ + .join(db.Books)\ + .filter(calibre_db.common_filters()) \ + .group_by(text('books_series_link.series'))\ + .order_by(db.Series.name)\ + .filter(calibre_db.common_filters()).all() + shelves = ub.session.query(ub.Shelf)\ + .filter(or_(ub.Shelf.is_public == 1, ub.Shelf.user_id == int(current_user.id)))\ + .order_by(ub.Shelf.name).all() + extensions = calibre_db.session.query(db.Data)\ + .join(db.Books)\ + .filter(calibre_db.common_filters()) \ + .group_by(db.Data.format)\ + .order_by(db.Data.format).all() + if current_user.filter_language() == u"all": + languages = calibre_db.speaking_language() + else: + languages = None + return render_title_template('search_form.html', tags=tags, languages=languages, extensions=extensions, + series=series,shelves=shelves, title=_(u"Advanced Search"), cc=cc, page="advsearch") + + +def render_search_results(term, offset=None, order=None, limit=None): + join = db.books_series_link, db.Books.id == db.books_series_link.c.book, db.Series + entries, result_count, pagination = calibre_db.get_search_results(term, + config, + offset, + order, + limit, + *join) + return render_title_template('search.html', + searchterm=term, + pagination=pagination, + query=term, + adv_searchterm=term, + entries=entries, + result_count=result_count, + title=_(u"Search"), + page="search", + order=order[1]) + + diff 
--git a/cps/search_metadata.py b/cps/search_metadata.py index e837fe21..e018da32 100644 --- a/cps/search_metadata.py +++ b/cps/search_metadata.py @@ -16,69 +16,91 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import os -import json -import importlib -import sys -import inspect -import datetime import concurrent.futures +import importlib +import inspect +import json +import os +import sys -from flask import Blueprint, request, Response, url_for +from flask import Blueprint, Response, request, url_for from flask_login import current_user from flask_login import login_required +from flask_babel import get_locale +from sqlalchemy.exc import InvalidRequestError, OperationalError from sqlalchemy.orm.attributes import flag_modified -from sqlalchemy.exc import OperationalError, InvalidRequestError -from . import constants, logger, ub from cps.services.Metadata import Metadata +from . import constants, logger, ub, web_server +# current_milli_time = lambda: int(round(time() * 1000)) -meta = Blueprint('metadata', __name__) +meta = Blueprint("metadata", __name__) log = logger.create() +try: + from dataclasses import asdict +except ImportError: + log.info('*** "dataclasses" is needed for calibre-web to run. Please install it using pip: "pip install dataclasses" ***') + print('*** "dataclasses" is needed for calibre-web to run. Please install it using pip: "pip install dataclasses" ***') + web_server.stop(True) + sys.exit(6) + new_list = list() meta_dir = os.path.join(constants.BASE_DIR, "cps", "metadata_provider") modules = os.listdir(os.path.join(constants.BASE_DIR, "cps", "metadata_provider")) for f in modules: - if os.path.isfile(os.path.join(meta_dir, f)) and not f.endswith('__init__.py'): + if os.path.isfile(os.path.join(meta_dir, f)) and not f.endswith("__init__.py"): a = os.path.basename(f)[:-3] try: importlib.import_module("cps.metadata_provider." 
+ a) new_list.append(a) - except ImportError: - log.error("Import error for metadata source: {}".format(a)) - pass + except (IndentationError, SyntaxError) as e: + log.error("Syntax error for metadata source: {} - {}".format(a, e)) + except ImportError as e: + log.debug("Import error for metadata source: {} - {}".format(a, e)) + def list_classes(provider_list): classes = list() for element in provider_list: - for name, obj in inspect.getmembers(sys.modules["cps.metadata_provider." + element]): - if inspect.isclass(obj) and name != "Metadata" and issubclass(obj, Metadata): + for name, obj in inspect.getmembers( + sys.modules["cps.metadata_provider." + element] + ): + if ( + inspect.isclass(obj) + and name != "Metadata" + and issubclass(obj, Metadata) + ): classes.append(obj()) return classes + cl = list_classes(new_list) + @meta.route("/metadata/provider") @login_required def metadata_provider(): - active = current_user.view_settings.get('metadata', {}) + active = current_user.view_settings.get("metadata", {}) provider = list() for c in cl: ac = active.get(c.__id__, True) - provider.append({"name": c.__name__, "active": ac, "initial": ac, "id": c.__id__}) - return Response(json.dumps(provider), mimetype='application/json') + provider.append( + {"name": c.__name__, "active": ac, "initial": ac, "id": c.__id__} + ) + return Response(json.dumps(provider), mimetype="application/json") -@meta.route("/metadata/provider", methods=['POST']) -@meta.route("/metadata/provider/", methods=['POST']) + +@meta.route("/metadata/provider", methods=["POST"]) +@meta.route("/metadata/provider/", methods=["POST"]) @login_required def metadata_change_active_provider(prov_name): new_state = request.get_json() - active = current_user.view_settings.get('metadata', {}) - active[new_state['id']] = new_state['value'] - current_user.view_settings['metadata'] = active + active = current_user.view_settings.get("metadata", {}) + active[new_state["id"]] = new_state["value"] + 
current_user.view_settings["metadata"] = active try: try: flag_modified(current_user, "view_settings") @@ -89,29 +111,33 @@ def metadata_change_active_provider(prov_name): log.error("Invalid request received: {}".format(request)) return "Invalid request", 400 if "initial" in new_state and prov_name: - for c in cl: - if c.__id__ == prov_name: - data = c.search(new_state.get('query', "")) - break - return Response(json.dumps(data), mimetype='application/json') + data = [] + provider = next((c for c in cl if c.__id__ == prov_name), None) + if provider is not None: + data = provider.search(new_state.get("query", "")) + return Response( + json.dumps([asdict(x) for x in data]), mimetype="application/json" + ) return "" -@meta.route("/metadata/search", methods=['POST']) + +@meta.route("/metadata/search", methods=["POST"]) @login_required def metadata_search(): - query = request.form.to_dict().get('query') + query = request.form.to_dict().get("query") data = list() - active = current_user.view_settings.get('metadata', {}) + active = current_user.view_settings.get("metadata", {}) + locale = get_locale() if query: - static_cover = url_for('static', filename='generic_cover.jpg') + static_cover = url_for("static", filename="generic_cover.jpg") + # start = current_milli_time() with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: - meta = {executor.submit(c.search, query, static_cover): c for c in cl if active.get(c.__id__, True)} + meta = { + executor.submit(c.search, query, static_cover, locale): c + for c in cl + if active.get(c.__id__, True) + } for future in concurrent.futures.as_completed(meta): - data.extend(future.result()) - return Response(json.dumps(data), mimetype='application/json') - - - - - - + data.extend([asdict(x) for x in future.result() if x]) + # log.info({'Time elapsed {}'.format(current_milli_time()-start)}) + return Response(json.dumps(data), mimetype="application/json") diff --git a/cps/server.py b/cps/server.py index e261c50a..0ffdbd18 
100644 --- a/cps/server.py +++ b/cps/server.py @@ -25,6 +25,7 @@ import subprocess # nosec try: from gevent.pywsgi import WSGIServer + from .gevent_wsgi import MyWSGIHandler from gevent.pool import Pool from gevent import __version__ as _version from greenlet import GreenletExit @@ -32,7 +33,7 @@ try: VERSION = 'Gevent ' + _version _GEVENT = True except ImportError: - from tornado.wsgi import WSGIContainer + from .tornado_wsgi import MyWSGIContainer from tornado.httpserver import HTTPServer from tornado.ioloop import IOLoop from tornado import version as _version @@ -202,7 +203,8 @@ class WebServer(object): if output is None: output = _readable_listen_address(self.listen_address, self.listen_port) log.info('Starting Gevent server on %s', output) - self.wsgiserver = WSGIServer(sock, self.app, log=self.access_logger, spawn=Pool(), **ssl_args) + self.wsgiserver = WSGIServer(sock, self.app, log=self.access_logger, handler_class=MyWSGIHandler, + spawn=Pool(), **ssl_args) if ssl_args: wrap_socket = self.wsgiserver.wrap_socket def my_wrap_socket(*args, **kwargs): @@ -225,8 +227,8 @@ class WebServer(object): asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) log.info('Starting Tornado server on %s', _readable_listen_address(self.listen_address, self.listen_port)) - # Max Buffersize set to 200MB ) - http_server = HTTPServer(WSGIContainer(self.app), + # Max Buffersize set to 200MB + http_server = HTTPServer(MyWSGIContainer(self.app), max_buffer_size=209700000, ssl_options=self.ssl_args) http_server.listen(self.listen_port, self.listen_address) diff --git a/cps/services/Metadata.py b/cps/services/Metadata.py index d6e4e7d5..ab4fd482 100644 --- a/cps/services/Metadata.py +++ b/cps/services/Metadata.py @@ -15,13 +15,97 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
+import abc +import dataclasses +import os +import re +from typing import Dict, Generator, List, Optional, Union + +from cps import constants -class Metadata(): +@dataclasses.dataclass +class MetaSourceInfo: + id: str + description: str + link: str + + +@dataclasses.dataclass +class MetaRecord: + id: Union[str, int] + title: str + authors: List[str] + url: str + source: MetaSourceInfo + cover: str = os.path.join(constants.STATIC_DIR, 'generic_cover.jpg') + description: Optional[str] = "" + series: Optional[str] = None + series_index: Optional[Union[int, float]] = 0 + identifiers: Dict[str, Union[str, int]] = dataclasses.field(default_factory=dict) + publisher: Optional[str] = None + publishedDate: Optional[str] = None + rating: Optional[int] = 0 + languages: Optional[List[str]] = dataclasses.field(default_factory=list) + tags: Optional[List[str]] = dataclasses.field(default_factory=list) + + +class Metadata: __name__ = "Generic" + __id__ = "generic" def __init__(self): self.active = True def set_status(self, state): self.active = state + + @abc.abstractmethod + def search( + self, query: str, generic_cover: str = "", locale: str = "en" + ) -> Optional[List[MetaRecord]]: + pass + + @staticmethod + def get_title_tokens( + title: str, strip_joiners: bool = True + ) -> Generator[str, None, None]: + """ + Taken from calibre source code + It's a simplified (cut out what is unnecessary) version of + https://github.com/kovidgoyal/calibre/blob/99d85b97918625d172227c8ffb7e0c71794966c0/ + src/calibre/ebooks/metadata/sources/base.py#L363-L367 + (src/calibre/ebooks/metadata/sources/base.py - lines 363-398) + """ + title_patterns = [ + (re.compile(pat, re.IGNORECASE), repl) + for pat, repl in [ + # Remove things like: (2010) (Omnibus) etc. 
+ ( + r"(?i)[({\[](\d{4}|omnibus|anthology|hardcover|" + r"audiobook|audio\scd|paperback|turtleback|" + r"mass\s*market|edition|ed\.)[\])}]", + "", + ), + # Remove any strings that contain the substring edition inside + # parentheses + (r"(?i)[({\[].*?(edition|ed.).*?[\]})]", ""), + # Remove commas used a separators in numbers + (r"(\d+),(\d+)", r"\1\2"), + # Remove hyphens only if they have whitespace before them + (r"(\s-)", " "), + # Replace other special chars with a space + (r"""[:,;!@$%^&*(){}.`~"\s\[\]/]《》「」“”""", " "), + ] + ] + + for pat, repl in title_patterns: + title = pat.sub(repl, title) + + tokens = title.split() + for token in tokens: + token = token.strip().strip('"').strip("'") + if token and ( + not strip_joiners or token.lower() not in ("a", "and", "the", "&") + ): + yield token diff --git a/cps/services/SyncToken.py b/cps/services/SyncToken.py index 0072a20d..a53d7a99 100644 --- a/cps/services/SyncToken.py +++ b/cps/services/SyncToken.py @@ -21,11 +21,8 @@ import sys from base64 import b64decode, b64encode from jsonschema import validate, exceptions, __version__ from datetime import datetime -try: - # pylint: disable=unused-import - from urllib import unquote -except ImportError: - from urllib.parse import unquote + +from urllib.parse import unquote from flask import json from .. import logger diff --git a/cps/services/__init__.py b/cps/services/__init__.py index 32a9d485..f93eca34 100644 --- a/cps/services/__init__.py +++ b/cps/services/__init__.py @@ -18,11 +18,10 @@ from .. import logger - log = logger.create() - -try: from . import goodreads_support +try: + from . 
import goodreads_support except ImportError as err: log.debug("Cannot import goodreads, showing authors-metadata will not work: %s", err) goodreads_support = None diff --git a/cps/services/background_scheduler.py b/cps/services/background_scheduler.py new file mode 100644 index 00000000..27285fd9 --- /dev/null +++ b/cps/services/background_scheduler.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- + +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2020 mmonkey +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import atexit + +from .. import logger +from .worker import WorkerThread + +try: + from apscheduler.schedulers.background import BackgroundScheduler as BScheduler + use_APScheduler = True +except (ImportError, RuntimeError) as e: + use_APScheduler = False + log = logger.create() + log.info('APScheduler not found. 
Unable to schedule tasks.') + + +class BackgroundScheduler: + _instance = None + + def __new__(cls): + if not use_APScheduler: + return False + + if cls._instance is None: + cls._instance = super(BackgroundScheduler, cls).__new__(cls) + cls.log = logger.create() + cls.scheduler = BScheduler() + cls.scheduler.start() + + atexit.register(lambda: cls.scheduler.shutdown()) + + return cls._instance + + def schedule(self, func, trigger, name=None, **trigger_args): + if use_APScheduler: + return self.scheduler.add_job(func=func, trigger=trigger, name=name, **trigger_args) + + # Expects a lambda expression for the task + def schedule_task(self, task, user=None, name=None, hidden=False, trigger='cron', **trigger_args): + if use_APScheduler: + def scheduled_task(): + worker_task = task() + worker_task.scheduled = True + WorkerThread.add(user, worker_task, hidden=hidden) + return self.schedule(func=scheduled_task, trigger=trigger, name=name, **trigger_args) + + # Expects a list of lambda expressions for the tasks + def schedule_tasks(self, tasks, user=None, trigger='cron', **trigger_args): + if use_APScheduler: + for task in tasks: + self.schedule_task(task[0], user=user, trigger=trigger, name=task[1], hidden=task[2], **trigger_args) + + # Expects a lambda expression for the task + def schedule_task_immediately(self, task, user=None, name=None, hidden=False): + if use_APScheduler: + def immediate_task(): + WorkerThread.add(user, task(), hidden) + return self.schedule(func=immediate_task, trigger='date', name=name) + + # Expects a list of lambda expressions for the tasks + def schedule_tasks_immediately(self, tasks, user=None): + if use_APScheduler: + for task in tasks: + self.schedule_task_immediately(task[0], user, name="immediately " + task[1], hidden=task[2]) + + # Remove all jobs + def remove_all_jobs(self): + self.scheduler.remove_all_jobs() diff --git a/cps/services/gmail.py b/cps/services/gmail.py index ff36b308..3a4eab7f 100644 --- a/cps/services/gmail.py +++ 
b/cps/services/gmail.py @@ -25,7 +25,7 @@ from google.oauth2.credentials import Credentials from datetime import datetime import base64 from flask_babel import gettext as _ -from ..constants import BASE_DIR +from ..constants import CONFIG_DIR from .. import logger @@ -53,11 +53,11 @@ def setup_gmail(token): if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: - cred_file = os.path.join(BASE_DIR, 'gmail.json') + cred_file = os.path.join(CONFIG_DIR, 'gmail.json') if not os.path.exists(cred_file): raise Exception(_("Found no valid gmail.json file with OAuth information")) flow = InstalledAppFlow.from_client_secrets_file( - os.path.join(BASE_DIR, 'gmail.json'), SCOPES) + os.path.join(CONFIG_DIR, 'gmail.json'), SCOPES) creds = flow.run_local_server(port=0) user_info = get_user_info(creds) return { diff --git a/cps/services/worker.py b/cps/services/worker.py index 076c9104..63d83bfb 100644 --- a/cps/services/worker.py +++ b/cps/services/worker.py @@ -37,11 +37,13 @@ STAT_WAITING = 0 STAT_FAIL = 1 STAT_STARTED = 2 STAT_FINISH_SUCCESS = 3 +STAT_ENDED = 4 +STAT_CANCELLED = 5 # Only retain this many tasks in dequeued list TASK_CLEANUP_TRIGGER = 20 -QueuedTask = namedtuple('QueuedTask', 'num, user, added, task') +QueuedTask = namedtuple('QueuedTask', 'num, user, added, task, hidden') def _get_main_thread(): @@ -51,7 +53,6 @@ def _get_main_thread(): raise Exception("main thread not found?!") - class ImprovedQueue(queue.Queue): def to_list(self): """ @@ -61,12 +62,13 @@ class ImprovedQueue(queue.Queue): with self.mutex: return list(self.queue) + # Class for all worker tasks in the background class WorkerThread(threading.Thread): _instance = None @classmethod - def getInstance(cls): + def get_instance(cls): if cls._instance is None: cls._instance = WorkerThread() return cls._instance @@ -82,15 +84,17 @@ class WorkerThread(threading.Thread): self.start() @classmethod - def add(cls, user, task): - ins = cls.getInstance() + def add(cls, user, task, 
hidden=False): + ins = cls.get_instance() ins.num += 1 - log.debug("Add Task for user: {} - {}".format(user, task)) + username = user if user is not None else 'System' + log.debug("Add Task for user: {} - {}".format(username, task)) ins.queue.put(QueuedTask( num=ins.num, - user=user, + user=username, added=datetime.now(), task=task, + hidden=hidden )) @property @@ -111,10 +115,10 @@ class WorkerThread(threading.Thread): if delta > TASK_CLEANUP_TRIGGER: ret = alive else: - # otherwise, lop off the oldest dead tasks until we hit the target trigger - ret = sorted(dead, key=lambda x: x.task.end_time)[-TASK_CLEANUP_TRIGGER:] + alive + # otherwise, loop off the oldest dead tasks until we hit the target trigger + ret = sorted(dead, key=lambda y: y.task.end_time)[-TASK_CLEANUP_TRIGGER:] + alive - self.dequeued = sorted(ret, key=lambda x: x.num) + self.dequeued = sorted(ret, key=lambda y: y.num) # Main thread loop starting the different tasks def run(self): @@ -141,11 +145,21 @@ class WorkerThread(threading.Thread): # sometimes tasks (like Upload) don't actually have work to do and are created as already finished if item.task.stat is STAT_WAITING: - # CalibreTask.start() should wrap all exceptions in it's own error handling + # CalibreTask.start() should wrap all exceptions in its own error handling item.task.start(self) + # remove self_cleanup tasks and hidden "System Tasks" from list + if item.task.self_cleanup or item.hidden: + self.dequeued.remove(item) + self.queue.task_done() + def end_task(self, task_id): + ins = self.get_instance() + for __, __, __, task, __ in ins.tasks: + if str(task.id) == str(task_id) and task.is_cancellable: + task.stat = STAT_CANCELLED if task.stat == STAT_WAITING else STAT_ENDED + class CalibreTask: __metaclass__ = abc.ABCMeta @@ -158,10 +172,12 @@ class CalibreTask: self.end_time = None self.message = message self.id = uuid.uuid4() + self.self_cleanup = False + self._scheduled = False @abc.abstractmethod def run(self, worker_thread): - 
"""Provides the caller some human-readable name for this class""" + """The main entry-point for this task""" raise NotImplementedError @abc.abstractmethod @@ -169,6 +185,11 @@ class CalibreTask: """Provides the caller some human-readable name for this class""" raise NotImplementedError + @abc.abstractmethod + def is_cancellable(self): + """Does this task gracefully handle being cancelled (STAT_ENDED, STAT_CANCELLED)?""" + raise NotImplementedError + def start(self, *args): self.start_time = datetime.now() self.stat = STAT_STARTED @@ -178,7 +199,7 @@ class CalibreTask: self.run(*args) except Exception as ex: self._handleError(str(ex)) - log.debug_or_exception(ex) + log.error_or_exception(ex) self.end_time = datetime.now() @@ -219,15 +240,23 @@ class CalibreTask: We have a separate dictating this because there may be certain tasks that want to override this """ # By default, we're good to clean a task if it's "Done" - return self.stat in (STAT_FINISH_SUCCESS, STAT_FAIL) + return self.stat in (STAT_FINISH_SUCCESS, STAT_FAIL, STAT_ENDED, STAT_CANCELLED) - '''@progress.setter - def progress(self, x): - if x > 1: - x = 1 - if x < 0: - x = 0 - self._progress = x''' + @property + def self_cleanup(self): + return self._self_cleanup + + @self_cleanup.setter + def self_cleanup(self, is_self_cleanup): + self._self_cleanup = is_self_cleanup + + @property + def scheduled(self): + return self._scheduled + + @scheduled.setter + def scheduled(self, is_scheduled): + self._scheduled = is_scheduled def _handleError(self, error_message): self.stat = STAT_FAIL diff --git a/cps/shelf.py b/cps/shelf.py index 04d9f8b9..49d9a633 100644 --- a/cps/shelf.py +++ b/cps/shelf.py @@ -23,7 +23,7 @@ import sys from datetime import datetime -from flask import Blueprint, flash, redirect, request, url_for +from flask import Blueprint, flash, redirect, request, url_for, abort from flask_babel import gettext as _ from flask_login import current_user, login_required from sqlalchemy.exc import 
InvalidRequestError, OperationalError @@ -33,30 +33,12 @@ from . import calibre_db, config, db, logger, ub from .render_template import render_title_template from .usermanagement import login_required_if_no_ano -shelf = Blueprint('shelf', __name__) log = logger.create() - -def check_shelf_edit_permissions(cur_shelf): - if not cur_shelf.is_public and not cur_shelf.user_id == int(current_user.id): - log.error("User %s not allowed to edit shelf %s", current_user, cur_shelf) - return False - if cur_shelf.is_public and not current_user.role_edit_shelfs(): - log.info("User %s not allowed to edit public shelves", current_user) - return False - return True +shelf = Blueprint('shelf', __name__) -def check_shelf_view_permissions(cur_shelf): - if cur_shelf.is_public: - return True - if current_user.is_anonymous or cur_shelf.user_id != current_user.id: - log.error("User is unauthorized to view non-public shelf: %s", cur_shelf) - return False - return True - - -@shelf.route("/shelf/add//") +@shelf.route("/shelf/add//", methods=["POST"]) @login_required def add_to_shelf(shelf_id, book_id): xhr = request.headers.get('X-Requested-With') == 'XMLHttpRequest' @@ -94,10 +76,10 @@ def add_to_shelf(shelf_id, book_id): try: ub.session.merge(shelf) ub.session.commit() - except (OperationalError, InvalidRequestError): + except (OperationalError, InvalidRequestError) as e: ub.session.rollback() - log.error("Settings DB is not Writeable") - flash(_(u"Settings DB is not Writeable"), category="error") + log.error_or_exception("Settings Database error: {}".format(e)) + flash(_(u"Database error: %(error)s.", error=e.orig), category="error") if "HTTP_REFERER" in request.environ: return redirect(request.environ["HTTP_REFERER"]) else: @@ -112,12 +94,12 @@ def add_to_shelf(shelf_id, book_id): return "", 204 -@shelf.route("/shelf/massadd/") +@shelf.route("/shelf/massadd/", methods=["POST"]) @login_required def search_to_shelf(shelf_id): shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == 
shelf_id).first() if shelf is None: - log.error("Invalid shelf specified: %s", shelf_id) + log.error("Invalid shelf specified: {}".format(shelf_id)) flash(_(u"Invalid shelf specified"), category="error") return redirect(url_for('web.index')) @@ -154,17 +136,17 @@ def search_to_shelf(shelf_id): ub.session.merge(shelf) ub.session.commit() flash(_(u"Books have been added to shelf: %(sname)s", sname=shelf.name), category="success") - except (OperationalError, InvalidRequestError): + except (OperationalError, InvalidRequestError) as e: ub.session.rollback() - log.error("Settings DB is not Writeable") - flash(_("Settings DB is not Writeable"), category="error") + log.error_or_exception("Settings Database error: {}".format(e)) + flash(_(u"Database error: %(error)s.", error=e.orig), category="error") else: log.error("Could not add books to shelf: {}".format(shelf.name)) flash(_(u"Could not add books to shelf: %(sname)s", sname=shelf.name), category="error") return redirect(url_for('web.index')) -@shelf.route("/shelf/remove//") +@shelf.route("/shelf/remove//", methods=["POST"]) @login_required def remove_from_shelf(shelf_id, book_id): xhr = request.headers.get('X-Requested-With') == 'XMLHttpRequest' @@ -197,10 +179,10 @@ def remove_from_shelf(shelf_id, book_id): ub.session.delete(book_shelf) shelf.last_modified = datetime.utcnow() ub.session.commit() - except (OperationalError, InvalidRequestError): + except (OperationalError, InvalidRequestError) as e: ub.session.rollback() - log.error("Settings DB is not Writeable") - flash(_("Settings DB is not Writeable"), category="error") + log.error_or_exception("Settings Database error: {}".format(e)) + flash(_(u"Database error: %(error)s.", error=e.orig), category="error") if "HTTP_REFERER" in request.environ: return redirect(request.environ["HTTP_REFERER"]) else: @@ -228,7 +210,6 @@ def create_shelf(): return create_edit_shelf(shelf, page_title=_(u"Create a Shelf"), page="shelfcreate") - @shelf.route("/shelf/edit/", 
methods=["GET", "POST"]) @login_required def edit_shelf(shelf_id): @@ -239,6 +220,89 @@ def edit_shelf(shelf_id): return create_edit_shelf(shelf, page_title=_(u"Edit a shelf"), page="shelfedit", shelf_id=shelf_id) +@shelf.route("/shelf/delete/", methods=["POST"]) +@login_required +def delete_shelf(shelf_id): + cur_shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first() + try: + if not delete_shelf_helper(cur_shelf): + flash(_("Error deleting Shelf"), category="error") + else: + flash(_("Shelf successfully deleted"), category="success") + except InvalidRequestError as e: + ub.session.rollback() + log.error_or_exception("Settings Database error: {}".format(e)) + flash(_(u"Database error: %(error)s.", error=e.orig), category="error") + return redirect(url_for('web.index')) + + +@shelf.route("/simpleshelf/") +@login_required_if_no_ano +def show_simpleshelf(shelf_id): + return render_show_shelf(2, shelf_id, 1, None) + + +@shelf.route("/shelf/", defaults={"sort_param": "order", 'page': 1}) +@shelf.route("/shelf//", defaults={'page': 1}) +@shelf.route("/shelf///") +@login_required_if_no_ano +def show_shelf(shelf_id, sort_param, page): + return render_show_shelf(1, shelf_id, page, sort_param) + + +@shelf.route("/shelf/order/", methods=["GET", "POST"]) +@login_required +def order_shelf(shelf_id): + shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first() + if shelf and check_shelf_view_permissions(shelf): + if request.method == "POST": + to_save = request.form.to_dict() + books_in_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id).order_by( + ub.BookShelf.order.asc()).all() + counter = 0 + for book in books_in_shelf: + setattr(book, 'order', to_save[str(book.book_id)]) + counter += 1 + # if order diffrent from before -> shelf.last_modified = datetime.utcnow() + try: + ub.session.commit() + except (OperationalError, InvalidRequestError) as e: + ub.session.rollback() + log.error_or_exception("Settings Database 
error: {}".format(e)) + flash(_(u"Database error: %(error)s.", error=e.orig), category="error") + + result = list() + if shelf: + result = calibre_db.session.query(db.Books) \ + .join(ub.BookShelf, ub.BookShelf.book_id == db.Books.id, isouter=True) \ + .add_columns(calibre_db.common_filters().label("visible")) \ + .filter(ub.BookShelf.shelf == shelf_id).order_by(ub.BookShelf.order.asc()).all() + return render_title_template('shelf_order.html', entries=result, + title=_(u"Change order of Shelf: '%(name)s'", name=shelf.name), + shelf=shelf, page="shelforder") + else: + abort(404) + + +def check_shelf_edit_permissions(cur_shelf): + if not cur_shelf.is_public and not cur_shelf.user_id == int(current_user.id): + log.error("User {} not allowed to edit shelf: {}".format(current_user.id, cur_shelf.name)) + return False + if cur_shelf.is_public and not current_user.role_edit_shelfs(): + log.info("User {} not allowed to edit public shelves".format(current_user.id)) + return False + return True + + +def check_shelf_view_permissions(cur_shelf): + if cur_shelf.is_public: + return True + if current_user.is_anonymous or cur_shelf.user_id != current_user.id: + log.error("User is unauthorized to view non-public shelf: {}".format(cur_shelf.name)) + return False + return True + + # if shelf ID is set, we are editing a shelf def create_edit_shelf(shelf, page_title, page, shelf_id=False): sync_only_selected_shelves = current_user.kobo_only_shelves_sync @@ -248,12 +312,17 @@ def create_edit_shelf(shelf, page_title, page, shelf_id=False): if not current_user.role_edit_shelfs() and to_save.get("is_public") == "on": flash(_(u"Sorry you are not allowed to create a public shelf"), category="error") return redirect(url_for('web.index')) - shelf.is_public = 1 if to_save.get("is_public") else 0 + is_public = 1 if to_save.get("is_public") == "on" else 0 if config.config_kobo_sync: shelf.kobo_sync = True if to_save.get("kobo_sync") else False + if shelf.kobo_sync: + 
ub.session.query(ub.ShelfArchive).filter(ub.ShelfArchive.user_id == current_user.id).filter( + ub.ShelfArchive.uuid == shelf.uuid).delete() + ub.session_commit() shelf_title = to_save.get("title", "") - if check_shelf_is_unique(shelf, shelf_title, shelf_id): + if check_shelf_is_unique(shelf_title, is_public, shelf_id): shelf.name = shelf_title + shelf.is_public = is_public if not shelf_id: shelf.user_id = int(current_user.id) ub.session.add(shelf) @@ -269,12 +338,12 @@ def create_edit_shelf(shelf, page_title, page, shelf_id=False): return redirect(url_for('shelf.show_shelf', shelf_id=shelf.id)) except (OperationalError, InvalidRequestError) as ex: ub.session.rollback() - log.debug_or_exception(ex) - log.error("Settings DB is not Writeable") - flash(_("Settings DB is not Writeable"), category="error") + log.error_or_exception(ex) + log.error_or_exception("Settings Database error: {}".format(ex)) + flash(_(u"Database error: %(error)s.", error=ex.orig), category="error") except Exception as ex: ub.session.rollback() - log.debug_or_exception(ex) + log.error_or_exception(ex) flash(_(u"There was an error"), category="error") return render_title_template('shelf_edit.html', shelf=shelf, @@ -284,12 +353,12 @@ def create_edit_shelf(shelf, page_title, page, shelf_id=False): sync_only_selected_shelves=sync_only_selected_shelves) -def check_shelf_is_unique(shelf, title, shelf_id=False): +def check_shelf_is_unique(title, is_public, shelf_id=False): if shelf_id: ident = ub.Shelf.id != shelf_id else: ident = true() - if shelf.is_public == 1: + if is_public == 1: is_shelf_name_unique = ub.session.query(ub.Shelf) \ .filter((ub.Shelf.name == title) & (ub.Shelf.is_public == 1)) \ .filter(ident) \ @@ -315,70 +384,13 @@ def check_shelf_is_unique(shelf, title, shelf_id=False): def delete_shelf_helper(cur_shelf): if not cur_shelf or not check_shelf_edit_permissions(cur_shelf): - return + return False shelf_id = cur_shelf.id ub.session.delete(cur_shelf) 
ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id).delete() ub.session.add(ub.ShelfArchive(uuid=cur_shelf.uuid, user_id=cur_shelf.user_id)) ub.session_commit("successfully deleted Shelf {}".format(cur_shelf.name)) - - -@shelf.route("/shelf/delete/") -@login_required -def delete_shelf(shelf_id): - cur_shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first() - try: - delete_shelf_helper(cur_shelf) - except InvalidRequestError: - ub.session.rollback() - log.error("Settings DB is not Writeable") - flash(_("Settings DB is not Writeable"), category="error") - return redirect(url_for('web.index')) - - -@shelf.route("/simpleshelf/") -@login_required_if_no_ano -def show_simpleshelf(shelf_id): - return render_show_shelf(2, shelf_id, 1, None) - - -@shelf.route("/shelf/", defaults={"sort_param": "order", 'page': 1}) -@shelf.route("/shelf//", defaults={'page': 1}) -@shelf.route("/shelf///") -@login_required_if_no_ano -def show_shelf(shelf_id, sort_param, page): - return render_show_shelf(1, shelf_id, page, sort_param) - - -@shelf.route("/shelf/order/", methods=["GET", "POST"]) -@login_required -def order_shelf(shelf_id): - if request.method == "POST": - to_save = request.form.to_dict() - books_in_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id).order_by( - ub.BookShelf.order.asc()).all() - counter = 0 - for book in books_in_shelf: - setattr(book, 'order', to_save[str(book.book_id)]) - counter += 1 - # if order diffrent from before -> shelf.last_modified = datetime.utcnow() - try: - ub.session.commit() - except (OperationalError, InvalidRequestError): - ub.session.rollback() - log.error("Settings DB is not Writeable") - flash(_("Settings DB is not Writeable"), category="error") - - shelf = ub.session.query(ub.Shelf).filter(ub.Shelf.id == shelf_id).first() - result = list() - if shelf and check_shelf_view_permissions(shelf): - result = calibre_db.session.query(db.Books) \ - .join(ub.BookShelf, ub.BookShelf.book_id 
== db.Books.id, isouter=True) \ - .add_columns(calibre_db.common_filters().label("visible")) \ - .filter(ub.BookShelf.shelf == shelf_id).order_by(ub.BookShelf.order.asc()).all() - return render_title_template('shelf_order.html', entries=result, - title=_(u"Change order of Shelf: '%(name)s'", name=shelf.name), - shelf=shelf, page="shelforder") + return True def change_shelf_order(shelf_id, order): @@ -398,7 +410,6 @@ def render_show_shelf(shelf_type, shelf_id, page_no, sort_param): # check user is allowed to access shelf if shelf and check_shelf_view_permissions(shelf): - if shelf_type == 1: # order = [ub.BookShelf.order.asc()] if sort_param == 'pubnew': @@ -429,7 +440,7 @@ def render_show_shelf(shelf_type, shelf_id, page_no, sort_param): db.Books, ub.BookShelf.shelf == shelf_id, [ub.BookShelf.order.asc()], - False, 0, + True, config.config_read_column, ub.BookShelf, ub.BookShelf.book_id == db.Books.id) # delete chelf entries where book is not existent anymore, can happen if book is deleted outside calibre-web wrong_entries = calibre_db.session.query(ub.BookShelf) \ @@ -440,10 +451,10 @@ def render_show_shelf(shelf_type, shelf_id, page_no, sort_param): try: ub.session.query(ub.BookShelf).filter(ub.BookShelf.book_id == entry.book_id).delete() ub.session.commit() - except (OperationalError, InvalidRequestError): + except (OperationalError, InvalidRequestError) as e: ub.session.rollback() - log.error("Settings DB is not Writeable") - flash(_("Settings DB is not Writeable"), category="error") + log.error_or_exception("Settings Database error: {}".format(e)) + flash(_(u"Database error: %(error)s.", error=e.orig), category="error") return render_title_template(page, entries=result, diff --git a/cps/static/css/caliBlur.css b/cps/static/css/caliBlur.css index 3a980180..b2b35423 100644 --- a/cps/static/css/caliBlur.css +++ b/cps/static/css/caliBlur.css @@ -5150,7 +5150,7 @@ body.login > div.navbar.navbar-default.navbar-static-top > div > div.navbar-head pointer-events: none 
} -#DeleteDomain:hover:before, #RestartDialog:hover:before, #ShutdownDialog:hover:before, #StatusDialog:hover:before, #deleteButton, #deleteModal:hover:before, body.mailset > div.container-fluid > div > div.col-sm-10 > div.discover td > a:hover { +#DeleteDomain:hover:before, #RestartDialog:hover:before, #ShutdownDialog:hover:before, #StatusDialog:hover:before, #deleteButton, #deleteModal:hover:before, #cancelTaskModal:hover:before, body.mailset > div.container-fluid > div > div.col-sm-10 > div.discover td > a:hover { cursor: pointer } @@ -5237,7 +5237,11 @@ body.admin > div.container-fluid > div > div.col-sm-10 > div.container-fluid > d margin-bottom: 20px } -body.admin:not(.modal-open) .btn-default { +body.admin > div.container-fluid div.scheduled_tasks_details { + margin-bottom: 20px +} + +body.admin .btn-default { margin-bottom: 10px } @@ -5468,7 +5472,7 @@ body.admin.modal-open .navbar { z-index: 0 !important } -#RestartDialog, #ShutdownDialog, #StatusDialog, #deleteModal { +#RestartDialog, #ShutdownDialog, #StatusDialog, #deleteModal, #cancelTaskModal { top: 0; overflow: hidden; padding-top: 70px; @@ -5478,7 +5482,7 @@ body.admin.modal-open .navbar { background: rgba(0, 0, 0, .5) } -#RestartDialog:before, #ShutdownDialog:before, #StatusDialog:before, #deleteModal:before { +#RestartDialog:before, #ShutdownDialog:before, #StatusDialog:before, #deleteModal:before, #cancelTaskModal:before { content: "\E208"; padding-right: 10px; display: block; @@ -5500,18 +5504,18 @@ body.admin.modal-open .navbar { z-index: 99 } -#RestartDialog.in:before, #ShutdownDialog.in:before, #StatusDialog.in:before, #deleteModal.in:before { +#RestartDialog.in:before, #ShutdownDialog.in:before, #StatusDialog.in:before, #deleteModal.in:before, #cancelTaskModal.in:before { -webkit-transform: translate(0, 0); -ms-transform: translate(0, 0); transform: translate(0, 0) } -#RestartDialog > .modal-dialog, #ShutdownDialog > .modal-dialog, #StatusDialog > .modal-dialog, #deleteModal > .modal-dialog 
{ +#RestartDialog > .modal-dialog, #ShutdownDialog > .modal-dialog, #StatusDialog > .modal-dialog, #deleteModal > .modal-dialog, #cancelTaskModal > .modal-dialog { width: 450px; margin: auto } -#RestartDialog > .modal-dialog > .modal-content, #ShutdownDialog > .modal-dialog > .modal-content, #StatusDialog > .modal-dialog > .modal-content, #deleteModal > .modal-dialog > .modal-content { +#RestartDialog > .modal-dialog > .modal-content, #ShutdownDialog > .modal-dialog > .modal-content, #StatusDialog > .modal-dialog > .modal-content, #deleteModal > .modal-dialog > .modal-content, #cancelTaskModal > .modal-dialog > .modal-content { max-height: calc(100% - 90px); -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5); box-shadow: 0 5px 15px rgba(0, 0, 0, .5); @@ -5522,7 +5526,7 @@ body.admin.modal-open .navbar { width: 450px } -#RestartDialog > .modal-dialog > .modal-content > .modal-header, #ShutdownDialog > .modal-dialog > .modal-content > .modal-header, #StatusDialog > .modal-dialog > .modal-content > .modal-header, #deleteModal > .modal-dialog > .modal-content > .modal-header { +#RestartDialog > .modal-dialog > .modal-content > .modal-header, #ShutdownDialog > .modal-dialog > .modal-content > .modal-header, #StatusDialog > .modal-dialog > .modal-content > .modal-header, #deleteModal > .modal-dialog > .modal-content > .modal-header, #cancelTaskModal > .modal-dialog > .modal-content > .modal-header { padding: 15px 20px; border-radius: 3px 3px 0 0; line-height: 1.71428571; @@ -5535,7 +5539,7 @@ body.admin.modal-open .navbar { text-align: left } -#RestartDialog > .modal-dialog > .modal-content > .modal-header:before, #ShutdownDialog > .modal-dialog > .modal-content > .modal-header:before, #StatusDialog > .modal-dialog > .modal-content > .modal-header:before, #deleteModal > .modal-dialog > .modal-content > .modal-header:before { +#RestartDialog > .modal-dialog > .modal-content > .modal-header:before, #ShutdownDialog > .modal-dialog > .modal-content > .modal-header:before, 
#StatusDialog > .modal-dialog > .modal-content > .modal-header:before, #deleteModal > .modal-dialog > .modal-content > .modal-header:before, #cancelTaskModal > .modal-dialog > .modal-content > .modal-header:before { padding-right: 10px; font-size: 18px; color: #999; @@ -5564,6 +5568,11 @@ body.admin.modal-open .navbar { font-family: plex-icons-new, serif } +#cancelTaskModal > .modal-dialog > .modal-content > .modal-header:before { + content: "\EA6D"; + font-family: plex-icons-new, serif +} + #RestartDialog > .modal-dialog > .modal-content > .modal-header:after { content: "Restart Calibre-Web"; display: inline-block; @@ -5588,7 +5597,13 @@ body.admin.modal-open .navbar { font-size: 20px } -#StatusDialog > .modal-dialog > .modal-content > .modal-header > span, #deleteModal > .modal-dialog > .modal-content > .modal-header > span, #loader > center > img, .rating-mobile { +#cancelTaskModal > .modal-dialog > .modal-content > .modal-header:after { + content: "Delete Book"; + display: inline-block; + font-size: 20px +} + +#StatusDialog > .modal-dialog > .modal-content > .modal-header > span, #deleteModal > .modal-dialog > .modal-content > .modal-header > span, #cancelTaskModal > .modal-dialog > .modal-content > .modal-header > span, #loader > center > img, .rating-mobile { display: none } @@ -5602,7 +5617,7 @@ body.admin.modal-open .navbar { text-align: left } -#ShutdownDialog > .modal-dialog > .modal-content > .modal-body, #StatusDialog > .modal-dialog > .modal-content > .modal-body, #deleteModal > .modal-dialog > .modal-content > .modal-body { +#ShutdownDialog > .modal-dialog > .modal-content > .modal-body, #StatusDialog > .modal-dialog > .modal-content > .modal-body, #deleteModal > .modal-dialog > .modal-content > .modal-body, #cancelTaskModal > .modal-dialog > .modal-content > .modal-body { padding: 20px 20px 40px; font-size: 16px; line-height: 1.6em; @@ -5612,7 +5627,7 @@ body.admin.modal-open .navbar { text-align: left } -#RestartDialog > .modal-dialog > 
.modal-content > .modal-body > p, #ShutdownDialog > .modal-dialog > .modal-content > .modal-body > p, #StatusDialog > .modal-dialog > .modal-content > .modal-body > p, #deleteModal > .modal-dialog > .modal-content > .modal-body > p { +#RestartDialog > .modal-dialog > .modal-content > .modal-body > p, #ShutdownDialog > .modal-dialog > .modal-content > .modal-body > p, #StatusDialog > .modal-dialog > .modal-content > .modal-body > p, #deleteModal > .modal-dialog > .modal-content > .modal-body > p, #cancelTaskModal > .modal-dialog > .modal-content > .modal-body > p { padding: 20px 20px 0 0; font-size: 16px; line-height: 1.6em; @@ -5621,7 +5636,7 @@ body.admin.modal-open .navbar { background: #282828 } -#RestartDialog > .modal-dialog > .modal-content > .modal-body > .btn-default:not(#restart), #ShutdownDialog > .modal-dialog > .modal-content > .modal-body > .btn-default:not(#shutdown), #deleteModal > .modal-dialog > .modal-content > .modal-footer > .btn-default { +#RestartDialog > .modal-dialog > .modal-content > .modal-body > .btn-default:not(#restart), #ShutdownDialog > .modal-dialog > .modal-content > .modal-body > .btn-default:not(#shutdown), #deleteModal > .modal-dialog > .modal-content > .modal-footer > .btn-default, #cancelTaskModal > .modal-dialog > .modal-content > .modal-footer > .btn-default { float: right; z-index: 9; position: relative; @@ -5669,6 +5684,18 @@ body.admin.modal-open .navbar { border-radius: 3px } +#cancelTaskModal > .modal-dialog > .modal-content > .modal-footer > .btn-danger { + float: right; + z-index: 9; + position: relative; + margin: 0 0 0 10px; + min-width: 80px; + padding: 10px 18px; + font-size: 16px; + line-height: 1.33; + border-radius: 3px +} + #RestartDialog > .modal-dialog > .modal-content > .modal-body > .btn-default:not(#restart) { margin: 25px 0 0 10px } @@ -5681,7 +5708,11 @@ body.admin.modal-open .navbar { margin: 0 0 0 10px } -#RestartDialog > .modal-dialog > .modal-content > .modal-body > .btn-default:not(#restart):hover, 
#ShutdownDialog > .modal-dialog > .modal-content > .modal-body > .btn-default:not(#shutdown):hover, #deleteModal > .modal-dialog > .modal-content > .modal-footer > .btn-default:hover { +#cancelTaskModal > .modal-dialog > .modal-content > .modal-footer > .btn-default { + margin: 0 0 0 10px +} + +#RestartDialog > .modal-dialog > .modal-content > .modal-body > .btn-default:not(#restart):hover, #ShutdownDialog > .modal-dialog > .modal-content > .modal-body > .btn-default:not(#shutdown):hover, #deleteModal > .modal-dialog > .modal-content > .modal-footer > .btn-default:hover, #cancelTaskModal > .modal-dialog > .modal-content > .modal-footer > .btn-default:hover { background-color: hsla(0, 0%, 100%, .3) } @@ -7303,11 +7334,11 @@ body.edituser.admin > div.container-fluid > div.row-fluid > div.col-sm-10 > div. background-color: transparent !important } - #RestartDialog > .modal-dialog, #ShutdownDialog > .modal-dialog, #StatusDialog > .modal-dialog, #deleteModal > .modal-dialog { + #RestartDialog > .modal-dialog, #ShutdownDialog > .modal-dialog, #StatusDialog > .modal-dialog, #deleteModal > .modal-dialog, #cancelTaskModal > .modal-dialog { max-width: calc(100vw - 40px) } - #RestartDialog > .modal-dialog > .modal-content, #ShutdownDialog > .modal-dialog > .modal-content, #StatusDialog > .modal-dialog > .modal-content, #deleteModal > .modal-dialog > .modal-content { + #RestartDialog > .modal-dialog > .modal-content, #ShutdownDialog > .modal-dialog > .modal-content, #StatusDialog > .modal-dialog > .modal-content, #deleteModal > .modal-dialog > .modal-content, #cancelTaskModal > .modal-dialog > .modal-content { max-width: calc(100vw - 40px); left: 0 } @@ -7457,7 +7488,7 @@ body.edituser.admin > div.container-fluid > div.row-fluid > div.col-sm-10 > div. 
padding: 30px 15px } - #RestartDialog.in:before, #ShutdownDialog.in:before, #StatusDialog.in:before, #deleteModal.in:before { + #RestartDialog.in:before, #ShutdownDialog.in:before, #StatusDialog.in:before, #deleteModal.in:before, #cancelTaskModal.in:before { left: auto; right: 34px } diff --git a/cps/static/js/caliBlur.js b/cps/static/js/caliBlur.js index 3203c255..f99779bd 100644 --- a/cps/static/js/caliBlur.js +++ b/cps/static/js/caliBlur.js @@ -270,7 +270,7 @@ if ($("body.book").length > 0) { if (position + $("#add-to-shelves").width() > $(window).width()) { positionOff = position + $("#add-to-shelves").width() - $(window).width(); - adsPosition = position - positionOff - 5 + adsPosition = position - positionOff - 5; $("#add-to-shelves").attr("style", "left: " + adsPosition + "px !important; right: auto; top: " + topPos + "px"); } else { $("#add-to-shelves").attr("style", "left: " + position + "px !important; right: auto; top: " + topPos + "px"); @@ -429,7 +429,7 @@ if($("body.advsearch").length > 0) { if (position + $("#add-to-shelves").width() > $(window).width()) { positionOff = position + $("#add-to-shelves").width() - $(window).width(); - adsPosition = position - positionOff - 5 + adsPosition = position - positionOff - 5; $("#add-to-shelves").attr("style", "left: " + adsPosition + "px !important; right: auto; top: " + topPos + "px"); } else { $("#add-to-shelves").attr("style", "left: " + position + "px !important; right: auto; top: " + topPos + "px"); @@ -479,12 +479,12 @@ if ($.trim($("#add-to-shelves").html()).length === 0) { $("#add-to-shelf").addClass("empty-ul"); } -shelfLength = $("#add-to-shelves li").length -emptyLength = 0 +shelfLength = $("#add-to-shelves li").length; +emptyLength = 0; $("#add-to-shelves").on("click", "li a", function () { console.log("#remove-from-shelves change registered"); - emptyLength++ + emptyLength++; setTimeout(function () { if (emptyLength >= shelfLength) { diff --git a/cps/static/js/details.js b/cps/static/js/details.js 
index 9caf9470..f0259f8c 100644 --- a/cps/static/js/details.js +++ b/cps/static/js/details.js @@ -28,14 +28,24 @@ $("#have_read_cb").on("change", function() { data: $(this).closest("form").serialize(), error: function(response) { var data = [{type:"danger", message:response.responseText}] - $("#flash_success").remove(); + // $("#flash_success").parent().remove(); $("#flash_danger").remove(); + $(".row-fluid.text-center").remove(); if (!jQuery.isEmptyObject(data)) { - data.forEach(function (item) { - $(".navbar").after('
' + - '
' + item.message + '
' + - '
'); - }); + $("#have_read_cb").prop("checked", !$("#have_read_cb").prop("checked")); + if($("#bookDetailsModal").is(":visible")) { + data.forEach(function (item) { + $(".modal-header").after('
' + item.message + '
'); + }); + } else + { + data.forEach(function (item) { + $(".navbar").after('
' + + '
' + item.message + '
' + + '
'); + }); + } } } }); @@ -59,17 +69,20 @@ $("#archived_cb").on("change", function() { ) }; - $("#shelf-actions").on("click", "[data-shelf-action]", function (e) { + $("#add-to-shelves, #remove-from-shelves").on("click", "[data-shelf-action]", function (e) { e.preventDefault(); - - $.get(this.href) + $.ajax({ + url: $(this).data('href'), + method:"post", + data: {csrf_token:$("input[name='csrf_token']").val()}, + }) .done(function() { var $this = $(this); switch ($this.data("shelf-action")) { case "add": $("#remove-from-shelves").append( templates.remove({ - add: this.href, + add: $this.data('href'), remove: $this.data("remove-href"), content: $("
").text(this.textContent).html() }) @@ -79,7 +92,7 @@ $("#archived_cb").on("change", function() { $("#add-to-shelves").append( templates.add({ add: $this.data("add-href"), - remove: this.href, + remove: $this.data('href'), content: $("
").text(this.textContent).html(), }) ); diff --git a/cps/static/js/edit_books.js b/cps/static/js/edit_books.js index 0bfe078c..c1eb319d 100644 --- a/cps/static/js/edit_books.js +++ b/cps/static/js/edit_books.js @@ -33,7 +33,7 @@ $(".datepicker").datepicker({ if (results) { pubDate = new Date(results[1], parseInt(results[2], 10) - 1, results[3]) || new Date(this.value); $(this).next('input') - .val(pubDate.toLocaleDateString(language)) + .val(pubDate.toLocaleDateString(language.replaceAll("_","-"))) .removeClass("hidden"); } }).trigger("change"); diff --git a/cps/static/js/get_meta.js b/cps/static/js/get_meta.js index 51ab740d..43a40fa6 100644 --- a/cps/static/js/get_meta.js +++ b/cps/static/js/get_meta.js @@ -26,21 +26,28 @@ $(function () { ) }; + function getUniqueValues(attribute_name, book){ + var presentArray = $.map($("#"+attribute_name).val().split(","), $.trim); + if ( presentArray.length === 1 && presentArray[0] === "") { + presentArray = []; + } + $.each(book[attribute_name], function(i, el) { + if ($.inArray(el, presentArray) === -1) presentArray.push(el); + }); + return presentArray + } + function populateForm (book) { tinymce.get("description").setContent(book.description); - var uniqueTags = $.map($("#tags").val().split(","), $.trim); - if ( uniqueTags.length == 1 && uniqueTags[0] == "") { - uniqueTags = []; - } - $.each(book.tags, function(i, el) { - if ($.inArray(el, uniqueTags) === -1) uniqueTags.push(el); - }); + var uniqueTags = getUniqueValues('tags', book) + var uniqueLanguages = getUniqueValues('languages', book) var ampSeparatedAuthors = (book.authors || []).join(" & "); $("#bookAuthor").val(ampSeparatedAuthors); $("#book_title").val(book.title); $("#tags").val(uniqueTags.join(", ")); + $("#languages").val(uniqueLanguages.join(", ")); $("#rating").data("rating").setValue(Math.round(book.rating)); - if(book.cover !== null){ + if(book.cover && $("#cover_url").length){ $(".cover img").attr("src", book.cover); $("#cover_url").val(book.cover); } @@ 
-48,7 +55,32 @@ $(function () { $("#publisher").val(book.publisher); if (typeof book.series !== "undefined") { $("#series").val(book.series); + $("#series_index").val(book.series_index); } + if (typeof book.identifiers !== "undefined") { + populateIdentifiers(book.identifiers) + } + } + + function populateIdentifiers(identifiers){ + for (const property in identifiers) { + console.log(`${property}: ${identifiers[property]}`); + if ($('input[name="identifier-type-'+property+'"]').length) { + $('input[name="identifier-val-'+property+'"]').val(identifiers[property]) + } + else { + addIdentifier(property, identifiers[property]) + } + } + } + + function addIdentifier(name, value){ + var line = ''; + line += ''; + line += ''; + line += ''+_("Remove")+''; + line += ''; + $("#identifier-table").append(line); } function doSearch (keyword) { @@ -60,14 +92,19 @@ $(function () { data: {"query": keyword}, dataType: "json", success: function success(data) { - $("#meta-info").html("
    "); - data.forEach(function(book) { - var $book = $(templates.bookResult(book)); - $book.find("img").on("click", function () { - populateForm(book); + if (data.length) { + $("#meta-info").html("
      "); + data.forEach(function(book) { + var $book = $(templates.bookResult(book)); + $book.find("img").on("click", function () { + populateForm(book); + }); + $("#book-list").append($book); }); - $("#book-list").append($book); - }); + } + else { + $("#meta-info").html("

      " + msg.no_result + "!

      " + $("#meta-info")[0].innerHTML) + } }, error: function error() { $("#meta-info").html("

      " + msg.search_error + "!

      " + $("#meta-info")[0].innerHTML); @@ -128,9 +165,7 @@ $(function () { e.preventDefault(); keyword = $("#keyword").val(); $('.pill').each(function(){ - // console.log($(this).data('control')); $(this).data("initial", $(this).prop('checked')); - // console.log($(this).data('initial')); }); doSearch(keyword); }); diff --git a/cps/static/js/libs/bootstrap-datepicker/locales/bootstrap-datepicker.ko.min.js b/cps/static/js/libs/bootstrap-datepicker/locales/bootstrap-datepicker.ko.min.js new file mode 100644 index 00000000..9751ee5c --- /dev/null +++ b/cps/static/js/libs/bootstrap-datepicker/locales/bootstrap-datepicker.ko.min.js @@ -0,0 +1 @@ +!function(a){a.fn.datepicker.dates.ko={days:["일요일","월요일","화요일","수요일","목요일","금요일","토요일"],daysShort:["일","월","화","수","목","금","토"],daysMin:["일","월","화","수","목","금","토"],months:["1월","2월","3월","4월","5월","6월","7월","8월","9월","10월","11월","12월"],monthsShort:["1월","2월","3월","4월","5월","6월","7월","8월","9월","10월","11월","12월"],today:"오늘",clear:"삭제",format:"yyyy-mm-dd",titleFormat:"yyyy년mm월",weekStart:0}}(jQuery); \ No newline at end of file diff --git a/cps/static/js/libs/tinymce/langs/ko.js b/cps/static/js/libs/tinymce/langs/ko.js new file mode 100644 index 00000000..c6b3f266 --- /dev/null +++ b/cps/static/js/libs/tinymce/langs/ko.js @@ -0,0 +1,261 @@ +tinymce.addI18n('ko_KR',{ +"Redo": "\ub2e4\uc2dc\uc2e4\ud589", +"Undo": "\uc2e4\ud589\ucde8\uc18c", +"Cut": "\uc798\ub77c\ub0b4\uae30", +"Copy": "\ubcf5\uc0ac\ud558\uae30", +"Paste": "\ubd99\uc5ec\ub123\uae30", +"Select all": "\uc804\uccb4\uc120\ud0dd", +"New document": "\uc0c8 \ubb38\uc11c", +"Ok": "\ud655\uc778", +"Cancel": "\ucde8\uc18c", +"Visual aids": "\uc2dc\uac01\uad50\uc7ac", +"Bold": "\uad75\uac8c", +"Italic": "\uae30\uc6b8\uc784\uaf34", +"Underline": "\ubc11\uc904", +"Strikethrough": "\ucde8\uc18c\uc120", +"Superscript": "\uc717\ucca8\uc790", +"Subscript": "\uc544\ub798\ucca8\uc790", +"Clear formatting": "\ud3ec\ub9f7\ucd08\uae30\ud654", +"Align left": "\uc67c\ucabd\uc815\ub82c", 
+"Align center": "\uac00\uc6b4\ub370\uc815\ub82c", +"Align right": "\uc624\ub978\ucabd\uc815\ub82c", +"Justify": "\uc591\ucabd\uc815\ub82c", +"Bullet list": "\uc810\ub9ac\uc2a4\ud2b8", +"Numbered list": "\uc22b\uc790\ub9ac\uc2a4\ud2b8", +"Decrease indent": "\ub0b4\uc5b4\uc4f0\uae30", +"Increase indent": "\ub4e4\uc5ec\uc4f0\uae30", +"Close": "\ub2eb\uae30", +"Formats": "\ud3ec\ub9f7", +"Your browser doesn't support direct access to the clipboard. Please use the Ctrl+X\/C\/V keyboard shortcuts instead.": "\ube0c\ub77c\uc6b0\uc838\uac00 \ud074\ub9bd\ubcf4\ub4dc \uc811\uadfc\uc744 \ud5c8\uc6a9\ud558\uc9c0 \uc54a\uc2b5\ub2c8\ub2e4. Ctrl+X\/C\/V \ud0a4\ub97c \uc774\uc6a9\ud574 \uc8fc\uc138\uc694.", +"Headers": "\uc2a4\ud0c0\uc77c", +"Header 1": "\uc81c\ubaa9 1", +"Header 2": "\uc81c\ubaa9 2", +"Header 3": "\uc81c\ubaa9 3", +"Header 4": "\uc81c\ubaa9 4", +"Header 5": "\uc81c\ubaa9 5", +"Header 6": "\uc81c\ubaa9 6", +"Headings": "\uc81c\ubaa9", +"Heading 1": "\uc81c\ubaa9 1", +"Heading 2": "\uc81c\ubaa9 2", +"Heading 3": "\uc81c\ubaa9 3", +"Heading 4": "\uc81c\ubaa9 4", +"Heading 5": "\uc81c\ubaa9 5", +"Heading 6": "\uc81c\ubaa9 6", +"Preformatted": "Preformatted", +"Div": "\uad6c\ubd84", +"Pre": "Pre", +"Code": "\ucf54\ub4dc", +"Paragraph": "\ub2e8\ub77d", +"Blockquote": "\uad6c\ud68d", +"Inline": "\ub77c\uc778 \uc124\uc815", +"Blocks": "\ube14\ub85d \uc124\uc815", +"Paste is now in plain text mode. Contents will now be pasted as plain text until you toggle this option off.": "\uc2a4\ud0c0\uc77c\ubcf5\uc0ac \ub044\uae30. 
\uc774 \uc635\uc158\uc744 \ub044\uae30 \uc804\uc5d0\ub294 \ubcf5\uc0ac \uc2dc, \uc2a4\ud0c0\uc77c\uc774 \ubcf5\uc0ac\ub418\uc9c0 \uc54a\uc2b5\ub2c8\ub2e4.", +"Font Family": "\uae00\uaf34", +"Font Sizes": "\ud3f0\ud2b8 \uc0ac\uc774\uc988", +"Class": "\ud074\ub798\uc2a4", +"Browse for an image": "\uc774\ubbf8\uc9c0 \ucc3e\uae30", +"OR": "\ud639\uc740", +"Drop an image here": "\uc774\ubbf8\uc9c0 \ub4dc\ub86d", +"Upload": "\uc5c5\ub85c\ub4dc", +"Block": "\ube14\ub85d", +"Align": "\uc815\ub82c", +"Default": "\uae30\ubcf8", +"Circle": "\uc6d0", +"Disc": "\uc6d0\ubc18", +"Square": "\uc0ac\uac01", +"Lower Alpha": "\uc54c\ud30c\ubcb3 \uc18c\ubb38\uc790", +"Lower Greek": "\uadf8\ub9ac\uc2a4\uc5b4 \uc18c\ubb38\uc790", +"Lower Roman": "\ub85c\ub9c8\uc790 \uc18c\ubb38\uc790", +"Upper Alpha": "\uc54c\ud30c\ubcb3 \uc18c\ubb38\uc790", +"Upper Roman": "\ub85c\ub9c8\uc790 \ub300\ubb38\uc790", +"Anchor": "\uc575\ucee4", +"Name": "\uc774\ub984", +"Id": "\uc544\uc774\ub514", +"Id should start with a letter, followed only by letters, numbers, dashes, dots, colons or underscores.": "\uc544\uc774\ub514\ub294 \ubb38\uc790, \uc22b\uc790, \ub300\uc2dc, \uc810, \ucf5c\ub860 \ub610\ub294 \ubc11\uc904\ub85c \uc2dc\uc791\ud574\uc57c\ud569\ub2c8\ub2e4.", +"You have unsaved changes are you sure you want to navigate away?": "\uc800\uc7a5\ud558\uc9c0 \uc54a\uc740 \uc815\ubcf4\uac00 \uc788\uc2b5\ub2c8\ub2e4. 
\uc774 \ud398\uc774\uc9c0\ub97c \ubc97\uc5b4\ub098\uc2dc\uaca0\uc2b5\ub2c8\uae4c?", +"Restore last draft": "\ub9c8\uc9c0\ub9c9 \ucd08\uc548 \ubcf5\uc6d0", +"Special character": "\ud2b9\uc218\ubb38\uc790", +"Source code": "\uc18c\uc2a4\ucf54\ub4dc", +"Insert\/Edit code sample": "\ucf54\ub4dc\uc0d8\ud50c \uc0bd\uc785\/\ud3b8\uc9d1", +"Language": "\uc5b8\uc5b4", +"Code sample": "\ucf54\ub4dc\uc0d8\ud50c", +"Color": "\uc0c9\uc0c1", +"R": "R", +"G": "G", +"B": "B", +"Left to right": "\uc67c\ucabd\uc5d0\uc11c \uc624\ub978\ucabd", +"Right to left": "\uc624\ub978\ucabd\uc5d0\uc11c \uc67c\ucabd", +"Emoticons": "\uc774\ubaa8\ud2f0\ucf58", +"Document properties": "\ubb38\uc11c \uc18d\uc131", +"Title": "\uc81c\ubaa9", +"Keywords": "\ud0a4\uc6cc\ub4dc", +"Description": "\uc124\uba85", +"Robots": "\ub85c\ubd07", +"Author": "\uc800\uc790", +"Encoding": "\uc778\ucf54\ub529", +"Fullscreen": "\uc804\uccb4\ud654\uba74", +"Action": "\ub3d9\uc791", +"Shortcut": "\ub2e8\ucd95\ud0a4", +"Help": "\ub3c4\uc6c0\ub9d0", +"Address": "\uc8fc\uc18c", +"Focus to menubar": "\uba54\ub274\uc5d0 \ud3ec\ucee4\uc2a4", +"Focus to toolbar": "\ud234\ubc14\uc5d0 \ud3ec\ucee4\uc2a4", +"Focus to element path": "element path\uc5d0 \ud3ec\ucee4\uc2a4", +"Focus to contextual toolbar": "\ucf04\ud14d\uc2a4\ud2b8 \ud234\ubc14\uc5d0 \ud3ec\ucee4\uc2a4", +"Insert link (if link plugin activated)": "\ub9c1\ud06c \uc0bd\uc785 (link \ud50c\ub7ec\uadf8\uc778\uc774 \ud65c\uc131\ud654\ub41c \uc0c1\ud0dc\uc5d0\uc11c)", +"Save (if save plugin activated)": "\uc800\uc7a5 (save \ud50c\ub7ec\uadf8\uc778\uc774 \ud65c\uc131\ud654\ub41c \uc0c1\ud0dc\uc5d0\uc11c)", +"Find (if searchreplace plugin activated)": "\ucc3e\uae30(searchreplace \ud50c\ub7ec\uadf8\uc778\uc774 \ud65c\uc131\ud654\ub41c \uc0c1\ud0dc\uc5d0\uc11c)", +"Plugins installed ({0}):": "\uc124\uce58\ub41c \ud50c\ub7ec\uadf8\uc778 ({0}):", +"Premium plugins:": "\uace0\uae09 \ud50c\ub7ec\uadf8\uc778", +"Learn more...": "\uc880 \ub354 \uc0b4\ud3b4\ubcf4\uae30", +"You are 
using {0}": "{0}\ub97c \uc0ac\uc6a9\uc911", +"Plugins": "\ud50c\ub7ec\uadf8\uc778", +"Handy Shortcuts": "\ub2e8\ucd95\ud0a4", +"Horizontal line": "\uac00\ub85c", +"Insert\/edit image": "\uc774\ubbf8\uc9c0 \uc0bd\uc785\/\uc218\uc815", +"Image description": "\uc774\ubbf8\uc9c0 \uc124\uba85", +"Source": "\uc18c\uc2a4", +"Dimensions": "\ud06c\uae30", +"Constrain proportions": "\uc791\uc5c5 \uc81c\ud55c", +"General": "\uc77c\ubc18", +"Advanced": "\uace0\uae09", +"Style": "\uc2a4\ud0c0\uc77c", +"Vertical space": "\uc218\uc9c1 \uacf5\ubc31", +"Horizontal space": "\uc218\ud3c9 \uacf5\ubc31", +"Border": "\ud14c\ub450\ub9ac", +"Insert image": "\uc774\ubbf8\uc9c0 \uc0bd\uc785", +"Image": "\uc774\ubbf8\uc9c0", +"Image list": "\uc774\ubbf8\uc9c0 \ubaa9\ub85d", +"Rotate counterclockwise": "\uc2dc\uacc4\ubc18\ub300\ubc29\ud5a5\uc73c\ub85c \ud68c\uc804", +"Rotate clockwise": "\uc2dc\uacc4\ubc29\ud5a5\uc73c\ub85c \ud68c\uc804", +"Flip vertically": "\uc218\uc9c1 \ub4a4\uc9d1\uae30", +"Flip horizontally": "\uc218\ud3c9 \ub4a4\uc9d1\uae30", +"Edit image": "\uc774\ubbf8\uc9c0 \ud3b8\uc9d1", +"Image options": "\uc774\ubbf8\uc9c0 \uc635\uc158", +"Zoom in": "\ud655\ub300", +"Zoom out": "\ucd95\uc18c", +"Crop": "\uc790\ub974\uae30", +"Resize": "\ud06c\uae30 \uc870\uc808", +"Orientation": "\ubc29\ud5a5", +"Brightness": "\ubc1d\uae30", +"Sharpen": "\uc120\uba85\ud558\uac8c", +"Contrast": "\ub300\ube44", +"Color levels": "\uc0c9\uc0c1\ub808\ubca8", +"Gamma": "\uac10\ub9c8", +"Invert": "\ubc18\uc804", +"Apply": "\uc801\uc6a9", +"Back": "\ub4a4\ub85c", +"Insert date\/time": "\ub0a0\uc9dc\/\uc2dc\uac04\uc0bd\uc785", +"Date\/time": "\ub0a0\uc9dc\/\uc2dc\uac04", +"Insert link": "\ub9c1\ud06c \uc0bd\uc785 ", +"Insert\/edit link": "\ub9c1\ud06c \uc0bd\uc785\/\uc218\uc815", +"Text to display": "\ubcf8\ubb38", +"Url": "\uc8fc\uc18c", +"Target": "\ub300\uc0c1", +"None": "\uc5c6\uc74c", +"New window": "\uc0c8\ucc3d", +"Remove link": "\ub9c1\ud06c\uc0ad\uc81c", +"Anchors": "\ucc45\uac08\ud53c", +"Link": 
"\ub9c1\ud06c", +"Paste or type a link": "\ub9c1\ud06c\ub97c \ubd99\uc5ec\ub123\uac70\ub098 \uc785\ub825\ud558\uc138\uc694", +"The URL you entered seems to be an email address. Do you want to add the required mailto: prefix?": "\ud604\uc7ac E-mail\uc8fc\uc18c\ub97c \uc785\ub825\ud558\uc168\uc2b5\ub2c8\ub2e4. E-mail \uc8fc\uc18c\uc5d0 \ub9c1\ud06c\ub97c \uac78\uae4c\uc694?", +"The URL you entered seems to be an external link. Do you want to add the required http:\/\/ prefix?": "\ud604\uc7ac \uc6f9\uc0ac\uc774\ud2b8 \uc8fc\uc18c\ub97c \uc785\ub825\ud558\uc168\uc2b5\ub2c8\ub2e4. \ud574\ub2f9 \uc8fc\uc18c\uc5d0 \ub9c1\ud06c\ub97c \uac78\uae4c\uc694?", +"Link list": "\ub9c1\ud06c \ub9ac\uc2a4\ud2b8", +"Insert video": "\ube44\ub514\uc624 \uc0bd\uc785", +"Insert\/edit video": "\ube44\ub514\uc624 \uc0bd\uc785\/\uc218\uc815", +"Insert\/edit media": "\ubbf8\ub514\uc5b4 \uc0bd\uc785\/\uc218\uc815", +"Alternative source": "\ub300\uccb4 \uc18c\uc2a4", +"Poster": "\ud3ec\uc2a4\ud130", +"Paste your embed code below:": "\uc544\ub798\uc5d0 \ucf54\ub4dc\ub97c \ubd99\uc5ec\ub123\uc73c\uc138\uc694:", +"Embed": "\uc0bd\uc785", +"Media": "\ubbf8\ub514\uc5b4", +"Nonbreaking space": "\ub744\uc5b4\uc4f0\uae30", +"Page break": "\ud398\uc774\uc9c0 \uad6c\ubd84\uc790", +"Paste as text": "\ud14d\uc2a4\ud2b8\ub85c \ubd99\uc5ec\ub123\uae30", +"Preview": "\ubbf8\ub9ac\ubcf4\uae30", +"Print": "\ucd9c\ub825", +"Save": "\uc800\uc7a5", +"Find": "\ucc3e\uae30", +"Replace with": "\uad50\uccb4", +"Replace": "\uad50\uccb4", +"Replace all": "\uc804\uccb4 \uad50\uccb4", +"Prev": "\uc774\uc804", +"Next": "\ub2e4\uc74c", +"Find and replace": "\ucc3e\uc544\uc11c \uad50\uccb4", +"Could not find the specified string.": "\ubb38\uc790\ub97c \ucc3e\uc744 \uc218 \uc5c6\uc2b5\ub2c8\ub2e4.", +"Match case": "\ub300\uc18c\ubb38\uc790 \uc77c\uce58", +"Whole words": "\uc804\uccb4 \ub2e8\uc5b4", +"Spellcheck": "\ubb38\ubc95\uccb4\ud06c", +"Ignore": "\ubb34\uc2dc", +"Ignore all": "\uc804\uccb4\ubb34\uc2dc", +"Finish": 
"\uc644\ub8cc", +"Add to Dictionary": "\uc0ac\uc804\uc5d0 \ucd94\uac00", +"Insert table": "\ud14c\uc774\ube14 \uc0bd\uc785", +"Table properties": "\ud14c\uc774\ube14 \uc18d\uc131", +"Delete table": "\ud14c\uc774\ube14 \uc0ad\uc81c", +"Cell": "\uc140", +"Row": "\uc5f4", +"Column": "\ud589", +"Cell properties": "\uc140 \uc18d", +"Merge cells": "\uc140 \ud569\uce58\uae30", +"Split cell": "\uc140 \ub098\ub204\uae30", +"Insert row before": "\uc774\uc804\uc5d0 \ud589 \uc0bd\uc785", +"Insert row after": "\ub2e4\uc74c\uc5d0 \ud589 \uc0bd\uc785", +"Delete row": "\ud589 \uc9c0\uc6b0\uae30", +"Row properties": "\ud589 \uc18d\uc131", +"Cut row": "\ud589 \uc798\ub77c\ub0b4\uae30", +"Copy row": "\ud589 \ubcf5\uc0ac", +"Paste row before": "\uc774\uc804\uc5d0 \ud589 \ubd99\uc5ec\ub123\uae30", +"Paste row after": "\ub2e4\uc74c\uc5d0 \ud589 \ubd99\uc5ec\ub123\uae30", +"Insert column before": "\uc774\uc804\uc5d0 \ud589 \uc0bd\uc785", +"Insert column after": "\ub2e4\uc74c\uc5d0 \uc5f4 \uc0bd\uc785", +"Delete column": "\uc5f4 \uc9c0\uc6b0\uae30", +"Cols": "\uc5f4", +"Rows": "\ud589", +"Width": "\ub113\uc774", +"Height": "\ub192\uc774", +"Cell spacing": "\uc140 \uac04\uaca9", +"Cell padding": "\uc140 \uc548\ucabd \uc5ec\ubc31", +"Caption": "\ucea1\uc158", +"Left": "\uc67c\ucabd", +"Center": "\uac00\uc6b4\ub370", +"Right": "\uc624\ub978\ucabd", +"Cell type": "\uc140 \ud0c0\uc785", +"Scope": "\ubc94\uc704", +"Alignment": "\uc815\ub82c", +"H Align": "\uac00\ub85c \uc815\ub82c", +"V Align": "\uc138\ub85c \uc815\ub82c", +"Top": "\uc0c1\ub2e8", +"Middle": "\uc911\uac04", +"Bottom": "\ud558\ub2e8", +"Header cell": "\ud5e4\ub354 \uc140", +"Row group": "\ud589 \uadf8\ub8f9", +"Column group": "\uc5f4 \uadf8\ub8f9", +"Row type": "\ud589 \ud0c0\uc785", +"Header": "\ud5e4\ub354", +"Body": "\ubc14\ub514", +"Footer": "\ud478\ud130", +"Border color": "\ud14c\ub450\ub9ac \uc0c9", +"Insert template": "\ud15c\ud50c\ub9bf \uc0bd\uc785", +"Templates": "\ud15c\ud50c\ub9bf", +"Template": "\ud15c\ud50c\ub9bf", 
+"Text color": "\ubb38\uc790 \uc0c9\uae54", +"Background color": "\ubc30\uacbd\uc0c9", +"Custom...": "\uc9c1\uc811 \uc0c9\uae54 \uc9c0\uc815\ud558\uae30", +"Custom color": "\uc9c1\uc811 \uc9c0\uc815\ud55c \uc0c9\uae54", +"No color": "\uc0c9\uc0c1 \uc5c6\uc74c", +"Table of Contents": "\ubaa9\ucc28", +"Show blocks": "\ube14\ub7ed \ubcf4\uc5ec\uc8fc\uae30", +"Show invisible characters": "\uc548\ubcf4\uc774\ub294 \ubb38\uc790 \ubcf4\uc774\uae30", +"Words: {0}": "\ub2e8\uc5b4: {0}", +"{0} words": "{0} \ub2e8\uc5b4", +"File": "\ud30c\uc77c", +"Edit": "\uc218\uc815", +"Insert": "\uc0bd\uc785", +"View": "\ubcf4\uae30", +"Format": "\ud3ec\ub9f7", +"Table": "\ud14c\uc774\ube14", +"Tools": "\ub3c4\uad6c", +"Powered by {0}": "Powered by {0}", +"Rich Text Area. Press ALT-F9 for menu. Press ALT-F10 for toolbar. Press ALT-0 for help": "\uc11c\uc2dd \uc788\ub294 \ud14d\uc2a4\ud2b8 \ud3b8\uc9d1\uae30 \uc785\ub2c8\ub2e4. ALT-F9\ub97c \ub204\ub974\uba74 \uba54\ub274, ALT-F10\ub97c \ub204\ub974\uba74 \ud234\ubc14, ALT-0\uc744 \ub204\ub974\uba74 \ub3c4\uc6c0\ub9d0\uc744 \ubcfc \uc218 \uc788\uc2b5\ub2c8\ub2e4." +}); diff --git a/cps/static/js/main.js b/cps/static/js/main.js old mode 100644 new mode 100755 index 585d2296..04d47d6b --- a/cps/static/js/main.js +++ b/cps/static/js/main.js @@ -20,6 +20,20 @@ function getPath() { return jsFileLocation.substr(0, jsFileLocation.search("/static/js/libs/jquery.min.js")); // the js folder path } +function postButton(event, action){ + event.preventDefault(); + var newForm = jQuery('
      ', { + "action": action, + 'target': "_top", + 'method': "post" + }).append(jQuery('', { + 'name': 'csrf_token', + 'value': $("input[name=\'csrf_token\']").val(), + 'type': 'hidden' + })).appendTo('body'); + newForm.submit(); +} + function elementSorter(a, b) { a = +a.slice(0, -2); b = +b.slice(0, -2); @@ -71,6 +85,22 @@ $(document).on("change", "select[data-controlall]", function() { } }); +/*$(document).on("click", "#sendbtn", function (event) { + postButton(event, $(this).data('action')); +}); + +$(document).on("click", ".sendbutton", function (event) { + // $(".sendbutton").on("click", "body", function(event) { + postButton(event, $(this).data('action')); +});*/ + +$(document).on("click", ".postAction", function (event) { + // $(".sendbutton").on("click", "body", function(event) { + postButton(event, $(this).data('action')); +}); + + + // Syntax has to be bind not on, otherwise problems with firefox $(".container-fluid").bind("dragenter dragover", function () { if($("#btn-upload").length && !$('body').hasClass('shelforder')) { @@ -168,18 +198,18 @@ function confirmDialog(id, dialogid, dataValue, yesFn, noFn) { $confirm.modal('show'); } -$("#delete_confirm").click(function() { +$("#delete_confirm").click(function(event) { //get data-id attribute of the clicked element var deleteId = $(this).data("delete-id"); var bookFormat = $(this).data("delete-format"); var ajaxResponse = $(this).data("ajax"); if (bookFormat) { - window.location.href = getPath() + "/delete/" + deleteId + "/" + bookFormat; + postButton(event, getPath() + "/delete/" + deleteId + "/" + bookFormat); } else { if (ajaxResponse) { path = getPath() + "/ajax/delete/" + deleteId; $.ajax({ - method:"get", + method:"post", url: path, timeout: 900, success:function(data) { @@ -198,8 +228,7 @@ $("#delete_confirm").click(function() { } }); } else { - window.location.href = getPath() + "/delete/" + deleteId; - + postButton(event, getPath() + "/delete/" + deleteId); } } @@ -352,8 +381,8 @@ $(function() 
{ //extraScrollPx: 300 }); $loadMore.on( "append.infiniteScroll", function( event, response, path, data ) { + $(".pagination").addClass("hidden").html(() => $(response).find(".pagination").html()); if ($("body").hasClass("blur")) { - $(".pagination").addClass("hidden").html(() => $(response).find(".pagination").html()); $(" a:not(.dropdown-toggle) ") .removeAttr("data-toggle"); } @@ -376,9 +405,11 @@ $(function() { $("#restart").click(function() { $.ajax({ + method:"post", + contentType: "application/json; charset=utf-8", dataType: "json", - url: window.location.pathname + "/../../shutdown", - data: {"parameter":0}, + url: getPath() + "/shutdown", + data: JSON.stringify({"parameter":0}), success: function success() { $("#spinner").show(); setTimeout(restartTimer, 3000); @@ -387,9 +418,11 @@ $(function() { }); $("#shutdown").click(function() { $.ajax({ + method:"post", + contentType: "application/json; charset=utf-8", dataType: "json", - url: window.location.pathname + "/../../shutdown", - data: {"parameter":1}, + url: getPath() + "/shutdown", + data: JSON.stringify({"parameter":1}), success: function success(data) { return alert(data.text); } @@ -441,15 +474,28 @@ $(function() { } }); }); + $("#admin_refresh_cover_cache").click(function() { + confirmDialog("admin_refresh_cover_cache", "GeneralChangeModal", 0, function () { + $.ajax({ + method:"post", + contentType: "application/json; charset=utf-8", + dataType: "json", + url: getPath() + "/ajax/updateThumbnails", + }); + }); + }); + $("#restart_database").click(function() { $("#DialogHeader").addClass("hidden"); $("#DialogFinished").addClass("hidden"); $("#DialogContent").html(""); $("#spinner2").show(); $.ajax({ + method:"post", + contentType: "application/json; charset=utf-8", dataType: "json", url: getPath() + "/shutdown", - data: {"parameter":2}, + data: JSON.stringify({"parameter":2}), success: function success(data) { $("#spinner2").hide(); $("#DialogContent").html(data.text); @@ -480,6 +526,7 @@ $(function() 
{ $("#bookDetailsModal") .on("show.bs.modal", function(e) { + $("#flash_danger").remove(); var $modalBody = $(this).find(".modal-body"); // Prevent static assets from loading multiple times @@ -500,6 +547,7 @@ $(function() { $("#modal_kobo_token") .on("show.bs.modal", function(e) { + $(e.relatedTarget).one('focus', function(e){$(this).blur();}); var $modalBody = $(this).find(".modal-body"); // Prevent static assets from loading multiple times @@ -527,7 +575,7 @@ $(function() { $(this).data('value'), function (value) { $.ajax({ - method: "get", + method: "post", url: getPath() + "/kobo_auth/deleteauthtoken/" + value, }); $("#config_delete_kobo_token").hide(); @@ -574,7 +622,7 @@ $(function() { function(value){ path = getPath() + "/ajax/fullsync" $.ajax({ - method:"get", + method:"post", url: path, timeout: 900, success:function(data) { @@ -638,7 +686,7 @@ $(function() { else { $("#InvalidDialog").modal('show'); } - } else { + } else { changeDbSettings(); } } @@ -679,13 +727,14 @@ $(function() { }); }); - $("#delete_shelf").click(function() { + $("#delete_shelf").click(function(event) { confirmDialog( $(this).attr('id'), "GeneralDeleteModal", $(this).data('value'), function(value){ - window.location.href = window.location.pathname + "/../../shelf/delete/" + value + postButton(event, $("#delete_shelf").data("action")); + // $("#delete_shelf").closest("form").submit() } ); @@ -734,7 +783,8 @@ $(function() { $("#DialogContent").html(""); $("#spinner2").show(); $.ajax({ - method:"get", + method:"post", + contentType: "application/json; charset=utf-8", dataType: "json", url: getPath() + "/import_ldap_users", success: function success(data) { @@ -768,4 +818,3 @@ $(function() { }); }); }); - diff --git a/cps/static/js/table.js b/cps/static/js/table.js index 48c7631a..548ca8c4 100644 --- a/cps/static/js/table.js +++ b/cps/static/js/table.js @@ -15,7 +15,7 @@ * along with this program. If not, see . 
*/ -/* exported TableActions, RestrictionActions, EbookActions, responseHandler */ +/* exported TableActions, RestrictionActions, EbookActions, TaskActions, responseHandler */ /* global getPath, confirmDialog */ var selections = []; @@ -42,20 +42,38 @@ $(function() { }, 1000); } + $("#cancel_task_confirm").click(function() { + //get data-id attribute of the clicked element + var taskId = $(this).data("task-id"); + $.ajax({ + method: "post", + contentType: "application/json; charset=utf-8", + dataType: "json", + url: window.location.pathname + "/../ajax/canceltask", + data: JSON.stringify({"task_id": taskId}), + }); + }); + //triggered when modal is about to be shown + $("#cancelTaskModal").on("show.bs.modal", function(e) { + //get data-id attribute of the clicked element and store in button + var taskId = $(e.relatedTarget).data("task-id"); + $(e.currentTarget).find("#cancel_task_confirm").data("task-id", taskId); + }); + $("#books-table").on("check.bs.table check-all.bs.table uncheck.bs.table uncheck-all.bs.table", function (e, rowsAfter, rowsBefore) { var rows = rowsAfter; if (e.type === "uncheck-all") { - rows = rowsBefore; + selections = []; + } else { + var ids = $.map(!$.isArray(rows) ? [rows] : rows, function (row) { + return row.id; + }); + + var func = $.inArray(e.type, ["check", "check-all"]) > -1 ? "union" : "difference"; + selections = window._[func](selections, ids); } - - var ids = $.map(!$.isArray(rows) ? [rows] : rows, function (row) { - return row.id; - }); - - var func = $.inArray(e.type, ["check", "check-all"]) > -1 ? 
"union" : "difference"; - selections = window._[func](selections, ids); if (selections.length >= 2) { $("#merge_books").removeClass("disabled"); $("#merge_books").attr("aria-disabled", false); @@ -107,8 +125,9 @@ $(function() { url: window.location.pathname + "/../ajax/simulatemerge", data: JSON.stringify({"Merge_books":selections}), success: function success(booTitles) { + $('#merge_from').empty(); $.each(booTitles.from, function(i, item) { - $("- " + item + "").appendTo("#merge_from"); + $("- " + item + "

      ").appendTo("#merge_from"); }); $("#merge_to").text("- " + booTitles.to); @@ -531,7 +550,7 @@ $(function() { $("#user-table").on("click-cell.bs.table", function (field, value, row, $element) { if (value === "denied_column_value") { - ConfirmDialog("btndeluser", "GeneralDeleteModal", $element.id, user_handle); + confirmDialog("btndeluser", "GeneralDeleteModal", $element.id, user_handle); } }); @@ -540,14 +559,14 @@ $(function() { var rows = rowsAfter; if (e.type === "uncheck-all") { - rows = rowsBefore; + selections = []; + } else { + var ids = $.map(!$.isArray(rows) ? [rows] : rows, function (row) { + return row.id; + }); + var func = $.inArray(e.type, ["check", "check-all"]) > -1 ? "union" : "difference"; + selections = window._[func](selections, ids); } - - var ids = $.map(!$.isArray(rows) ? [rows] : rows, function (row) { - return row.id; - }); - var func = $.inArray(e.type, ["check", "check-all"]) > -1 ? "union" : "difference"; - selections = window._[func](selections, ids); handle_header_buttons(); }); }); @@ -581,6 +600,7 @@ function handle_header_buttons () { $(".header_select").removeAttr("disabled"); } } + /* Function for deleting domain restrictions */ function TableActions (value, row) { return [ @@ -618,6 +638,19 @@ function UserActions (value, row) { ].join(""); } +/* Function for cancelling tasks */ +function TaskActions (value, row) { + var cancellableStats = [0, 1, 2]; + if (row.task_id && row.is_cancellable && cancellableStats.includes(row.stat)) { + return [ + "
      ", + "", + "
      " + ].join(""); + } + return ''; +} + /* Function for keeping checked rows */ function responseHandler(res) { $.each(res.rows, function (i, row) { @@ -811,11 +844,13 @@ function checkboxChange(checkbox, userId, field, field_index) { function BookCheckboxChange(checkbox, userId, field) { var value = checkbox.checked ? "True" : "False"; + var element = checkbox; $.ajax({ method: "post", url: getPath() + "/ajax/editbooks/" + field, data: {"pk": userId, "value": value}, error: function(data) { + element.checked = !element.checked; handleListServerResponse([{type:"danger", message:data.responseText}]) }, success: handleListServerResponse diff --git a/cps/tasks/convert.py b/cps/tasks/convert.py index 59ad6909..3062850d 100644 --- a/cps/tasks/convert.py +++ b/cps/tasks/convert.py @@ -16,33 +16,35 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import sys import os import re - from glob import glob from shutil import copyfile from markupsafe import escape from sqlalchemy.exc import SQLAlchemyError +from flask_babel import lazy_gettext as N_ from cps.services.worker import CalibreTask from cps import db from cps import logger, config from cps.subproc_wrapper import process_open from flask_babel import gettext as _ -from flask import url_for +from cps.kobo_sync_status import remove_synced_book +from cps.ub import init_db_thread from cps.tasks.mail import TaskEmail from cps import gdriveutils + + log = logger.create() class TaskConvert(CalibreTask): - def __init__(self, file_path, bookid, taskMessage, settings, kindle_mail, user=None): - super(TaskConvert, self).__init__(taskMessage) + def __init__(self, file_path, book_id, task_message, settings, kindle_mail, user=None): + super(TaskConvert, self).__init__(task_message) self.file_path = file_path - self.bookid = bookid + self.book_id = book_id self.title = "" self.settings = settings self.kindle_mail = kindle_mail @@ -53,10 +55,10 @@ class 
TaskConvert(CalibreTask): def run(self, worker_thread): self.worker_thread = worker_thread if config.config_use_google_drive: - worker_db = db.CalibreDB(expire_on_commit=False) - cur_book = worker_db.get_book(self.bookid) + worker_db = db.CalibreDB(expire_on_commit=False, init=True) + cur_book = worker_db.get_book(self.book_id) self.title = cur_book.title - data = worker_db.get_book_format(self.bookid, self.settings['old_book_format']) + data = worker_db.get_book_format(self.book_id, self.settings['old_book_format']) df = gdriveutils.getFileFromEbooksFolder(cur_book.path, data.name + "." + self.settings['old_book_format'].lower()) if df: @@ -87,7 +89,7 @@ class TaskConvert(CalibreTask): # if we're sending to kindle after converting, create a one-off task and run it immediately # todo: figure out how to incorporate this into the progress try: - EmailText = _(u"%(book)s send to Kindle", book=escape(self.title)) + EmailText = N_(u"%(book)s send to Kindle", book=escape(self.title)) worker_thread.add(self.user, TaskEmail(self.settings['subject'], self.results["path"], filename, @@ -102,9 +104,9 @@ class TaskConvert(CalibreTask): def _convert_ebook_format(self): error_message = None - local_db = db.CalibreDB(expire_on_commit=False) + local_db = db.CalibreDB(expire_on_commit=False, init=True) file_path = self.file_path - book_id = self.bookid + book_id = self.book_id format_old_ext = u'.' + self.settings['old_book_format'].lower() format_new_ext = u'.' 
+ self.settings['new_book_format'].lower() @@ -112,15 +114,30 @@ class TaskConvert(CalibreTask): # if it does - mark the conversion task as complete and return a success # this will allow send to kindle workflow to continue to work if os.path.isfile(file_path + format_new_ext) or\ - local_db.get_book_format(self.bookid, self.settings['new_book_format']): + local_db.get_book_format(self.book_id, self.settings['new_book_format']): log.info("Book id %d already converted to %s", book_id, format_new_ext) cur_book = local_db.get_book(book_id) self.title = cur_book.title - self.results['path'] = file_path + self.results['path'] = cur_book.path self.results['title'] = self.title - self._handleSuccess() - local_db.session.close() - return os.path.basename(file_path + format_new_ext) + new_format = local_db.session.query(db.Data).filter(db.Data.book == book_id)\ + .filter(db.Data.format == self.settings['new_book_format'].upper()).one_or_none() + if not new_format: + new_format = db.Data(name=os.path.basename(file_path), + book_format=self.settings['new_book_format'].upper(), + book=book_id, uncompressed_size=os.path.getsize(file_path + format_new_ext)) + try: + local_db.session.merge(new_format) + local_db.session.commit() + except SQLAlchemyError as e: + local_db.session.rollback() + log.error("Database error: %s", e) + local_db.session.close() + self._handleError(N_("Database error: %(error)s.", error=e)) + return + self._handleSuccess() + local_db.session.close() + return os.path.basename(file_path + format_new_ext) else: log.info("Book id %d - target format of %s does not exist. 
Moving forward with convert.", book_id, @@ -133,26 +150,32 @@ class TaskConvert(CalibreTask): else: # check if calibre converter-executable is existing if not os.path.exists(config.config_converterpath): - # ToDo Text is not translated - self._handleError(_(u"Calibre ebook-convert %(tool)s not found", tool=config.config_converterpath)) + self._handleError(N_(u"Calibre ebook-convert %(tool)s not found", tool=config.config_converterpath)) return check, error_message = self._convert_calibre(file_path, format_old_ext, format_new_ext) if check == 0: cur_book = local_db.get_book(book_id) if os.path.isfile(file_path + format_new_ext): - new_format = db.Data(name=cur_book.data[0].name, + new_format = local_db.session.query(db.Data).filter(db.Data.book == book_id) \ + .filter(db.Data.format == self.settings['new_book_format'].upper()).one_or_none() + if not new_format: + new_format = db.Data(name=cur_book.data[0].name, book_format=self.settings['new_book_format'].upper(), book=book_id, uncompressed_size=os.path.getsize(file_path + format_new_ext)) - try: - local_db.session.merge(new_format) - local_db.session.commit() - except SQLAlchemyError as e: - local_db.session.rollback() - log.error("Database error: %s", e) - local_db.session.close() - self._handleError(error_message) - return + try: + local_db.session.merge(new_format) + local_db.session.commit() + if self.settings['new_book_format'].upper() in ['KEPUB', 'EPUB', 'EPUB3']: + ub_session = init_db_thread() + remove_synced_book(book_id, True, ub_session) + ub_session.close() + except SQLAlchemyError as e: + local_db.session.rollback() + log.error("Database error: %s", e) + local_db.session.close() + self._handleError(error_message) + return self.results['path'] = cur_book.path self.title = cur_book.title self.results['title'] = self.title @@ -160,11 +183,11 @@ class TaskConvert(CalibreTask): self._handleSuccess() return os.path.basename(file_path + format_new_ext) else: - error_message = _('%(format)s format not found 
on disk', format=format_new_ext.upper()) + error_message = N_('%(format)s format not found on disk', format=format_new_ext.upper()) local_db.session.close() log.info("ebook converter failed with error while converting book") if not error_message: - error_message = _('Ebook converter failed with unknown error') + error_message = N_('Ebook converter failed with unknown error') self._handleError(error_message) return @@ -174,7 +197,7 @@ class TaskConvert(CalibreTask): try: p = process_open(command, quotes) except OSError as e: - return 1, _(u"Kepubify-converter failed: %(error)s", error=e) + return 1, N_(u"Kepubify-converter failed: %(error)s", error=e) self.progress = 0.01 while True: nextline = p.stdout.readlines() @@ -195,7 +218,7 @@ class TaskConvert(CalibreTask): copyfile(converted_file[0], (file_path + format_new_ext)) os.unlink(converted_file[0]) else: - return 1, _(u"Converted file not found or more than one file in folder %(folder)s", + return 1, N_(u"Converted file not found or more than one file in folder %(folder)s", folder=os.path.dirname(file_path)) return check, None @@ -219,7 +242,7 @@ class TaskConvert(CalibreTask): p = process_open(command, quotes, newlines=False) except OSError as e: - return 1, _(u"Ebook-converter failed: %(error)s", error=e) + return 1, N_(u"Ebook-converter failed: %(error)s", error=e) while p.poll() is None: nextline = p.stdout.readline() @@ -242,12 +265,16 @@ class TaskConvert(CalibreTask): ele = ele.decode('utf-8', errors="ignore").strip('\n') log.debug(ele) if not ele.startswith('Traceback') and not ele.startswith(' File'): - error_message = _("Calibre failed with error: %(error)s", error=ele) + error_message = N_("Calibre failed with error: %(error)s", error=ele) return check, error_message @property def name(self): - return "Convert" + return N_("Convert") def __str__(self): - return "Convert {} {}".format(self.bookid, self.kindle_mail) + return "Convert {} {}".format(self.book_id, self.kindle_mail) + + @property + def 
is_cancellable(self): + return False diff --git a/cps/tasks/database.py b/cps/tasks/database.py new file mode 100644 index 00000000..afc4db2c --- /dev/null +++ b/cps/tasks/database.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2020 mmonkey +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from urllib.request import urlopen + +from flask_babel import lazy_gettext as N_ + +from cps import config, logger +from cps.services.worker import CalibreTask + + +class TaskReconnectDatabase(CalibreTask): + def __init__(self, task_message=N_('Reconnecting Calibre database')): + super(TaskReconnectDatabase, self).__init__(task_message) + self.log = logger.create() + self.listen_address = config.get_config_ipaddress() + self.listen_port = config.config_port + + + def run(self, worker_thread): + address = self.listen_address if self.listen_address else 'localhost' + port = self.listen_port if self.listen_port else 8083 + + try: + urlopen('http://' + address + ':' + str(port) + '/reconnect') + self._handleSuccess() + except Exception as ex: + self._handleError('Unable to reconnect Calibre database: ' + str(ex)) + + @property + def name(self): + return "Reconnect Database" + + @property + def is_cancellable(self): + return False diff --git a/cps/tasks/mail.py b/cps/tasks/mail.py old mode 100644 new mode 100755 index 
05b2175f..be240c79 --- a/cps/tasks/mail.py +++ b/cps/tasks/mail.py @@ -16,35 +16,25 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import sys import os import smtplib import threading import socket import mimetypes -try: - from StringIO import StringIO - from email.MIMEBase import MIMEBase - from email.MIMEMultipart import MIMEMultipart - from email.MIMEText import MIMEText -except ImportError: - from io import StringIO - from email.mime.base import MIMEBase - from email.mime.multipart import MIMEMultipart - from email.mime.text import MIMEText - - - -from email import encoders -from email.utils import formatdate, make_msgid +from io import StringIO +from email.message import EmailMessage +from email.utils import formatdate, parseaddr from email.generator import Generator +from flask_babel import lazy_gettext as N_ +from email.utils import formatdate from cps.services.worker import CalibreTask from cps.services import gmail from cps import logger, config from cps import gdriveutils +import uuid log = logger.create() @@ -119,31 +109,48 @@ class EmailSSL(EmailBase, smtplib.SMTP_SSL): class TaskEmail(CalibreTask): - def __init__(self, subject, filepath, attachment, settings, recipient, taskMessage, text, internal=False): - super(TaskEmail, self).__init__(taskMessage) + def __init__(self, subject, filepath, attachment, settings, recipient, task_message, text, internal=False): + super(TaskEmail, self).__init__(task_message) self.subject = subject self.attachment = attachment self.settings = settings self.filepath = filepath - self.recipent = recipient + self.recipient = recipient self.text = text self.asyncSMTP = None self.results = dict() + # from calibre code: + # https://github.com/kovidgoyal/calibre/blob/731ccd92a99868de3e2738f65949f19768d9104c/src/calibre/utils/smtp.py#L60 + def get_msgid_domain(self): + try: + # Parse out the address from the From line, and then the domain from that + from_email = 
parseaddr(self.settings["mail_from"])[1] + msgid_domain = from_email.partition('@')[2].strip() + # This can sometimes sneak through parseaddr if the input is malformed + msgid_domain = msgid_domain.rstrip('>').strip() + except Exception: + msgid_domain = '' + return msgid_domain or 'calibre-web.com' + def prepare_message(self): - message = MIMEMultipart() - message['to'] = self.recipent - message['from'] = self.settings["mail_from"] - message['subject'] = self.subject - message['Message-Id'] = make_msgid('calibre-web') + message = EmailMessage() + # message = MIMEMultipart() + message['From'] = self.settings["mail_from"] + message['To'] = self.recipient + message['Subject'] = self.subject message['Date'] = formatdate(localtime=True) - text = self.text - msg = MIMEText(text.encode('UTF-8'), 'plain', 'UTF-8') - message.attach(msg) + message['Message-Id'] = "{}@{}".format(uuid.uuid4(), self.get_msgid_domain()) # f"<{uuid.uuid4()}@{get_msgid_domain(from_)}>" # make_msgid('calibre-web') + message.set_content(self.text.encode('UTF-8'), "text", "plain") if self.attachment: - result = self._get_attachment(self.filepath, self.attachment) - if result: - message.attach(result) + data = self._get_attachment(self.filepath, self.attachment) + if data: + # Set mimetype + content_type, encoding = mimetypes.guess_type(self.attachment) + if content_type is None or encoding is not None: + content_type = 'application/octet-stream' + main_type, sub_type = content_type.split('/', 1) + message.add_attachment(data, maintype=main_type, subtype=sub_type, filename=self.attachment) else: self._handleError(u"Attachment not found") return @@ -158,10 +165,10 @@ class TaskEmail(CalibreTask): else: self.send_gmail_email(msg) except MemoryError as e: - log.debug_or_exception(e) + log.error_or_exception(e, stacklevel=3) self._handleError(u'MemoryError sending e-mail: {}'.format(str(e))) except (smtplib.SMTPException, smtplib.SMTPAuthenticationError) as e: - log.debug_or_exception(e) + 
log.error_or_exception(e, stacklevel=3) if hasattr(e, "smtp_error"): text = e.smtp_error.decode('utf-8').replace("\n", '. ') elif hasattr(e, "message"): @@ -171,11 +178,11 @@ class TaskEmail(CalibreTask): else: text = '' self._handleError(u'Smtplib Error sending e-mail: {}'.format(text)) - except socket.error as e: - log.debug_or_exception(e) + except (socket.error) as e: + log.error_or_exception(e, stacklevel=3) self._handleError(u'Socket Error sending e-mail: {}'.format(e.strerror)) except Exception as ex: - log.debug_or_exception(ex) + log.error_or_exception(ex, stacklevel=3) self._handleError(u'Error sending e-mail: {}'.format(ex)) def send_standard_email(self, msg): @@ -203,7 +210,7 @@ class TaskEmail(CalibreTask): gen = Generator(fp, mangle_from_=False) gen.flatten(msg) - self.asyncSMTP.sendmail(self.settings["mail_from"], self.recipent, fp.getvalue()) + self.asyncSMTP.sendmail(self.settings["mail_from"], self.recipient, fp.getvalue()) self.asyncSMTP.quit() self._handleSuccess() log.debug("E-mail send successfully") @@ -226,15 +233,15 @@ class TaskEmail(CalibreTask): self._progress = x @classmethod - def _get_attachment(cls, bookpath, filename): + def _get_attachment(cls, book_path, filename): """Get file as MIMEBase message""" calibre_path = config.config_calibre_dir if config.config_use_google_drive: - df = gdriveutils.getFileFromEbooksFolder(bookpath, filename) + df = gdriveutils.getFileFromEbooksFolder(book_path, filename) if df: - datafile = os.path.join(calibre_path, bookpath, filename) - if not os.path.exists(os.path.join(calibre_path, bookpath)): - os.makedirs(os.path.join(calibre_path, bookpath)) + datafile = os.path.join(calibre_path, book_path, filename) + if not os.path.exists(os.path.join(calibre_path, book_path)): + os.makedirs(os.path.join(calibre_path, book_path)) df.GetContentFile(datafile) else: return None @@ -244,27 +251,22 @@ class TaskEmail(CalibreTask): os.remove(datafile) else: try: - file_ = open(os.path.join(calibre_path, bookpath, 
filename), 'rb') + file_ = open(os.path.join(calibre_path, book_path, filename), 'rb') data = file_.read() file_.close() except IOError as e: - log.debug_or_exception(e) + log.error_or_exception(e, stacklevel=3) log.error(u'The requested file could not be read. Maybe wrong permissions?') return None - # Set mimetype - content_type, encoding = mimetypes.guess_type(filename) - if content_type is None or encoding is not None: - content_type = 'application/octet-stream' - main_type, sub_type = content_type.split('/', 1) - attachment = MIMEBase(main_type, sub_type) - attachment.set_payload(data) - encoders.encode_base64(attachment) - attachment.add_header('Content-Disposition', 'attachment', filename=filename) - return attachment + return data @property def name(self): - return "E-mail" + return N_("E-mail") + + @property + def is_cancellable(self): + return False def __str__(self): return "E-mail {}, {}".format(self.name, self.subject) diff --git a/cps/tasks/thumbnail.py b/cps/tasks/thumbnail.py new file mode 100644 index 00000000..ace9cecc --- /dev/null +++ b/cps/tasks/thumbnail.py @@ -0,0 +1,514 @@ +# -*- coding: utf-8 -*- + +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2020 monkey +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +from urllib.request import urlopen + +from .. 
import constants +from cps import config, db, fs, gdriveutils, logger, ub +from cps.services.worker import CalibreTask, STAT_CANCELLED, STAT_ENDED +from datetime import datetime +from sqlalchemy import func, text, or_ +from flask_babel import lazy_gettext as N_ + +try: + from wand.image import Image + use_IM = True +except (ImportError, RuntimeError) as e: + use_IM = False + + +def get_resize_height(resolution): + return int(225 * resolution) + + +def get_resize_width(resolution, original_width, original_height): + height = get_resize_height(resolution) + percent = (height / float(original_height)) + width = int((float(original_width) * float(percent))) + return width if width % 2 == 0 else width + 1 + + +def get_best_fit(width, height, image_width, image_height): + resize_width = int(width / 2.0) + resize_height = int(height / 2.0) + aspect_ratio = image_width / image_height + + # If this image's aspect ratio is different from the first image, then resize this image + # to fill the width and height of the first image + if aspect_ratio < width / height: + resize_width = int(width / 2.0) + resize_height = image_height * int(width / 2.0) / image_width + + elif aspect_ratio > width / height: + resize_width = image_width * int(height / 2.0) / image_height + resize_height = int(height / 2.0) + + return {'width': resize_width, 'height': resize_height} + + +class TaskGenerateCoverThumbnails(CalibreTask): + def __init__(self, book_id=-1, task_message=''): + super(TaskGenerateCoverThumbnails, self).__init__(task_message) + self.log = logger.create() + self.book_id = book_id + self.app_db_session = ub.get_new_session_instance() + # self.calibre_db = db.CalibreDB(expire_on_commit=False, init=True) + self.cache = fs.FileSystem() + self.resolutions = [ + constants.COVER_THUMBNAIL_SMALL, + constants.COVER_THUMBNAIL_MEDIUM + ] + + def run(self, worker_thread): + if use_IM and self.stat != STAT_CANCELLED and self.stat != STAT_ENDED: + self.message = 'Scanning Books' + 
books_with_covers = self.get_books_with_covers(self.book_id) + count = len(books_with_covers) + + total_generated = 0 + for i, book in enumerate(books_with_covers): + + # Generate new thumbnails for missing covers + generated = self.create_book_cover_thumbnails(book) + + # Increment the progress + self.progress = (1.0 / count) * i + + if generated > 0: + total_generated += generated + self.message = N_(u'Generated %(count)s cover thumbnails', count=total_generated) + + # Check if job has been cancelled or ended + if self.stat == STAT_CANCELLED: + self.log.info(f'GenerateCoverThumbnails task has been cancelled.') + return + + if self.stat == STAT_ENDED: + self.log.info(f'GenerateCoverThumbnails task has been ended.') + return + + if total_generated == 0: + self.self_cleanup = True + + self._handleSuccess() + self.app_db_session.remove() + + def get_books_with_covers(self, book_id=-1): + filter_exp = (db.Books.id == book_id) if book_id != -1 else True + calibre_db = db.CalibreDB(expire_on_commit=False, init=True) + books_cover = calibre_db.session.query(db.Books).filter(db.Books.has_cover == 1).filter(filter_exp).all() + calibre_db.session.close() + return books_cover + + def get_book_cover_thumbnails(self, book_id): + return self.app_db_session \ + .query(ub.Thumbnail) \ + .filter(ub.Thumbnail.type == constants.THUMBNAIL_TYPE_COVER) \ + .filter(ub.Thumbnail.entity_id == book_id) \ + .filter(or_(ub.Thumbnail.expiration.is_(None), ub.Thumbnail.expiration > datetime.utcnow())) \ + .all() + + def create_book_cover_thumbnails(self, book): + generated = 0 + book_cover_thumbnails = self.get_book_cover_thumbnails(book.id) + + # Generate new thumbnails for missing covers + resolutions = list(map(lambda t: t.resolution, book_cover_thumbnails)) + missing_resolutions = list(set(self.resolutions).difference(resolutions)) + for resolution in missing_resolutions: + generated += 1 + self.create_book_cover_single_thumbnail(book, resolution) + + # Replace outdated or missing 
thumbnails + for thumbnail in book_cover_thumbnails: + if book.last_modified > thumbnail.generated_at: + generated += 1 + self.update_book_cover_thumbnail(book, thumbnail) + + elif not self.cache.get_cache_file_exists(thumbnail.filename, constants.CACHE_TYPE_THUMBNAILS): + generated += 1 + self.update_book_cover_thumbnail(book, thumbnail) + return generated + + def create_book_cover_single_thumbnail(self, book, resolution): + thumbnail = ub.Thumbnail() + thumbnail.type = constants.THUMBNAIL_TYPE_COVER + thumbnail.entity_id = book.id + thumbnail.format = 'jpeg' + thumbnail.resolution = resolution + + self.app_db_session.add(thumbnail) + try: + self.app_db_session.commit() + self.generate_book_thumbnail(book, thumbnail) + except Exception as ex: + self.log.debug('Error creating book thumbnail: ' + str(ex)) + self._handleError('Error creating book thumbnail: ' + str(ex)) + self.app_db_session.rollback() + + def update_book_cover_thumbnail(self, book, thumbnail): + thumbnail.generated_at = datetime.utcnow() + + try: + self.app_db_session.commit() + self.cache.delete_cache_file(thumbnail.filename, constants.CACHE_TYPE_THUMBNAILS) + self.generate_book_thumbnail(book, thumbnail) + except Exception as ex: + self.log.debug('Error updating book thumbnail: ' + str(ex)) + self._handleError('Error updating book thumbnail: ' + str(ex)) + self.app_db_session.rollback() + + def generate_book_thumbnail(self, book, thumbnail): + if book and thumbnail: + if config.config_use_google_drive: + if not gdriveutils.is_gdrive_ready(): + raise Exception('Google Drive is configured but not ready') + + web_content_link = gdriveutils.get_cover_via_gdrive(book.path) + if not web_content_link: + raise Exception('Google Drive cover url not found') + + stream = None + try: + stream = urlopen(web_content_link) + with Image(file=stream) as img: + height = get_resize_height(thumbnail.resolution) + if img.height > height: + width = get_resize_width(thumbnail.resolution, img.width, img.height) + 
img.resize(width=width, height=height, filter='lanczos') + img.format = thumbnail.format + filename = self.cache.get_cache_file_path(thumbnail.filename, + constants.CACHE_TYPE_THUMBNAILS) + img.save(filename=filename) + except Exception as ex: + # Bubble exception to calling function + self.log.debug('Error generating thumbnail file: ' + str(ex)) + raise ex + finally: + if stream is not None: + stream.close() + else: + book_cover_filepath = os.path.join(config.config_calibre_dir, book.path, 'cover.jpg') + if not os.path.isfile(book_cover_filepath): + raise Exception('Book cover file not found') + + with Image(filename=book_cover_filepath) as img: + height = get_resize_height(thumbnail.resolution) + if img.height > height: + width = get_resize_width(thumbnail.resolution, img.width, img.height) + img.resize(width=width, height=height, filter='lanczos') + img.format = thumbnail.format + filename = self.cache.get_cache_file_path(thumbnail.filename, constants.CACHE_TYPE_THUMBNAILS) + img.save(filename=filename) + + @property + def name(self): + return N_('Cover Thumbnails') + + def __str__(self): + if self.book_id > 0: + return "Add Cover Thumbnails for Book {}".format(self.book_id) + else: + return "Generate Cover Thumbnails" + + @property + def is_cancellable(self): + return True + + +class TaskGenerateSeriesThumbnails(CalibreTask): + def __init__(self, task_message=''): + super(TaskGenerateSeriesThumbnails, self).__init__(task_message) + self.log = logger.create() + self.app_db_session = ub.get_new_session_instance() + self.calibre_db = db.CalibreDB(expire_on_commit=False, init=True) + self.cache = fs.FileSystem() + self.resolutions = [ + constants.COVER_THUMBNAIL_SMALL, + constants.COVER_THUMBNAIL_MEDIUM, + ] + + def run(self, worker_thread): + if self.calibre_db.session and use_IM and self.stat != STAT_CANCELLED and self.stat != STAT_ENDED: + self.message = 'Scanning Series' + all_series = self.get_series_with_four_plus_books() + count = len(all_series) + + 
total_generated = 0 + for i, series in enumerate(all_series): + generated = 0 + series_thumbnails = self.get_series_thumbnails(series.id) + series_books = self.get_series_books(series.id) + + # Generate new thumbnails for missing covers + resolutions = list(map(lambda t: t.resolution, series_thumbnails)) + missing_resolutions = list(set(self.resolutions).difference(resolutions)) + for resolution in missing_resolutions: + generated += 1 + self.create_series_thumbnail(series, series_books, resolution) + + # Replace outdated or missing thumbnails + for thumbnail in series_thumbnails: + if any(book.last_modified > thumbnail.generated_at for book in series_books): + generated += 1 + self.update_series_thumbnail(series_books, thumbnail) + + elif not self.cache.get_cache_file_exists(thumbnail.filename, constants.CACHE_TYPE_THUMBNAILS): + generated += 1 + self.update_series_thumbnail(series_books, thumbnail) + + # Increment the progress + self.progress = (1.0 / count) * i + + if generated > 0: + total_generated += generated + self.message = N_('Generated {0} series thumbnails').format(total_generated) + + # Check if job has been cancelled or ended + if self.stat == STAT_CANCELLED: + self.log.info(f'GenerateSeriesThumbnails task has been cancelled.') + return + + if self.stat == STAT_ENDED: + self.log.info(f'GenerateSeriesThumbnails task has been ended.') + return + + if total_generated == 0: + self.self_cleanup = True + + self._handleSuccess() + self.app_db_session.remove() + + def get_series_with_four_plus_books(self): + return self.calibre_db.session \ + .query(db.Series) \ + .join(db.books_series_link) \ + .join(db.Books) \ + .filter(db.Books.has_cover == 1) \ + .group_by(text('books_series_link.series')) \ + .having(func.count('book_series_link') > 3) \ + .all() + + def get_series_books(self, series_id): + return self.calibre_db.session \ + .query(db.Books) \ + .join(db.books_series_link) \ + .join(db.Series) \ + .filter(db.Books.has_cover == 1) \ + 
.filter(db.Series.id == series_id) \ + .all() + + def get_series_thumbnails(self, series_id): + return self.app_db_session \ + .query(ub.Thumbnail) \ + .filter(ub.Thumbnail.type == constants.THUMBNAIL_TYPE_SERIES) \ + .filter(ub.Thumbnail.entity_id == series_id) \ + .filter(or_(ub.Thumbnail.expiration.is_(None), ub.Thumbnail.expiration > datetime.utcnow())) \ + .all() + + def create_series_thumbnail(self, series, series_books, resolution): + thumbnail = ub.Thumbnail() + thumbnail.type = constants.THUMBNAIL_TYPE_SERIES + thumbnail.entity_id = series.id + thumbnail.format = 'jpeg' + thumbnail.resolution = resolution + + self.app_db_session.add(thumbnail) + try: + self.app_db_session.commit() + self.generate_series_thumbnail(series_books, thumbnail) + except Exception as ex: + self.log.debug('Error creating book thumbnail: ' + str(ex)) + self._handleError('Error creating book thumbnail: ' + str(ex)) + self.app_db_session.rollback() + + def update_series_thumbnail(self, series_books, thumbnail): + thumbnail.generated_at = datetime.utcnow() + + try: + self.app_db_session.commit() + self.cache.delete_cache_file(thumbnail.filename, constants.CACHE_TYPE_THUMBNAILS) + self.generate_series_thumbnail(series_books, thumbnail) + except Exception as ex: + self.log.debug('Error updating book thumbnail: ' + str(ex)) + self._handleError('Error updating book thumbnail: ' + str(ex)) + self.app_db_session.rollback() + + def generate_series_thumbnail(self, series_books, thumbnail): + # Get the last four books in the series based on series_index + books = sorted(series_books, key=lambda b: float(b.series_index), reverse=True)[:4] + + top = 0 + left = 0 + width = 0 + height = 0 + with Image() as canvas: + for book in books: + if config.config_use_google_drive: + if not gdriveutils.is_gdrive_ready(): + raise Exception('Google Drive is configured but not ready') + + web_content_link = gdriveutils.get_cover_via_gdrive(book.path) + if not web_content_link: + raise Exception('Google Drive 
cover url not found') + + stream = None + try: + stream = urlopen(web_content_link) + with Image(file=stream) as img: + # Use the first image in this set to determine the width and height to scale the + # other images in this set + if width == 0 or height == 0: + width = get_resize_width(thumbnail.resolution, img.width, img.height) + height = get_resize_height(thumbnail.resolution) + canvas.blank(width, height) + + dimensions = get_best_fit(width, height, img.width, img.height) + + # resize and crop the image + img.resize(width=int(dimensions['width']), height=int(dimensions['height']), + filter='lanczos') + img.crop(width=int(width / 2.0), height=int(height / 2.0), gravity='center') + + # add the image to the canvas + canvas.composite(img, left, top) + + except Exception as ex: + self.log.debug('Error generating thumbnail file: ' + str(ex)) + raise ex + finally: + if stream is not None: + stream.close() + + book_cover_filepath = os.path.join(config.config_calibre_dir, book.path, 'cover.jpg') + if not os.path.isfile(book_cover_filepath): + raise Exception('Book cover file not found') + + with Image(filename=book_cover_filepath) as img: + # Use the first image in this set to determine the width and height to scale the + # other images in this set + if width == 0 or height == 0: + width = get_resize_width(thumbnail.resolution, img.width, img.height) + height = get_resize_height(thumbnail.resolution) + canvas.blank(width, height) + + dimensions = get_best_fit(width, height, img.width, img.height) + + # resize and crop the image + img.resize(width=int(dimensions['width']), height=int(dimensions['height']), filter='lanczos') + img.crop(width=int(width / 2.0), height=int(height / 2.0), gravity='center') + + # add the image to the canvas + canvas.composite(img, left, top) + + # set the coordinates for the next iteration + if left == 0 and top == 0: + left = int(width / 2.0) + elif left == int(width / 2.0) and top == 0: + left = 0 + top = int(height / 2.0) + else: + left = 
int(width / 2.0) + + canvas.format = thumbnail.format + filename = self.cache.get_cache_file_path(thumbnail.filename, constants.CACHE_TYPE_THUMBNAILS) + canvas.save(filename=filename) + + @property + def name(self): + return N_('Cover Thumbnails') + + def __str__(self): + return "GenerateSeriesThumbnails" + + @property + def is_cancellable(self): + return True + + +class TaskClearCoverThumbnailCache(CalibreTask): + def __init__(self, book_id, task_message=N_('Clearing cover thumbnail cache')): + super(TaskClearCoverThumbnailCache, self).__init__(task_message) + self.log = logger.create() + self.book_id = book_id + self.app_db_session = ub.get_new_session_instance() + self.cache = fs.FileSystem() + + def run(self, worker_thread): + if self.app_db_session: + if self.book_id == 0: # delete superfluous thumbnails + calibre_db = db.CalibreDB(expire_on_commit=False, init=True) + thumbnails = (calibre_db.session.query(ub.Thumbnail) + .join(db.Books, ub.Thumbnail.entity_id == db.Books.id, isouter=True) + .filter(db.Books.id == None) + .all()) + calibre_db.session.close() + elif self.book_id > 0: # make sure single book is selected + thumbnails = self.get_thumbnails_for_book(self.book_id) + if self.book_id < 0: + self.delete_all_thumbnails() + else: + for thumbnail in thumbnails: + self.delete_thumbnail(thumbnail) + self._handleSuccess() + self.app_db_session.remove() + + def get_thumbnails_for_book(self, book_id): + return self.app_db_session \ + .query(ub.Thumbnail) \ + .filter(ub.Thumbnail.type == constants.THUMBNAIL_TYPE_COVER) \ + .filter(ub.Thumbnail.entity_id == book_id) \ + .all() + + def delete_thumbnail(self, thumbnail): + try: + self.cache.delete_cache_file(thumbnail.filename, constants.CACHE_TYPE_THUMBNAILS) + self.app_db_session \ + .query(ub.Thumbnail) \ + .filter(ub.Thumbnail.type == constants.THUMBNAIL_TYPE_COVER) \ + .filter(ub.Thumbnail.entity_id == thumbnail.entity_id) \ + .delete() + self.app_db_session.commit() + except Exception as ex: + 
self.log.debug('Error deleting book thumbnail: ' + str(ex)) + self._handleError('Error deleting book thumbnail: ' + str(ex)) + + def delete_all_thumbnails(self): + try: + self.app_db_session.query(ub.Thumbnail).filter(ub.Thumbnail.type == constants.THUMBNAIL_TYPE_COVER).delete() + self.app_db_session.commit() + self.cache.delete_cache_dir(constants.CACHE_TYPE_THUMBNAILS) + except Exception as ex: + self.log.debug('Error deleting thumbnail directory: ' + str(ex)) + self._handleError('Error deleting thumbnail directory: ' + str(ex)) + + @property + def name(self): + return N_('Cover Thumbnails') + + # needed for logging + def __str__(self): + if self.book_id > 0: + return "Replace/Delete Cover Thumbnails for book " + str(self.book_id) + else: + return "Delete Thumbnail cache directory" + + @property + def is_cancellable(self): + return False diff --git a/cps/tasks/upload.py b/cps/tasks/upload.py index 2a667c28..bc8ba1e0 100644 --- a/cps/tasks/upload.py +++ b/cps/tasks/upload.py @@ -17,21 +17,29 @@ # along with this program. If not, see . 
from datetime import datetime + +from flask_babel import lazy_gettext as N_ + from cps.services.worker import CalibreTask, STAT_FINISH_SUCCESS class TaskUpload(CalibreTask): - def __init__(self, taskMessage): - super(TaskUpload, self).__init__(taskMessage) + def __init__(self, task_message, book_title): + super(TaskUpload, self).__init__(task_message) self.start_time = self.end_time = datetime.now() self.stat = STAT_FINISH_SUCCESS self.progress = 1 + self.book_title = book_title def run(self, worker_thread): """Upload task doesn't have anything to do, it's simply a way to add information to the task list""" @property def name(self): - return "Upload" + return N_("Upload") def __str__(self): - return "Upload {}".format(self.message) + return "Upload {}".format(self.book_title) + + @property + def is_cancellable(self): + return False diff --git a/cps/tasks_status.py b/cps/tasks_status.py new file mode 100644 index 00000000..e5f91975 --- /dev/null +++ b/cps/tasks_status.py @@ -0,0 +1,106 @@ +# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) +# Copyright (C) 2022 OzzieIsaacs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from markupsafe import escape + +from flask import Blueprint, jsonify +from flask_login import login_required, current_user +from flask_babel import gettext as _ +from flask_babel import format_datetime +from babel.units import format_unit + +from . 
import logger +from .render_template import render_title_template +from .services.worker import WorkerThread, STAT_WAITING, STAT_FAIL, STAT_STARTED, STAT_FINISH_SUCCESS, STAT_ENDED, \ + STAT_CANCELLED + +tasks = Blueprint('tasks', __name__) + +log = logger.create() + + +@tasks.route("/ajax/emailstat") +@login_required +def get_email_status_json(): + tasks = WorkerThread.get_instance().tasks + return jsonify(render_task_status(tasks)) + + +@tasks.route("/tasks") +@login_required +def get_tasks_status(): + # if current user admin, show all email, otherwise only own emails + tasks = WorkerThread.get_instance().tasks + answer = render_task_status(tasks) + return render_title_template('tasks.html', entries=answer, title=_(u"Tasks"), page="tasks") + + +# helper function to apply localize status information in tasklist entries +def render_task_status(tasklist): + rendered_tasklist = list() + for __, user, __, task, __ in tasklist: + if user == current_user.name or current_user.role_admin(): + ret = {} + if task.start_time: + ret['starttime'] = format_datetime(task.start_time, format='short') + ret['runtime'] = format_runtime(task.runtime) + + # localize the task status + if isinstance(task.stat, int): + if task.stat == STAT_WAITING: + ret['status'] = _(u'Waiting') + elif task.stat == STAT_FAIL: + ret['status'] = _(u'Failed') + elif task.stat == STAT_STARTED: + ret['status'] = _(u'Started') + elif task.stat == STAT_FINISH_SUCCESS: + ret['status'] = _(u'Finished') + elif task.stat == STAT_ENDED: + ret['status'] = _(u'Ended') + elif task.stat == STAT_CANCELLED: + ret['status'] = _(u'Cancelled') + else: + ret['status'] = _(u'Unknown Status') + + ret['taskMessage'] = "{}: {}".format(task.name, task.message) if task.message else task.name + ret['progress'] = "{} %".format(int(task.progress * 100)) + ret['user'] = escape(user) # prevent xss + + # Hidden fields + ret['task_id'] = task.id + ret['stat'] = task.stat + ret['is_cancellable'] = task.is_cancellable + + 
rendered_tasklist.append(ret) + + return rendered_tasklist + + +# helper function for displaying the runtime of tasks +def format_runtime(runtime): + ret_val = "" + if runtime.days: + ret_val = format_unit(runtime.days, 'duration-day', length="long") + ', ' + minutes, seconds = divmod(runtime.seconds, 60) + hours, minutes = divmod(minutes, 60) + # ToDo: locale.number_symbols._data['timeSeparator'] -> localize time separator ? + if hours: + ret_val += '{:d}:{:02d}:{:02d}s'.format(hours, minutes, seconds) + elif minutes: + ret_val += '{:2d}:{:02d}s'.format(minutes, seconds) + else: + ret_val += '{:2d}s'.format(seconds) + return ret_val diff --git a/cps/templates/admin.html b/cps/templates/admin.html index 9728cb6d..f1c3749a 100644 --- a/cps/templates/admin.html +++ b/cps/templates/admin.html @@ -47,7 +47,9 @@ {% endfor %} {% endif %} - {{_('Edit Users')}} + {% if not simple %} + {{_('Edit Users')}} + {% endif %} {{_('Add New User')}} {% if (config.config_login_type == 1) %}
      {{_('Import LDAP Users')}}
      @@ -159,21 +161,56 @@ {{_('Edit UI Configuration')}}
      +{% if feature_support['scheduler'] %} +
      +
      +

      {{_('Scheduled Tasks')}}

      +
      +
      +
      {{_('Time at which tasks start to run')}}
      +
      {{schedule_time}}
      +
      +
      +
      {{_('Maximum tasks duration')}}
      +
      {{schedule_duration}}
      +
      +
      +
      {{_('Generate book cover thumbnails')}}
      +
      {{ display_bool_setting(config.schedule_generate_book_covers) }}
      +
      + +
      +
      {{_('Reconnect to Calibre Library')}}
      +
      {{ display_bool_setting(config.schedule_reconnect) }}
      +
      -
      -

      {{_('Administration')}}

      - {{_('Download Debug Package')}} - {{_('View Logs')}} +
      + {{_('Edit Scheduled Tasks Settings')}} + {% if config.schedule_generate_book_covers %} + {{_('Refresh Thumbnail Cover Cache')}} + {% endif %}
      -
      -
      {{_('Reconnect Calibre Database')}}
      -
      {{_('Restart')}}
      -
      {{_('Shutdown')}}
      +
      +{% endif %} +
      +

      {{_('Administration')}}

      + {{_('Download Debug Package')}} + {{_('View Logs')}} +
      +
      +
      {{_('Reconnect Calibre Database')}}
      +
      +
      +
      {{_('Restart')}}
      +
      {{_('Shutdown')}}
      -

      {{_('Update')}}

      +

      {{_('Version Information')}}

      @@ -188,9 +225,8 @@
      - - {% if feature_support['updater'] %} + {% if feature_support['updater'] %}
      {{_('Check for Update')}}
      @@ -251,3 +287,6 @@
      {% endblock %} +{% block modal %} +{{ change_confirm_modal() }} +{% endblock %} diff --git a/cps/templates/author.html b/cps/templates/author.html index f2b71eab..b991e959 100644 --- a/cps/templates/author.html +++ b/cps/templates/author.html @@ -5,11 +5,11 @@ {% if author is not none %}
      {%if author.image_url is not none %} - {{author.name|safe}} + {{author.name}} {% endif %} {%if author.about is not none %} -

      {{author.about|safe}}

      +

      {{author.about}}

      {% endif %} - {{_("via")}} Goodreads @@ -31,28 +31,27 @@
      - {% if entries[0] %} {% for entry in entries %}
      - -

      {{entry.title|shortentitle}}

      +
      +

      {{entry.Books.title|shortentitle}}

      - {% for author in entry.authors %} + {% for author in entry.Books.authors %} {% if loop.index > g.config_authors_max and g.config_authors_max != 0 %} {% if not loop.first %} & {% endif %} - {{author.name.replace('|',',')|shortentitle(30)}} + {{author.name.replace('|',',')|shortentitle(30)}} {% if loop.last %} (...) {% endif %} @@ -60,26 +59,26 @@ {% if not loop.first %} & {% endif %} - {{author.name.replace('|',',')|shortentitle(30)}} + {{author.name.replace('|',',')|shortentitle(30)}} {% endif %} {% endfor %} - {% for format in entry.data %} + {% for format in entry.Books.data %} {% if format.format|lower in g.constants.EXTENSIONS_AUDIO %} {% endif %} {% endfor %}

      - {% if entry.series.__len__() > 0 %} + {% if entry.Books.series.__len__() > 0 %}

      - - {{entry.series[0].name}} + + {{entry.Books.series[0].name}} - ({{entry.series_index|formatseriesindex}}) + ({{entry.Books.series_index|formatseriesindex}})

      {% endif %} - {% if entry.ratings.__len__() > 0 %} + {% if entry.Books.ratings.__len__() > 0 %}
      - {% for number in range((entry.ratings[0].rating/2)|int(2)) %} + {% for number in range((entry.Books.ratings[0].rating/2)|int(2)) %} {% if loop.last and loop.index < 5 %} {% for numer in range(5 - loop.index) %} @@ -92,13 +91,12 @@
      {% endfor %} - {% endif %}
      {% if other_books and author is not none %}
      -

      {{_("More by")}} {{ author.name.replace('|',',')|safe }}

      +

      {{_("More by")}} {{ author.name.replace('|',',') }}

      {% for entry in other_books %}
      @@ -123,7 +121,7 @@

      {% if entry.series.__len__() > 0 %}

      - + {{entry.series[0].name}} ({{entry.series_index|formatseriesindex}}) diff --git a/cps/templates/book_edit.html b/cps/templates/book_edit.html index cac3219c..3ce1dbfa 100644 --- a/cps/templates/book_edit.html +++ b/cps/templates/book_edit.html @@ -3,7 +3,8 @@ {% if book %}

      - {{ book.title }} + +
      {% if g.user.role_delete_books() %}
      @@ -22,7 +23,7 @@ {% if source_formats|length > 0 and conversion_formats|length > 0 %}

      {{_('Convert book format:')}}

      - +
      @@ -48,7 +49,7 @@ {% endif %}
      - +
      @@ -226,7 +227,7 @@
      @@ -265,17 +266,17 @@ >

      - <%= title %> + <%= title %>

      -

      {{_('Author')}}:<%= authors.join(" & ") %>

      +

      {{_('Author')}}:<%= authors.join(" & ") %>

      <% if (publisher) { %> -

      {{_('Publisher')}}:<%= publisher %>

      +

      {{_('Publisher')}}:<%= publisher %>

      <% } %> <% if (description) { %> -

      {{_('Description')}}: <%= description %>

      +

      {{_('Description')}}: <%= description %>

      <% } %>

      {{_('Source')}}: - <%= source.description %> + <%= source.description %>

      diff --git a/cps/templates/book_exists_flash.html b/cps/templates/book_exists_flash.html index b0855120..b55192ce 100644 --- a/cps/templates/book_exists_flash.html +++ b/cps/templates/book_exists_flash.html @@ -1,3 +1,3 @@ - + {{entry.title|shortentitle}} - \ No newline at end of file + diff --git a/cps/templates/book_table.html b/cps/templates/book_table.html index c842811b..9ba173bb 100644 --- a/cps/templates/book_table.html +++ b/cps/templates/book_table.html @@ -6,7 +6,7 @@ data-escape="true" {% if g.user.role_edit() %} data-editable-type="text" - data-editable-url="{{ url_for('editbook.edit_list_book', param=parameter)}}" + data-editable-url="{{ url_for('edit-book.edit_list_book', param=parameter)}}" data-editable-title="{{ edit_text }}" data-edit="true" {% if validate %}data-edit-validate="{{ _('This Field is Required') }}" {% endif %} @@ -66,30 +66,30 @@ {{ text_table_row('authors', _('Enter Authors'),_('Authors'), true, true) }} {{ text_table_row('tags', _('Enter Categories'),_('Categories'), false, true) }} {{ text_table_row('series', _('Enter Series'),_('Series'), false, true) }} - {{_('Series Index')}} + {{_('Series Index')}} {{ text_table_row('languages', _('Enter Languages'),_('Languages'), false, true) }} {{ text_table_row('publishers', _('Enter Publishers'),_('Publishers'), false, true) }} - {{_('Comments')}} + {{_('Comments')}} {% if g.user.check_visibility(32768) %} {{ book_checkbox_row('is_archived', _('Archiv Status'), false)}} {% endif %} {{ book_checkbox_row('read_status', _('Read Status'), false)}} {% for c in cc %} {% if c.datatype == "int" %} - {{c.name}} + {{c.name}} {% elif c.datatype == "rating" %} - {{c.name}} + {{c.name}} {% elif c.datatype == "float" %} - {{c.name}} + {{c.name}} {% elif c.datatype == "enumeration" %} - {{c.name}} + {{c.name}} {% elif c.datatype in ["datetime"] %} {% elif c.datatype == "text" %} {{ text_table_row('custom_column_' + c.id|string, _('Enter ') + c.name, c.name, false, false) }} {% elif c.datatype == 
"comments" %} - {{c.name}} + {{c.name}} {% elif c.datatype == "bool" %} {{ book_checkbox_row('custom_column_' + c.id|string, c.name, false)}} {% else %} @@ -123,8 +123,8 @@
      diff --git a/cps/templates/config_view_edit.html b/cps/templates/config_view_edit.html index 1dc02a5e..e4fea44d 100644 --- a/cps/templates/config_view_edit.html +++ b/cps/templates/config_view_edit.html @@ -162,8 +162,10 @@
      + {% if not simple %} {{_('Add Allowed/Denied Tags')}} {{_('Add Allowed/Denied custom column values')}} + {% endif %}
      diff --git a/cps/templates/detail.html b/cps/templates/detail.html index 919afaff..3af205fb 100644 --- a/cps/templates/detail.html +++ b/cps/templates/detail.html @@ -4,7 +4,8 @@
      - {{ entry.title }} + +
      @@ -38,7 +39,7 @@ {% endif %} {% if g.user.kindle_mail and entry.kindle_list %} {% if entry.kindle_list.__len__() == 1 %} - {{entry.kindle_list[0]['text']}} +
      {{entry.kindle_list[0]['text']}}
      {% else %}
      @@ -70,9 +71,9 @@ {% endif %}
      {% endif %} - {% if entry.audioentries|length > 0 and g.user.role_viewer() %} + {% if entry.audio_entries|length > 0 and g.user.role_viewer() %}
      - {% if entry.audioentries|length > 1 %} + {% if entry.audio_entries|length > 1 %}
      {% endif %} @@ -99,7 +100,7 @@

      {{entry.title}}

      - {% for author in entry.authors %} + {% for author in entry.ordered_authors %} {{author.name.replace('|',',')}} {% if not loop.last %} & @@ -138,7 +139,7 @@

      {% for identifier in entry.identifiers %} - {{identifier.formatType()}} + {{identifier.format_type()}} {%endfor%}

      @@ -260,7 +261,7 @@ {% for shelf in g.shelves_access %} {% if not shelf.id in books_shelfs and ( not shelf.is_public or g.user.role_edit_shelfs() ) %}
    • - @@ -275,7 +276,7 @@ {% if books_shelfs %} {% for shelf in g.shelves_access %} {% if shelf.id in books_shelfs %} - @@ -295,7 +296,7 @@ {% if g.user.role_edit() %} {% endif %} @@ -309,13 +310,13 @@ {% block js %} diff --git a/cps/templates/discover.html b/cps/templates/discover.html deleted file mode 100644 index 74448b98..00000000 --- a/cps/templates/discover.html +++ /dev/null @@ -1,65 +0,0 @@ -{% extends "layout.html" %} -{% block body %} -
      -

      {{title}}

      -
      - {% for entry in entries %} -
      -
      - {% if entry.has_cover is defined %} - - - {{ entry.title }} - {% if entry.id in read_book_ids %}{% endif %} - - - {% endif %} -
      -
      - -

      {{entry.title|shortentitle}}

      -
      -

      - {% for author in entry.authors %} - {% if loop.index > g.config_authors_max and g.config_authors_max != 0 %} - {% if not loop.first %} - & - {% endif %} - {{author.name.replace('|',',')|shortentitle(30)}} - {% if loop.last %} - (...) - {% endif %} - {% else %} - {% if not loop.first %} - & - {% endif %} - {{author.name.replace('|',',')|shortentitle(30)}} - {% endif %} - {% endfor %} -

      - {% if entry.series.__len__() > 0 %} -

      - - {{entry.series[0].name}} - - ({{entry.series_index|formatseriesindex}}) -

      - {% endif %} - {% if entry.ratings.__len__() > 0 %} -
      - {% for number in range((entry.ratings[0].rating/2)|int(2)) %} - - {% if loop.last and loop.index < 5 %} - {% for numer in range(5 - loop.index) %} - - {% endfor %} - {% endif %} - {% endfor %} -
      - {% endif %} -
      -
      - {% endfor %} -
      -
      -{% endblock %} diff --git a/cps/templates/email_edit.html b/cps/templates/email_edit.html index 9f23f78b..2a844209 100644 --- a/cps/templates/email_edit.html +++ b/cps/templates/email_edit.html @@ -69,7 +69,7 @@ {% endif %} {{_('Back')}} - {% if g.allow_registration %} + {% if g.allow_registration and not simple%}

      {{_('Allowed Domains (Whitelist)')}}

      diff --git a/cps/templates/feed.xml b/cps/templates/feed.xml index 9073142e..940fb0da 100644 --- a/cps/templates/feed.xml +++ b/cps/templates/feed.xml @@ -40,35 +40,35 @@ {% if entries and entries[0] %} {% for entry in entries %} - {{entry.title}} - urn:uuid:{{entry.uuid}} - {{entry.atom_timestamp}} - {% if entry.authors.__len__() > 0 %} + {{entry.Books.title}} + urn:uuid:{{entry.Books.uuid}} + {{entry.Books.atom_timestamp}} + {% if entry.Books.authors.__len__() > 0 %} - {{entry.authors[0].name}} + {{entry.Books.authors[0].name}} {% endif %} - {% if entry.publishers.__len__() > 0 %} + {% if entry.Books.publishers.__len__() > 0 %} - {{entry.publishers[0].name}} + {{entry.Books.publishers[0].name}} {% endif %} - {% for lang in entry.languages %} + {% for lang in entry.Books.languages %} {{lang.lang_code}} {% endfor %} - {% for tag in entry.tags %} + {% for tag in entry.Books.tags %} {% endfor %} - {% if entry.comments[0] %}{{entry.comments[0].text|striptags}}{% endif %} - {% if entry.has_cover %} - - + {% if entry.Books.comments[0] %}{{entry.Books.comments[0].text|striptags}}{% endif %} + {% if entry.Books.has_cover %} + + {% endif %} - {% for format in entry.data %} - + {% for format in entry.Books.data %} + {% endfor %} {% endfor %} diff --git a/cps/templates/fragment.html b/cps/templates/fragment.html index 1421ea6a..f2e94fb2 100644 --- a/cps/templates/fragment.html +++ b/cps/templates/fragment.html @@ -1,3 +1,4 @@ +{% import 'image.html' as image %}
      {% block body %}{% endblock %}
      diff --git a/cps/templates/generate_kobo_auth_url.html b/cps/templates/generate_kobo_auth_url.html index fb62424c..b8b74bda 100644 --- a/cps/templates/generate_kobo_auth_url.html +++ b/cps/templates/generate_kobo_auth_url.html @@ -1,12 +1,15 @@ {% extends "fragment.html" %} {% block body %}
      -

      - {{_('Open the .kobo/Kobo eReader.conf file in a text editor and add (or edit):')}} +

      + {% if not warning %} + {{_('Open the .kobo/Kobo eReader.conf file in a text editor and add (or edit):')}} +

      + api_endpoint={{url_for("kobo.TopLevelEndpoint", auth_token=auth_token, _external=True)}} + {% else %} + {{warning}} +

      {{_('Kobo Token:')}} {{ auth_token }} + {% endif %}

      -

      - {% if not warning %}api_endpoint={{kobo_auth_url}}{% else %}{{warning}}{% endif %} -

      -

      {% endblock %} diff --git a/cps/templates/grid.html b/cps/templates/grid.html index b9d40961..638b7245 100644 --- a/cps/templates/grid.html +++ b/cps/templates/grid.html @@ -1,3 +1,4 @@ +{% import 'image.html' as image %} {% extends "layout.html" %} {% block body %}

      {{_(title)}}

      @@ -27,7 +28,7 @@
      - {{ entry[0].series[0].name }} + {{ image.series(entry[0].series[0], alt=entry[0].series[0].name|shortentitle) }} {{entry.count}} diff --git a/cps/templates/image.html b/cps/templates/image.html new file mode 100644 index 00000000..0bdba9a5 --- /dev/null +++ b/cps/templates/image.html @@ -0,0 +1,20 @@ +{% macro book_cover(book, alt=None) -%} + {%- set image_title = book.title if book.title else book.name -%} + {%- set image_alt = alt if alt else image_title -%} + {% set srcset = book|get_cover_srcset %} + {{ image_alt }} +{%- endmacro %} + +{% macro series(series, alt=None) -%} + {%- set image_alt = alt if alt else image_title -%} + {% set srcset = series|get_series_srcset %} + {{ book_title }} +{%- endmacro %} diff --git a/cps/templates/index.html b/cps/templates/index.html index 162adc7d..0bb3da72 100644 --- a/cps/templates/index.html +++ b/cps/templates/index.html @@ -1,30 +1,31 @@ +{% import 'image.html' as image %} {% extends "layout.html" %} {% block body %} -{% if g.user.show_detail_random() %} +{% if g.user.show_detail_random() and page != "discover" %}

      {{_('Discover (Random Books)')}}

      {% for entry in random %}
      - -

      {{entry.title|shortentitle}}

      +
      +

      {{entry.Books.title|shortentitle}}

      - {% for author in entry.authors %} + {% for author in entry.Books.authors %} {% if loop.index > g.config_authors_max and g.config_authors_max != 0 %} {% if not loop.first %} & {% endif %} - {{author.name.replace('|',',')|shortentitle(30)}} + {{author.name.replace('|',',')|shortentitle(30)}} {% if loop.last %} (...) {% endif %} @@ -32,21 +33,21 @@ {% if not loop.first %} & {% endif %} - {{author.name.replace('|',',')|shortentitle(30)}} + {{author.name.replace('|',',')|shortentitle(30)}} {% endif %} {% endfor %}

      - {% if entry.series.__len__() > 0 %} + {% if entry.Books.series.__len__() > 0 %}

      - - {{entry.series[0].name}} + + {{entry.Books.series[0].name}} - ({{entry.series_index|formatseriesindex}}) + ({{entry.Books.series_index|formatseriesindex}})

      {% endif %} - {% if entry.ratings.__len__() > 0 %} + {% if entry.Books.ratings.__len__() > 0 %}
      - {% for number in range((entry.ratings[0].rating/2)|int(2)) %} + {% for number in range((entry.Books.ratings[0].rating/2)|int(2)) %} {% if loop.last and loop.index < 5 %} {% for numer in range(5 - loop.index) %} @@ -64,6 +65,7 @@ {% endif %}

      {{title}}

      + {% if page != 'discover' %} - + {% endif %}
      {% if entries[0] %} {% for entry in entries %}
      - -

      {{entry.title|shortentitle}}

      +
      +

      {{entry.Books.title|shortentitle}}

      - {% for author in entry.authors %} + {% for author in entry.Books.authors %} {% if loop.index > g.config_authors_max and g.config_authors_max != 0 %} {% if not loop.first %} & {% endif %} - {{author.name.replace('|',',')|shortentitle(30)}} + {{author.name.replace('|',',')|shortentitle(30)}} {% if loop.last %} (...) {% endif %} @@ -114,26 +116,30 @@ {% if not loop.first %} & {% endif %} - {{author.name.replace('|',',')|shortentitle(30)}} + {{author.name.replace('|',',')|shortentitle(30)}} {% endif %} {% endfor %} - {% for format in entry.data %} + {% for format in entry.Books.data %} {% if format.format|lower in g.constants.EXTENSIONS_AUDIO %} {% endif %} {%endfor%}

      - {% if entry.series.__len__() > 0 %} + {% if entry.Books.series.__len__() > 0 %}

      - - {{entry.series[0].name}} + {% if page != "series" %} + + {{entry.Books.series[0].name}} - ({{entry.series_index|formatseriesindex}}) + {% else %} + {{entry.Books.series[0].name}} + {% endif %} + ({{entry.Books.series_index|formatseriesindex}})

      {% endif %} - {% if entry.ratings.__len__() > 0 %} + {% if entry.Books.ratings.__len__() > 0 %}
      - {% for number in range((entry.ratings[0].rating/2)|int(2)) %} + {% for number in range((entry.Books.ratings[0].rating/2)|int(2)) %} {% if loop.last and loop.index < 5 %} {% for numer in range(5 - loop.index) %} diff --git a/cps/templates/languages.html b/cps/templates/languages.html deleted file mode 100644 index 8331cb94..00000000 --- a/cps/templates/languages.html +++ /dev/null @@ -1,35 +0,0 @@ -{% extends "layout.html" %} -{% block body %} -

      {{title}}

      - -
      -
      - {% for lang in languages %} - {% if loop.index0 == (loop.length/2)|int and loop.length > 20 %} -
      -
      - {% endif %} -
      -
      {{lang[1]}}
      - -
      - {% endfor %} -
      -
      -{% endblock %} -{% block js %} - -{% endblock %} - diff --git a/cps/templates/layout.html b/cps/templates/layout.html index 8cb6b76e..7502514a 100644 --- a/cps/templates/layout.html +++ b/cps/templates/layout.html @@ -1,4 +1,5 @@ {% from 'modal_dialogs.html' import restrict_modal, delete_book, filechooser_modal, delete_confirm_modal, change_confirm_modal %} +{% import 'image.html' as image %} @@ -40,7 +41,7 @@ {{instance}}
      {% if g.user.is_authenticated or g.allow_anonymous %} - +
      @@ -53,14 +54,14 @@
      diff --git a/cps/templates/list.html b/cps/templates/list.html index 71dbea11..a9089823 100644 --- a/cps/templates/list.html +++ b/cps/templates/list.html @@ -14,7 +14,7 @@ {% endif %}
      {% for char in charlist%} -
      {{char.char}}
      +
      {{char[0]}}
      {% endfor %}
      @@ -29,8 +29,8 @@
      {% endif %} -
      -
      {{entry.count}}
      +
      +
      {{entry[1]}}
      {% if entry.name %}
      diff --git a/cps/templates/listenmp3.html b/cps/templates/listenmp3.html index 279ec28f..2067bf38 100644 --- a/cps/templates/listenmp3.html +++ b/cps/templates/listenmp3.html @@ -105,7 +105,7 @@ @@ -134,7 +134,7 @@ window.calibre = { filePath: "{{ url_for('static', filename='js/libs/') }}", cssPath: "{{ url_for('static', filename='css/') }}", bookUrl: "{{ url_for('static', filename=mp3file) }}/", - bookmarkUrl: "{{ url_for('web.bookmark', book_id=mp3file, book_format=audioformat.upper()) }}", + bookmarkUrl: "{{ url_for('web.set_bookmark', book_id=mp3file, book_format=audioformat.upper()) }}", bookmark: "{{ bookmark.bookmark_key if bookmark != None }}", useBookmarks: "{{ g.user.is_authenticated | tojson }}" }; diff --git a/cps/templates/read.html b/cps/templates/read.html index 1766eb1b..f69d662f 100644 --- a/cps/templates/read.html +++ b/cps/templates/read.html @@ -86,7 +86,7 @@ window.calibre = { filePath: "{{ url_for('static', filename='js/libs/') }}", cssPath: "{{ url_for('static', filename='css/') }}", - bookmarkUrl: "{{ url_for('web.bookmark', book_id=bookid, book_format='EPUB') }}", + bookmarkUrl: "{{ url_for('web.set_bookmark', book_id=bookid, book_format='EPUB') }}", bookUrl: "{{ url_for('web.serve_book', book_id=bookid, book_format='epub', anyname='file.epub') }}", bookmark: "{{ bookmark.bookmark_key if bookmark != None }}", useBookmarks: "{{ g.user.is_authenticated | tojson }}" diff --git a/cps/templates/schedule_edit.html b/cps/templates/schedule_edit.html new file mode 100644 index 00000000..83dd6c68 --- /dev/null +++ b/cps/templates/schedule_edit.html @@ -0,0 +1,44 @@ +{% extends "layout.html" %} +{% block header %} + + +{% endblock %} +{% block body %} +
      +

      {{title}}

      + + +
      + + +
      +
      + + +
      +
      + + +
      + +
      + + +
      + + + {{_('Cancel')}} + +
      +{% endblock %} diff --git a/cps/templates/search.html b/cps/templates/search.html index 7640192f..78e30494 100644 --- a/cps/templates/search.html +++ b/cps/templates/search.html @@ -1,3 +1,4 @@ +{% import 'image.html' as image %} {% extends "layout.html" %} {% block body %}
      @@ -9,6 +10,7 @@ {% if g.user.is_authenticated %} {% if g.user.shelf.all() or g.shelves_access %}