Merge branch 'Develop'
commit 2d49589e4b
cps.py (5 changes)
@@ -16,6 +16,11 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
+try:
+    from gevent import monkey
+    monkey.patch_all()
+except ImportError:
+    pass

 import sys
 import os
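Note: monkey.patch_all() only has full effect when it runs before the rest of the process imports the standard-library socket and threading modules, which is why this guard sits at the very top of the entry module and silently skips when gevent is absent. A minimal standalone sketch of the same optional-dependency guard (gevent may or may not be installed):

    # Optional gevent patching; harmless no-op when gevent is missing.
    try:
        from gevent import monkey
        monkey.patch_all()   # must run before sockets/threads are created
    except ImportError:
        pass

    import socket
    print(socket.socket)     # gevent's patched class, or the plain stdlib one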
@@ -186,4 +186,9 @@ def get_timezone():

 from .updater import Updater
 updater_thread = Updater()
+
+# Perform dry run of updater and exit afterwards
+if cli.dry_run:
+    updater_thread.dry_run()
+    sys.exit(0)
 updater_thread.start()
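Note: with the new -d switch the process builds the updater, performs only its permission check, and exits before the web server comes up. The gate pattern in isolation (FakeUpdater is a stand-in; the real Updater lives in cps/updater.py):

    import sys

    class FakeUpdater:                 # stand-in for cps.updater.Updater
        def dry_run(self):
            print("dry run: checking file permissions, replacing nothing")
        def start(self):
            print("updater thread started")

    dry_run = "-d" in sys.argv         # mirrors cli.dry_run
    updater_thread = FakeUpdater()
    if dry_run:
        updater_thread.dry_run()
        sys.exit(0)                    # start() is never reached
    updater_thread.start()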
cps/admin.py (67 changes)
@@ -39,7 +39,7 @@ from sqlalchemy.orm.attributes import flag_modified
 from sqlalchemy.exc import IntegrityError, OperationalError, InvalidRequestError
 from sqlalchemy.sql.expression import func, or_, text

-from . import constants, logger, helper, services
+from . import constants, logger, helper, services, cli
 from . import db, calibre_db, ub, web_server, get_locale, config, updater_thread, babel, gdriveutils, kobo_sync_status
 from .helper import check_valid_domain, send_test_mail, reset_password, generate_password_hash, check_email, \
     valid_email, check_username
@@ -47,10 +47,7 @@ from .gdriveutils import is_gdrive_ready, gdrive_support
 from .render_template import render_title_template, get_sidebar_config
 from . import debug_info, _BABEL_TRANSLATIONS

-try:
-    from functools import wraps
-except ImportError:
-    pass  # We're not using Python 3
+from functools import wraps

 log = logger.create()

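Note: functools.wraps has shipped with every Python version this project can run on (it was added in Python 2.5), so the removed try/except was dead code. The import matters for the decorators defined in this file; a small illustration:

    from functools import wraps

    def admin_required_sketch(f):      # simplified decorator shape
        @wraps(f)                      # keeps f.__name__ intact for Flask routing
        def inner(*args, **kwargs):
            return f(*args, **kwargs)
        return inner

    @admin_required_sketch
    def view_configuration():
        pass

    print(view_configuration.__name__)  # 'view_configuration', not 'inner'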
@@ -158,6 +155,18 @@ def shutdown():
     return json.dumps(showtext), 400


+# method is available without login and not protected by CSRF to make it easy reachable, is per default switched of
+# needed for docker applications, as changes on metadata.db from host are not visible to application
+@admi.route("/reconnect", methods=['GET'])
+def reconnect():
+    if cli.args.r:
+        calibre_db.reconnect_db(config, ub.app_DB_path)
+        return json.dumps({})
+    else:
+        log.debug("'/reconnect' was accessed but is not enabled")
+        abort(404)
+
+
 @admi.route("/admin/view")
 @login_required
 @admin_required
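Note: the route deliberately skips login and CSRF so that a host-side script (for example in a Docker setup) can poke it after replacing metadata.db, and it answers 404 unless the server was started with -r. A usage sketch; host and port are assumptions:

    import json
    from urllib.request import urlopen

    resp = urlopen("http://localhost:8083/reconnect")     # instance started with -r
    print(resp.status, json.loads(resp.read() or b"{}"))  # 200 {}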
@@ -187,6 +196,7 @@ def admin():
                                feature_support=feature_support, kobo_support=kobo_support,
                                title=_(u"Admin page"), page="admin")

+
 @admi.route("/admin/dbconfig", methods=["GET", "POST"])
 @login_required
 @admin_required
@@ -227,6 +237,7 @@ def ajax_db_config():
 def calibreweb_alive():
     return "", 200

+
 @admi.route("/admin/viewconfig")
 @login_required
 @admin_required
@@ -243,6 +254,7 @@ def view_configuration():
                                  translations=translations,
                                  title=_(u"UI Configuration"), page="uiconfig")

+
 @admi.route("/admin/usertable")
 @login_required
 @admin_required
@@ -304,8 +316,8 @@ def list_users():

     if search:
         all_user = all_user.filter(or_(func.lower(ub.User.name).ilike("%" + search + "%"),
-                                       func.lower(ub.User.kindle_mail).ilike("%" + search + "%"),
-                                       func.lower(ub.User.email).ilike("%" + search + "%")))
+                                        func.lower(ub.User.kindle_mail).ilike("%" + search + "%"),
+                                        func.lower(ub.User.email).ilike("%" + search + "%")))
     if state:
         users = calibre_db.get_checkbox_sorted(all_user.all(), state, off, limit, request.args.get("order", "").lower())
     else:
@@ -325,12 +337,14 @@ def list_users():
     response.headers["Content-Type"] = "application/json; charset=utf-8"
     return response

+
 @admi.route("/ajax/deleteuser", methods=['POST'])
 @login_required
 @admin_required
 def delete_user():
     user_ids = request.form.to_dict(flat=False)
     users = None
+    message = ""
     if "userid[]" in user_ids:
         users = ub.session.query(ub.User).filter(ub.User.id.in_(user_ids['userid[]'])).all()
     elif "userid" in user_ids:
@@ -358,6 +372,7 @@ def delete_user():
     success.extend(errors)
     return Response(json.dumps(success), mimetype='application/json')

+
 @admi.route("/ajax/getlocale")
 @login_required
 @admin_required
@@ -417,9 +432,9 @@ def edit_list_user(param):
         if user.name == "Guest":
             raise Exception(_("Guest Name can't be changed"))
         user.name = check_username(vals['value'])
-    elif param =='email':
+    elif param == 'email':
         user.email = check_email(vals['value'])
-    elif param =='kobo_only_shelves_sync':
+    elif param == 'kobo_only_shelves_sync':
         user.kobo_only_shelves_sync = int(vals['value'] == 'true')
     elif param == 'kindle_mail':
         user.kindle_mail = valid_email(vals['value']) if vals['value'] else ""
@@ -439,8 +454,8 @@ def edit_list_user(param):
                                             ub.User.id != user.id).count():
                     return Response(
                         json.dumps([{'type': "danger",
-                                     'message':_(u"No admin user remaining, can't remove admin role",
-                                                 nick=user.name)}]), mimetype='application/json')
+                                     'message': _(u"No admin user remaining, can't remove admin role",
+                                                  nick=user.name)}]), mimetype='application/json')
                 user.role &= ~value
             else:
                 raise Exception(_("Value has to be true or false"))
@@ -503,6 +518,7 @@ def update_table_settings():
         return "Invalid request", 400
     return ""

+
 def check_valid_read_column(column):
     if column != "0":
         if not calibre_db.session.query(db.Custom_Columns).filter(db.Custom_Columns.id == column) \
@@ -510,6 +526,7 @@ def check_valid_read_column(column):
             return False
     return True

+
 def check_valid_restricted_column(column):
     if column != "0":
         if not calibre_db.session.query(db.Custom_Columns).filter(db.Custom_Columns.id == column) \
@@ -548,7 +565,6 @@ def update_view_configuration():
     _config_string(to_save, "config_default_language")
     _config_string(to_save, "config_default_locale")

-
     config.config_default_role = constants.selected_roles(to_save)
     config.config_default_role &= ~constants.ROLE_ANONYMOUS
@@ -585,13 +601,15 @@ def load_dialogtexts(element_id):
     elif element_id == "restrictions":
         texts["main"] = _('Are you sure you want to change the selected restrictions for the selected user(s)?')
     elif element_id == "sidebar_view":
-        texts["main"] = _('Are you sure you want to change the selected visibility restrictions for the selected user(s)?')
+        texts["main"] = _('Are you sure you want to change the selected visibility restrictions '
+                          'for the selected user(s)?')
     elif element_id == "kobo_only_shelves_sync":
         texts["main"] = _('Are you sure you want to change shelf sync behavior for the selected user(s)?')
     elif element_id == "db_submit":
         texts["main"] = _('Are you sure you want to change Calibre library location?')
     elif element_id == "btnfullsync":
-        texts["main"] = _("Are you sure you want delete Calibre-Web's sync database to force a full sync with your Kobo Reader?")
+        texts["main"] = _("Are you sure you want delete Calibre-Web's sync database "
+                          "to force a full sync with your Kobo Reader?")
     return json.dumps(texts)

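Note: the long translatable strings are wrapped using Python's implicit concatenation of adjacent string literals, so the message handed to gettext is unchanged and only the source line length shrinks:

    texts = {}
    texts["main"] = ('Are you sure you want to change the selected visibility restrictions '
                     'for the selected user(s)?')
    assert "restrictions for the selected" in texts["main"]  # one joined string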
@@ -762,6 +780,7 @@ def prepare_tags(user, action, tags_name, id_list):
 def add_user_0_restriction(res_type):
     return add_restriction(res_type, 0)

+
 @admi.route("/ajax/addrestriction/<int:res_type>/<int:user_id>", methods=['POST'])
 @login_required
 @admin_required
@@ -868,8 +887,8 @@ def delete_restriction(res_type, user_id):
 @admin_required
 def list_restriction(res_type, user_id):
     if res_type == 0:  # Tags as template
-        restrict = [{'Element': x, 'type':_('Deny'), 'id': 'd'+str(i) }
-                    for i,x in enumerate(config.list_denied_tags()) if x != '']
+        restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd'+str(i)}
+                    for i, x in enumerate(config.list_denied_tags()) if x != '']
         allow = [{'Element': x, 'type': _('Allow'), 'id': 'a'+str(i)}
                  for i, x in enumerate(config.list_allowed_tags()) if x != '']
         json_dumps = restrict + allow
@@ -906,6 +925,7 @@ def list_restriction(res_type, user_id):
     response.headers["Content-Type"] = "application/json; charset=utf-8"
     return response

+
 @admi.route("/ajax/fullsync", methods=["POST"])
 @login_required
 def ajax_fullsync():
@@ -1167,7 +1187,7 @@ def simulatedbchange():

 def _db_simulate_change():
     param = request.form.to_dict()
-    to_save = {}
+    to_save = dict()
     to_save['config_calibre_dir'] = re.sub(r'[\\/]metadata\.db$',
                                            '',
                                            param['config_calibre_dir'],
@@ -1225,6 +1245,7 @@ def _db_configuration_update_helper():
     config.save()
     return _db_configuration_result(None, gdrive_error)

+
 def _configuration_update_helper():
     reboot_required = False
     to_save = request.form.to_dict()
@@ -1314,6 +1335,7 @@ def _configuration_update_helper():

     return _configuration_result(None, reboot_required)

+
 def _configuration_result(error_flash=None, reboot=False):
     resp = {}
     if error_flash:
@@ -1321,9 +1343,9 @@ def _configuration_result(error_flash=None, reboot=False):
         config.load()
         resp['result'] = [{'type': "danger", 'message': error_flash}]
     else:
-        resp['result'] = [{'type': "success", 'message':_(u"Calibre-Web configuration updated")}]
+        resp['result'] = [{'type': "success", 'message': _(u"Calibre-Web configuration updated")}]
     resp['reboot'] = reboot
-    resp['config_upload']= config.config_upload_formats
+    resp['config_upload'] = config.config_upload_formats
     return Response(json.dumps(resp), mimetype='application/json')

@@ -1405,6 +1427,7 @@ def _handle_new_user(to_save, content, languages, translations, kobo_support):
         log.error("Settings DB is not Writeable")
         flash(_("Settings DB is not Writeable"), category="error")

+
 def _delete_user(content):
     if ub.session.query(ub.User).filter(ub.User.role.op('&')(constants.ROLE_ADMIN) == constants.ROLE_ADMIN,
                                         ub.User.id != content.id).count():
@@ -1428,7 +1451,7 @@ def _delete_user(content):
             ub.session.delete(kobo_entry)
         ub.session_commit()
         log.info("User {} deleted".format(content.name))
-        return(_("User '%(nick)s' deleted", nick=content.name))
+        return _("User '%(nick)s' deleted", nick=content.name)
     else:
         log.warning(_("Can't delete Guest User"))
         raise Exception(_("Can't delete Guest User"))
@@ -1726,7 +1749,7 @@ def get_updater_status():
     if request.method == "POST":
         commit = request.form.to_dict()
         if "start" in commit and commit['start'] == 'True':
-            text = {
+            txt = {
                 "1": _(u'Requesting update package'),
                 "2": _(u'Downloading update package'),
                 "3": _(u'Unzipping update package'),
@@ -1741,7 +1764,7 @@ def get_updater_status():
                 "12": _(u'Update failed:') + u' ' + _(u'Update file could not be saved in temp dir'),
                 "13": _(u'Update failed:') + u' ' + _(u'Files could not be replaced during update')
             }
-            status['text'] = text
+            status['text'] = txt
             updater_thread.status = 0
             updater_thread.resume()
             status['status'] = updater_thread.get_update_status()
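Note: renaming the local text to txt plausibly avoids shadowing the text imported from sqlalchemy.sql.expression at the top of this file. The hazard in miniature (assuming SQLAlchemy is installed):

    from sqlalchemy.sql.expression import text

    def status_messages():
        txt = {"1": "Requesting update package"}   # local name, import untouched
        return txt, text("SELECT 1")               # sqlalchemy text() still reachable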
cps/cli.py (11 changes)
@@ -40,12 +40,15 @@ parser.add_argument('-c', metavar='path',
                     help='path and name to SSL certfile, e.g. /opt/test.cert, works only in combination with keyfile')
 parser.add_argument('-k', metavar='path',
                     help='path and name to SSL keyfile, e.g. /opt/test.key, works only in combination with certfile')
-parser.add_argument('-v', '--version', action='version', help='Shows version number and exits Calibre-web',
+parser.add_argument('-v', '--version', action='version', help='Shows version number and exits Calibre-Web',
                     version=version_info())
 parser.add_argument('-i', metavar='ip-address', help='Server IP-Address to listen')
-parser.add_argument('-s', metavar='user:pass', help='Sets specific username to new password')
+parser.add_argument('-s', metavar='user:pass', help='Sets specific username to new password and exits Calibre-Web')
 parser.add_argument('-f', action='store_true', help='Flag is depreciated and will be removed in next version')
+parser.add_argument('-l', action='store_true', help='Allow loading covers from localhost')
+parser.add_argument('-d', action='store_true', help='Dry run of updater to check file permissions in advance '
+                                                    'and exits Calibre-Web')
+parser.add_argument('-r', action='store_true', help='Enable public database reconnect route under /reconnect')
 args = parser.parse_args()

 settingspath = args.p or os.path.join(_CONFIG_DIR, "app.db")
@@ -78,6 +81,9 @@ if (args.k and not args.c) or (not args.k and args.c):
     if args.k == "":
         keyfilepath = ""


+# dry run updater
+dry_run = args.d or None
+# load covers from localhost
+allow_localhost = args.l or None
 # handle and check ip address argument
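Note: the new switches are plain store_true flags, and args.d or None then folds False to None so downstream checks treat "flag absent" and "flag off" identically. Self-contained sketch of the pattern:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-l', action='store_true', help='allow covers from localhost')
    parser.add_argument('-d', action='store_true', help='updater dry run, then exit')
    parser.add_argument('-r', action='store_true', help='enable /reconnect route')
    args = parser.parse_args(['-d'])   # simulate: calibre-web -d

    dry_run = args.d or None           # True or None, never False
    allow_localhost = args.l or None
    print(dry_run, allow_localhost)    # True None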
@ -106,3 +112,4 @@ if user_credentials and ":" not in user_credentials:
|
|||
|
||||
if args.f:
|
||||
print("Warning: -f flag is depreciated and will be removed in next version")
|
||||
|
||||
|
|
|
@@ -21,22 +21,22 @@ import os
 from collections import namedtuple
 from sqlalchemy import __version__ as sql_version

-sqlalchemy_version2 = ([int(x) for x in sql_version.split('.')] >= [2,0,0])
+sqlalchemy_version2 = ([int(x) for x in sql_version.split('.')] >= [2, 0, 0])

 # if installed via pip this variable is set to true (empty file with name .HOMEDIR present)
 HOME_CONFIG = os.path.isfile(os.path.join(os.path.dirname(os.path.abspath(__file__)), '.HOMEDIR'))

-#In executables updater is not available, so variable is set to False there
+# In executables updater is not available, so variable is set to False there
 UPDATER_AVAILABLE = True

 # Base dir is parent of current file, necessary if called from different folder
-BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),os.pardir))
+BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
 STATIC_DIR = os.path.join(BASE_DIR, 'cps', 'static')
 TEMPLATES_DIR = os.path.join(BASE_DIR, 'cps', 'templates')
 TRANSLATIONS_DIR = os.path.join(BASE_DIR, 'cps', 'translations')

 if HOME_CONFIG:
-    home_dir = os.path.join(os.path.expanduser("~"),".calibre-web")
+    home_dir = os.path.join(os.path.expanduser("~"), ".calibre-web")
     if not os.path.exists(home_dir):
         os.makedirs(home_dir)
     CONFIG_DIR = os.environ.get('CALIBRE_DBPATH', home_dir)
@@ -133,11 +133,14 @@ except ValueError:
     del env_CALIBRE_PORT


-EXTENSIONS_AUDIO = {'mp3', 'mp4', 'ogg', 'opus', 'wav', 'flac', 'm4a', 'm4b'}
-EXTENSIONS_CONVERT_FROM = ['pdf', 'epub', 'mobi', 'azw3', 'docx', 'rtf', 'fb2', 'lit', 'lrf', 'txt', 'htmlz', 'rtf', 'odt','cbz','cbr']
-EXTENSIONS_CONVERT_TO = ['pdf', 'epub', 'mobi', 'azw3', 'docx', 'rtf', 'fb2', 'lit', 'lrf', 'txt', 'htmlz', 'rtf', 'odt']
-EXTENSIONS_UPLOAD = {'txt', 'pdf', 'epub', 'kepub', 'mobi', 'azw', 'azw3', 'cbr', 'cbz', 'cbt', 'djvu', 'prc', 'doc', 'docx',
-                     'fb2', 'html', 'rtf', 'lit', 'odt', 'mp3', 'mp4', 'ogg', 'opus', 'wav', 'flac', 'm4a', 'm4b'}
+EXTENSIONS_AUDIO = {'mp3', 'mp4', 'ogg', 'opus', 'wav', 'flac', 'm4a', 'm4b'}
+EXTENSIONS_CONVERT_FROM = ['pdf', 'epub', 'mobi', 'azw3', 'docx', 'rtf', 'fb2', 'lit', 'lrf',
+                           'txt', 'htmlz', 'rtf', 'odt', 'cbz', 'cbr']
+EXTENSIONS_CONVERT_TO = ['pdf', 'epub', 'mobi', 'azw3', 'docx', 'rtf', 'fb2',
+                         'lit', 'lrf', 'txt', 'htmlz', 'rtf', 'odt']
+EXTENSIONS_UPLOAD = {'txt', 'pdf', 'epub', 'kepub', 'mobi', 'azw', 'azw3', 'cbr', 'cbz', 'cbt', 'djvu',
+                     'prc', 'doc', 'docx', 'fb2', 'html', 'rtf', 'lit', 'odt', 'mp3', 'mp4', 'ogg',
+                     'opus', 'wav', 'flac', 'm4a', 'm4b'}


 def has_flag(value, bit_flag):
@@ -153,7 +156,7 @@ BookMeta = namedtuple('BookMeta', 'file_path, extension, title, author, cover, d

 STABLE_VERSION = {'version': '0.6.17 Beta'}

-NIGHTLY_VERSION = {}
+NIGHTLY_VERSION = dict()
 NIGHTLY_VERSION[0] = '$Format:%H$'
 NIGHTLY_VERSION[1] = '$Format:%cI$'
 # NIGHTLY_VERSION[0] = 'bb7d2c6273ae4560e83950d36d64533343623a57'
cps/db.py (186 changes)
@@ -41,8 +41,6 @@ from sqlalchemy.pool import StaticPool
 from sqlalchemy.sql.expression import and_, true, false, text, func, or_
 from sqlalchemy.ext.associationproxy import association_proxy
 from flask_login import current_user
-from babel import Locale as LC
-from babel.core import UnknownLocaleError
 from flask_babel import gettext as _
 from flask import flash

@@ -341,15 +339,15 @@ class Books(Base):
     isbn = Column(String(collation='NOCASE'), default="")
     flags = Column(Integer, nullable=False, default=1)

-    authors = relationship('Authors', secondary=books_authors_link, backref='books')
-    tags = relationship('Tags', secondary=books_tags_link, backref='books', order_by="Tags.name")
-    comments = relationship('Comments', backref='books')
-    data = relationship('Data', backref='books')
-    series = relationship('Series', secondary=books_series_link, backref='books')
-    ratings = relationship('Ratings', secondary=books_ratings_link, backref='books')
-    languages = relationship('Languages', secondary=books_languages_link, backref='books')
-    publishers = relationship('Publishers', secondary=books_publishers_link, backref='books')
-    identifiers = relationship('Identifiers', backref='books')
+    authors = relationship(Authors, secondary=books_authors_link, backref='books')
+    tags = relationship(Tags, secondary=books_tags_link, backref='books', order_by="Tags.name")
+    comments = relationship(Comments, backref='books')
+    data = relationship(Data, backref='books')
+    series = relationship(Series, secondary=books_series_link, backref='books')
+    ratings = relationship(Ratings, secondary=books_ratings_link, backref='books')
+    languages = relationship(Languages, secondary=books_languages_link, backref='books')
+    publishers = relationship(Publishers, secondary=books_publishers_link, backref='books')
+    identifiers = relationship(Identifiers, backref='books')

     def __init__(self, title, sort, author_sort, timestamp, pubdate, series_index, last_modified, path, has_cover,
                  authors, tags, languages=None):
@@ -605,6 +603,26 @@ class CalibreDB():
         return self.session.query(Books).filter(Books.id == book_id). \
             filter(self.common_filters(allow_show_archived)).first()

+    def get_book_read_archived(self, book_id, read_column, allow_show_archived=False):
+        if not read_column:
+            bd = (self.session.query(Books, ub.ReadBook.read_status, ub.ArchivedBook.is_archived).select_from(Books)
+                  .join(ub.ReadBook, and_(ub.ReadBook.user_id == int(current_user.id), ub.ReadBook.book_id == book_id),
+                        isouter=True))
+        else:
+            try:
+                read_column = cc_classes[read_column]
+                bd = (self.session.query(Books, read_column.value, ub.ArchivedBook.is_archived).select_from(Books)
+                      .join(read_column, read_column.book == book_id,
+                            isouter=True))
+            except (KeyError, AttributeError):
+                log.error("Custom Column No.%d is not existing in calibre database", read_column)
+                # Skip linking read column and return None instead of read status
+                bd = self.session.query(Books, None, ub.ArchivedBook.is_archived)
+        return (bd.filter(Books.id == book_id)
+                .join(ub.ArchivedBook, and_(Books.id == ub.ArchivedBook.book_id,
+                                            int(current_user.id) == ub.ArchivedBook.user_id), isouter=True)
+                .filter(self.common_filters(allow_show_archived)).first())
+
     def get_book_by_uuid(self, book_uuid):
         return self.session.query(Books).filter(Books.uuid == book_uuid).first()

@@ -659,9 +677,12 @@ class CalibreDB():
                     pos_content_cc_filter, ~neg_content_cc_filter, archived_filter)

     @staticmethod
-    def get_checkbox_sorted(inputlist, state, offset, limit, order):
+    def get_checkbox_sorted(inputlist, state, offset, limit, order, combo=False):
         outcome = list()
-        elementlist = {ele.id: ele for ele in inputlist}
+        if combo:
+            elementlist = {ele[0].id: ele for ele in inputlist}
+        else:
+            elementlist = {ele.id: ele for ele in inputlist}
         for entry in state:
             try:
                 outcome.append(elementlist[entry])
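Note: with combo=True the rows arriving here are tuples whose first element is the Books object (see the changed queries below), hence the ele[0].id keying. The re-ordering logic, reduced to plain data:

    from collections import namedtuple

    Book = namedtuple('Book', 'id title')

    def get_checkbox_sorted(inputlist, state, offset, limit, combo=False):
        elementlist = ({ele[0].id: ele for ele in inputlist} if combo
                       else {ele.id: ele for ele in inputlist})
        outcome = [elementlist[i] for i in state if i in elementlist]
        return outcome[offset:offset + limit]

    rows = [(Book(1, 'A'), 1, False), (Book(2, 'B'), 0, False)]  # combo-shaped rows
    print(get_checkbox_sorted(rows, [2, 1], 0, 10, combo=True))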
@@ -675,11 +696,13 @@ class CalibreDB():
         return outcome[offset:offset + limit]

     # Fill indexpage with all requested data from database
-    def fill_indexpage(self, page, pagesize, database, db_filter, order, *join):
-        return self.fill_indexpage_with_archived_books(page, pagesize, database, db_filter, order, False, *join)
+    def fill_indexpage(self, page, pagesize, database, db_filter, order,
+                       join_archive_read=False, config_read_column=0, *join):
+        return self.fill_indexpage_with_archived_books(page, database, pagesize, db_filter, order, False,
+                                                       join_archive_read, config_read_column, *join)

-    def fill_indexpage_with_archived_books(self, page, pagesize, database, db_filter, order, allow_show_archived,
-                                           *join):
+    def fill_indexpage_with_archived_books(self, page, database, pagesize, db_filter, order, allow_show_archived,
+                                           join_archive_read, config_read_column, *join):
         pagesize = pagesize or self.config.config_books_per_page
         if current_user.show_detail_random():
             randm = self.session.query(Books) \
@@ -688,20 +711,43 @@ class CalibreDB():
                 .limit(self.config.config_random_books).all()
         else:
             randm = false()
+        if join_archive_read:
+            if not config_read_column:
+                query = (self.session.query(database, ub.ReadBook.read_status, ub.ArchivedBook.is_archived)
+                         .select_from(Books)
+                         .outerjoin(ub.ReadBook,
+                                    and_(ub.ReadBook.user_id == int(current_user.id), ub.ReadBook.book_id == Books.id)))
+            else:
+                try:
+                    read_column = cc_classes[config_read_column]
+                    query = (self.session.query(database, read_column.value, ub.ArchivedBook.is_archived)
+                             .select_from(Books)
+                             .outerjoin(read_column, read_column.book == Books.id))
+                except (KeyError, AttributeError):
+                    log.error("Custom Column No.%d is not existing in calibre database", read_column)
+                    # Skip linking read column and return None instead of read status
+                    query = self.session.query(database, None, ub.ArchivedBook.is_archived)
+            query = query.outerjoin(ub.ArchivedBook, and_(Books.id == ub.ArchivedBook.book_id,
+                                                          int(current_user.id) == ub.ArchivedBook.user_id))
+        else:
+            query = self.session.query(database)
         off = int(int(pagesize) * (page - 1))
-        query = self.session.query(database)
-        if len(join) == 6:
-            query = query.outerjoin(join[0], join[1]).outerjoin(join[2]).outerjoin(join[3], join[4]).outerjoin(join[5])
-        if len(join) == 5:
-            query = query.outerjoin(join[0], join[1]).outerjoin(join[2]).outerjoin(join[3], join[4])
-        if len(join) == 4:
-            query = query.outerjoin(join[0], join[1]).outerjoin(join[2]).outerjoin(join[3])
-        if len(join) == 3:
-            query = query.outerjoin(join[0], join[1]).outerjoin(join[2])
-        elif len(join) == 2:
-            query = query.outerjoin(join[0], join[1])
-        elif len(join) == 1:
-            query = query.outerjoin(join[0])
+
+        indx = len(join)
+        element = 0
+        while indx:
+            if indx >= 3:
+                query = query.outerjoin(join[element], join[element+1]).outerjoin(join[element+2])
+                indx -= 3
+                element += 3
+            elif indx == 2:
+                query = query.outerjoin(join[element], join[element+1])
+                indx -= 2
+                element += 2
+            elif indx == 1:
+                query = query.outerjoin(join[element])
+                indx -= 1
+                element += 1
         query = query.filter(db_filter)\
             .filter(self.common_filters(allow_show_archived))
         entries = list()
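Note: the removed if/elif ladder only coped with fixed join counts; the loop walks *join in the same shape, taking a (target, onclause, extra-target) triple while three or more arguments remain, otherwise a pair or a single target. The consumption order in isolation:

    def consume(join):
        groups, indx, element = [], len(join), 0
        while indx:
            step = 3 if indx >= 3 else indx
            groups.append(join[element:element + step])
            indx -= step
            element += step
        return groups

    print(consume(('Books', 'on_a', 'Tags', 'Series')))
    # [('Books', 'on_a', 'Tags'), ('Series',)]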
@@ -712,28 +758,40 @@ class CalibreDB():
             entries = query.order_by(*order).offset(off).limit(pagesize).all()
         except Exception as ex:
             log.debug_or_exception(ex)
-        #for book in entries:
-        #    book = self.order_authors(book)
+        # display authors in right order
+        entries = self.order_authors(entries, True, join_archive_read)
         return entries, randm, pagination

     # Orders all Authors in the list according to authors sort
-    def order_authors(self, entry):
-        sort_authors = entry.author_sort.split('&')
-        authors_ordered = list()
-        error = False
-        ids = [a.id for a in entry.authors]
-        for auth in sort_authors:
-            results = self.session.query(Authors).filter(Authors.sort == auth.lstrip().strip()).all()
-            # ToDo: How to handle not found authorname
-            if not len(results):
-                error = True
-                break
-            for r in results:
-                if r.id in ids:
-                    authors_ordered.append(r)
-        if not error:
-            entry.authors = authors_ordered
-        return entry
+    def order_authors(self, entries, list_return=False, combined=False):
+        for entry in entries:
+            if combined:
+                sort_authors = entry.Books.author_sort.split('&')
+                ids = [a.id for a in entry.Books.authors]
+
+            else:
+                sort_authors = entry.author_sort.split('&')
+                ids = [a.id for a in entry.authors]
+            authors_ordered = list()
+            error = False
+            for auth in sort_authors:
+                results = self.session.query(Authors).filter(Authors.sort == auth.lstrip().strip()).all()
+                # ToDo: How to handle not found authorname
+                if not len(results):
+                    error = True
+                    break
+                for r in results:
+                    if r.id in ids:
+                        authors_ordered.append(r)
+            if not error:
+                if combined:
+                    entry.Books.authors = authors_ordered
+                else:
+                    entry.authors = authors_ordered
+        if list_return:
+            return entries
+        else:
+            return authors_ordered

     def get_typeahead(self, database, query, replace=('', ''), tag_filter=true()):
         query = query or ''
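Note: order_authors now iterates a whole result list and, with combined=True, reaches through tuple rows via entry.Books. A reduced model of the reordering it performs per book (the real code looks authors up in the database by their sort name):

    class Author:                      # stand-in for the Authors ORM row
        def __init__(self, i, sort): self.id, self.sort = i, sort
        def __repr__(self): return self.sort

    def order_by_author_sort(author_sort, authors):
        by_sort = {a.sort: a for a in authors}
        wanted = [s.strip() for s in author_sort.split('&')]
        return [by_sort[s] for s in wanted if s in by_sort]

    authors = [Author(1, 'Doe, John'), Author(2, 'Austen, Jane')]
    print(order_by_author_sort('Austen, Jane & Doe, John', authors))
    # [Austen, Jane, Doe, John]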
@@ -754,14 +812,29 @@ class CalibreDB():
         return self.session.query(Books) \
             .filter(and_(Books.authors.any(and_(*q)), func.lower(Books.title).ilike("%" + title + "%"))).first()

-    def search_query(self, term, *join):
+    def search_query(self, term, config_read_column, *join):
         term.strip().lower()
         self.session.connection().connection.connection.create_function("lower", 1, lcase)
         q = list()
         authorterms = re.split("[, ]+", term)
         for authorterm in authorterms:
             q.append(Books.authors.any(func.lower(Authors.name).ilike("%" + authorterm + "%")))
-        query = self.session.query(Books)
+        if not config_read_column:
+            query = (self.session.query(Books, ub.ArchivedBook.is_archived, ub.ReadBook).select_from(Books)
+                     .outerjoin(ub.ReadBook, and_(Books.id == ub.ReadBook.book_id,
+                                                  int(current_user.id) == ub.ReadBook.user_id)))
+        else:
+            try:
+                read_column = cc_classes[config_read_column]
+                query = (self.session.query(Books, ub.ArchivedBook.is_archived, read_column.value).select_from(Books)
+                         .outerjoin(read_column, read_column.book == Books.id))
+            except (KeyError, AttributeError):
+                log.error("Custom Column No.%d is not existing in calibre database", config_read_column)
+                # Skip linking read column
+                query = self.session.query(Books, ub.ArchivedBook.is_archived, None)
+        query = query.outerjoin(ub.ArchivedBook, and_(Books.id == ub.ArchivedBook.book_id,
+                                                      int(current_user.id) == ub.ArchivedBook.user_id))

         if len(join) == 6:
             query = query.outerjoin(join[0], join[1]).outerjoin(join[2]).outerjoin(join[3], join[4]).outerjoin(join[5])
         if len(join) == 3:
@@ -779,10 +852,11 @@ class CalibreDB():
             ))

     # read search results from calibre-database and return it (function is used for feed and simple search
-    def get_search_results(self, term, offset=None, order=None, limit=None, *join):
+    def get_search_results(self, term, offset=None, order=None, limit=None, allow_show_archived=False,
+                           config_read_column=False, *join):
         order = order[0] if order else [Books.sort]
         pagination = None
-        result = self.search_query(term, *join).order_by(*order).all()
+        result = self.search_query(term, config_read_column, *join).order_by(*order).all()
         result_count = len(result)
         if offset != None and limit != None:
             offset = int(offset)
@@ -792,8 +866,10 @@ class CalibreDB():
             offset = 0
             limit_all = result_count

-        ub.store_ids(result)
-        return result[offset:limit_all], result_count, pagination
+        ub.store_combo_ids(result)
+        entries = self.order_authors(result[offset:limit_all], list_return=True, combined=True)
+
+        return entries, result_count, pagination

     # Creates for all stored languages a translated speaking name in the array for the UI
     def speaking_language(self, languages=None, return_all_languages=False, with_count=False, reverse_order=False):
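Note: because search_query now selects (Books, is_archived, read_status) rows, every consumer of get_search_results receives tuples instead of bare Books objects; store_combo_ids and the combined=True path above exist for exactly that shape. Unpacking such rows (values are illustrative):

    rows = [("book-1", False, 1), ("book-2", True, None)]
    for book, is_archived, read_status in rows:
        print(book, "archived" if is_archived else "active", read_status)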
cps/editbooks.py (145 changes)
@@ -45,6 +45,7 @@ from .services.worker import WorkerThread
 from .tasks.upload import TaskUpload
 from .render_template import render_title_template
 from .usermanagement import login_required_if_no_ano
+from .kobo_sync_status import change_archived_books


 editbook = Blueprint('editbook', __name__)
@@ -81,7 +82,6 @@ def search_objects_remove(db_book_object, db_type, input_elements):
             type_elements = c_elements.name
         for inp_element in input_elements:
             if inp_element.lower() == type_elements.lower():
-                # if inp_element == type_elements:
                 found = True
                 break
         # if the element was not found in the new list, add it to remove list
@@ -131,7 +131,6 @@ def add_objects(db_book_object, db_object, db_session, db_type, add_elements):
         # check if a element with that name exists
         db_element = db_session.query(db_object).filter(db_filter == add_element).first()
         # if no element is found add it
-        # if new_element is None:
         if db_type == 'author':
             new_element = db_object(add_element, helper.get_sorted_author(add_element.replace('|', ',')), "")
         elif db_type == 'series':
@@ -158,7 +157,7 @@ def add_objects(db_book_object, db_object, db_session, db_type, add_elements):
 def create_objects_for_addition(db_element, add_element, db_type):
     if db_type == 'custom':
         if db_element.value != add_element:
-            db_element.value = add_element  # ToDo: Before new_element, but this is not plausible
+            db_element.value = add_element
     elif db_type == 'languages':
         if db_element.lang_code != add_element:
             db_element.lang_code = add_element
@@ -169,7 +168,7 @@ def create_objects_for_addition(db_element, add_element, db_type):
     elif db_type == 'author':
         if db_element.name != add_element:
             db_element.name = add_element
-            db_element.sort = add_element.replace('|', ',')
+            db_element.sort = helper.get_sorted_author(add_element.replace('|', ','))
     elif db_type == 'publisher':
         if db_element.name != add_element:
             db_element.name = add_element
@@ -374,7 +373,7 @@ def render_edit_book(book_id):
     for lang in book.languages:
         lang.language_name = isoLanguages.get_language_name(get_locale(), lang.lang_code)

-    book = calibre_db.order_authors(book)
+    book.authors = calibre_db.order_authors([book])

     author_names = []
     for authr in book.authors:
@@ -707,6 +706,7 @@ def handle_title_on_edit(book, book_title):

 def handle_author_on_edit(book, author_name, update_stored=True):
     # handle author(s)
+    # renamed = False
     input_authors = author_name.split('&')
     input_authors = list(map(lambda it: it.strip().replace(',', '|'), input_authors))
     # Remove duplicates in authors list
@@ -715,6 +715,18 @@ def handle_author_on_edit(book, author_name, update_stored=True):
     if input_authors == ['']:
         input_authors = [_(u'Unknown')]  # prevent empty Author

+    renamed = list()
+    for in_aut in input_authors:
+        renamed_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == in_aut).first()
+        if renamed_author and in_aut != renamed_author.name:
+            renamed.append(renamed_author.name)
+            all_books = calibre_db.session.query(db.Books) \
+                .filter(db.Books.authors.any(db.Authors.name == renamed_author.name)).all()
+            sorted_renamed_author = helper.get_sorted_author(renamed_author.name)
+            sorted_old_author = helper.get_sorted_author(in_aut)
+            for one_book in all_books:
+                one_book.author_sort = one_book.author_sort.replace(sorted_renamed_author, sorted_old_author)
+
     change = modify_database_object(input_authors, book.authors, db.Authors, calibre_db.session, 'author')

     # Search for each author if author is in database, if not, author name and sorted author name is generated new
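Note: when an entered author differs from the stored one only in spelling (typically case), the stored record wins and each affected book's author_sort is patched by string replacement. The swap itself is ordinary str.replace; get_sorted_author is assumed to produce the 'Last, First' form:

    sorted_renamed_author = "Doe, John"    # sort form of the stored spelling
    sorted_old_author = "doe, john"        # sort form of what the user typed
    author_sort = "Austen, Jane & Doe, John"
    print(author_sort.replace(sorted_renamed_author, sorted_old_author))
    # Austen, Jane & doe, john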
@@ -731,7 +743,7 @@ def handle_author_on_edit(book, author_name, update_stored=True):
     if book.author_sort != sort_authors and update_stored:
         book.author_sort = sort_authors
         change = True
-    return input_authors, change
+    return input_authors, change, renamed


 @editbook.route("/admin/book/<int:book_id>", methods=['GET', 'POST'])
@@ -771,7 +783,7 @@ def edit_book(book_id):
         # handle book title
         title_change = handle_title_on_edit(book, to_save["book_title"])

-        input_authors, authorchange = handle_author_on_edit(book, to_save["author_name"])
+        input_authors, authorchange, renamed = handle_author_on_edit(book, to_save["author_name"])
         if authorchange or title_change:
             edited_books_id = book.id
             modif_date = True
@@ -781,13 +793,15 @@ def edit_book(book_id):

         error = False
         if edited_books_id:
-            error = helper.update_dir_stucture(edited_books_id, config.config_calibre_dir, input_authors[0])
+            error = helper.update_dir_structure(edited_books_id, config.config_calibre_dir, input_authors[0],
+                                                renamed_author=renamed)

         if not error:
             if "cover_url" in to_save:
                 if to_save["cover_url"]:
                     if not current_user.role_upload():
-                        return "", (403)
+                        calibre_db.session.rollback()
+                        return "", 403
                     if to_save["cover_url"].endswith('/static/generic_cover.jpg'):
                         book.has_cover = 0
                     else:
@@ -905,6 +919,18 @@ def prepare_authors_on_upload(title, authr):
     if input_authors == ['']:
         input_authors = [_(u'Unknown')]  # prevent empty Author

+    renamed = list()
+    for in_aut in input_authors:
+        renamed_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == in_aut).first()
+        if renamed_author and in_aut != renamed_author.name:
+            renamed.append(renamed_author.name)
+            all_books = calibre_db.session.query(db.Books) \
+                .filter(db.Books.authors.any(db.Authors.name == renamed_author.name)).all()
+            sorted_renamed_author = helper.get_sorted_author(renamed_author.name)
+            sorted_old_author = helper.get_sorted_author(in_aut)
+            for one_book in all_books:
+                one_book.author_sort = one_book.author_sort.replace(sorted_renamed_author, sorted_old_author)
+
     sort_authors_list = list()
     db_author = None
     for inp in input_authors:
@@ -921,16 +947,16 @@ def prepare_authors_on_upload(title, authr):
             sort_author = stored_author.sort
         sort_authors_list.append(sort_author)
     sort_authors = ' & '.join(sort_authors_list)
-    return sort_authors, input_authors, db_author
+    return sort_authors, input_authors, db_author, renamed


 def create_book_on_upload(modif_date, meta):
     title = meta.title
     authr = meta.author
-    sort_authors, input_authors, db_author = prepare_authors_on_upload(title, authr)
+    sort_authors, input_authors, db_author, renamed_authors = prepare_authors_on_upload(title, authr)

-    title_dir = helper.get_valid_filename(title)
-    author_dir = helper.get_valid_filename(db_author.name)
+    title_dir = helper.get_valid_filename(title, chars=96)
+    author_dir = helper.get_valid_filename(db_author.name, chars=96)

     # combine path and normalize path from windows systems
     path = os.path.join(author_dir, title_dir).replace('\\', '/')
@@ -969,7 +995,7 @@ def create_book_on_upload(modif_date, meta):

     # flush content, get db_book.id available
     calibre_db.session.flush()
-    return db_book, input_authors, title_dir
+    return db_book, input_authors, title_dir, renamed_authors

 def file_handling_on_upload(requested_file):
     # check if file extension is correct
@@ -1001,9 +1027,10 @@ def move_coverfile(meta, db_book):
         coverfile = meta.cover
     else:
         coverfile = os.path.join(constants.STATIC_DIR, 'generic_cover.jpg')
-    new_coverpath = os.path.join(config.config_calibre_dir, db_book.path, "cover.jpg")
+    new_coverpath = os.path.join(config.config_calibre_dir, db_book.path)
    try:
-        copyfile(coverfile, new_coverpath)
+        os.makedirs(new_coverpath, exist_ok=True)
+        copyfile(coverfile, os.path.join(new_coverpath, "cover.jpg"))
         if meta.cover:
             os.unlink(meta.cover)
     except OSError as e:
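Note: the old code copied cover.jpg into a directory that might not exist yet; creating it first with exist_ok=True is idempotent, so the call is safe for both fresh and existing book folders:

    import os, tempfile
    from shutil import copyfile

    base = tempfile.mkdtemp()
    new_coverpath = os.path.join(base, "Author", "Title (1)")
    os.makedirs(new_coverpath, exist_ok=True)   # no error if already present
    src = os.path.join(base, "src.jpg")
    open(src, "wb").close()                     # stand-in cover file
    copyfile(src, os.path.join(new_coverpath, "cover.jpg"))
    print(os.listdir(new_coverpath))            # ['cover.jpg']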
@@ -1031,19 +1058,28 @@ def upload():
             if error:
                 return error

-            db_book, input_authors, title_dir = create_book_on_upload(modif_date, meta)
+            db_book, input_authors, title_dir, renamed_authors = create_book_on_upload(modif_date, meta)

             # Comments needs book id therefore only possible after flush
             modif_date |= edit_book_comments(Markup(meta.description).unescape(), db_book)

             book_id = db_book.id
             title = db_book.title

-            error = helper.update_dir_structure_file(book_id,
-                                                     config.config_calibre_dir,
-                                                     input_authors[0],
-                                                     meta.file_path,
-                                                     title_dir + meta.extension.lower())
+            if config.config_use_google_drive:
+                helper.upload_new_file_gdrive(book_id,
+                                              input_authors[0],
+                                              renamed_authors,
+                                              title,
+                                              title_dir,
+                                              meta.file_path,
+                                              meta.extension.lower())
+            else:
+                error = helper.update_dir_structure(book_id,
+                                                    config.config_calibre_dir,
+                                                    input_authors[0],
+                                                    meta.file_path,
+                                                    title_dir + meta.extension.lower(),
+                                                    renamed_author=renamed_authors)

             move_coverfile(meta, db_book)

@@ -1071,6 +1107,7 @@ def upload():
         flash(_(u"Database error: %(error)s.", error=e), category="error")
         return Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json')

+
 @editbook.route("/admin/book/convert/<int:book_id>", methods=['POST'])
 @login_required_if_no_ano
 @edit_required
@@ -1114,24 +1151,24 @@ def table_get_custom_enum(c_id):
 def edit_list_book(param):
     vals = request.form.to_dict()
     book = calibre_db.get_book(vals['pk'])
-    ret = ""
-    if param =='series_index':
+    # ret = ""
+    if param == 'series_index':
         edit_book_series_index(vals['value'], book)
         ret = Response(json.dumps({'success': True, 'newValue': book.series_index}), mimetype='application/json')
-    elif param =='tags':
+    elif param == 'tags':
         edit_book_tags(vals['value'], book)
         ret = Response(json.dumps({'success': True, 'newValue': ', '.join([tag.name for tag in book.tags])}),
                        mimetype='application/json')
-    elif param =='series':
+    elif param == 'series':
         edit_book_series(vals['value'], book)
         ret = Response(json.dumps({'success': True, 'newValue': ', '.join([serie.name for serie in book.series])}),
                        mimetype='application/json')
-    elif param =='publishers':
+    elif param == 'publishers':
         edit_book_publisher(vals['value'], book)
-        ret = Response(json.dumps({'success': True,
+        ret = Response(json.dumps({'success': True,
                                    'newValue': ', '.join([publisher.name for publisher in book.publishers])}),
                        mimetype='application/json')
-    elif param =='languages':
+    elif param == 'languages':
         invalid = list()
         edit_book_languages(vals['value'], book, invalid=invalid)
         if invalid:
@@ -1142,39 +1179,51 @@ def edit_list_book(param):
         lang_names = list()
         for lang in book.languages:
             lang_names.append(isoLanguages.get_language_name(get_locale(), lang.lang_code))
-        ret = Response(json.dumps({'success': True, 'newValue': ', '.join(lang_names)}),
+        ret = Response(json.dumps({'success': True, 'newValue': ', '.join(lang_names)}),
                        mimetype='application/json')
-    elif param =='author_sort':
+    elif param == 'author_sort':
         book.author_sort = vals['value']
         ret = Response(json.dumps({'success': True, 'newValue': book.author_sort}),
                        mimetype='application/json')
     elif param == 'title':
         sort = book.sort
         handle_title_on_edit(book, vals.get('value', ""))
-        helper.update_dir_stucture(book.id, config.config_calibre_dir)
+        helper.update_dir_structure(book.id, config.config_calibre_dir)
         ret = Response(json.dumps({'success': True, 'newValue': book.title}),
                        mimetype='application/json')
-    elif param =='sort':
+    elif param == 'sort':
         book.sort = vals['value']
         ret = Response(json.dumps({'success': True, 'newValue': book.sort}),
                        mimetype='application/json')
-    elif param =='comments':
+    elif param == 'comments':
         edit_book_comments(vals['value'], book)
         ret = Response(json.dumps({'success': True, 'newValue': book.comments[0].text}),
                        mimetype='application/json')
-    elif param =='authors':
-        input_authors, __ = handle_author_on_edit(book, vals['value'], vals.get('checkA', None) == "true")
-        helper.update_dir_stucture(book.id, config.config_calibre_dir, input_authors[0])
+    elif param == 'authors':
+        input_authors, __, renamed = handle_author_on_edit(book, vals['value'], vals.get('checkA', None) == "true")
+        helper.update_dir_structure(book.id, config.config_calibre_dir, input_authors[0], renamed_author=renamed)
         ret = Response(json.dumps({'success': True,
                                    'newValue': ' & '.join([author.replace('|',',') for author in input_authors])}),
                        mimetype='application/json')
+    elif param == 'is_archived':
+        change_archived_books(book.id, vals['value'] == "True")
+        ret = ""
+    elif param == 'read_status':
+        ret = helper.edit_book_read_status(book.id, vals['value'] == "True")
+        if ret:
+            return ret, 400
     elif param.startswith("custom_column_"):
         new_val = dict()
         new_val[param] = vals['value']
         edit_single_cc_data(book.id, book, param[14:], new_val)
-        ret = Response(json.dumps({'success': True, 'newValue': vals['value']}),
-                       mimetype='application/json')
+        # ToDo: Very hacky find better solution
+        if vals['value'] in ["True", "False"]:
+            ret = ""
+        else:
+            ret = Response(json.dumps({'success': True, 'newValue': vals['value']}),
+                           mimetype='application/json')
     else:
         return _("Parameter not found"), 400
     book.last_modified = datetime.utcnow()
     try:
         calibre_db.session.commit()
@@ -1234,8 +1283,8 @@ def merge_list_book():
     if to_book:
         for file in to_book.data:
             to_file.append(file.format)
-        to_name = helper.get_valid_filename(to_book.title) + ' - ' + \
-                  helper.get_valid_filename(to_book.authors[0].name)
+        to_name = helper.get_valid_filename(to_book.title, chars=96) + ' - ' + \
+                  helper.get_valid_filename(to_book.authors[0].name, chars=96)
     for book_id in vals:
         from_book = calibre_db.get_book(book_id)
         if from_book:
@@ -1257,6 +1306,7 @@ def merge_list_book():
             return json.dumps({'success': True})
     return ""

+
 @editbook.route("/ajax/xchange", methods=['POST'])
 @login_required
 @edit_required
@@ -1267,13 +1317,13 @@ def table_xchange_author_title():
         modif_date = False
         book = calibre_db.get_book(val)
         authors = book.title
-        entries = calibre_db.order_authors(book)
+        book.authors = calibre_db.order_authors([book])
         author_names = []
-        for authr in entries.authors:
+        for authr in book.authors:
             author_names.append(authr.name.replace('|', ','))

         title_change = handle_title_on_edit(book, " ".join(author_names))
-        input_authors, authorchange = handle_author_on_edit(book, authors)
+        input_authors, authorchange, renamed = handle_author_on_edit(book, authors)
         if authorchange or title_change:
             edited_books_id = book.id
             modif_date = True
@@ -1282,7 +1332,8 @@ def table_xchange_author_title():
             gdriveutils.updateGdriveCalibreFromLocal()

         if edited_books_id:
-            helper.update_dir_stucture(edited_books_id, config.config_calibre_dir, input_authors[0])
+            helper.update_dir_structure(edited_books_id, config.config_calibre_dir, input_authors[0],
+                                        renamed_author=renamed)
         if modif_date:
             book.last_modified = datetime.utcnow()
         try:
cps/gdriveutils.py
@@ -35,10 +35,10 @@ except ImportError:
 from sqlalchemy.exc import OperationalError, InvalidRequestError
 from sqlalchemy.sql.expression import text

-try:
-    from six import __version__ as six_version
-except ImportError:
-    six_version = "not installed"
+#try:
+#    from six import __version__ as six_version
+#except ImportError:
+#    six_version = "not installed"
 try:
     from httplib2 import __version__ as httplib2_version
 except ImportError:
@@ -362,16 +362,27 @@ def moveGdriveFolderRemote(origin_file, target_folder):
     children = drive.auth.service.children().list(folderId=previous_parents).execute()
     gFileTargetDir = getFileFromEbooksFolder(None, target_folder)
     if not gFileTargetDir:
         # Folder is not existing, create, and move folder
         gFileTargetDir = drive.CreateFile(
             {'title': target_folder, 'parents': [{"kind": "drive#fileLink", 'id': getEbooksFolderId()}],
              "mimeType": "application/vnd.google-apps.folder"})
         gFileTargetDir.Upload()
-    # Move the file to the new folder
-    drive.auth.service.files().update(fileId=origin_file['id'],
-                                      addParents=gFileTargetDir['id'],
-                                      removeParents=previous_parents,
-                                      fields='id, parents').execute()
+        # Move the file to the new folder
+        drive.auth.service.files().update(fileId=origin_file['id'],
+                                          addParents=gFileTargetDir['id'],
+                                          removeParents=previous_parents,
+                                          fields='id, parents').execute()
+
+    elif gFileTargetDir['title'] != target_folder:
+        # Folder is not existing, create, and move folder
+        drive.auth.service.files().patch(fileId=origin_file['id'],
+                                         body={'title': target_folder},
+                                         fields='title').execute()
+    else:
+        # Move the file to the new folder
+        drive.auth.service.files().update(fileId=origin_file['id'],
+                                          addParents=gFileTargetDir['id'],
+                                          removeParents=previous_parents,
+                                          fields='id, parents').execute()
     # if previous_parents has no children anymore, delete original fileparent
     if len(children['items']) == 1:
         deleteDatabaseEntry(previous_parents)
|
|||
splitDir = destFile.split('/')
|
||||
for i, x in enumerate(splitDir):
|
||||
if i == len(splitDir)-1:
|
||||
existingFiles = drive.ListFile({'q': "title = '%s' and '%s' in parents and trashed = false" %
|
||||
existing_Files = drive.ListFile({'q': "title = '%s' and '%s' in parents and trashed = false" %
|
||||
(x.replace("'", r"\'"), parent['id'])}).GetList()
|
||||
if len(existingFiles) > 0:
|
||||
driveFile = existingFiles[0]
|
||||
if len(existing_Files) > 0:
|
||||
driveFile = existing_Files[0]
|
||||
else:
|
||||
driveFile = drive.CreateFile({'title': x,
|
||||
'parents': [{"kind": "drive#fileLink", 'id': parent['id']}], })
|
||||
driveFile.SetContentFile(f)
|
||||
driveFile.Upload()
|
||||
else:
|
||||
existingFolder = drive.ListFile({'q': "title = '%s' and '%s' in parents and trashed = false" %
|
||||
existing_Folder = drive.ListFile({'q': "title = '%s' and '%s' in parents and trashed = false" %
|
||||
(x.replace("'", r"\'"), parent['id'])}).GetList()
|
||||
if len(existingFolder) == 0:
|
||||
if len(existing_Folder) == 0:
|
||||
parent = drive.CreateFile({'title': x, 'parents': [{"kind": "drive#fileLink", 'id': parent['id']}],
|
||||
"mimeType": "application/vnd.google-apps.folder"})
|
||||
parent.Upload()
|
||||
else:
|
||||
parent = existingFolder[0]
|
||||
parent = existing_Folder[0]
|
||||
|
||||
|
||||
def watchChange(drive, channel_id, channel_type, channel_address,
|
||||
|
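Note: the Drive list queries wrap the title in single quotes, so apostrophes in titles must be escaped before interpolation; that is what x.replace("'", r"\'") is doing. In isolation:

    title, parent_id = "O'Brien", "root"
    q = "title = '%s' and '%s' in parents and trashed = false" % (
        title.replace("'", r"\'"), parent_id)
    print(q)  # title = 'O\'Brien' and 'root' in parents and trashed = false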
@@ -678,5 +689,5 @@ def get_error_text(client_secrets=None):


 def get_versions():
-    return {'six': six_version,
+    return {  # 'six': six_version,
             'httplib2': httplib2_version}
cps/helper.py (309 changes)
@@ -35,6 +35,7 @@ from flask import send_from_directory, make_response, redirect, abort, url_for
 from flask_babel import gettext as _
 from flask_login import current_user
 from sqlalchemy.sql.expression import true, false, and_, text, func
+from sqlalchemy.exc import InvalidRequestError, OperationalError
 from werkzeug.datastructures import Headers
 from werkzeug.security import generate_password_hash
 from markupsafe import escape
@@ -48,7 +49,7 @@ except ImportError:

 from . import calibre_db, cli
 from .tasks.convert import TaskConvert
-from . import logger, config, get_locale, db, ub
+from . import logger, config, get_locale, db, ub, kobo_sync_status
 from . import gdriveutils as gd
 from .constants import STATIC_DIR as _STATIC_DIR
 from .subproc_wrapper import process_wait
@@ -220,7 +221,7 @@ def send_mail(book_id, book_format, convert, kindle_mail, calibrepath, user_id):
     return _(u"The requested file could not be read. Maybe wrong permissions?")


-def get_valid_filename(value, replace_whitespace=True):
+def get_valid_filename(value, replace_whitespace=True, chars=128):
     """
     Returns the given string converted to a string that can be used for a clean
     filename. Limits num characters to 128 max.
@@ -242,7 +243,7 @@ def get_valid_filename(value, replace_whitespace=True):
     value = re.sub(r'[*+:\\\"/<>?]+', u'_', value, flags=re.U)
     # pipe has to be replaced with comma
     value = re.sub(r'[|]+', u',', value, flags=re.U)
-    value = value[:128].strip()
+    value = value[:chars].strip()
     if not value:
         raise ValueError("Filename cannot be empty")
     return value
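Note: the new chars parameter lets callers budget path length: elsewhere in this commit directories are built with chars=96 and the two halves of combined "Title - Author" file names with chars=42, which keeps the joined name comfortably below the previous hard 128-character cap. The truncation step, simplified:

    import re

    def get_valid_filename(value, chars=128):
        value = re.sub(r'[*+:\\"/<>?]+', '_', value)   # simplified character strip
        value = value[:chars].strip()
        if not value:
            raise ValueError("Filename cannot be empty")
        return value

    print(len(get_valid_filename("x" * 300, chars=96)))  # 96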
@@ -289,6 +290,53 @@ def get_sorted_author(value):
         value2 = value
     return value2

+
+def edit_book_read_status(book_id, read_status=None):
+    if not config.config_read_column:
+        book = ub.session.query(ub.ReadBook).filter(and_(ub.ReadBook.user_id == int(current_user.id),
+                                                         ub.ReadBook.book_id == book_id)).first()
+        if book:
+            if read_status is None:
+                if book.read_status == ub.ReadBook.STATUS_FINISHED:
+                    book.read_status = ub.ReadBook.STATUS_UNREAD
+                else:
+                    book.read_status = ub.ReadBook.STATUS_FINISHED
+            else:
+                book.read_status = ub.ReadBook.STATUS_FINISHED if read_status else ub.ReadBook.STATUS_UNREAD
+        else:
+            readBook = ub.ReadBook(user_id=current_user.id, book_id=book_id)
+            readBook.read_status = ub.ReadBook.STATUS_FINISHED
+            book = readBook
+        if not book.kobo_reading_state:
+            kobo_reading_state = ub.KoboReadingState(user_id=current_user.id, book_id=book_id)
+            kobo_reading_state.current_bookmark = ub.KoboBookmark()
+            kobo_reading_state.statistics = ub.KoboStatistics()
+            book.kobo_reading_state = kobo_reading_state
+        ub.session.merge(book)
+        ub.session_commit("Book {} readbit toggled".format(book_id))
+    else:
+        try:
+            calibre_db.update_title_sort(config)
+            book = calibre_db.get_filtered_book(book_id)
+            read_status = getattr(book, 'custom_column_' + str(config.config_read_column))
+            if len(read_status):
+                if read_status is None:
+                    read_status[0].value = not read_status[0].value
+                else:
+                    read_status[0].value = read_status is True
+                calibre_db.session.commit()
+            else:
+                cc_class = db.cc_classes[config.config_read_column]
+                new_cc = cc_class(value=read_status or 1, book=book_id)
+                calibre_db.session.add(new_cc)
+                calibre_db.session.commit()
+        except (KeyError, AttributeError):
+            log.error(u"Custom Column No.%d is not existing in calibre database", config.config_read_column)
+            return "Custom Column No.{} is not existing in calibre database".format(config.config_read_column)
+        except (OperationalError, InvalidRequestError) as e:
+            calibre_db.session.rollback()
+            log.error(u"Read status could not set: {}".format(e))
+            return "Read status could not set: {}".format(e), 400
+    return ""
+
+
 # Deletes a book fro the local filestorage, returns True if deleting is successfull, otherwise false
 def delete_book_file(book, calibrepath, book_format=None):
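Note: the helper's contract is an empty string on success and an error payload otherwise, which is how the read_status branch added to edit_list_book consumes it. A stub mirroring that contract:

    def edit_book_read_status(book_id, read_status):   # stub of the real helper
        if book_id < 0:
            return "Custom Column No.0 is not existing in calibre database"
        return ""

    ret = edit_book_read_status(5, True)
    print((ret, 400) if ret else ("", 200))            # ('', 200)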
@ -331,14 +379,79 @@ def delete_book_file(book, calibrepath, book_format=None):
|
|||
                                           path=book.path)


def clean_author_database(renamed_author, calibre_path="", local_book=None, gdrive=None):
    valid_filename_authors = [get_valid_filename(r, chars=96) for r in renamed_author]
    for r in renamed_author:
        if local_book:
            all_books = [local_book]
        else:
            all_books = calibre_db.session.query(db.Books) \
                .filter(db.Books.authors.any(db.Authors.name == r)).all()
        for book in all_books:
            book_author_path = book.path.split('/')[0]
            if book_author_path in valid_filename_authors or local_book:
                new_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == r).first()
                all_new_authordir = get_valid_filename(new_author.name, chars=96)
                all_titledir = book.path.split('/')[1]
                all_new_path = os.path.join(calibre_path, all_new_authordir, all_titledir)
                all_new_name = get_valid_filename(book.title, chars=42) + ' - ' \
                    + get_valid_filename(new_author.name, chars=42)
                # change location in database to new author/title path
                book.path = os.path.join(all_new_authordir, all_titledir).replace('\\', '/')
                for file_format in book.data:
                    if not gdrive:
                        shutil.move(os.path.normcase(os.path.join(all_new_path,
                                                                  file_format.name + '.' + file_format.format.lower())),
                                    os.path.normcase(os.path.join(all_new_path,
                                                                  all_new_name + '.' + file_format.format.lower())))
                    else:
                        gFile = gd.getFileFromEbooksFolder(all_new_path,
                                                           file_format.name + '.' + file_format.format.lower())
                        if gFile:
                            gd.moveGdriveFileRemote(gFile, all_new_name + u'.' + file_format.format.lower())
                            gd.updateDatabaseOnEdit(gFile['id'], all_new_name + u'.' + file_format.format.lower())
                        else:
                            log.error("File {} not found on gdrive".format(
                                os.path.join(all_new_path, file_format.name + '.' + file_format.format.lower())))
                    file_format.name = all_new_name

def rename_all_authors(first_author, renamed_author, calibre_path="", localbook=None, gdrive=False):
    # Create new_author_dir from parameter or from database
    # Create new title_dir from database and add id
    if first_author:
        new_authordir = get_valid_filename(first_author, chars=96)
        for r in renamed_author:
            new_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == r).first()
            old_author_dir = get_valid_filename(r, chars=96)
            new_author_rename_dir = get_valid_filename(new_author.name, chars=96)
            if gdrive:
                gFile = gd.getFileFromEbooksFolder(None, old_author_dir)
                if gFile:
                    gd.moveGdriveFolderRemote(gFile, new_author_rename_dir)
            else:
                if os.path.isdir(os.path.join(calibre_path, old_author_dir)):
                    try:
                        old_author_path = os.path.join(calibre_path, old_author_dir)
                        new_author_path = os.path.join(calibre_path, new_author_rename_dir)
                        shutil.move(os.path.normcase(old_author_path), os.path.normcase(new_author_path))
                    except OSError as ex:
                        log.error("Rename author from: %s to %s: %s", old_author_path, new_author_path, ex)
                        log.debug(ex, exc_info=True)
                        return _("Rename author from: '%(src)s' to '%(dest)s' failed with error: %(error)s",
                                 src=old_author_path, dest=new_author_path, error=str(ex))
    else:
        new_authordir = get_valid_filename(localbook.authors[0].name, chars=96)
    return new_authordir

# Moves files in file storage during author/title rename, or from temp dir to file storage
def update_dir_structure_file(book_id, calibrepath, first_author, orignal_filepath, db_filename):
def update_dir_structure_file(book_id, calibre_path, first_author, original_filepath, db_filename, renamed_author):
    # get book database entry from id, if original path overwrite source with original_filepath
    localbook = calibre_db.get_book(book_id)
    if orignal_filepath:
        path = orignal_filepath
    if original_filepath:
        path = original_filepath
    else:
        path = os.path.join(calibrepath, localbook.path)
        path = os.path.join(calibre_path, localbook.path)

    # Create (current) authordir and titledir from database
    authordir = localbook.path.split('/')[0]
@@ -346,106 +459,130 @@ def update_dir_structure_file(book_id, calibrepath, first_author, orignal_filepa
    # Create new_authordir from parameter or from database
    # Create new titledir from database and add id
    new_authordir = rename_all_authors(first_author, renamed_author, calibre_path, localbook)
    if first_author:
        new_authordir = get_valid_filename(first_author)
    else:
        new_authordir = get_valid_filename(localbook.authors[0].name)
    new_titledir = get_valid_filename(localbook.title) + " (" + str(book_id) + ")"
    if first_author.lower() in [r.lower() for r in renamed_author]:
        if os.path.isdir(os.path.join(calibre_path, new_authordir)):
            path = os.path.join(calibre_path, new_authordir, titledir)

    if titledir != new_titledir or authordir != new_authordir or orignal_filepath:
        new_path = os.path.join(calibrepath, new_authordir, new_titledir)
        new_name = get_valid_filename(localbook.title) + ' - ' + get_valid_filename(new_authordir)
        try:
            if orignal_filepath:
                if not os.path.isdir(new_path):
                    os.makedirs(new_path)
                shutil.move(os.path.normcase(path), os.path.normcase(os.path.join(new_path, db_filename)))
                log.debug("Moving title: %s to %s/%s", path, new_path, new_name)
            # Check new path is not valid path
            else:
                if not os.path.exists(new_path):
                    # move original path to new path
                    log.debug("Moving title: %s to %s", path, new_path)
                    shutil.move(os.path.normcase(path), os.path.normcase(new_path))
                else:  # path is valid, copy only files to new location (merge)
                    log.info("Moving title: %s into existing: %s", path, new_path)
                    # Take all files and subfolders from old path (strange command)
                    for dir_name, __, file_list in os.walk(path):
                        for file in file_list:
                            shutil.move(os.path.normcase(os.path.join(dir_name, file)),
                                        os.path.normcase(os.path.join(new_path + dir_name[len(path):], file)))
                            # os.unlink(os.path.normcase(os.path.join(dir_name, file)))
            # change location in database to new author/title path
            localbook.path = os.path.join(new_authordir, new_titledir).replace('\\', '/')
        except OSError as ex:
            log.error("Rename title from: %s to %s: %s", path, new_path, ex)
            log.debug(ex, exc_info=True)
            return _("Rename title from: '%(src)s' to '%(dest)s' failed with error: %(error)s",
                     src=path, dest=new_path, error=str(ex))
    new_titledir = get_valid_filename(localbook.title, chars=96) + " (" + str(book_id) + ")"

    # Rename all files from old names to new names
    try:
        for file_format in localbook.data:
            shutil.move(os.path.normcase(
                os.path.join(new_path, file_format.name + '.' + file_format.format.lower())),
                os.path.normcase(os.path.join(new_path, new_name + '.' + file_format.format.lower())))
            file_format.name = new_name
        if not orignal_filepath and len(os.listdir(os.path.dirname(path))) == 0:
            shutil.rmtree(os.path.dirname(path))
    except OSError as ex:
        log.error("Rename file in path %s to %s: %s", new_path, new_name, ex)
        log.debug(ex, exc_info=True)
        return _("Rename file in path '%(src)s' to '%(dest)s' failed with error: %(error)s",
                 src=new_path, dest=new_name, error=str(ex))
    return False
    if titledir != new_titledir or authordir != new_authordir or original_filepath:
        error = move_files_on_change(calibre_path,
                                     new_authordir,
                                     new_titledir,
                                     localbook,
                                     db_filename,
                                     original_filepath,
                                     path)
        if error:
            return error

def update_dir_structure_gdrive(book_id, first_author):
    # Rename all files from old names to new names
    return rename_files_on_change(first_author, renamed_author, localbook, original_filepath, path, calibre_path)

def upload_new_file_gdrive(book_id, first_author, renamed_author, title, title_dir, original_filepath, filename_ext):
    error = False
    book = calibre_db.get_book(book_id)
    file_name = get_valid_filename(title, chars=42) + ' - ' + \
        get_valid_filename(first_author, chars=42) + \
        filename_ext
    rename_all_authors(first_author, renamed_author, gdrive=True)
    gdrive_path = os.path.join(get_valid_filename(first_author, chars=96),
                               title_dir + " (" + str(book_id) + ")")
    book.path = gdrive_path.replace("\\", "/")
    gd.uploadFileToEbooksFolder(os.path.join(gdrive_path, file_name).replace("\\", "/"), original_filepath)
    error |= rename_files_on_change(first_author, renamed_author, localbook=book, gdrive=True)
    return error

def update_dir_structure_gdrive(book_id, first_author, renamed_author):
    error = False
    book = calibre_db.get_book(book_id)
    path = book.path

    authordir = book.path.split('/')[0]
    if first_author:
        new_authordir = get_valid_filename(first_author)
    else:
        new_authordir = get_valid_filename(book.authors[0].name)
    titledir = book.path.split('/')[1]
    new_titledir = get_valid_filename(book.title) + u" (" + str(book_id) + u")"
    new_authordir = rename_all_authors(first_author, renamed_author, gdrive=True)
    new_titledir = get_valid_filename(book.title, chars=96) + u" (" + str(book_id) + u")"

    if titledir != new_titledir:
        gFile = gd.getFileFromEbooksFolder(os.path.dirname(book.path), titledir)
        if gFile:
            gFile['title'] = new_titledir
            gFile.Upload()
            gd.moveGdriveFileRemote(gFile, new_titledir)
            book.path = book.path.split('/')[0] + u'/' + new_titledir
            path = book.path
            gd.updateDatabaseOnEdit(gFile['id'], book.path)  # only child folder affected
        else:
            error = _(u'File %(file)s not found on Google Drive', file=book.path)  # file not found

    if authordir != new_authordir:
    if authordir != new_authordir and authordir not in renamed_author:
        gFile = gd.getFileFromEbooksFolder(os.path.dirname(book.path), new_titledir)
        if gFile:
            gd.moveGdriveFolderRemote(gFile, new_authordir)
            book.path = new_authordir + u'/' + book.path.split('/')[1]
            path = book.path
            gd.updateDatabaseOnEdit(gFile['id'], book.path)
        else:
            error = _(u'File %(file)s not found on Google Drive', file=authordir)  # file not found
    # Rename all files from old names to new names

    if authordir != new_authordir or titledir != new_titledir:
        new_name = get_valid_filename(book.title) + u' - ' + get_valid_filename(new_authordir)
        for file_format in book.data:
            gFile = gd.getFileFromEbooksFolder(path, file_format.name + u'.' + file_format.format.lower())
            if not gFile:
                error = _(u'File %(file)s not found on Google Drive', file=file_format.name)  # file not found
                break
            gd.moveGdriveFileRemote(gFile, new_name + u'.' + file_format.format.lower())
            file_format.name = new_name
    # change location in database to new author/title path
    book.path = os.path.join(new_authordir, new_titledir).replace('\\', '/')
    error |= rename_files_on_change(first_author, renamed_author, book, gdrive=True)
    return error

def move_files_on_change(calibre_path, new_authordir, new_titledir, localbook, db_filename, original_filepath, path):
    new_path = os.path.join(calibre_path, new_authordir, new_titledir)
    new_name = get_valid_filename(localbook.title, chars=96) + ' - ' + new_authordir
    try:
        if original_filepath:
            if not os.path.isdir(new_path):
                os.makedirs(new_path)
            shutil.move(os.path.normcase(original_filepath), os.path.normcase(os.path.join(new_path, db_filename)))
            log.debug("Moving title: %s to %s/%s", original_filepath, new_path, new_name)
        else:
            # Check new path is not valid path
            if not os.path.exists(new_path):
                # move original path to new path
                log.debug("Moving title: %s to %s", path, new_path)
                shutil.move(os.path.normcase(path), os.path.normcase(new_path))
            else:  # path is valid, copy only files to new location (merge)
                log.info("Moving title: %s into existing: %s", path, new_path)
                # Take all files and subfolders from old path (strange command)
                for dir_name, __, file_list in os.walk(path):
                    for file in file_list:
                        shutil.move(os.path.normcase(os.path.join(dir_name, file)),
                                    os.path.normcase(os.path.join(new_path + dir_name[len(path):], file)))
        # change location in database to new author/title path
        localbook.path = os.path.join(new_authordir, new_titledir).replace('\\', '/')
    except OSError as ex:
        log.error("Rename title from: %s to %s: %s", path, new_path, ex)
        log.debug(ex, exc_info=True)
        return _("Rename title from: '%(src)s' to '%(dest)s' failed with error: %(error)s",
                 src=path, dest=new_path, error=str(ex))
    return False

def rename_files_on_change(first_author,
                           renamed_author,
                           localbook,
                           orignal_filepath="",
                           path="",
                           calibre_path="",
                           gdrive=False):
    # Rename all files from old names to new names
    try:
        clean_author_database(renamed_author, calibre_path, gdrive=gdrive)
        if first_author and first_author not in renamed_author:
            clean_author_database([first_author], calibre_path, localbook, gdrive)
        if not gdrive and not renamed_author and not orignal_filepath and len(os.listdir(os.path.dirname(path))) == 0:
            shutil.rmtree(os.path.dirname(path))
    except (OSError, FileNotFoundError) as ex:
        log.error("Error in rename file in path %s", ex)
        log.debug(ex, exc_info=True)
        return _("Error in rename file in path: %(error)s", error=str(ex))
    return False

def delete_book_gdrive(book, book_format):
    error = None
    if book_format:
@@ -524,11 +661,21 @@ def valid_email(email):
# ################################# External interface #################################


def update_dir_stucture(book_id, calibrepath, first_author=None, orignal_filepath=None, db_filename=None):
def update_dir_structure(book_id,
                         calibre_path,
                         first_author=None,  # change author of book to this author
                         original_filepath=None,
                         db_filename=None,
                         renamed_author=None):
    renamed_author = renamed_author or []
    if config.config_use_google_drive:
        return update_dir_structure_gdrive(book_id, first_author)
        return update_dir_structure_gdrive(book_id, first_author, renamed_author)
    else:
        return update_dir_structure_file(book_id, calibrepath, first_author, orignal_filepath, db_filename)
        return update_dir_structure_file(book_id,
                                         calibre_path,
                                         first_author,
                                         original_filepath,
                                         db_filename, renamed_author)


def delete_book(book, calibrepath, book_format):
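The renamed update_dir_structure above is the single entry point that dispatches to either the Google Drive or the local-filesystem implementation. A minimal sketch of a call site, assuming the cps helper module is importable; the book id, library path, and author names are invented illustration values, not from this commit:

import cps.helper as helper

# "Old Author" -> "New Author" is a made-up rename; renamed_author lists the
# author names whose directories must be moved along with the book.
error = helper.update_dir_structure(book_id=42,
                                    calibre_path="/books",
                                    first_author="New Author",
                                    renamed_author=["Old Author"])
if error:
    print("Rename failed:", error)  # error is a localized message string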
21
cps/kobo.py
@@ -129,7 +129,7 @@ def convert_to_kobo_timestamp_string(timestamp):
        return timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")
    except AttributeError as exc:
        log.debug("Timestamp not valid: {}".format(exc))
        return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
        return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")


@kobo.route("/v1/library/sync")
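The switch from now() to utcnow() matters because the formatted string is suffixed with "Z", which declares UTC. A quick standard-library illustration of the difference:

import datetime

local = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
utc = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
# On any host whose clock is not set to UTC the two strings differ by the
# zone offset, yet both claim "Z" - only the utcnow() variant is honest.
print(local, utc)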
@@ -395,7 +395,7 @@ def create_book_entitlement(book, archived):
    book_uuid = str(book.uuid)
    return {
        "Accessibility": "Full",
        "ActivePeriod": {"From": convert_to_kobo_timestamp_string(datetime.datetime.now())},
        "ActivePeriod": {"From": convert_to_kobo_timestamp_string(datetime.datetime.utcnow())},
        "Created": convert_to_kobo_timestamp_string(book.timestamp),
        "CrossRevisionId": book_uuid,
        "Id": book_uuid,
@@ -943,26 +943,15 @@ def TopLevelEndpoint():
@kobo.route("/v1/library/<book_uuid>", methods=["DELETE"])
|
||||
@requires_kobo_auth
|
||||
def HandleBookDeletionRequest(book_uuid):
|
||||
log.info("Kobo book deletion request received for book %s" % book_uuid)
|
||||
log.info("Kobo book delete request received for book %s" % book_uuid)
|
||||
book = calibre_db.get_book_by_uuid(book_uuid)
|
||||
if not book:
|
||||
log.info(u"Book %s not found in database", book_uuid)
|
||||
return redirect_or_proxy_request()
|
||||
|
||||
book_id = book.id
|
||||
archived_book = (
|
||||
ub.session.query(ub.ArchivedBook)
|
||||
.filter(ub.ArchivedBook.book_id == book_id)
|
||||
.first()
|
||||
)
|
||||
if not archived_book:
|
||||
archived_book = ub.ArchivedBook(user_id=current_user.id, book_id=book_id)
|
||||
archived_book.is_archived = True
|
||||
archived_book.last_modified = datetime.datetime.utcnow()
|
||||
|
||||
ub.session.merge(archived_book)
|
||||
ub.session_commit()
|
||||
if archived_book.is_archived:
|
||||
is_archived = kobo_sync_status.change_archived_books(book_id, True)
|
||||
if is_archived:
|
||||
kobo_sync_status.remove_synced_book(book_id)
|
||||
return "", 204
|
||||
|
||||
|
|
|
@@ -58,7 +58,7 @@ def change_archived_books(book_id, state=None, message=None):
        archived_book = ub.ArchivedBook(user_id=current_user.id, book_id=book_id)

    archived_book.is_archived = state if state else not archived_book.is_archived
    archived_book.last_modified = datetime.datetime.utcnow()
    archived_book.last_modified = datetime.datetime.utcnow()  # toDo. Check utc timestamp

    ub.session.merge(archived_book)
    ub.session_commit(message)
122
cps/metadata_provider/amazon.py
Normal file
@@ -0,0 +1,122 @@
# -*- coding: utf-8 -*-

# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
# Copyright (C) 2022 quarz12
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import concurrent.futures
import requests
from bs4 import BeautifulSoup as BS  # requirement

try:
    import cchardet  # optional, for better speed
except ImportError:
    pass
from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata
# from time import time
from operator import itemgetter


class Amazon(Metadata):
    __name__ = "Amazon"
    __id__ = "amazon"
    headers = {'upgrade-insecure-requests': '1',
               'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
               'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
               'sec-gpc': '1',
               'sec-fetch-site': 'none',
               'sec-fetch-mode': 'navigate',
               'sec-fetch-user': '?1',
               'sec-fetch-dest': 'document',
               'accept-encoding': 'gzip, deflate, br',
               'accept-language': 'en-US,en;q=0.9'}
    session = requests.Session()
    session.headers = headers

    def search(
        self, query: str, generic_cover: str = "", locale: str = "en"
    ):
        # timer=time()
        def inner(link, index) -> [dict, int]:
            with self.session as session:
                r = session.get(f"https://www.amazon.com/{link}")
                r.raise_for_status()
                long_soup = BS(r.text, "lxml")  # ~4sec :/
                soup2 = long_soup.find("div", attrs={"cel_widget_id": "dpx-books-ppd_csm_instrumentation_wrapper"})
                if soup2 is None:
                    return
                try:
                    match = MetaRecord(
                        title="",
                        authors="",
                        source=MetaSourceInfo(
                            id=self.__id__,
                            description="Amazon Books",
                            link="https://amazon.com/"
                        ),
                        url=f"https://www.amazon.com/{link}",
                        # the more searches the slower; these are too hard to find in reasonable time or might not even exist
                        publisher="",  # very unreliable
                        publishedDate="",  # very unreliable
                        id=None,  # ?
                        tags=[]  # don't exist on amazon
                    )

                    try:
                        match.description = "\n".join(
                            soup2.find("div", attrs={"data-feature-name": "bookDescription"}).stripped_strings)\
                            .replace("\xa0", " ")[:-9].strip().strip("\n")
                    except (AttributeError, TypeError):
                        return None  # if there is no description it is not a book and therefore should be ignored
                    try:
                        match.title = soup2.find("span", attrs={"id": "productTitle"}).text
                    except (AttributeError, TypeError):
                        match.title = ""
                    try:
                        match.authors = [next(
                            filter(lambda i: i != " " and i != "\n" and not i.startswith("{"),
                                   x.findAll(text=True))).strip()
                            for x in soup2.findAll("span", attrs={"class": "author"})]
                    except (AttributeError, TypeError, StopIteration):
                        match.authors = ""
                    try:
                        match.rating = int(
                            soup2.find("span", class_="a-icon-alt").text.split(" ")[0].split(".")[
                                0])  # first number in string
                    except (AttributeError, ValueError):
                        match.rating = 0
                    try:
                        match.cover = soup2.find("img", attrs={"class": "a-dynamic-image frontImage"})["src"]
                    except (AttributeError, TypeError):
                        match.cover = ""
                    return match, index
                except Exception as e:
                    print(e)
                    return

        val = list()
        if self.active:
            results = self.session.get(
                f"https://www.amazon.com/s?k={query.replace(' ', '+')}&i=digital-text&sprefix={query.replace(' ', '+')}"
                f"%2Cdigital-text&ref=nb_sb_noss",
                headers=self.headers)
            results.raise_for_status()
            soup = BS(results.text, 'html.parser')
            links_list = [next(filter(lambda i: "digital-text" in i["href"], x.findAll("a")))["href"] for x in
                          soup.findAll("div", attrs={"data-component-type": "s-search-result"})]
            with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
                fut = {executor.submit(inner, link, index) for index, link in enumerate(links_list[:5])}
                val = list(map(lambda x: x.result(), concurrent.futures.as_completed(fut)))
        result = list(filter(lambda x: x, val))
        return [x[0] for x in sorted(result, key=itemgetter(1))]  # sort by amazon's listing order for best relevance
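A hedged usage sketch for the new provider, assuming the module can be imported outside the Flask app and that Amazon serves the markup the scraper expects; the query string is an arbitrary example:

from cps.metadata_provider.amazon import Amazon

provider = Amazon()
# search() returns MetaRecord objects, already sorted by Amazon's listing order
for record in provider.search("dune frank herbert"):
    print(record.title, record.url)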
@@ -17,49 +17,68 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# ComicVine api document: https://comicvine.gamespot.com/api/documentation
from typing import Dict, List, Optional
from urllib.parse import quote

import requests
from cps.services.Metadata import Metadata
from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata


class ComicVine(Metadata):
    __name__ = "ComicVine"
    __id__ = "comicvine"
    DESCRIPTION = "ComicVine Books"
    META_URL = "https://comicvine.gamespot.com/"
    API_KEY = "57558043c53943d5d1e96a9ad425b0eb85532ee6"
    BASE_URL = (
        f"https://comicvine.gamespot.com/api/search?api_key={API_KEY}"
        f"&resources=issue&query="
    )
    QUERY_PARAMS = "&sort=name:desc&format=json"
    HEADERS = {"User-Agent": "Not Evil Browser"}

    def search(self, query, generic_cover=""):
    def search(
        self, query: str, generic_cover: str = "", locale: str = "en"
    ) -> Optional[List[MetaRecord]]:
        val = list()
        apikey = "57558043c53943d5d1e96a9ad425b0eb85532ee6"
        if self.active:
            headers = {
                'User-Agent': 'Not Evil Browser'
            }

            result = requests.get("https://comicvine.gamespot.com/api/search?api_key="
                                  + apikey + "&resources=issue&query=" + query + "&sort=name:desc&format=json", headers=headers)
            for r in result.json().get('results'):
                seriesTitle = r['volume'].get('name', "")
                if r.get('store_date'):
                    dateFomers = r.get('store_date')
                else:
                    dateFomers = r.get('date_added')
                v = dict()
                v['id'] = r['id']
                v['title'] = seriesTitle + " #" + r.get('issue_number', "0") + " - " + (r.get('name', "") or "")
                v['authors'] = r.get('authors', [])
                v['description'] = r.get('description', "")
                v['publisher'] = ""
                v['publishedDate'] = dateFomers
                v['tags'] = ["Comics", seriesTitle]
                v['rating'] = 0
                v['series'] = seriesTitle
                v['cover'] = r['image'].get('original_url')
                v['source'] = {
                    "id": self.__id__,
                    "description": "ComicVine Books",
                    "link": "https://comicvine.gamespot.com/"
                }
                v['url'] = r.get('site_detail_url', "")
                val.append(v)
            title_tokens = list(self.get_title_tokens(query, strip_joiners=False))
            if title_tokens:
                tokens = [quote(t.encode("utf-8")) for t in title_tokens]
                query = "%20".join(tokens)
            result = requests.get(
                f"{ComicVine.BASE_URL}{query}{ComicVine.QUERY_PARAMS}",
                headers=ComicVine.HEADERS,
            )
            for result in result.json()["results"]:
                match = self._parse_search_result(
                    result=result, generic_cover=generic_cover, locale=locale
                )
                val.append(match)
        return val

    def _parse_search_result(
        self, result: Dict, generic_cover: str, locale: str
    ) -> MetaRecord:
        series = result["volume"].get("name", "")
        series_index = result.get("issue_number", 0)
        issue_name = result.get("name", "")
        match = MetaRecord(
            id=result["id"],
            title=f"{series}#{series_index} - {issue_name}",
            authors=result.get("authors", []),
            url=result.get("site_detail_url", ""),
            source=MetaSourceInfo(
                id=self.__id__,
                description=ComicVine.DESCRIPTION,
                link=ComicVine.META_URL,
            ),
            series=series,
        )
        match.cover = result["image"].get("original_url", generic_cover)
        match.description = result.get("description", "")
        match.publishedDate = result.get("store_date", result.get("date_added"))
        match.series_index = series_index
        match.tags = ["Comics", series]
        match.identifiers = {"comicvine": match.id}
        return match
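The class-level constants above assemble the request URL once instead of concatenating strings on every call. The equivalent raw request, sketched with plain requests; the query value is an invented example and the key is the one shipped in this file:

import requests

API_KEY = "57558043c53943d5d1e96a9ad425b0eb85532ee6"
url = (f"https://comicvine.gamespot.com/api/search?api_key={API_KEY}"
       f"&resources=issue&query=sandman&sort=name:desc&format=json")
response = requests.get(url, headers={"User-Agent": "Not Evil Browser"})
for result in response.json()["results"][:3]:
    # volume name and issue number are the pieces _parse_search_result joins into a title
    print(result["volume"]["name"], result.get("issue_number"))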
@@ -17,39 +17,93 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# Google Books api document: https://developers.google.com/books/docs/v1/using

from typing import Dict, List, Optional
from urllib.parse import quote

import requests
from cps.services.Metadata import Metadata

from cps.isoLanguages import get_lang3, get_language_name
from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata


class Google(Metadata):
    __name__ = "Google"
    __id__ = "google"
    DESCRIPTION = "Google Books"
    META_URL = "https://books.google.com/"
    BOOK_URL = "https://books.google.com/books?id="
    SEARCH_URL = "https://www.googleapis.com/books/v1/volumes?q="
    ISBN_TYPE = "ISBN_13"

    def search(self, query, generic_cover=""):
    def search(
        self, query: str, generic_cover: str = "", locale: str = "en"
    ) -> Optional[List[MetaRecord]]:
        val = list()
        if self.active:
            val = list()
            result = requests.get("https://www.googleapis.com/books/v1/volumes?q=" + query.replace(" ", "+"))
            for r in result.json().get('items'):
                v = dict()
                v['id'] = r['id']
                v['title'] = r['volumeInfo'].get('title', "")
                v['authors'] = r['volumeInfo'].get('authors', [])
                v['description'] = r['volumeInfo'].get('description', "")
                v['publisher'] = r['volumeInfo'].get('publisher', "")
                v['publishedDate'] = r['volumeInfo'].get('publishedDate', "")
                v['tags'] = r['volumeInfo'].get('categories', [])
                v['rating'] = r['volumeInfo'].get('averageRating', 0)
                if r['volumeInfo'].get('imageLinks'):
                    v['cover'] = r['volumeInfo']['imageLinks']['thumbnail'].replace("http://", "https://")
                else:
                    v['cover'] = "/../../../static/generic_cover.jpg"
                v['source'] = {
                    "id": self.__id__,
                    "description": "Google Books",
                    "link": "https://books.google.com/"}
                v['url'] = "https://books.google.com/books?id=" + r['id']
                val.append(v)
            return val

            title_tokens = list(self.get_title_tokens(query, strip_joiners=False))
            if title_tokens:
                tokens = [quote(t.encode("utf-8")) for t in title_tokens]
                query = "+".join(tokens)
            results = requests.get(Google.SEARCH_URL + query)
            for result in results.json()["items"]:
                val.append(
                    self._parse_search_result(
                        result=result, generic_cover=generic_cover, locale=locale
                    )
                )
        return val

    def _parse_search_result(
        self, result: Dict, generic_cover: str, locale: str
    ) -> MetaRecord:
        match = MetaRecord(
            id=result["id"],
            title=result["volumeInfo"]["title"],
            authors=result["volumeInfo"].get("authors", []),
            url=Google.BOOK_URL + result["id"],
            source=MetaSourceInfo(
                id=self.__id__,
                description=Google.DESCRIPTION,
                link=Google.META_URL,
            ),
        )

        match.cover = self._parse_cover(result=result, generic_cover=generic_cover)
        match.description = result["volumeInfo"].get("description", "")
        match.languages = self._parse_languages(result=result, locale=locale)
        match.publisher = result["volumeInfo"].get("publisher", "")
        match.publishedDate = result["volumeInfo"].get("publishedDate", "")
        match.rating = result["volumeInfo"].get("averageRating", 0)
        match.series, match.series_index = "", 1
        match.tags = result["volumeInfo"].get("categories", [])

        match.identifiers = {"google": match.id}
        match = self._parse_isbn(result=result, match=match)
        return match

    @staticmethod
    def _parse_isbn(result: Dict, match: MetaRecord) -> MetaRecord:
        identifiers = result["volumeInfo"].get("industryIdentifiers", [])
        for identifier in identifiers:
            if identifier.get("type") == Google.ISBN_TYPE:
                match.identifiers["isbn"] = identifier.get("identifier")
                break
        return match

    @staticmethod
    def _parse_cover(result: Dict, generic_cover: str) -> str:
        if result["volumeInfo"].get("imageLinks"):
            cover_url = result["volumeInfo"]["imageLinks"]["thumbnail"]
            return cover_url.replace("http://", "https://")
        return generic_cover

    @staticmethod
    def _parse_languages(result: Dict, locale: str) -> List[str]:
        language_iso2 = result["volumeInfo"].get("language", "")
        languages = (
            [get_language_name(locale, get_lang3(language_iso2))]
            if language_iso2
            else []
        )
        return languages
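_parse_isbn keeps only the ISBN_13 identifier out of the list Google returns. The selection logic in isolation, run against a fabricated volumeInfo payload shaped like the real response:

result = {"volumeInfo": {"industryIdentifiers": [
    {"type": "ISBN_10", "identifier": "0441013597"},
    {"type": "ISBN_13", "identifier": "9780441013593"},
]}}

# pick the first ISBN_13 entry, or None if the list has none
isbn = next((i["identifier"]
             for i in result["volumeInfo"].get("industryIdentifiers", [])
             if i.get("type") == "ISBN_13"), None)
print(isbn)  # 9780441013593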
337
cps/metadata_provider/lubimyczytac.py
Normal file
@@ -0,0 +1,337 @@
# -*- coding: utf-8 -*-
# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
# Copyright (C) 2021 OzzieIsaacs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import json
import re
from multiprocessing.pool import ThreadPool
from typing import List, Optional, Tuple, Union
from urllib.parse import quote

import requests
from dateutil import parser
from html2text import HTML2Text
from lxml.html import HtmlElement, fromstring, tostring
from markdown2 import Markdown

from cps.isoLanguages import get_language_name
from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata

SYMBOLS_TO_TRANSLATE = (
    "öÖüÜóÓőŐúÚéÉáÁűŰíÍąĄćĆęĘłŁńŃóÓśŚźŹżŻ",
    "oOuUoOoOuUeEaAuUiIaAcCeElLnNoOsSzZzZ",
)
SYMBOL_TRANSLATION_MAP = dict(
    [(ord(a), ord(b)) for (a, b) in zip(*SYMBOLS_TO_TRANSLATE)]
)


def get_int_or_float(value: str) -> Union[int, float]:
    number_as_float = float(value)
    number_as_int = int(number_as_float)
    return number_as_int if number_as_float == number_as_int else number_as_float


def strip_accents(s: Optional[str]) -> Optional[str]:
    return s.translate(SYMBOL_TRANSLATION_MAP) if s is not None else s


def sanitize_comments_html(html: str) -> str:
    text = html2text(html)
    md = Markdown()
    html = md.convert(text)
    return html


def html2text(html: str) -> str:
    # replace <u> tags with <span> as <u> becomes emphasis in html2text
    if isinstance(html, bytes):
        html = html.decode("utf-8")
    html = re.sub(
        r"<\s*(?P<solidus>/?)\s*[uU]\b(?P<rest>[^>]*)>",
        r"<\g<solidus>span\g<rest>>",
        html,
    )
    h2t = HTML2Text()
    h2t.body_width = 0
    h2t.single_line_break = True
    h2t.emphasis_mark = "*"
    return h2t.handle(html)


class LubimyCzytac(Metadata):
    __name__ = "LubimyCzytac.pl"
    __id__ = "lubimyczytac"

    BASE_URL = "https://lubimyczytac.pl"

    BOOK_SEARCH_RESULT_XPATH = (
        "*//div[@class='listSearch']//div[@class='authorAllBooks__single']"
    )
    SINGLE_BOOK_RESULT_XPATH = ".//div[contains(@class,'authorAllBooks__singleText')]"
    TITLE_PATH = "/div/a[contains(@class,'authorAllBooks__singleTextTitle')]"
    TITLE_TEXT_PATH = f"{TITLE_PATH}//text()"
    URL_PATH = f"{TITLE_PATH}/@href"
    AUTHORS_PATH = "/div/a[contains(@href,'autor')]//text()"

    SIBLINGS = "/following-sibling::dd"

    CONTAINER = "//section[@class='container book']"
    PUBLISHER = f"{CONTAINER}//dt[contains(text(),'Wydawnictwo:')]{SIBLINGS}/a/text()"
    LANGUAGES = f"{CONTAINER}//dt[contains(text(),'Język:')]{SIBLINGS}/text()"
    DESCRIPTION = f"{CONTAINER}//div[@class='collapse-content']"
    SERIES = f"{CONTAINER}//span/a[contains(@href,'/cykl/')]/text()"

    DETAILS = "//div[@id='book-details']"
    PUBLISH_DATE = "//dt[contains(@title,'Data pierwszego wydania"
    FIRST_PUBLISH_DATE = f"{DETAILS}{PUBLISH_DATE} oryginalnego')]{SIBLINGS}[1]/text()"
    FIRST_PUBLISH_DATE_PL = f"{DETAILS}{PUBLISH_DATE} polskiego')]{SIBLINGS}[1]/text()"
    TAGS = "//nav[@aria-label='breadcrumb']//a[contains(@href,'/ksiazki/k/')]/text()"

    RATING = "//meta[@property='books:rating:value']/@content"
    COVER = "//meta[@property='og:image']/@content"
    ISBN = "//meta[@property='books:isbn']/@content"
    META_TITLE = "//meta[@property='og:description']/@content"

    SUMMARY = "//script[@type='application/ld+json']//text()"

    def search(
        self, query: str, generic_cover: str = "", locale: str = "en"
    ) -> Optional[List[MetaRecord]]:
        if self.active:
            result = requests.get(self._prepare_query(title=query))
            root = fromstring(result.text)
            lc_parser = LubimyCzytacParser(root=root, metadata=self)
            matches = lc_parser.parse_search_results()
            if matches:
                with ThreadPool(processes=10) as pool:
                    final_matches = pool.starmap(
                        lc_parser.parse_single_book,
                        [(match, generic_cover, locale) for match in matches],
                    )
                return final_matches
            return matches

    def _prepare_query(self, title: str) -> str:
        query = ""
        characters_to_remove = "\?()\/"
        pattern = "[" + characters_to_remove + "]"
        title = re.sub(pattern, "", title)
        title = title.replace("_", " ")
        if '"' in title or ",," in title:
            title = title.split('"')[0].split(",,")[0]

        if "/" in title:
            title_tokens = [
                token for token in title.lower().split(" ") if len(token) > 1
            ]
        else:
            title_tokens = list(self.get_title_tokens(title, strip_joiners=False))
        if title_tokens:
            tokens = [quote(t.encode("utf-8")) for t in title_tokens]
            query = query + "%20".join(tokens)
        if not query:
            return ""
        return f"{LubimyCzytac.BASE_URL}/szukaj/ksiazki?phrase={query}"


class LubimyCzytacParser:
    PAGES_TEMPLATE = "<p id='strony'>Książka ma {0} stron(y).</p>"
    PUBLISH_DATE_TEMPLATE = "<p id='pierwsze_wydanie'>Data pierwszego wydania: {0}</p>"
    PUBLISH_DATE_PL_TEMPLATE = (
        "<p id='pierwsze_wydanie'>Data pierwszego wydania w Polsce: {0}</p>"
    )

    def __init__(self, root: HtmlElement, metadata: Metadata) -> None:
        self.root = root
        self.metadata = metadata

    def parse_search_results(self) -> List[MetaRecord]:
        matches = []
        results = self.root.xpath(LubimyCzytac.BOOK_SEARCH_RESULT_XPATH)
        for result in results:
            title = self._parse_xpath_node(
                root=result,
                xpath=f"{LubimyCzytac.SINGLE_BOOK_RESULT_XPATH}"
                f"{LubimyCzytac.TITLE_TEXT_PATH}",
            )

            book_url = self._parse_xpath_node(
                root=result,
                xpath=f"{LubimyCzytac.SINGLE_BOOK_RESULT_XPATH}"
                f"{LubimyCzytac.URL_PATH}",
            )
            authors = self._parse_xpath_node(
                root=result,
                xpath=f"{LubimyCzytac.SINGLE_BOOK_RESULT_XPATH}"
                f"{LubimyCzytac.AUTHORS_PATH}",
                take_first=False,
            )
            if not all([title, book_url, authors]):
                continue
            matches.append(
                MetaRecord(
                    id=book_url.replace(f"/ksiazka/", "").split("/")[0],
                    title=title,
                    authors=[strip_accents(author) for author in authors],
                    url=LubimyCzytac.BASE_URL + book_url,
                    source=MetaSourceInfo(
                        id=self.metadata.__id__,
                        description=self.metadata.__name__,
                        link=LubimyCzytac.BASE_URL,
                    ),
                )
            )
        return matches

    def parse_single_book(
        self, match: MetaRecord, generic_cover: str, locale: str
    ) -> MetaRecord:
        response = requests.get(match.url)
        self.root = fromstring(response.text)
        match.cover = self._parse_cover(generic_cover=generic_cover)
        match.description = self._parse_description()
        match.languages = self._parse_languages(locale=locale)
        match.publisher = self._parse_publisher()
        match.publishedDate = self._parse_from_summary(attribute_name="datePublished")
        match.rating = self._parse_rating()
        match.series, match.series_index = self._parse_series()
        match.tags = self._parse_tags()
        match.identifiers = {
            "isbn": self._parse_isbn(),
            "lubimyczytac": match.id,
        }
        return match

    def _parse_xpath_node(
        self,
        xpath: str,
        root: HtmlElement = None,
        take_first: bool = True,
        strip_element: bool = True,
    ) -> Optional[Union[str, List[str]]]:
        root = root if root is not None else self.root
        node = root.xpath(xpath)
        if not node:
            return None
        return (
            (node[0].strip() if strip_element else node[0])
            if take_first
            else [x.strip() for x in node]
        )

    def _parse_cover(self, generic_cover) -> Optional[str]:
        return (
            self._parse_xpath_node(xpath=LubimyCzytac.COVER, take_first=True)
            or generic_cover
        )

    def _parse_publisher(self) -> Optional[str]:
        return self._parse_xpath_node(xpath=LubimyCzytac.PUBLISHER, take_first=True)

    def _parse_languages(self, locale: str) -> List[str]:
        languages = list()
        lang = self._parse_xpath_node(xpath=LubimyCzytac.LANGUAGES, take_first=True)
        if lang:
            if "polski" in lang:
                languages.append("pol")
            if "angielski" in lang:
                languages.append("eng")
        return [get_language_name(locale, language) for language in languages]

    def _parse_series(self) -> Tuple[Optional[str], Optional[Union[float, int]]]:
        series_index = 0
        series = self._parse_xpath_node(xpath=LubimyCzytac.SERIES, take_first=True)
        if series:
            if "tom " in series:
                series_name, series_info = series.split(" (tom ", 1)
                series_info = series_info.replace(" ", "").replace(")", "")
                # Check if book is not a bundle, i.e. chapter 1-3
                if "-" in series_info:
                    series_info = series_info.split("-", 1)[0]
                if series_info.replace(".", "").isdigit() is True:
                    series_index = get_int_or_float(series_info)
                return series_name, series_index
        return None, None

    def _parse_tags(self) -> List[str]:
        tags = self._parse_xpath_node(xpath=LubimyCzytac.TAGS, take_first=False)
        return [
            strip_accents(w.replace(", itd.", " itd."))
            for w in tags
            if isinstance(w, str)
        ]

    def _parse_from_summary(self, attribute_name: str) -> Optional[str]:
        value = None
        summary_text = self._parse_xpath_node(xpath=LubimyCzytac.SUMMARY)
        if summary_text:
            data = json.loads(summary_text)
            value = data.get(attribute_name)
        return value.strip() if value is not None else value

    def _parse_rating(self) -> Optional[str]:
        rating = self._parse_xpath_node(xpath=LubimyCzytac.RATING)
        return round(float(rating.replace(",", ".")) / 2) if rating else rating

    def _parse_date(self, xpath="first_publish") -> Optional[datetime.datetime]:
        options = {
            "first_publish": LubimyCzytac.FIRST_PUBLISH_DATE,
            "first_publish_pl": LubimyCzytac.FIRST_PUBLISH_DATE_PL,
        }
        date = self._parse_xpath_node(xpath=options.get(xpath))
        return parser.parse(date) if date else None

    def _parse_isbn(self) -> Optional[str]:
        return self._parse_xpath_node(xpath=LubimyCzytac.ISBN)

    def _parse_description(self) -> str:
        description = ""
        description_node = self._parse_xpath_node(
            xpath=LubimyCzytac.DESCRIPTION, strip_element=False
        )
        if description_node is not None:
            for source in self.root.xpath('//p[@class="source"]'):
                source.getparent().remove(source)
            description = tostring(description_node, method="html")
            description = sanitize_comments_html(description)
        else:
            description_node = self._parse_xpath_node(xpath=LubimyCzytac.META_TITLE)
            if description_node is not None:
                description = description_node
                description = sanitize_comments_html(description)
        description = self._add_extra_info_to_description(description=description)
        return description

    def _add_extra_info_to_description(self, description: str) -> str:
        pages = self._parse_from_summary(attribute_name="numberOfPages")
        if pages:
            description += LubimyCzytacParser.PAGES_TEMPLATE.format(pages)

        first_publish_date = self._parse_date()
        if first_publish_date:
            description += LubimyCzytacParser.PUBLISH_DATE_TEMPLATE.format(
                first_publish_date.strftime("%d.%m.%Y")
            )

        first_publish_date_pl = self._parse_date(xpath="first_publish_pl")
        if first_publish_date_pl:
            description += LubimyCzytacParser.PUBLISH_DATE_PL_TEMPLATE.format(
                first_publish_date_pl.strftime("%d.%m.%Y")
            )

        return description
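_parse_series depends on get_int_or_float above to keep whole-number series indices as int while preserving fractional ones. A doctest-style illustration with invented inputs:

def get_int_or_float(value):
    number_as_float = float(value)
    number_as_int = int(number_as_float)
    return number_as_int if number_as_float == number_as_int else number_as_float

print(get_int_or_float("3"))    # 3, an int, because the float is whole
print(get_int_or_float("1.5"))  # 1.5, kept as a float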
@@ -15,6 +15,9 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import itertools
from typing import Dict, List, Optional
from urllib.parse import quote

try:
    from fake_useragent.errors import FakeUserAgentError
@@ -25,43 +28,46 @@ try:
except FakeUserAgentError:
    raise ImportError("No module named 'scholarly'")

from cps.services.Metadata import Metadata
from cps.services.Metadata import MetaRecord, MetaSourceInfo, Metadata


class scholar(Metadata):
    __name__ = "Google Scholar"
    __id__ = "googlescholar"
    META_URL = "https://scholar.google.com/"

    def search(self, query, generic_cover=""):
    def search(
        self, query: str, generic_cover: str = "", locale: str = "en"
    ) -> Optional[List[MetaRecord]]:
        val = list()
        if self.active:
            scholar_gen = scholarly.search_pubs(' '.join(query.split('+')))
            i = 0
            for publication in scholar_gen:
                v = dict()
                v['id'] = publication['url_scholarbib'].split(':')[1]
                v['title'] = publication['bib'].get('title')
                v['authors'] = publication['bib'].get('author', [])
                v['description'] = publication['bib'].get('abstract', "")
                v['publisher'] = publication['bib'].get('venue', "")
                if publication['bib'].get('pub_year'):
                    v['publishedDate'] = publication['bib'].get('pub_year') + "-01-01"
                else:
                    v['publishedDate'] = ""
                v['tags'] = []
                v['rating'] = 0
                v['series'] = ""
                v['cover'] = ""
                v['url'] = publication.get('pub_url') or publication.get('eprint_url') or "",
                v['source'] = {
                    "id": self.__id__,
                    "description": "Google Scholar",
                    "link": "https://scholar.google.com/"
                }
                val.append(v)
                i += 1
                if (i >= 10):
                    break
            title_tokens = list(self.get_title_tokens(query, strip_joiners=False))
            if title_tokens:
                tokens = [quote(t.encode("utf-8")) for t in title_tokens]
                query = " ".join(tokens)
            scholar_gen = itertools.islice(scholarly.search_pubs(query), 10)
            for result in scholar_gen:
                match = self._parse_search_result(
                    result=result, generic_cover=generic_cover, locale=locale
                )
                val.append(match)
        return val

    def _parse_search_result(
        self, result: Dict, generic_cover: str, locale: str
    ) -> MetaRecord:
        match = MetaRecord(
            id=result.get("pub_url", result.get("eprint_url", "")),
            title=result["bib"].get("title"),
            authors=result["bib"].get("author", []),
            url=result.get("pub_url", result.get("eprint_url", "")),
            source=MetaSourceInfo(
                id=self.__id__, description=self.__name__, link=scholar.META_URL
            ),
        )

        match.cover = result.get("image", {}).get("original_url", generic_cover)
        match.description = result["bib"].get("abstract", "")
        match.publisher = result["bib"].get("venue", "")
        match.publishedDate = result["bib"].get("pub_year") + "-01-01"
        match.identifiers = {"scholar": match.id}
        return match
26
cps/opds.py
@@ -21,13 +21,14 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import datetime
from urllib.parse import unquote_plus
from functools import wraps

from flask import Blueprint, request, render_template, Response, g, make_response, abort
from flask_login import current_user
from sqlalchemy.sql.expression import func, text, or_, and_, true
from werkzeug.security import check_password_hash

from tornado.httputil import HTTPServerRequest
from . import constants, logger, config, db, calibre_db, ub, services, get_locale, isoLanguages
from .helper import get_download_link, get_book_cover
from .pagination import Pagination
@@ -81,10 +82,12 @@ def feed_osd():
@opds.route("/opds/search", defaults={'query': ""})
@opds.route("/opds/search/<query>")
@opds.route("/opds/search/<path:query>")
@requires_basic_auth_if_no_ano
def feed_cc_search(query):
    return feed_search(query.strip())
    # Handle strange query from Libera Reader with + instead of spaces
    plus_query = unquote_plus(request.base_url.split('/opds/search/')[1]).strip()
    return feed_search(plus_query)


@opds.route("/opds/search", methods=["GET"])
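The Libera Reader workaround above re-parses the raw URL because Flask decodes the path segment before the view ever sees it, leaving literal plus signs in place of spaces. The relevant standard-library behavior, with an invented sample query:

from urllib.parse import unquote_plus

raw = "harry+potter%20and"          # as it would appear after /opds/search/
print(unquote_plus(raw).strip())    # "harry potter and" - both + and %20 decode to spaces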
@@ -429,17 +432,9 @@ def feed_languagesindex():
    if current_user.filter_language() == u"all":
        languages = calibre_db.speaking_language()
    else:
        #try:
        #    cur_l = LC.parse(current_user.filter_language())
        #except UnknownLocaleError:
        #    cur_l = None
        languages = calibre_db.session.query(db.Languages).filter(
            db.Languages.lang_code == current_user.filter_language()).all()
        languages[0].name = isoLanguages.get_language_name(get_locale(), languages[0].lang_code)
        #if cur_l:
        #    languages[0].name = cur_l.get_language_name(get_locale())
        #else:
        #    languages[0].name = _(isoLanguages.get(part3=languages[0].lang_code).name)
    pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page,
                            len(languages))
    return render_xml_template('feed.xml', listelements=languages, folder='opds.feed_languages', pagination=pagination)
@@ -524,10 +519,11 @@ def get_metadata_calibre_companion(uuid, library):
def feed_search(term):
    if term:
        entries, __, ___ = calibre_db.get_search_results(term)
        entriescount = len(entries) if len(entries) > 0 else 1
        pagination = Pagination(1, entriescount, entriescount)
        return render_xml_template('feed.xml', searchterm=term, entries=entries, pagination=pagination)
        entries, __, ___ = calibre_db.get_search_results(term, config_read_column=config.config_read_column)
        entries_count = len(entries) if len(entries) > 0 else 1
        pagination = Pagination(1, entries_count, entries_count)
        items = [entry[0] for entry in entries]
        return render_xml_template('feed.xml', searchterm=term, entries=items, pagination=pagination)
    else:
        return render_xml_template('feed.xml', searchterm="")
@@ -16,25 +16,27 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import json
import importlib
import sys
import inspect
import datetime
import concurrent.futures
import importlib
import inspect
import json
import os
import sys
# from time import time
from dataclasses import asdict

from flask import Blueprint, request, Response, url_for
from flask import Blueprint, Response, request, url_for
from flask_login import current_user
from flask_login import login_required
from sqlalchemy.exc import InvalidRequestError, OperationalError
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy.exc import OperationalError, InvalidRequestError

from . import constants, logger, ub
from cps.services.Metadata import Metadata
from . import constants, get_locale, logger, ub

# current_milli_time = lambda: int(round(time() * 1000))

meta = Blueprint('metadata', __name__)
meta = Blueprint("metadata", __name__)

log = logger.create()
@@ -42,43 +44,55 @@ new_list = list()
meta_dir = os.path.join(constants.BASE_DIR, "cps", "metadata_provider")
modules = os.listdir(os.path.join(constants.BASE_DIR, "cps", "metadata_provider"))
for f in modules:
    if os.path.isfile(os.path.join(meta_dir, f)) and not f.endswith('__init__.py'):
    if os.path.isfile(os.path.join(meta_dir, f)) and not f.endswith("__init__.py"):
        a = os.path.basename(f)[:-3]
        try:
            importlib.import_module("cps.metadata_provider." + a)
            new_list.append(a)
        except ImportError:
            log.error("Import error for metadata source: {}".format(a))
        except ImportError as e:
            log.error("Import error for metadata source: {} - {}".format(a, e))
            pass


def list_classes(provider_list):
    classes = list()
    for element in provider_list:
        for name, obj in inspect.getmembers(sys.modules["cps.metadata_provider." + element]):
            if inspect.isclass(obj) and name != "Metadata" and issubclass(obj, Metadata):
        for name, obj in inspect.getmembers(
            sys.modules["cps.metadata_provider." + element]
        ):
            if (
                inspect.isclass(obj)
                and name != "Metadata"
                and issubclass(obj, Metadata)
            ):
                classes.append(obj())
    return classes


cl = list_classes(new_list)


@meta.route("/metadata/provider")
@login_required
def metadata_provider():
    active = current_user.view_settings.get('metadata', {})
    active = current_user.view_settings.get("metadata", {})
    provider = list()
    for c in cl:
        ac = active.get(c.__id__, True)
        provider.append({"name": c.__name__, "active": ac, "initial": ac, "id": c.__id__})
    return Response(json.dumps(provider), mimetype='application/json')
        provider.append(
            {"name": c.__name__, "active": ac, "initial": ac, "id": c.__id__}
        )
    return Response(json.dumps(provider), mimetype="application/json")

@meta.route("/metadata/provider", methods=['POST'])
@meta.route("/metadata/provider/<prov_name>", methods=['POST'])

@meta.route("/metadata/provider", methods=["POST"])
@meta.route("/metadata/provider/<prov_name>", methods=["POST"])
@login_required
def metadata_change_active_provider(prov_name):
    new_state = request.get_json()
    active = current_user.view_settings.get('metadata', {})
    active[new_state['id']] = new_state['value']
    current_user.view_settings['metadata'] = active
    active = current_user.view_settings.get("metadata", {})
    active[new_state["id"]] = new_state["value"]
    current_user.view_settings["metadata"] = active
    try:
    try:
        flag_modified(current_user, "view_settings")
@@ -89,29 +103,33 @@ def metadata_change_active_provider(prov_name):
log.error("Invalid request received: {}".format(request))
|
||||
return "Invalid request", 400
|
||||
if "initial" in new_state and prov_name:
|
||||
for c in cl:
|
||||
if c.__id__ == prov_name:
|
||||
data = c.search(new_state.get('query', ""))
|
||||
break
|
||||
return Response(json.dumps(data), mimetype='application/json')
|
||||
data = []
|
||||
provider = next((c for c in cl if c.__id__ == prov_name), None)
|
||||
if provider is not None:
|
||||
data = provider.search(new_state.get("query", ""))
|
||||
return Response(
|
||||
json.dumps([asdict(x) for x in data]), mimetype="application/json"
|
||||
)
|
||||
return ""
|
||||
|
||||
@meta.route("/metadata/search", methods=['POST'])
|
||||
|
||||
@meta.route("/metadata/search", methods=["POST"])
|
||||
@login_required
|
||||
def metadata_search():
|
||||
query = request.form.to_dict().get('query')
|
||||
query = request.form.to_dict().get("query")
|
||||
data = list()
|
||||
active = current_user.view_settings.get('metadata', {})
|
||||
active = current_user.view_settings.get("metadata", {})
|
||||
locale = get_locale()
|
||||
if query:
|
||||
generic_cover = ""
|
||||
static_cover = url_for("static", filename="generic_cover.jpg")
|
||||
# start = current_milli_time()
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
|
||||
meta = {executor.submit(c.search, query, generic_cover): c for c in cl if active.get(c.__id__, True)}
|
||||
meta = {
|
||||
executor.submit(c.search, query, static_cover, locale): c
|
||||
for c in cl
|
||||
if active.get(c.__id__, True)
|
||||
}
|
||||
for future in concurrent.futures.as_completed(meta):
|
||||
data.extend(future.result())
|
||||
return Response(json.dumps(data), mimetype='application/json')
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
data.extend([asdict(x) for x in future.result()])
|
||||
# log.info({'Time elapsed {}'.format(current_milli_time()-start)})
|
||||
return Response(json.dumps(data), mimetype="application/json")
|
||||
|
|
|
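metadata_search fans the query out to every active provider in a thread pool and flattens each dataclass result with asdict before serializing. The pattern in isolation, with trivial stand-ins for the real provider classes:

import concurrent.futures
import dataclasses
import json

@dataclasses.dataclass
class FakeRecord:  # stand-in for MetaRecord
    title: str

def fake_search(query):  # stand-in for a provider's search()
    return [FakeRecord(title=f"{query} result")]

providers = [fake_search, fake_search]
data = []
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    futures = {executor.submit(p, "dune"): p for p in providers}
    for future in concurrent.futures.as_completed(futures):
        data.extend([dataclasses.asdict(x) for x in future.result()])
print(json.dumps(data))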
@@ -15,13 +15,97 @@
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
import abc
|
||||
import dataclasses
|
||||
import os
|
||||
import re
|
||||
from typing import Dict, Generator, List, Optional, Union
|
||||
|
||||
from cps import constants
|
||||
|
||||
|
||||
class Metadata():
|
||||
@dataclasses.dataclass
|
||||
class MetaSourceInfo:
|
||||
id: str
|
||||
description: str
|
||||
link: str
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class MetaRecord:
|
||||
id: Union[str, int]
|
||||
title: str
|
||||
authors: List[str]
|
||||
url: str
|
||||
source: MetaSourceInfo
|
||||
cover: str = os.path.join(constants.STATIC_DIR, 'generic_cover.jpg')
|
||||
description: Optional[str] = ""
|
||||
series: Optional[str] = None
|
||||
series_index: Optional[Union[int, float]] = 0
|
||||
identifiers: Dict[str, Union[str, int]] = dataclasses.field(default_factory=dict)
|
||||
publisher: Optional[str] = None
|
||||
publishedDate: Optional[str] = None
|
||||
rating: Optional[int] = 0
|
||||
languages: Optional[List[str]] = dataclasses.field(default_factory=list)
|
||||
tags: Optional[List[str]] = dataclasses.field(default_factory=list)
|
||||
|
||||
|
||||
class Metadata:
|
||||
__name__ = "Generic"
|
||||
__id__ = "generic"
|
||||
|
||||
def __init__(self):
|
||||
self.active = True
|
||||
|
||||
def set_status(self, state):
|
||||
self.active = state
|
||||
|
||||
@abc.abstractmethod
|
||||
def search(
|
||||
self, query: str, generic_cover: str = "", locale: str = "en"
|
||||
) -> Optional[List[MetaRecord]]:
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def get_title_tokens(
|
||||
title: str, strip_joiners: bool = True
|
||||
) -> Generator[str, None, None]:
|
||||
"""
|
||||
Taken from calibre source code
|
||||
It's a simplified (cut out what is unnecessary) version of
|
||||
https://github.com/kovidgoyal/calibre/blob/99d85b97918625d172227c8ffb7e0c71794966c0/
|
||||
src/calibre/ebooks/metadata/sources/base.py#L363-L367
|
||||
(src/calibre/ebooks/metadata/sources/base.py - lines 363-398)
|
||||
"""
|
||||
title_patterns = [
|
||||
(re.compile(pat, re.IGNORECASE), repl)
|
||||
for pat, repl in [
|
||||
# Remove things like: (2010) (Omnibus) etc.
|
||||
(
|
||||
r"(?i)[({\[](\d{4}|omnibus|anthology|hardcover|"
|
||||
r"audiobook|audio\scd|paperback|turtleback|"
|
||||
r"mass\s*market|edition|ed\.)[\])}]",
|
||||
"",
|
||||
),
|
||||
# Remove any strings that contain the substring edition inside
|
||||
# parentheses
|
||||
(r"(?i)[({\[].*?(edition|ed.).*?[\]})]", ""),
|
||||
# Remove commas used a separators in numbers
|
||||
(r"(\d+),(\d+)", r"\1\2"),
|
||||
# Remove hyphens only if they have whitespace before them
|
||||
(r"(\s-)", " "),
|
||||
# Replace other special chars with a space
|
||||
(r"""[:,;!@$%^&*(){}.`~"\s\[\]/]《》「」“”""", " "),
|
||||
]
|
||||
]
|
||||
|
||||
for pat, repl in title_patterns:
|
||||
title = pat.sub(repl, title)
|
||||
|
||||
tokens = title.split()
|
||||
for token in tokens:
|
||||
token = token.strip().strip('"').strip("'")
|
||||
if token and (
|
||||
not strip_joiners or token.lower() not in ("a", "and", "the", "&")
|
||||
):
|
||||
yield token
|
||||
|
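
To illustrate how this new base class is meant to be consumed, here is a minimal, hypothetical provider built on it; the DummyProvider name and its canned result are illustrative only and not part of the commit:

    class DummyProvider(Metadata):
        __name__ = "Dummy"
        __id__ = "dummy"

        def search(self, query, generic_cover="", locale="en"):
            # normalize the incoming title the same way real providers would
            cleaned = " ".join(self.get_title_tokens(query))
            return [MetaRecord(
                id="1",
                title=cleaned,
                authors=["Unknown"],
                url="https://example.com/book/1",
                source=MetaSourceInfo(id=self.__id__, description="Dummy source",
                                      link="https://example.com"),
            )]

With the default strip_joiners=True, get_title_tokens("The Hobbit (1937) [omnibus]") yields just "Hobbit": the bracketed year and edition markers fall to the title patterns, and "The" is dropped as a joiner.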

@@ -135,12 +135,9 @@ class SyncToken:
            archive_last_modified = get_datetime_from_json(data_json, "archive_last_modified")
            reading_state_last_modified = get_datetime_from_json(data_json, "reading_state_last_modified")
            tags_last_modified = get_datetime_from_json(data_json, "tags_last_modified")
            # books_last_id = data_json["books_last_id"]
        except TypeError:
            log.error("SyncToken timestamps don't parse to a datetime.")
            return SyncToken(raw_kobo_store_token=raw_kobo_store_token)
        #except KeyError:
        #    books_last_id = -1

        return SyncToken(
            raw_kobo_store_token=raw_kobo_store_token,

@@ -149,7 +146,6 @@ class SyncToken:
            archive_last_modified=archive_last_modified,
            reading_state_last_modified=reading_state_last_modified,
            tags_last_modified=tags_last_modified,
            #books_last_id=books_last_id
        )

    def set_kobo_store_header(self, store_headers):

@@ -173,7 +169,6 @@ class SyncToken:
                "archive_last_modified": to_epoch_timestamp(self.archive_last_modified),
                "reading_state_last_modified": to_epoch_timestamp(self.reading_state_last_modified),
                "tags_last_modified": to_epoch_timestamp(self.tags_last_modified),
                #"books_last_id":self.books_last_id
            },
        }
        return b64encode_json(token)

@@ -183,5 +178,5 @@ class SyncToken:
            self.books_last_modified,
            self.archive_last_modified,
            self.reading_state_last_modified,
            self.tags_last_modified, self.raw_kobo_store_token)
            #self.books_last_id)
            self.tags_last_modified,
            self.raw_kobo_store_token)
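
These hunks only strip the dead books_last_id plumbing; the token itself remains a base64-encoded JSON document of epoch timestamps. A rough, self-contained sketch of that encoding idea (to_epoch here is a simplified stand-in, not the module's actual to_epoch_timestamp/b64encode_json helpers, and the real token carries several more fields):

    import base64
    import json
    from datetime import datetime, timezone

    def to_epoch(dt):
        # naive stand-in: seconds since the epoch, assuming UTC
        return dt.replace(tzinfo=timezone.utc).timestamp()

    token = {"data": {"tags_last_modified": to_epoch(datetime(2021, 11, 1))}}
    print(base64.b64encode(json.dumps(token).encode("utf-8")).decode("ascii"))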

@@ -439,6 +439,7 @@ def render_show_shelf(shelf_type, shelf_id, page_no, sort_param):
                                                          db.Books,
                                                          ub.BookShelf.shelf == shelf_id,
                                                          [ub.BookShelf.order.asc()],
                                                          False, 0,
                                                          ub.BookShelf, ub.BookShelf.book_id == db.Books.id)
        # delete shelf entries where the book no longer exists; can happen if a book is deleted outside calibre-web
        wrong_entries = calibre_db.session.query(ub.BookShelf) \

@@ -26,19 +26,26 @@ $(function () {
        )
    };

    function getUniqueValues(attribute_name, book){
        var presentArray = $.map($("#"+attribute_name).val().split(","), $.trim);
        if ( presentArray.length === 1 && presentArray[0] === "") {
            presentArray = [];
        }
        $.each(book[attribute_name], function(i, el) {
            if ($.inArray(el, presentArray) === -1) presentArray.push(el);
        });
        return presentArray;
    }

    function populateForm (book) {
        tinymce.get("description").setContent(book.description);
        var uniqueTags = $.map($("#tags").val().split(","), $.trim);
        if ( uniqueTags.length == 1 && uniqueTags[0] == "") {
            uniqueTags = [];
        }
        $.each(book.tags, function(i, el) {
            if ($.inArray(el, uniqueTags) === -1) uniqueTags.push(el);
        });
        var uniqueTags = getUniqueValues('tags', book);
        var uniqueLanguages = getUniqueValues('languages', book);
        var ampSeparatedAuthors = (book.authors || []).join(" & ");
        $("#bookAuthor").val(ampSeparatedAuthors);
        $("#book_title").val(book.title);
        $("#tags").val(uniqueTags.join(", "));
        $("#languages").val(uniqueLanguages.join(", "));
        $("#rating").data("rating").setValue(Math.round(book.rating));
        if(book.cover && $("#cover_url").length){
            $(".cover img").attr("src", book.cover);

@@ -48,7 +55,32 @@ $(function () {
        $("#publisher").val(book.publisher);
        if (typeof book.series !== "undefined") {
            $("#series").val(book.series);
            $("#series_index").val(book.series_index);
        }
        if (typeof book.identifiers !== "undefined") {
            populateIdentifiers(book.identifiers);
        }
    }

    function populateIdentifiers(identifiers){
        for (const property in identifiers) {
            console.log(`${property}: ${identifiers[property]}`);
            if ($('input[name="identifier-type-'+property+'"]').length) {
                $('input[name="identifier-val-'+property+'"]').val(identifiers[property]);
            }
            else {
                addIdentifier(property, identifiers[property]);
            }
        }
    }

    function addIdentifier(name, value){
        var line = '<tr>';
        line += '<td><input type="text" class="form-control" name="identifier-type-'+ name +'" required="required" placeholder="' + _("Identifier Type") +'" value="'+ name +'"></td>';
        line += '<td><input type="text" class="form-control" name="identifier-val-'+ name +'" required="required" placeholder="' + _("Identifier Value") +'" value="'+ value +'"></td>';
        line += '<td><a class="btn btn-default" onclick="removeIdentifierLine(this)">'+_("Remove")+'</a></td>';
        line += '</tr>';
        $("#identifier-table").append(line);
    }

    function doSearch (keyword) {
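
The populateIdentifiers() helper above iterates a plain property-to-value object. On the Python side that object originates in MetaRecord.identifiers and reaches the browser through the asdict()/json.dumps() path shown earlier. A small sketch of what a provider might put there (the values are made up for illustration):

    # hypothetical identifiers mapping a provider could set on a MetaRecord;
    # after asdict() + json.dumps() the JS receives it as a plain JSON object
    identifiers = {
        "isbn": "9780547928227",   # ISBN-13, kept as a string
        "goodreads": 5907,         # numeric source-specific id
    }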

@@ -636,6 +636,13 @@ function checkboxFormatter(value, row){
    else
        return '<input type="checkbox" class="chk" data-pk="' + row.id + '" data-name="' + this.field + '" onchange="checkboxChange(this, ' + row.id + ', \'' + this.name + '\', ' + this.column + ')">';
}
function bookCheckboxFormatter(value, row){
    if (value)
        return '<input type="checkbox" class="chk" data-pk="' + row.id + '" data-name="' + this.field + '" checked onchange="BookCheckboxChange(this, ' + row.id + ', \'' + this.name + '\')">';
    else
        return '<input type="checkbox" class="chk" data-pk="' + row.id + '" data-name="' + this.field + '" onchange="BookCheckboxChange(this, ' + row.id + ', \'' + this.name + '\')">';
}


function singlecheckboxFormatter(value, row){
    if (value)

@@ -802,6 +809,20 @@ function checkboxChange(checkbox, userId, field, field_index) {
    });
}

function BookCheckboxChange(checkbox, userId, field) {
    var value = checkbox.checked ? "True" : "False";
    $.ajax({
        method: "post",
        url: getPath() + "/ajax/editbooks/" + field,
        data: {"pk": userId, "value": value},
        error: function(data) {
            handleListServerResponse([{type:"danger", message:data.responseText}]);
        },
        success: handleListServerResponse
    });
}


function selectHeader(element, field) {
    if (element.value !== "None") {
        confirmDialog(element.id, "GeneralChangeModal", 0, function () {

@@ -14,13 +14,13 @@
    >{{ show_text }}</th>
{%- endmacro %}

{% macro book_checkbox_row(parameter, array_field, show_text, element, value, sort) -%}
    <!--th data-name="{{parameter}}" data-field="{{parameter}}"
{% macro book_checkbox_row(parameter, show_text, sort) -%}
    <th data-name="{{parameter}}" data-field="{{parameter}}"
        {% if sort %}data-sortable="true" {% endif %}
        data-visible="{{visiblility.get(parameter)}}"
        data-formatter="checkboxFormatter">
        data-formatter="bookCheckboxFormatter">
        {{show_text}}
    </th-->
    </th>
{%- endmacro %}


@@ -71,7 +71,10 @@
    <!--th data-field="pubdate" data-type="date" data-visible="{{visiblility.get('pubdate')}}" data-viewformat="dd.mm.yyyy" id="pubdate" data-sortable="true">{{_('Publishing Date')}}</th-->
    {{ text_table_row('publishers', _('Enter Publishers'),_('Publishers'), false, true) }}
    <th data-field="comments" id="comments" data-escape="true" data-editable-mode="popup" data-visible="{{visiblility.get('comments')}}" data-sortable="false" {% if g.user.role_edit() %} data-editable-type="wysihtml5" data-editable-url="{{ url_for('editbook.edit_list_book', param='comments')}}" data-edit="true" data-editable-title="{{_('Enter comments')}}"{% endif %}>{{_('Comments')}}</th>
    <!-- data-editable-formatter="comment_display" -->
    {% if g.user.check_visibility(32768) %}
        {{ book_checkbox_row('is_archived', _('Archive Status'), false)}}
    {% endif %}
    {{ book_checkbox_row('read_status', _('Read Status'), false)}}
    {% for c in cc %}
        {% if c.datatype == "int" %}
            <th data-field="custom_column_{{ c.id|string }}" id="custom_column_{{ c.id|string }}" data-visible="{{visiblility.get('custom_column_'+ c.id|string)}}" data-sortable="false" {% if g.user.role_edit() %} data-editable-type="number" data-editable-placeholder="1" data-editable-step="1" data-editable-url="{{ url_for('editbook.edit_list_book', param='custom_column_'+ c.id|string)}}" data-edit="true" data-editable-title="{{_('Enter ') + c.name}}"{% endif %}>{{c.name}}</th>

@@ -88,7 +91,7 @@
        {% elif c.datatype == "comments" %}
            <th data-field="custom_column_{{ c.id|string }}" id="custom_column_{{ c.id|string }}" data-escape="true" data-editable-mode="popup" data-visible="{{visiblility.get('custom_column_'+ c.id|string)}}" data-sortable="false" {% if g.user.role_edit() %} data-editable-type="wysihtml5" data-editable-url="{{ url_for('editbook.edit_list_book', param='custom_column_'+ c.id|string)}}" data-edit="true" data-editable-title="{{_('Enter ') + c.name}}"{% endif %}>{{c.name}}</th>
        {% elif c.datatype == "bool" %}
            {{ book_checkbox_row('custom_column_' + c.id|string, _('Enter ') + c.name, c.name, visiblility, all_roles, false)}}
            {{ book_checkbox_row('custom_column_' + c.id|string, c.name, false)}}
        {% else %}
            <!--{{ text_table_row('custom_column_' + c.id|string, _('Enter ') + c.name, c.name, false, false) }} -->
        {% endif %}

@@ -36,9 +36,9 @@
        </div>
    {% endif %}
{% endif %}
{% if g.user.kindle_mail and kindle_list %}
    {% if kindle_list.__len__() == 1 %}
        <div id="sendbtn" data-action="{{url_for('web.send_to_kindle', book_id=entry.id, book_format=kindle_list[0]['format'], convert=kindle_list[0]['convert'])}}" data-text="{{_('Send to Kindle')}}" class="btn btn-primary postAction" role="button"><span class="glyphicon glyphicon-send"></span> {{kindle_list[0]['text']}}</div>
{% if g.user.kindle_mail and entry.kindle_list %}
    {% if entry.kindle_list.__len__() == 1 %}
        <div id="sendbtn" data-action="{{url_for('web.send_to_kindle', book_id=entry.id, book_format=entry.kindle_list[0]['format'], convert=entry.kindle_list[0]['convert'])}}" data-text="{{_('Send to Kindle')}}" class="btn btn-primary postAction" role="button"><span class="glyphicon glyphicon-send"></span> {{entry.kindle_list[0]['text']}}</div>
    {% else %}
        <div class="btn-group" role="group">
            <button id="sendbtn2" type="button" class="btn btn-primary dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">

@@ -46,52 +46,52 @@
                <span class="caret"></span>
            </button>
            <ul class="dropdown-menu" aria-labelledby="send-to-kindle">
                {% for format in kindle_list %}
                {% for format in entry.kindle_list %}
                    <li><a class="postAction" data-action="{{url_for('web.send_to_kindle', book_id=entry.id, book_format=format['format'], convert=format['convert'])}}">{{format['text']}}</a></li>
                {%endfor%}
            </ul>
        </div>
    {% endif %}
{% endif %}
{% if reader_list and g.user.role_viewer() %}
{% if entry.reader_list and g.user.role_viewer() %}
    <div class="btn-group" role="group">
        {% if reader_list|length > 1 %}
        {% if entry.reader_list|length > 1 %}
            <button id="read-in-browser" type="button" class="btn btn-primary dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                <span class="glyphicon glyphicon-book"></span> {{_('Read in Browser')}}
                <span class="caret"></span>
            </button>
            <ul class="dropdown-menu" aria-labelledby="read-in-browser">
                {% for format in reader_list %}
                {% for format in entry.reader_list %}
                    <li><a target="_blank" href="{{ url_for('web.read_book', book_id=entry.id, book_format=format) }}">{{format}}</a></li>
                {%endfor%}
            </ul>
        {% else %}
            <a target="_blank" href="{{url_for('web.read_book', book_id=entry.id, book_format=reader_list[0])}}" id="readbtn" class="btn btn-primary" role="button"><span class="glyphicon glyphicon-book"></span> {{_('Read in Browser')}} - {{reader_list[0]}}</a>
            <a target="_blank" href="{{url_for('web.read_book', book_id=entry.id, book_format=entry.reader_list[0])}}" id="readbtn" class="btn btn-primary" role="button"><span class="glyphicon glyphicon-book"></span> {{_('Read in Browser')}} - {{entry.reader_list[0]}}</a>
        {% endif %}
    </div>
{% endif %}
{% if audioentries|length > 0 and g.user.role_viewer() %}
{% if entry.audioentries|length > 0 and g.user.role_viewer() %}
    <div class="btn-group" role="group">
        {% if audioentries|length > 1 %}
        {% if entry.audioentries|length > 1 %}
            <button id="listen-in-browser" type="button" class="btn btn-primary dropdown-toggle" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                <span class="glyphicon glyphicon-music"></span> {{_('Listen in Browser')}}
                <span class="caret"></span>
            </button>
            <ul class="dropdown-menu" aria-labelledby="listen-in-browser">
                {% for format in reader_list %}
                {% for format in entry.reader_list %}
                    <li><a target="_blank" href="{{ url_for('web.read_book', book_id=entry.id, book_format=format) }}">{{format}}</a></li>
                {%endfor%}
            </ul>
            <ul class="dropdown-menu" aria-labelledby="listen-in-browser">

                {% for format in entry.data %}
                    {% if format.format|lower in audioentries %}
                    {% if format.format|lower in entry.audioentries %}
                        <li><a target="_blank" href="{{ url_for('web.read_book', book_id=entry.id, book_format=format.format|lower) }}">{{format.format|lower }}</a></li>
                    {% endif %}
                {% endfor %}
            </ul>
        {% else %}
            <a target="_blank" href="{{url_for('web.read_book', book_id=entry.id, book_format=audioentries[0])}}" id="listenbtn" class="btn btn-primary" role="button"><span class="glyphicon glyphicon-music"></span> {{_('Listen in Browser')}} - {{audioentries[0]}}</a>
            <a target="_blank" href="{{url_for('web.read_book', book_id=entry.id, book_format=entry.audioentries[0])}}" id="listenbtn" class="btn btn-primary" role="button"><span class="glyphicon glyphicon-music"></span> {{_('Listen in Browser')}} - {{entry.audioentries[0]}}</a>
        {% endif %}
    </div>
{% endif %}

@@ -218,7 +218,7 @@
<form id="have_read_form" action="{{ url_for('web.toggle_read', book_id=entry.id)}}" method="POST">
    <input type="hidden" name="csrf_token" value="{{ csrf_token() }}">
    <label class="block-label">
        <input id="have_read_cb" data-checked="{{_('Mark As Unread')}}" data-unchecked="{{_('Mark As Read')}}" type="checkbox" {% if have_read %}checked{% endif %} >
        <input id="have_read_cb" data-checked="{{_('Mark As Unread')}}" data-unchecked="{{_('Mark As Read')}}" type="checkbox" {% if entry.read_status %}checked{% endif %} >
        <span>{{_('Read')}}</span>
    </label>
</form>

@@ -228,7 +228,7 @@
<form id="archived_form" action="{{ url_for('web.toggle_archived', book_id=entry.id)}}" method="POST">
    <input type="hidden" name="csrf_token" value="{{ csrf_token() }}">
    <label class="block-label">
        <input id="archived_cb" data-checked="{{_('Restore from archive')}}" data-unchecked="{{_('Add to archive')}}" type="checkbox" {% if is_archived %}checked{% endif %} >
        <input id="archived_cb" data-checked="{{_('Restore from archive')}}" data-unchecked="{{_('Add to archive')}}" type="checkbox" {% if entry.is_archived %}checked{% endif %} >
        <span>{{_('Archived')}}</span>
    </label>
</form>

@@ -42,21 +42,21 @@
{% for entry in entries %}
    <div class="col-sm-3 col-lg-2 col-xs-6 book">
        <div class="cover">
            {% if entry.has_cover is defined %}
                <a href="{{ url_for('web.show_book', book_id=entry.id) }}" data-toggle="modal" data-target="#bookDetailsModal" data-remote="false">
                    <span class="img" title="{{entry.title}}" >
                        <img src="{{ url_for('web.get_cover', book_id=entry.id) }}" alt="{{ entry.title }}" />
                        {% if entry.id in read_book_ids %}<span class="badge read glyphicon glyphicon-ok"></span>{% endif %}
            {% if entry.Books.has_cover is defined %}
                <a href="{{ url_for('web.show_book', book_id=entry.Books.id) }}" data-toggle="modal" data-target="#bookDetailsModal" data-remote="false">
                    <span class="img" title="{{entry.Books.title}}" >
                        <img src="{{ url_for('web.get_cover', book_id=entry.Books.id) }}" alt="{{ entry.Books.title }}" />
                        {% if entry.Books.id in read_book_ids %}<span class="badge read glyphicon glyphicon-ok"></span>{% endif %}
                    </span>
                </a>
            {% endif %}
        </div>
        <div class="meta">
            <a href="{{ url_for('web.show_book', book_id=entry.id) }}" data-toggle="modal" data-target="#bookDetailsModal" data-remote="false">
                <p title="{{entry.title}}" class="title">{{entry.title|shortentitle}}</p>
            <a href="{{ url_for('web.show_book', book_id=entry.Books.id) }}" data-toggle="modal" data-target="#bookDetailsModal" data-remote="false">
                <p title="{{entry.Books.title}}" class="title">{{entry.Books.title|shortentitle}}</p>
            </a>
            <p class="author">
                {% for author in entry.authors %}
                {% for author in entry.Books.authors %}
                    {% if loop.index > g.config_authors_max and g.config_authors_max != 0 %}
                        {% if not loop.first %}
                            <span class="author-hidden-divider">&amp;</span>

@@ -72,24 +72,24 @@
                        <a class="author-name" href="{{url_for('web.books_list', data='author', sort_param='new', book_id=author.id) }}">{{author.name.replace('|',',')|shortentitle(30)}}</a>
                    {% endif %}
                {% endfor %}
                {% for format in entry.data %}
                {% for format in entry.Books.data %}
                    {% if format.format|lower in g.constants.EXTENSIONS_AUDIO %}
                        <span class="glyphicon glyphicon-music"></span>
                    {% endif %}
                {% endfor %}
            </p>
            {% if entry.series.__len__() > 0 %}
            {% if entry.Books.series.__len__() > 0 %}
                <p class="series">
                    <a href="{{url_for('web.books_list', data='series', sort_param='new', book_id=entry.series[0].id )}}">
                        {{entry.series[0].name}}
                    <a href="{{url_for('web.books_list', data='series', sort_param='new', book_id=entry.Books.series[0].id )}}">
                        {{entry.Books.series[0].name}}
                    </a>
                    ({{entry.series_index|formatseriesindex}})
                    ({{entry.Books.series_index|formatseriesindex}})
                </p>
            {% endif %}

            {% if entry.ratings.__len__() > 0 %}
            {% if entry.Books.ratings.__len__() > 0 %}
                <div class="rating">
                    {% for number in range((entry.ratings[0].rating/2)|int(2)) %}
                    {% for number in range((entry.Books.ratings[0].rating/2)|int(2)) %}
                        <span class="glyphicon glyphicon-star good"></span>
                        {% if loop.last and loop.index < 5 %}
                            {% for numer in range(5 - loop.index) %}

@@ -90,7 +90,7 @@ def delete_user_session(user_id, session_key):
        session.query(User_Sessions).filter(User_Sessions.user_id==user_id,
                                            User_Sessions.session_key==session_key).delete()
        session.commit()
    except (exc.OperationalError, exc.InvalidRequestError):
    except (exc.OperationalError, exc.InvalidRequestError) as e:
        session.rollback()
        log.exception(e)

@@ -112,6 +112,12 @@ def store_ids(result):
        ids.append(element.id)
    searched_ids[current_user.id] = ids

def store_combo_ids(result):
    ids = list()
    for element in result:
        ids.append(element[0].id)
    searched_ids[current_user.id] = ids


class UserBase:

@@ -53,12 +53,10 @@ class Updater(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.paused = False
        # self.pause_cond = threading.Condition(threading.Lock())
        self.can_run = threading.Event()
        self.pause()
        self.status = -1
        self.updateIndex = None
        # self.run()

    def get_current_version_info(self):
        if config.config_updatechannel == constants.UPDATE_STABLE:

@@ -85,15 +83,15 @@ class Updater(threading.Thread):
            log.debug(u'Extracting zipfile')
            tmp_dir = gettempdir()
            z.extractall(tmp_dir)
            foldername = os.path.join(tmp_dir, z.namelist()[0])[:-1]
            if not os.path.isdir(foldername):
            folder_name = os.path.join(tmp_dir, z.namelist()[0])[:-1]
            if not os.path.isdir(folder_name):
                self.status = 11
                log.info(u'Extracted contents of zipfile not found in temp folder')
                self.pause()
                return False
            self.status = 4
            log.debug(u'Replacing files')
            if self.update_source(foldername, constants.BASE_DIR):
            if self.update_source(folder_name, constants.BASE_DIR):
                self.status = 6
                log.debug(u'Preparing restart of server')
                time.sleep(2)

@@ -184,29 +182,30 @@ class Updater(threading.Thread):
        return rf

    @classmethod
    def check_permissions(cls, root_src_dir, root_dst_dir):
    def check_permissions(cls, root_src_dir, root_dst_dir, log_function):
        access = True
        remove_path = len(root_src_dir) + 1
        for src_dir, __, files in os.walk(root_src_dir):
            root_dir = os.path.join(root_dst_dir, src_dir[remove_path:])
            # Skip non existing folders on check
            if not os.path.isdir(root_dir):  # root_dir.lstrip(os.sep).startswith('.') or
            # Skip non-existing folders on check
            if not os.path.isdir(root_dir):
                continue
            if not os.access(root_dir, os.R_OK|os.W_OK):
                log.debug("Missing permissions for {}".format(root_dir))
            if not os.access(root_dir, os.R_OK | os.W_OK):
                log_function("Missing permissions for {}".format(root_dir))
                access = False
            for file_ in files:
                curr_file = os.path.join(root_dir, file_)
                # Skip non existing files on check
                if not os.path.isfile(curr_file):  # or curr_file.startswith('.'):
                # Skip non-existing files on check
                if not os.path.isfile(curr_file):  # or curr_file.startswith('.'):
                    continue
                if not os.access(curr_file, os.R_OK|os.W_OK):
                    log.debug("Missing permissions for {}".format(curr_file))
                if not os.access(curr_file, os.R_OK | os.W_OK):
                    log_function("Missing permissions for {}".format(curr_file))
                    access = False
        return access

    @classmethod
    def moveallfiles(cls, root_src_dir, root_dst_dir):
    def move_all_files(cls, root_src_dir, root_dst_dir):
        permission = None
        new_permissions = os.stat(root_dst_dir)
        log.debug('Performing Update on OS-System: %s', sys.platform)
        change_permissions = not (sys.platform == "win32" or sys.platform == "darwin")
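
The new log_function parameter is plain dependency injection: the same walk can report through the application logger during a real update, or through print() during a dry run. A condensed, standalone sketch of the idea (the check_tree_access name is illustrative, not from the commit):

    import os

    def check_tree_access(root, log_function=print):
        # walk `root` and report every file that is not readable and writable,
        # mirroring the os.R_OK | os.W_OK checks in check_permissions()
        access = True
        for dir_path, __, files in os.walk(root):
            for name in files:
                curr_file = os.path.join(dir_path, name)
                if not os.access(curr_file, os.R_OK | os.W_OK):
                    log_function("Missing permissions for {}".format(curr_file))
                    access = False
        return access

    check_tree_access(".", print)          # dry run: report straight to stdout
    # check_tree_access(".", log.debug)    # real update: report to the app logger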

@@ -258,18 +257,11 @@ class Updater(threading.Thread):
    def update_source(self, source, destination):
        # destination files
        old_list = list()
        exclude = (
            os.sep + 'app.db', os.sep + 'calibre-web.log1', os.sep + 'calibre-web.log2', os.sep + 'gdrive.db',
            os.sep + 'vendor', os.sep + 'calibre-web.log', os.sep + '.git', os.sep + 'client_secrets.json',
            os.sep + 'gdrive_credentials', os.sep + 'settings.yaml', os.sep + 'venv', os.sep + 'virtualenv',
            os.sep + 'access.log', os.sep + 'access.log1', os.sep + 'access.log2',
            os.sep + '.calibre-web.log.swp', os.sep + '_sqlite3.so', os.sep + 'cps' + os.sep + '.HOMEDIR',
            os.sep + 'gmail.json'
        )
        exclude = self._add_excluded_files(log.info)
        additional_path = self.is_venv()
        if additional_path:
            exclude = exclude + (additional_path,)

            exclude.append(additional_path)
        exclude = tuple(exclude)
        # check if we are in a package, rename cps.py to __init__.py
        if constants.HOME_CONFIG:
            shutil.move(os.path.join(source, 'cps.py'), os.path.join(source, '__init__.py'))

@@ -293,8 +285,8 @@ class Updater(threading.Thread):

        remove_items = self.reduce_dirs(rf, new_list)

        if self.check_permissions(source, destination):
            self.moveallfiles(source, destination)
        if self.check_permissions(source, destination, log.debug):
            self.move_all_files(source, destination)

        for item in remove_items:
            item_path = os.path.join(destination, item[1:])

@@ -332,6 +324,12 @@ class Updater(threading.Thread):
        log.debug("Stable version: {}".format(constants.STABLE_VERSION))
        return constants.STABLE_VERSION  # Current version

    @classmethod
    def dry_run(cls):
        cls._add_excluded_files(print)
        cls.check_permissions(constants.BASE_DIR, constants.BASE_DIR, print)
        print("\n*** Finished ***")

    @staticmethod
    def _populate_parent_commits(update_data, status, locale, tz, parents):
        try:

@@ -340,6 +338,7 @@ class Updater(threading.Thread):
            remaining_parents_cnt = 10
        except (IndexError, KeyError):
            remaining_parents_cnt = None
            parent_commit = None

        if remaining_parents_cnt is not None:
            while True:

@@ -391,6 +390,30 @@ class Updater(threading.Thread):
            status['message'] = _(u'General error')
        return status, update_data

    @staticmethod
    def _add_excluded_files(log_function):
        excluded_files = [
            os.sep + 'app.db', os.sep + 'calibre-web.log1', os.sep + 'calibre-web.log2', os.sep + 'gdrive.db',
            os.sep + 'vendor', os.sep + 'calibre-web.log', os.sep + '.git', os.sep + 'client_secrets.json',
            os.sep + 'gdrive_credentials', os.sep + 'settings.yaml', os.sep + 'venv', os.sep + 'virtualenv',
            os.sep + 'access.log', os.sep + 'access.log1', os.sep + 'access.log2',
            os.sep + '.calibre-web.log.swp', os.sep + '_sqlite3.so', os.sep + 'cps' + os.sep + '.HOMEDIR',
            os.sep + 'gmail.json', os.sep + 'exclude.txt'
        ]
        try:
            with open(os.path.join(constants.BASE_DIR, "exclude.txt"), "r") as f:
                lines = f.readlines()
            for line in lines:
                processed_line = line.strip("\n\r ").strip("\"'").lstrip("\\/ ").\
                    replace("\\", os.sep).replace("/", os.sep)
                if os.path.exists(os.path.join(constants.BASE_DIR, processed_line)):
                    excluded_files.append(os.sep + processed_line)
                else:
                    log_function("File list for updater: {} not found".format(line))
        except (PermissionError, FileNotFoundError):
            log_function("Excluded file list for updater not found, or not accessible")
        return excluded_files
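
The line-cleanup chain above is dense, so here is what a single exclude.txt entry goes through, as a quick standalone illustration (the sample path is made up):

    import os

    line = '  "cps/static/custom.css"\n'
    processed_line = line.strip("\n\r ").strip("\"'").lstrip("\\/ ").\
        replace("\\", os.sep).replace("/", os.sep)
    print(processed_line)  # cps/static/custom.css, with os.sep path separators

Note that an entry is only honored when the resulting path actually exists under BASE_DIR; a typo in exclude.txt is reported through log_function rather than silently excluded.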

    def _nightly_available_updates(self, request_method, locale):
        tz = datetime.timedelta(seconds=time.timezone if (time.localtime().tm_isdst == 0) else time.altzone)
        if request_method == "GET":

@@ -449,7 +472,7 @@ class Updater(threading.Thread):
        return ''

    def _stable_updater_set_status(self, i, newer, status, parents, commit):
        if i == -1 and newer == False:
        if i == -1 and newer is False:
            status.update({
                'update': True,
                'success': True,

@@ -458,7 +481,7 @@ class Updater(threading.Thread):
                'history': parents
            })
            self.updateFile = commit[0]['zipball_url']
        elif i == -1 and newer == True:
        elif i == -1 and newer is True:
            status.update({
                'update': True,
                'success': True,

@@ -495,6 +518,7 @@ class Updater(threading.Thread):
        return status, parents

    def _stable_available_updates(self, request_method):
        status = None
        if request_method == "GET":
            parents = []
            # repository_url = 'https://api.github.com/repos/flatpak/flatpak/releases'  # test URL

@@ -539,7 +563,7 @@ class Updater(threading.Thread):
        except ValueError:
            current_version[2] = int(current_version[2].split(' ')[0])-1

        # Check if major versions are identical search for newest non equal commit and update to this one
        # Check if major versions are identical, search for the newest non-equal commit and update to this one
        if major_version_update == current_version[0]:
            if (minor_version_update == current_version[1] and
                    patch_version_update > current_version[2]) or \

@@ -552,7 +576,7 @@ class Updater(threading.Thread):
                i -= 1
                continue
            if major_version_update > current_version[0]:
                # found update update to last version before major update, unless current version is on last version
                # found update to last version before major update, unless current version is on last version
                # before major update
                if i == (len(commit) - 1):
                    i -= 1
231
cps/web.py
@@ -50,7 +50,8 @@ from . import calibre_db, kobo_sync_status
from .gdriveutils import getFileFromEbooksFolder, do_gdrive_download
from .helper import check_valid_domain, render_task_status, check_email, check_username, \
    get_cc_columns, get_book_cover, get_download_link, send_mail, generate_random_password, \
    send_registration_mail, check_send_to_kindle, check_read_formats, tags_filters, reset_password, valid_email
    send_registration_mail, check_send_to_kindle, check_read_formats, tags_filters, reset_password, valid_email, \
    edit_book_read_status
from .pagination import Pagination
from .redirect import redirect_back
from .usermanagement import login_required_if_no_ano

@@ -154,46 +155,12 @@ def bookmark(book_id, book_format):
@web.route("/ajax/toggleread/<int:book_id>", methods=['POST'])
@login_required
def toggle_read(book_id):
    if not config.config_read_column:
        book = ub.session.query(ub.ReadBook).filter(and_(ub.ReadBook.user_id == int(current_user.id),
                                                         ub.ReadBook.book_id == book_id)).first()
        if book:
            if book.read_status == ub.ReadBook.STATUS_FINISHED:
                book.read_status = ub.ReadBook.STATUS_UNREAD
            else:
                book.read_status = ub.ReadBook.STATUS_FINISHED
        else:
            readBook = ub.ReadBook(user_id=current_user.id, book_id = book_id)
            readBook.read_status = ub.ReadBook.STATUS_FINISHED
            book = readBook
        if not book.kobo_reading_state:
            kobo_reading_state = ub.KoboReadingState(user_id=current_user.id, book_id=book_id)
            kobo_reading_state.current_bookmark = ub.KoboBookmark()
            kobo_reading_state.statistics = ub.KoboStatistics()
            book.kobo_reading_state = kobo_reading_state
        ub.session.merge(book)
        ub.session_commit("Book {} readbit toggled".format(book_id))
    message = edit_book_read_status(book_id)
    if message:
        return message, 400
    else:
        try:
            calibre_db.update_title_sort(config)
            book = calibre_db.get_filtered_book(book_id)
            read_status = getattr(book, 'custom_column_' + str(config.config_read_column))
            if len(read_status):
                read_status[0].value = not read_status[0].value
                calibre_db.session.commit()
            else:
                cc_class = db.cc_classes[config.config_read_column]
                new_cc = cc_class(value=1, book=book_id)
                calibre_db.session.add(new_cc)
                calibre_db.session.commit()
        except (KeyError, AttributeError):
            log.error(u"Custom Column No.%d is not existing in calibre database", config.config_read_column)
            return "Custom Column No.{} is not existing in calibre database".format(config.config_read_column), 400
        except (OperationalError, InvalidRequestError) as e:
            calibre_db.session.rollback()
            log.error(u"Read status could not set: {}".format(e))
            return "Read status could not set: {}".format(e), 400
    return ""
        return message
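
The route above shrinks to a thin wrapper because the heavy lifting moved into the newly imported edit_book_read_status() helper, which returns an error string on failure and an empty string on success; that contract is what drives the truthiness check. Hedged sketch of how the new body reads, condensed from the diff:

    # contract of the relocated helper, as the new route consumes it
    message = edit_book_read_status(book_id)
    if message:       # non-empty string -> something went wrong, report it
        return message, 400
    return message    # "" -> success, empty 200 response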

@web.route("/ajax/togglearchived/<int:book_id>", methods=['POST'])
@login_required

@@ -409,6 +376,7 @@ def render_books_list(data, sort, book_id, page):
    else:
        website = data or "newest"
        entries, random, pagination = calibre_db.fill_indexpage(page, 0, db.Books, True, order[0],
                                                                False, 0,
                                                                db.books_series_link,
                                                                db.Books.id == db.books_series_link.c.book,
                                                                db.Series)

@@ -422,6 +390,7 @@ def render_rated_books(page, book_id, order):
                                                            db.Books,
                                                            db.Books.ratings.any(db.Ratings.rating > 9),
                                                            order[0],
                                                            False, 0,
                                                            db.books_series_link,
                                                            db.Books.id == db.books_series_link.c.book,
                                                            db.Series)

@@ -490,6 +459,7 @@ def render_downloaded_books(page, order, user_id):
                                                            db.Books,
                                                            ub.Downloads.user_id == user_id,
                                                            order[0],
                                                            False, 0,
                                                            db.books_series_link,
                                                            db.Books.id == db.books_series_link.c.book,
                                                            db.Series,

@@ -516,6 +486,7 @@ def render_author_books(page, author_id, order):
                                                            db.Books,
                                                            db.Books.authors.any(db.Authors.id == author_id),
                                                            [order[0][0], db.Series.name, db.Books.series_index],
                                                            False, 0,
                                                            db.books_series_link,
                                                            db.Books.id == db.books_series_link.c.book,
                                                            db.Series)

@@ -534,7 +505,6 @@ def render_author_books(page, author_id, order):
    if services.goodreads_support and config.config_use_goodreads:
        author_info = services.goodreads_support.get_author_info(author_name)
        other_books = services.goodreads_support.get_other_books(author_info, entries)

    return render_title_template('author.html', entries=entries, pagination=pagination, id=author_id,
                                 title=_(u"Author: %(name)s", name=author_name), author=author_info,
                                 other_books=other_books, page="author", order=order[1])

@@ -547,6 +517,7 @@ def render_publisher_books(page, book_id, order):
                                                            db.Books,
                                                            db.Books.publishers.any(db.Publishers.id == book_id),
                                                            [db.Series.name, order[0][0], db.Books.series_index],
                                                            False, 0,
                                                            db.books_series_link,
                                                            db.Books.id == db.books_series_link.c.book,
                                                            db.Series)

@@ -608,6 +579,7 @@ def render_category_books(page, book_id, order):
                                                            db.Books,
                                                            db.Books.tags.any(db.Tags.id == book_id),
                                                            [order[0][0], db.Series.name, db.Books.series_index],
                                                            False, 0,
                                                            db.books_series_link,
                                                            db.Books.id == db.books_series_link.c.book,
                                                            db.Series)

@@ -643,6 +615,7 @@ def render_read_books(page, are_read, as_xml=False, order=None):
                                                            db.Books,
                                                            db_filter,
                                                            sort,
                                                            False, 0,
                                                            db.books_series_link,
                                                            db.Books.id == db.books_series_link.c.book,
                                                            db.Series,

@@ -657,6 +630,7 @@ def render_read_books(page, are_read, as_xml=False, order=None):
                                                            db.Books,
                                                            db_filter,
                                                            sort,
                                                            False, 0,
                                                            db.books_series_link,
                                                            db.Books.id == db.books_series_link.c.book,
                                                            db.Series,

@@ -694,11 +668,12 @@ def render_archived_books(page, sort):

    archived_filter = db.Books.id.in_(archived_book_ids)

    entries, random, pagination = calibre_db.fill_indexpage_with_archived_books(page, 0,
                                                                                db.Books,
    entries, random, pagination = calibre_db.fill_indexpage_with_archived_books(page, db.Books,
                                                                                0,
                                                                                archived_filter,
                                                                                order,
                                                                                allow_show_archived=True)
                                                                                True,
                                                                                False, 0)

    name = _(u'Archived Books') + ' (' + str(len(archived_book_ids)) + ')'
    pagename = "archived"

@@ -739,7 +714,13 @@ def render_prepare_search_form(cc):

def render_search_results(term, offset=None, order=None, limit=None):
    join = db.books_series_link, db.Books.id == db.books_series_link.c.book, db.Series
    entries, result_count, pagination = calibre_db.get_search_results(term, offset, order, limit, *join)
    entries, result_count, pagination = calibre_db.get_search_results(term,
                                                                      offset,
                                                                      order,
                                                                      limit,
                                                                      False,
                                                                      config.config_read_column,
                                                                      *join)
    return render_title_template('search.html',
                                 searchterm=term,
                                 pagination=pagination,

@@ -795,13 +776,13 @@ def list_books():
        state = json.loads(request.args.get("state", "[]"))
    elif sort == "tags":
        order = [db.Tags.name.asc()] if order == "asc" else [db.Tags.name.desc()]
        join = db.books_tags_link,db.Books.id == db.books_tags_link.c.book, db.Tags
        join = db.books_tags_link, db.Books.id == db.books_tags_link.c.book, db.Tags
    elif sort == "series":
        order = [db.Series.name.asc()] if order == "asc" else [db.Series.name.desc()]
        join = db.books_series_link,db.Books.id == db.books_series_link.c.book, db.Series
        join = db.books_series_link, db.Books.id == db.books_series_link.c.book, db.Series
    elif sort == "publishers":
        order = [db.Publishers.name.asc()] if order == "asc" else [db.Publishers.name.desc()]
        join = db.books_publishers_link,db.Books.id == db.books_publishers_link.c.book, db.Publishers
        join = db.books_publishers_link, db.Books.id == db.books_publishers_link.c.book, db.Publishers
    elif sort == "authors":
        order = [db.Authors.name.asc(), db.Series.name, db.Books.series_index] if order == "asc" \
            else [db.Authors.name.desc(), db.Series.name.desc(), db.Books.series_index.desc()]

@@ -815,25 +796,62 @@ def list_books():
    elif not state:
        order = [db.Books.timestamp.desc()]

    total_count = filtered_count = calibre_db.session.query(db.Books).filter(calibre_db.common_filters(False)).count()

    total_count = filtered_count = calibre_db.session.query(db.Books).filter(calibre_db.common_filters(allow_show_archived=True)).count()
    if state is not None:
        if search:
            books = calibre_db.search_query(search).all()
            books = calibre_db.search_query(search, config.config_read_column).all()
            filtered_count = len(books)
        else:
            books = calibre_db.session.query(db.Books).filter(calibre_db.common_filters()).all()
        entries = calibre_db.get_checkbox_sorted(books, state, off, limit, order)
            if not config.config_read_column:
                books = (calibre_db.session.query(db.Books, ub.ReadBook.read_status, ub.ArchivedBook.is_archived)
                         .select_from(db.Books)
                         .outerjoin(ub.ReadBook,
                                    and_(ub.ReadBook.user_id == int(current_user.id),
                                         ub.ReadBook.book_id == db.Books.id)))
            else:
                try:
                    read_column = db.cc_classes[config.config_read_column]
                    books = (calibre_db.session.query(db.Books, read_column.value, ub.ArchivedBook.is_archived)
                             .select_from(db.Books)
                             .outerjoin(read_column, read_column.book == db.Books.id))
                except (KeyError, AttributeError):
                    log.error("Custom Column No.%d is not existing in calibre database", read_column)
                    # Skip linking read column and return None instead of read status
                    books = calibre_db.session.query(db.Books, None, ub.ArchivedBook.is_archived)
            books = (books.outerjoin(ub.ArchivedBook, and_(db.Books.id == ub.ArchivedBook.book_id,
                                                           int(current_user.id) == ub.ArchivedBook.user_id))
                     .filter(calibre_db.common_filters(allow_show_archived=True)).all())
        entries = calibre_db.get_checkbox_sorted(books, state, off, limit, order, True)
    elif search:
        entries, filtered_count, __ = calibre_db.get_search_results(search, off, [order,''], limit, *join)
        entries, filtered_count, __ = calibre_db.get_search_results(search,
                                                                    off,
                                                                    [order, ''],
                                                                    limit,
                                                                    True,
                                                                    config.config_read_column,
                                                                    *join)
    else:
        entries, __, __ = calibre_db.fill_indexpage((int(off) / (int(limit)) + 1), limit, db.Books, True, order, *join)
        entries, __, __ = calibre_db.fill_indexpage_with_archived_books((int(off) / (int(limit)) + 1),
                                                                        db.Books,
                                                                        limit,
                                                                        True,
                                                                        order,
                                                                        True,
                                                                        True,
                                                                        config.config_read_column,
                                                                        *join)

    result = list()
    for entry in entries:
        for index in range(0, len(entry.languages)):
            entry.languages[index].language_name = isoLanguages.get_language_name(get_locale(), entry.languages[
        val = entry[0]
        val.read_status = entry[1] == ub.ReadBook.STATUS_FINISHED
        val.is_archived = entry[2] is True
        for index in range(0, len(val.languages)):
            val.languages[index].language_name = isoLanguages.get_language_name(get_locale(), val.languages[
                index].lang_code)
    table_entries = {'totalNotFiltered': total_count, 'total': filtered_count, "rows": entries}
        result.append(val)

    table_entries = {'totalNotFiltered': total_count, 'total': filtered_count, "rows": result}
    js_list = json.dumps(table_entries, cls=db.AlchemyEncoder)

    response = make_response(js_list)
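
The reshaped query now yields (Books, read_status, is_archived) tuples rather than bare Books rows, which is why the loop above unpacks entry[0], entry[1], entry[2]. A self-contained miniature of that outer-join shape, with models trimmed to the relevant columns (runnable against in-memory SQLite with SQLAlchemy 1.4+):

    from sqlalchemy import Column, Integer, String, and_, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class Book(Base):
        __tablename__ = "books"
        id = Column(Integer, primary_key=True)
        title = Column(String)

    class ReadBook(Base):
        __tablename__ = "read_books"
        id = Column(Integer, primary_key=True)
        book_id = Column(Integer)
        user_id = Column(Integer)
        read_status = Column(Integer)

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add_all([Book(id=1, title="Dune"), Book(id=2, title="Emma"),
                         ReadBook(book_id=1, user_id=7, read_status=1)])
        session.commit()
        # outer join keeps books with no read record, yielding None for them
        rows = (session.query(Book, ReadBook.read_status)
                .select_from(Book)
                .outerjoin(ReadBook, and_(ReadBook.user_id == 7,
                                          ReadBook.book_id == Book.id))
                .all())
        for book, read_status in rows:
            print(book.title, read_status)  # Dune 1 / Emma None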

@@ -843,8 +861,6 @@ def list_books():
@web.route("/ajax/table_settings", methods=['POST'])
@login_required
def update_table_settings():
    # vals = request.get_json()
    # ToDo: Save table settings
    current_user.view_settings['table'] = json.loads(request.data)
    try:
        flag_modified(current_user, "view_settings")

@@ -1055,13 +1071,6 @@ def get_tasks_status():
    return render_title_template('tasks.html', entries=answer, title=_(u"Tasks"), page="tasks")


# method is available without login and not protected by CSRF to make it easily reachable
@app.route("/reconnect", methods=['GET'])
def reconnect():
    calibre_db.reconnect_db(config, ub.app_DB_path)
    return json.dumps({})
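
While it lived in web.py, the route could be exercised with a plain unauthenticated GET. A quick sketch of such a call (localhost:8083 assumes the default port, and requests is a third-party HTTP client, not something this module imports):

    import requests

    # the endpoint answers with an empty JSON object ("{}") when the reconnect succeeds
    print(requests.get("http://localhost:8083/reconnect").text)

Being login-free and CSRF-exempt is what made it callable from a host-side cron job after metadata.db changes under Docker.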

# ################################### Search functions ################################################################

@web.route("/search", methods=["GET"])
@@ -1259,7 +1268,24 @@ def render_adv_search_results(term, offset=None, order=None, limit=None):

    cc = get_cc_columns(filter_config_custom_read=True)
    calibre_db.session.connection().connection.connection.create_function("lower", 1, db.lcase)
    q = calibre_db.session.query(db.Books).outerjoin(db.books_series_link, db.Books.id == db.books_series_link.c.book)\
    if not config.config_read_column:
        query = (calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, ub.ReadBook).select_from(db.Books)
                 .outerjoin(ub.ReadBook, and_(db.Books.id == ub.ReadBook.book_id,
                                              int(current_user.id) == ub.ReadBook.user_id)))
    else:
        try:
            read_column = cc[config.config_read_column]
            query = (calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, read_column.value)
                     .select_from(db.Books)
                     .outerjoin(read_column, read_column.book == db.Books.id))
        except (KeyError, AttributeError):
            log.error("Custom Column No.%d is not existing in calibre database", config.config_read_column)
            # Skip linking read column
            query = calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, None)
    query = query.outerjoin(ub.ArchivedBook, and_(db.Books.id == ub.ArchivedBook.book_id,
                                                  int(current_user.id) == ub.ArchivedBook.user_id))

    q = query.outerjoin(db.books_series_link, db.Books.id == db.books_series_link.c.book)\
        .outerjoin(db.Series)\
        .filter(calibre_db.common_filters(True))

@@ -1323,7 +1349,7 @@ def render_adv_search_results(term, offset=None, order=None, limit=None):
                              rating_high,
                              rating_low,
                              read_status)
    q = q.filter()
    # q = q.filter()
    if author_name:
        q = q.filter(db.Books.authors.any(func.lower(db.Authors.name).ilike("%" + author_name + "%")))
    if book_title:

@@ -1354,7 +1380,7 @@ def render_adv_search_results(term, offset=None, order=None, limit=None):

    q = q.order_by(*sort).all()
    flask_session['query'] = json.dumps(term)
    ub.store_ids(q)
    ub.store_combo_ids(q)
    result_count = len(q)
    if offset is not None and limit is not None:
        offset = int(offset)

@@ -1363,16 +1389,16 @@ def render_adv_search_results(term, offset=None, order=None, limit=None):
    else:
        offset = 0
        limit_all = result_count
    entries = calibre_db.order_authors(q[offset:limit_all], list_return=True, combined=True)
    return render_title_template('search.html',
                                 adv_searchterm=searchterm,
                                 pagination=pagination,
                                 entries=q[offset:limit_all],
                                 entries=entries,
                                 result_count=result_count,
                                 title=_(u"Advanced Search"), page="advsearch",
                                 order=order[1])


@web.route("/advsearch", methods=['GET'])
@login_required_if_no_ano
def advanced_search_form():
@@ -1748,63 +1774,40 @@ def read_book(book_id, book_format):
@web.route("/book/<int:book_id>")
@login_required_if_no_ano
def show_book(book_id):
    entries = calibre_db.get_filtered_book(book_id, allow_show_archived=True)
    entries = calibre_db.get_book_read_archived(book_id, config.config_read_column, allow_show_archived=True)
    if entries:
        for index in range(0, len(entries.languages)):
            entries.languages[index].language_name = isoLanguages.get_language_name(get_locale(), entries.languages[
        read_book = entries[1]
        archived_book = entries[2]
        entry = entries[0]
        entry.read_status = read_book == ub.ReadBook.STATUS_FINISHED
        entry.is_archived = archived_book
        for index in range(0, len(entry.languages)):
            entry.languages[index].language_name = isoLanguages.get_language_name(get_locale(), entry.languages[
                index].lang_code)
        cc = get_cc_columns(filter_config_custom_read=True)
        book_in_shelfs = []
        shelfs = ub.session.query(ub.BookShelf).filter(ub.BookShelf.book_id == book_id).all()
        for entry in shelfs:
            book_in_shelfs.append(entry.shelf)
        for sh in shelfs:
            book_in_shelfs.append(sh.shelf)

        if not current_user.is_anonymous:
            if not config.config_read_column:
                matching_have_read_book = ub.session.query(ub.ReadBook). \
                    filter(and_(ub.ReadBook.user_id == int(current_user.id), ub.ReadBook.book_id == book_id)).all()
                have_read = len(
                    matching_have_read_book) > 0 and matching_have_read_book[0].read_status == ub.ReadBook.STATUS_FINISHED
            else:
                try:
                    matching_have_read_book = getattr(entries, 'custom_column_' + str(config.config_read_column))
                    have_read = len(matching_have_read_book) > 0 and matching_have_read_book[0].value
                except (KeyError, AttributeError):
                    log.error("Custom Column No.%d is not existing in calibre database", config.config_read_column)
                    have_read = None
        entry.tags = sort(entry.tags, key=lambda tag: tag.name)

            archived_book = ub.session.query(ub.ArchivedBook).\
                filter(and_(ub.ArchivedBook.user_id == int(current_user.id),
                            ub.ArchivedBook.book_id == book_id)).first()
            is_archived = archived_book and archived_book.is_archived
        entry.authors = calibre_db.order_authors([entry])

        else:
            have_read = None
            is_archived = None
        entry.kindle_list = check_send_to_kindle(entry)
        entry.reader_list = check_read_formats(entry)

        entries.tags = sort(entries.tags, key=lambda tag: tag.name)

        entries = calibre_db.order_authors(entries)

        kindle_list = check_send_to_kindle(entries)
        reader_list = check_read_formats(entries)

        audioentries = []
        for media_format in entries.data:
        entry.audioentries = []
        for media_format in entry.data:
            if media_format.format.lower() in constants.EXTENSIONS_AUDIO:
                audioentries.append(media_format.format.lower())
                entry.audioentries.append(media_format.format.lower())

        return render_title_template('detail.html',
                                     entry=entries,
                                     audioentries=audioentries,
                                     entry=entry,
                                     cc=cc,
                                     is_xhr=request.headers.get('X-Requested-With') == 'XMLHttpRequest',
                                     title=entries.title,
                                     title=entry.title,
                                     books_shelfs=book_in_shelfs,
                                     have_read=have_read,
                                     is_archived=is_archived,
                                     kindle_list=kindle_list,
                                     reader_list=reader_list,
                                     page="book")
    else:
        log.debug(u"Oops! Selected book title is unavailable. File does not exist or is not accessible")
0
exclude.txt
Normal file

@@ -10,7 +10,7 @@ pyasn1>=0.1.9,<0.5.0
PyDrive2>=1.3.1,<1.11.0
PyYAML>=3.12
rsa>=3.4.2,<4.9.0
six>=1.10.0,<1.17.0
# six>=1.10.0,<1.17.0

# Gmail
google-auth-oauthlib>=0.4.3,<0.5.0
@@ -31,6 +31,11 @@ SQLAlchemy-Utils>=0.33.5,<0.39.0
# metadata extraction
rarfile>=2.7
scholarly>=1.2.0,<1.6
markdown2>=2.0.0,<2.5.0
html2text>=2020.1.16,<2022.1.1
python-dateutil>=2.1,<2.9.0
beautifulsoup4>=4.0.1,<4.2.0
cchardet>=2.0.0,<2.2.0

# Comics
natsort>=2.2.0,<8.2.0
@@ -86,6 +86,11 @@ oauth =
metadata =
    rarfile>=2.7
    scholarly>=1.2.0,<1.6
    markdown2>=2.0.0,<2.5.0
    html2text>=2020.1.16,<2022.1.1
    python-dateutil>=2.1,<2.9.0
    beautifulsoup4>=4.0.1,<4.2.0
    cchardet>=2.0.0,<2.2.0
comics =
    natsort>=2.2.0,<8.2.0
    comicapi>=2.2.0,<2.3.0
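
With the extras section above in place, the optional metadata-extraction stack can be pulled in through pip's extras syntax, e.g. pip install .[metadata] from a source checkout; assuming the PyPI distribution keeps the name calibreweb, the packaged form would be pip install calibreweb[metadata]. The comics extras install the same way.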

File diff suppressed because it is too large