Merge branch 'master' into Develop
# Conflicts:
#	cps/book_formats.py
#	cps/helper.py
#	cps/web.py
commit 2de4bfdcf2
@@ -35,6 +35,7 @@ from babel import Locale as LC
from babel import negotiate_locale
import os
import ub
import sys
from ub import Config, Settings
try:
import cPickle
@@ -72,8 +73,14 @@ config = Config()
import db

with open(os.path.join(config.get_main_dir, 'cps/translations/iso639.pickle'), 'rb') as f:
try:
with open(os.path.join(config.get_main_dir, 'cps/translations/iso639.pickle'), 'rb') as f:
language_table = cPickle.load(f)
except cPickle.UnpicklingError as error:
# app.logger.error("Can't read file cps/translations/iso639.pickle: %s", error)
print("Can't read file cps/translations/iso639.pickle: %s" % error)
sys.exit(1)

searched_ids = {}
cps/db.py (11 changed lines)
@@ -27,6 +27,7 @@ import ast
from cps import config
import ub
import sys
import unidecode

session = None
cc_exceptions = ['datetime', 'comments', 'float', 'composite', 'series']
@@ -46,7 +47,7 @@ def title_sort(title):

def lcase(s):
return s.lower()
return unidecode.unidecode(s.lower())


def ucase(s):
@@ -112,6 +113,8 @@ class Identifiers(Base):
return u"Google Books"
elif self.type == "kobo":
return u"Kobo"
if self.type == "lubimyczytac":
return u"Lubimyczytac"
else:
return self.type
@@ -130,6 +133,8 @@ class Identifiers(Base):
return u"https://books.google.com/books?id={0}".format(self.val)
elif self.type == "kobo":
return u"https://www.kobo.com/ebook/{0}".format(self.val)
elif self.type == "lubimyczytac":
return u" http://lubimyczytac.pl/ksiazka/{0}".format(self.val)
elif self.type == "url":
return u"{0}".format(self.val)
else:
@@ -355,8 +360,8 @@ def setup_db():
ub.session.commit()
config.loadSettings()
conn.connection.create_function('title_sort', 1, title_sort)
conn.connection.create_function('lower', 1, lcase)
conn.connection.create_function('upper', 1, ucase)
# conn.connection.create_function('lower', 1, lcase)
# conn.connection.create_function('upper', 1, ucase)

if not cc_classes:
cc = conn.execute("SELECT id, datatype FROM custom_columns")
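The setup_db() hunk above, together with the earlier lcase() change, swaps SQLite's built-in lower() for a Python function that lowercases and strips diacritics via unidecode, so LIKE/ilike comparisons become accent-insensitive. A minimal sketch of that idea using plain sqlite3 rather than the app's SQLAlchemy session; the table and data are invented for illustration:

import sqlite3
import unidecode

def lcase(s):
    # same idea as lcase() in cps/db.py: lowercase plus ASCII transliteration
    return unidecode.unidecode(s.lower())

conn = sqlite3.connect(":memory:")
conn.create_function("lower", 1, lcase)   # shadow the built-in lower()
conn.execute("CREATE TABLE books (title TEXT)")
conn.execute("INSERT INTO books VALUES ('Ariadna auf Naxos')")
print(conn.execute(
    "SELECT title FROM books WHERE lower(title) LIKE lower(?)",
    (u"ariadná%",)
).fetchall())   # the accented search term still matches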
(unnamed file)
@@ -364,7 +364,8 @@ def upload_single_file(request, book, book_id):
global_WorkerThread.add_upload(current_user.nickname,
"<a href=\"" + url_for('web.show_book', book_id=book.id) + "\">" + uploadText + "</a>")

def upload_cover(request, book):
def upload_single_file(request, book, book_id):
if 'btn-upload-cover' in request.files:
requested_file = request.files['btn-upload-cover']
# check for empty request
@@ -380,17 +381,38 @@ def upload_cover(request, book):
except OSError:
flash(_(u"Failed to create path for cover %(path)s (Permission denied).", cover=filepath),
category="error")
return redirect(url_for('web.show_book', book_id=book.id))
return redirect(url_for('show_book', book_id=book.id))
try:
requested_file.save(saved_filename)
# im=Image.open(saved_filename)
book.has_cover = 1
except IOError:
flash(_(u"Cover-file is not a valid image file" % saved_filename), category="error")
return redirect(url_for('web.show_book', book_id=book.id))
except OSError:
flash(_(u"Failed to store cover-file %(cover)s.", cover=saved_filename), category="error")
return redirect(url_for('web.show_book', book_id=book.id))
except IOError:
flash(_(u"Cover-file is not a valid image file" % saved_filename), category="error")
return redirect(url_for('web.show_book', book_id=book.id))
if helper.save_cover(requested_file, book.path) is True:
return True
else:
# ToDo Message not always coorect
flash(_(u"Cover is not a supported imageformat (jpg/png/webp), can't save"), category="error")
return False
return None


def upload_cover(request, book):
if 'btn-upload-cover' in request.files:
requested_file = request.files['btn-upload-cover']
# check for empty request
if requested_file.filename != '':
if helper.save_cover(requested_file, book.path) is True:
return True
else:
# ToDo Message not always coorect
flash(_(u"Cover is not a supported imageformat (jpg/png/webp), can't save"), category="error")
return False
return None

@editbook.route("/admin/book/<int:book_id>", methods=['GET', 'POST'])
@login_required_if_no_ano
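After this rework, upload_cover() reports three outcomes: True (the cover was stored), False (storing failed and an error message was already flashed) and None (the request carried no cover file). A small sketch of how a caller can react to that; apply_uploaded_cover is a made-up wrapper around the upload_cover() shown above:

def apply_uploaded_cover(request, book):
    result = upload_cover(request, book)
    if result is True:
        book.has_cover = 1   # a new cover file was saved for this book
        return True
    if result is False:
        return False         # saving failed; the error was already flashed
    return True              # None: no cover in the request, nothing to change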
@@ -411,7 +433,8 @@ def edit_book(book_id):
return redirect(url_for("web.index"))

upload_single_file(request, book, book_id)
upload_cover(request, book)
if upload_cover(request, book) is True:
book.has_cover = 1
try:
to_save = request.form.to_dict()
# Update book
@@ -457,7 +480,7 @@ def edit_book(book_id):

if not error:
if to_save["cover_url"]:
if helper.save_cover(to_save["cover_url"], book.path) is True:
if helper.save_cover_from_url(to_save["cover_url"], book.path) is True:
book.has_cover = 1
else:
flash(_(u"Cover is not a jpg file, can't save"), category="error")
cps/helper.py (103 changed lines)
@@ -23,6 +23,7 @@ from cps import config, global_WorkerThread, get_locale, db, mimetypes
from flask import current_app as app
from tempfile import gettempdir
import sys
import io
import os
import re
import unicodedata
@@ -72,6 +73,12 @@ try:
except ImportError:
pass # We're not using Python 3

try:
from PIL import Image
use_PIL = True
except ImportError:
use_PIL = False


def update_download(book_id, user_id):
check = ub.session.query(ub.Downloads).filter(ub.Downloads.user_id == user_id).filter(ub.Downloads.book_id ==
@@ -459,27 +466,71 @@ def get_book_cover(cover_path):
return send_from_directory(os.path.join(config.config_calibre_dir, cover_path), "cover.jpg")


# saves book cover to gdrive or locally
def save_cover(url, book_path):
# saves book cover from url
def save_cover_from_url(url, book_path):
img = requests.get(url)
if img.headers.get('content-type') != 'image/jpeg':
app.logger.error("Cover is no jpg file, can't save")
return save_cover(img, book_path)


def save_cover_from_filestorage(filepath, saved_filename, img):
if hasattr(img, '_content'):
f = open(os.path.join(filepath, saved_filename), "wb")
f.write(img._content)
f.close()
else:
# check if file path exists, otherwise create it, copy file to calibre path and delete temp file
if not os.path.exists(filepath):
try:
os.makedirs(filepath)
except OSError:
app.logger.error(u"Failed to create path for cover")
return False
try:
img.save(os.path.join(filepath, saved_filename))
except OSError:
app.logger.error(u"Failed to store cover-file")
return False
except IOError:
app.logger.error(u"Cover-file is not a valid image file")
return False
return True


# saves book cover to gdrive or locally
def save_cover(img, book_path):
content_type = img.headers.get('content-type')

if use_PIL:
if content_type not in ('image/jpeg', 'image/png', 'image/webp'):
app.logger.error("Only jpg/jpeg/png/webp files are supported as coverfile")
return False
# convert to jpg because calibre only supports jpg
if content_type in ('image/png', 'image/webp'):
if hasattr(img,'stream'):
imgc = Image.open(img.stream)
else:
imgc = Image.open(io.BytesIO(img.content))
im = imgc.convert('RGB')
tmp_bytesio = io.BytesIO()
im.save(tmp_bytesio, format='JPEG')
img._content = tmp_bytesio.getvalue()
else:
if content_type not in ('image/jpeg'):
app.logger.error("Only jpg/jpeg files are supported as coverfile")
return False

if config.config_use_google_drive:
if ub.config.config_use_google_drive:
tmpDir = gettempdir()
f = open(os.path.join(tmpDir, "uploaded_cover.jpg"), "wb")
f.write(img.content)
f.close()
gd.uploadFileToEbooksFolder(os.path.join(book_path, 'cover.jpg'), os.path.join(tmpDir, f.name))
if save_cover_from_filestorage(tmpDir, "uploaded_cover.jpg", img) is True:
gd.uploadFileToEbooksFolder(os.path.join(book_path, 'cover.jpg'),
os.path.join(tmpDir, "uploaded_cover.jpg"))
app.logger.info("Cover is saved on Google Drive")
return True
else:
return False
else:
return save_cover_from_filestorage(os.path.join(config.config_calibre_dir, book_path), "cover.jpg", img)

f = open(os.path.join(config.config_calibre_dir, book_path, "cover.jpg"), "wb")
f.write(img.content)
f.close()
app.logger.info("Cover is saved")
return True


def do_download_file(book, book_format, data, headers):
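The PIL branch of save_cover() above converts PNG and WebP uploads to JPEG because Calibre only stores covers as cover.jpg. A stripped-down sketch of just that conversion, assuming Pillow is installed; the FileStorage/requests handling of the real function is left out:

import io
from PIL import Image

def to_jpeg_bytes(raw_bytes):
    # JPEG has no alpha channel, so convert to RGB before re-encoding
    im = Image.open(io.BytesIO(raw_bytes)).convert('RGB')
    out = io.BytesIO()
    im.save(out, format='JPEG')
    return out.getvalue()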
@@ -504,7 +555,6 @@ def do_download_file(book, book_format, data, headers):

def check_unrar(unrarLocation):
error = False
if os.path.exists(unrarLocation):
@@ -652,28 +702,23 @@ def fill_indexpage(page, database, db_filter, order, *join):

# read search results from calibre-database and return it (function is used for feed and simple search
def get_search_results(term):
def get_search_results(term):
db.session.connection().connection.connection.create_function("lower", 1, db.lcase)
q = list()
authorterms = re.split("[, ]+", term)
for authorterm in authorterms:
q.append(db.Books.authors.any(db.or_(db.Authors.name.ilike("%" + authorterm + "%"),
db.Authors.name.ilike("%" + unidecode.unidecode(authorterm) + "%"))))
db.session.connection().connection.connection.create_function("lower", 1, db.lcase)
db.Books.authors.any(db.or_(db.Authors.name.ilike("%" + term + "%"),
db.Authors.name.ilike("%" + unidecode.unidecode(term) + "%")))
q.append(db.Books.authors.any(db.func.lower(db.Authors.name).ilike("%" + authorterm + "%")))

db.Books.authors.any(db.func.lower(db.Authors.name).ilike("%" + term + "%"))

return db.session.query(db.Books).filter(common_filters()).filter(
db.or_(db.Books.tags.any(db.Tags.name.ilike("%" + term + "%")),
db.Books.series.any(db.Series.name.ilike("%" + term + "%")),
db.or_(db.Books.tags.any(db.func.lower(db.Tags.name).ilike("%" + term + "%")),
db.Books.series.any(db.func.lower(db.Series.name).ilike("%" + term + "%")),
db.Books.authors.any(and_(*q)),
db.Books.publishers.any(db.Publishers.name.ilike("%" + term + "%")),
db.Books.title.ilike("%" + term + "%"),
db.Books.tags.any(db.Tags.name.ilike("%" + unidecode.unidecode(term) + "%")),
db.Books.series.any(db.Series.name.ilike("%" + unidecode.unidecode(term) + "%")),
db.Books.publishers.any(db.Publishers.name.ilike("%" + unidecode.unidecode(term) + "%")),
db.Books.title.ilike("%" + unidecode.unidecode(term) + "%")
db.Books.publishers.any(db.func.lower(db.Publishers.name).ilike("%" + term + "%")),
db.func.lower(db.Books.title).ilike("%" + term + "%")
)).all()


def get_unique_other_books(library_books, author_books):
# Get all identifiers (ISBN, Goodreads, etc) and filter author's books by that list so we show fewer duplicates
# Note: Not all images will be shown, even though they're available on Goodreads.com.
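The rewritten get_search_results() drops the duplicated unidecode.unidecode(term) branches and instead wraps each column in db.func.lower(), relying on the re-registered SQLite lower() to normalise case (and, through lcase, diacritics). A rough sketch of that query pattern; session and model names are placeholders, not the app's real objects:

from sqlalchemy import func, or_

def simple_search(session, Books, Tags, term):
    term = term.strip().lower()
    return session.query(Books).filter(
        or_(
            func.lower(Books.title).ilike("%" + term + "%"),
            Books.tags.any(func.lower(Tags.name).ilike("%" + term + "%")),
        )
    ).all()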
(unnamed file)
@@ -1,6 +1,21 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
# Copyright (C) 2019 pwr
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

try:
from iso639 import languages, __version__
(unnamed file)
@@ -312,14 +312,14 @@ def feed_get_cover(book_id):
return helper.get_book_cover(book.path)

@opds.route("/opds/readbooks/")
@login_required_if_no_ano
@requires_basic_auth_if_no_ano
def feed_read_books():
off = request.args.get("offset") or 0
return render_read_books(int(off) / (int(config.config_books_per_page)) + 1, True, True)


@opds.route("/opds/unreadbooks/")
@login_required_if_no_ano
@requires_basic_auth_if_no_ano
def feed_unread_books():
off = request.args.get("offset") or 0
return render_read_books(int(off) / (int(config.config_books_per_page)) + 1, False, True)
(unnamed file)
@@ -1,21 +1,41 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
# Copyright (C) 2018 cervinko, janeczku, OzzieIsaacs
# Flask License
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Copyright © 2010 by the Pallets team, cervinko, janeczku, OzzieIsaacs
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# Some rights reserved.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Redistribution and use in source and binary forms of the software as
# well as documentation, with or without modification, are permitted
# provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Inspired by http://flask.pocoo.org/snippets/35/


class ReverseProxied(object):
(unnamed file)
@@ -25,15 +25,12 @@ var ggResults = [];

$(function () {
var msg = i18nMsg;
var douban = "https://api.douban.com";
var dbSearch = "/v2/book/search";
// var dbGetInfo = "/v2/book/";
// var db_get_info_by_isbn = "/v2/book/isbn/ ";
var dbDone = false;
/*var douban = "https://api.douban.com";
var dbSearch = "/v2/book/search";*/
var dbDone = true;

var google = "https://www.googleapis.com/";
var ggSearch = "/books/v1/volumes";
// var gg_get_info = "/books/v1/volumes/";
var ggDone = false;

var showFlag = 0;
@@ -96,7 +93,7 @@ $(function () {
});
ggDone = false;
}
if (dbDone && dbResults.length > 0) {
/*if (dbDone && dbResults.length > 0) {
dbResults.forEach(function(result) {
var book = {
id: result.id,
@@ -130,7 +127,7 @@ $(function () {
$("#book-list").append($book);
});
dbDone = false;
}
}*/
}

function ggSearchBook (title) {
@@ -150,7 +147,7 @@ $(function () {
});
}

function dbSearchBook (title) {
/*function dbSearchBook (title) {
$.ajax({
url: douban + dbSearch + "?q=" + title + "&fields=all&count=10",
type: "GET",
@@ -160,7 +157,7 @@ $(function () {
dbResults = data.books;
},
error: function error() {
$("#meta-info").html("<p class=\"text-danger\">" + msg.search_error + "!</p>");
$("#meta-info").html("<p class=\"text-danger\">" + msg.search_error + "!</p>"+ $("#meta-info")[0].innerHTML)
},
complete: function complete() {
dbDone = true;
@@ -168,14 +165,13 @@ $(function () {
$("#show-douban").trigger("change");
}
});
}
}*/

function doSearch (keyword) {
showFlag = 0;
$("#meta-info").text(msg.loading);
// var keyword = $("#keyword").val();
if (keyword) {
dbSearchBook(keyword);
// dbSearchBook(keyword);
ggSearchBook(keyword);
}
}
(unnamed file)
@@ -90,7 +90,7 @@
<div class="form-group" aria-label="Upload cover from local drive">
<label class="btn btn-primary btn-file" for="btn-upload-cover">{{ _('Upload Cover from local drive') }}</label>
<div class="upload-cover-input-text" id="upload-cover"></div>
<input id="btn-upload-cover" name="btn-upload-cover" type="file">
<input id="btn-upload-cover" name="btn-upload-cover" type="file" accept=".jpg, .jpeg, .png, .webp">
</div>
<div class="form-group">
<label for="pubdate">{{_('Publishing date')}}</label>
@@ -223,8 +223,8 @@
</div>
<div class="modal-body">
<div class="text-center padded-bottom">
<input type="checkbox" id="show-douban" class="pill" data-control="douban" checked>
<label for="show-douban">Douban <span class="glyphicon glyphicon-ok"></span></label>
<!--input type="checkbox" id="show-douban" class="pill" data-control="douban" checked>
<label for="show-douban">Douban <span class="glyphicon glyphicon-ok"></span></label-->

<input type="checkbox" id="show-google" class="pill" data-control="google" checked>
<label for="show-google">Google <span class="glyphicon glyphicon-ok"></span></label>
(unnamed file)
@@ -243,7 +243,7 @@ class Updater(threading.Thread):

@classmethod
def _stable_version_info(self):
return {'version': '0.6.1'} # Current version
return {'version': '0.6.2'} # Current version

def _nightly_available_updates(self, request_method):
tz = datetime.timedelta(seconds=time.timezone if (time.localtime().tm_isdst == 0) else time.altzone)
(unnamed file)
@@ -25,12 +25,12 @@ import os
from flask_babel import gettext as _
import comic
from cps import app

try:
from lxml.etree import LXML_VERSION as lxmlversion
except ImportError:
lxmlversion = None


try:
from wand.image import Image
from wand import version as ImageVersion
@@ -39,6 +39,7 @@ try:
except (ImportError, RuntimeError) as e:
app.logger.warning('cannot import Image, generating pdf covers for pdf uploads will not work: %s', e)
use_generic_pdf_cover = True

try:
from PyPDF2 import PdfFileReader
from PyPDF2 import __version__ as PyPdfVersion
@@ -61,6 +62,14 @@ except ImportError as e:
app.logger.warning('cannot import fb2, extracting fb2 metadata will not work: %s', e)
use_fb2_meta = False

try:
from PIL import Image
from PIL import __version__ as PILversion
use_PIL = True
except ImportError:
use_PIL = False


__author__ = 'lemmsh'

BookMeta = namedtuple('BookMeta', 'file_path, extension, title, author, cover, description, tags, series, series_id, languages')
@@ -138,6 +147,48 @@ def pdf_preview(tmp_file_path, tmp_dir):
if use_generic_pdf_cover:
return None
else:
if use_PIL:
try:
input1 = PdfFileReader(open(tmp_file_path, 'rb'), strict=False)
page0 = input1.getPage(0)
xObject = page0['/Resources']['/XObject'].getObject()

for obj in xObject:
if xObject[obj]['/Subtype'] == '/Image':
size = (xObject[obj]['/Width'], xObject[obj]['/Height'])
data = xObject[obj]._data # xObject[obj].getData()
if xObject[obj]['/ColorSpace'] == '/DeviceRGB':
mode = "RGB"
else:
mode = "P"
if '/Filter' in xObject[obj]:
if xObject[obj]['/Filter'] == '/FlateDecode':
img = Image.frombytes(mode, size, data)
cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.png"
img.save(filename=os.path.join(tmp_dir, cover_file_name))
return cover_file_name
# img.save(obj[1:] + ".png")
elif xObject[obj]['/Filter'] == '/DCTDecode':
cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jpg"
img = open(cover_file_name, "wb")
img.write(data)
img.close()
return cover_file_name
elif xObject[obj]['/Filter'] == '/JPXDecode':
cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jp2"
img = open(cover_file_name, "wb")
img.write(data)
img.close()
return cover_file_name
else:
img = Image.frombytes(mode, size, data)
cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.png"
img.save(filename=os.path.join(tmp_dir, cover_file_name))
return cover_file_name
# img.save(obj[1:] + ".png")
except Exception as ex:
print(ex)

try:
cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jpg"
with Image(filename=tmp_file_path + "[0]", resolution=150) as img:
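In the PyPDF2 branch above, an image XObject whose filter is /DCTDecode is already a complete JPEG stream, which is why its raw data is written to disk unchanged. A trimmed sketch of just that case; the function and paths are placeholders and the old PyPDF2 1.x API used by the diff is assumed:

from PyPDF2 import PdfFileReader

def first_embedded_jpeg(pdf_path, out_path):
    page = PdfFileReader(open(pdf_path, 'rb'), strict=False).getPage(0)
    xobjects = page['/Resources']['/XObject'].getObject()
    for name in xobjects:
        obj = xobjects[name]
        if obj['/Subtype'] == '/Image' and obj.get('/Filter') == '/DCTDecode':
            with open(out_path, 'wb') as out:
                out.write(obj._data)   # raw JPEG bytes, no decoding needed
            return out_path
    return None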
@@ -145,12 +196,13 @@ def pdf_preview(tmp_file_path, tmp_dir):
img.save(filename=os.path.join(tmp_dir, cover_file_name))
return cover_file_name
except PolicyError as ex:
logger.warning('Pdf extraction forbidden by Imagemagick policy: %s', ex)
app.logger.warning('Pdf extraction forbidden by Imagemagick policy: %s', ex)
return None
except Exception as ex:
logger.warning('Cannot extract cover image, using default: %s', ex)
app.logger.warning('Cannot extract cover image, using default: %s', ex)
return None


def get_versions():
if not use_generic_pdf_cover:
IVersion = ImageVersion.MAGICK_VERSION
@@ -166,7 +218,15 @@ def get_versions():
XVersion = 'v'+'.'.join(map(str, lxmlversion))
else:
XVersion = _(u'not installed')
return {'Image Magick': IVersion, 'PyPdf': PVersion, 'lxml':XVersion, 'Wand Version': WVersion}
if use_PIL:
PILVersion = 'v' + PILversion
else:
PILVersion = _(u'not installed')
return {'Image Magick': IVersion,
'PyPdf': PVersion,
'lxml':XVersion,
'Wand': WVersion,
'Pillow': PILVersion}


def upload(uploadfile):
cps/web.py (33 changed lines)
@@ -41,18 +41,11 @@ from sqlalchemy.sql.expression import text, func, true, false, not_
import json
import datetime
import isoLanguages
from pytz import __version__ as pytzVersion
from uuid import uuid4
import os.path
import sys
import re
import db
from shutil import move, copyfile
import gdriveutils
from redirect import redirect_back
from cps import lm, babel, ub, config, get_locale, language_table, app, db
from pagination import Pagination
import unidecode


feature_support = dict()
@@ -374,7 +367,8 @@ def get_comic_book(book_id, book_format, page):
# ################################### Typeahead ##################################################################

def get_typeahead(database, query, replace=('','')):
entries = db.session.query(database).filter(database.name.ilike("%" + query + "%")).all()
db.session.connection().connection.connection.create_function("lower", 1, db.lcase)
entries = db.session.query(database).filter(db.func.lower(database.name).ilike("%" + query + "%")).all()
json_dumps = json.dumps([dict(name=r.name.replace(*replace)) for r in entries])
return json_dumps
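The db.session.connection().connection.connection chain used here reaches through SQLAlchemy down to the raw sqlite3 connection; user-defined SQL functions such as lower() live on that connection object, so they have to be (re)registered there before the query runs. A small sketch of the same call, with session and lcase as placeholders:

def register_lower(session, lcase):
    # SQLAlchemy Connection -> DBAPI connection wrapper -> raw sqlite3 connection
    raw_sqlite_conn = session.connection().connection.connection
    raw_sqlite_conn.create_function("lower", 1, lcase)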
@@ -428,12 +422,13 @@ def get_matching_tags():
tag_dict = {'tags': []}
if request.method == "GET":
q = db.session.query(db.Books)
db.session.connection().connection.connection.create_function("lower", 1, db.lcase)
author_input = request.args.get('author_name')
title_input = request.args.get('book_title')
include_tag_inputs = request.args.getlist('include_tag')
exclude_tag_inputs = request.args.getlist('exclude_tag')
q = q.filter(db.Books.authors.any(db.Authors.name.ilike("%" + author_input + "%")),
db.Books.title.ilike("%" + title_input + "%"))
q = q.filter(db.Books.authors.any(db.func.lower(db.Authors.name).ilike("%" + author_input + "%")),
db.func.lower(db.Books.title).ilike("%" + title_input + "%"))
if len(include_tag_inputs) > 0:
for tag in include_tag_inputs:
q = q.filter(db.Books.tags.any(db.Tags.id == tag))
@@ -874,20 +869,15 @@ def advanced_search():
searchterm = " + ".join(filter(None, searchterm))
q = q.filter()
if author_name:
q = q.filter(db.Books.authors.any(db.or_(db.Authors.name.ilike("%" + author_name + "%"),
db.Authors.name.ilike("%" + unidecode.unidecode(author_name)
+ "%"))))
q = q.filter(db.Books.authors.any(db.func.lower(db.Authors.name).ilike("%" + author_name + "%")))
if book_title:
q = q.filter(db.or_(db.Books.title.ilike("%" + book_title + "%"),
db.Books.title.ilike("%" + unidecode.unidecode(book_title) + "%")))
q = q.filter(db.func.lower(db.Books.title).ilike("%" + book_title + "%"))
if pub_start:
q = q.filter(db.Books.pubdate >= pub_start)
if pub_end:
q = q.filter(db.Books.pubdate <= pub_end)
if publisher:
q = q.filter(db.Books.publishers.any(db.or_(db.Publishers.name.ilike("%" + publisher + "%"),
db.Publishers.name.ilike("%" + unidecode.unidecode(publisher)
+ "%"),)))
q = q.filter(db.Books.publishers.any(db.func.lower(db.Publishers.name).ilike("%" + publisher + "%")))
for tag in include_tag_inputs:
q = q.filter(db.Books.tags.any(db.Tags.id == tag))
for tag in exclude_tag_inputs:
@@ -910,9 +900,7 @@ def advanced_search():
rating_low = int(rating_low) * 2
q = q.filter(db.Books.ratings.any(db.Ratings.rating >= rating_low))
if description:
q = q.filter(db.Books.comments.any(db.or_(db.Comments.text.ilike("%" + description + "%"),
db.Comments.text.ilike("%" + unidecode.unidecode(description)
+ "%"))))
q = q.filter(db.Books.comments.any(db.func.lower(db.Comments.text).ilike("%" + description + "%")))

# search custom culumns
for c in cc:
@@ -927,8 +915,7 @@ def advanced_search():
db.cc_classes[c.id].value == custom_query))
else:
q = q.filter(getattr(db.Books, 'custom_column_'+str(c.id)).any(
db.or_(db.cc_classes[c.id].value.ilike("%" + custom_query + "%"),
db.cc_classes[c.id].value.ilike("%" + unidecode.unidecode(custom_query) + "%"))))
db.func.lower(db.cc_classes[c.id].value).ilike("%" + custom_query + "%")))
q = q.all()
ids = list()
for element in q:
(unnamed file)
@@ -13,3 +13,4 @@ SQLAlchemy>=1.1.0
tornado>=4.1
Wand>=0.4.4
unidecode>=0.04.19
Pillow>=5.4.0