#!/usr/bin/env python
# -*- coding: utf-8 -*-

# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
#   Copyright (C) 2012-2019 lemmsh cervinko Kennyl matthazinski OzzieIsaacs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
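
"""Metadata extraction for uploaded book files.

Inspects an uploaded file (PDF, EPUB, FB2, CBZ/CBT) and returns a
``BookMeta`` tuple with title, author, cover and related fields.
Optional dependencies (Wand/ImageMagick, PyPDF2, lxml, Pillow) are
probed at import time; missing ones degrade gracefully to default
metadata and no extracted cover.
"""
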
from __future__ import division, print_function, unicode_literals

import os
import hashlib
from tempfile import gettempdir

from flask_babel import gettext as _

from . import logger, comic
from .constants import BookMeta


log = logger.create()


try:
    from lxml.etree import LXML_VERSION as lxmlversion
except ImportError:
    lxmlversion = None

try:
    from wand.image import Image
    from wand import version as ImageVersion
    from wand.exceptions import PolicyError
    use_generic_pdf_cover = False
except (ImportError, RuntimeError) as e:
    log.warning('cannot import Image, generating pdf covers for pdf uploads will not work: %s', e)
    use_generic_pdf_cover = True

try:
    from PyPDF2 import PdfFileReader
    from PyPDF2 import __version__ as PyPdfVersion
    use_pdf_meta = True
except ImportError as e:
    log.warning('cannot import PyPDF2, extracting pdf metadata will not work: %s', e)
    use_pdf_meta = False

try:
    from . import epub
    use_epub_meta = True
except ImportError as e:
    log.warning('cannot import epub, extracting epub metadata will not work: %s', e)
    use_epub_meta = False

try:
    from . import fb2
    use_fb2_meta = True
except ImportError as e:
    log.warning('cannot import fb2, extracting fb2 metadata will not work: %s', e)
    use_fb2_meta = False

try:
    # Imported under an alias so it does not shadow wand's Image class above
    from PIL import Image as PILImage
    from PIL import __version__ as PILversion
    use_PIL = True
except ImportError as e:
    log.warning('cannot import Pillow, using png and webp images as cover will not work: %s', e)
    use_generic_pdf_cover = True
    use_PIL = False
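
# Optional-feature flags set above:
#   use_generic_pdf_cover - True if Wand/ImageMagick or Pillow is missing;
#                           PDF cover extraction is then skipped (cover stays None)
#   use_pdf_meta           - PyPDF2 is available for reading PDF metadata
#   use_epub_meta          - the local epub module imported cleanly
#   use_fb2_meta           - the local fb2 module imported cleanly
#   use_PIL                - Pillow is available for image handling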


__author__ = 'lemmsh'


def process(tmp_file_path, original_file_name, original_file_extension):
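    """Dispatch metadata extraction based on the file extension.

    Tries the format-specific readers (PDF, EPUB, FB2, CBZ/CBT) and falls
    back to :func:`default_meta` when no usable title and author could be
    extracted.
    """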
    meta = None
    try:
        if ".PDF" == original_file_extension.upper():
            meta = pdf_meta(tmp_file_path, original_file_name, original_file_extension)
        if ".EPUB" == original_file_extension.upper() and use_epub_meta is True:
            meta = epub.get_epub_info(tmp_file_path, original_file_name, original_file_extension)
        if ".FB2" == original_file_extension.upper() and use_fb2_meta is True:
            meta = fb2.get_fb2_info(tmp_file_path, original_file_extension)
        if original_file_extension.upper() in ['.CBZ', '.CBT']:
            meta = comic.get_comic_info(tmp_file_path, original_file_name, original_file_extension)
    except Exception as ex:
        log.warning('cannot parse metadata, using default: %s', ex)

    if meta and meta.title.strip() and meta.author.strip():
        return meta
    else:
        return default_meta(tmp_file_path, original_file_name, original_file_extension)


def default_meta(tmp_file_path, original_file_name, original_file_extension):
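    """Return a minimal BookMeta using the file name as title and 'Unknown' as author."""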
    return BookMeta(
        file_path=tmp_file_path,
        extension=original_file_extension,
        title=original_file_name,
        author=u"Unknown",
        cover=None,
        description="",
        tags="",
        series="",
        series_id="",
        languages="")


def pdf_meta(tmp_file_path, original_file_name, original_file_extension):
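    """Read title, author and subject from the PDF document info via PyPDF2, if available."""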
    if use_pdf_meta:
        pdf = PdfFileReader(open(tmp_file_path, 'rb'))
        doc_info = pdf.getDocumentInfo()
    else:
        doc_info = None

    if doc_info is not None:
        author = doc_info.author if doc_info.author else u"Unknown"
        title = doc_info.title if doc_info.title else original_file_name
        subject = doc_info.subject
    else:
        author = u"Unknown"
        title = original_file_name
        subject = ""
    return BookMeta(
        file_path=tmp_file_path,
        extension=original_file_extension,
        title=title,
        author=author,
        cover=pdf_preview(tmp_file_path, original_file_name),
        description=subject,
        tags="",
        series="",
        series_id="",
        languages="")


def pdf_preview(tmp_file_path, tmp_dir):
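    """Extract a cover image from the first page of a PDF.

    Prefers pulling an embedded image object via PyPDF2/Pillow; otherwise
    renders the first page with Wand/ImageMagick. Returns the cover file
    name, or None if extraction is disabled or fails.
    """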
    if use_generic_pdf_cover:
        return None
    else:
        if use_PIL:
            # Try to pull an embedded image object straight out of the first page
            try:
                input1 = PdfFileReader(open(tmp_file_path, 'rb'), strict=False)
                page0 = input1.getPage(0)
                xObject = page0['/Resources']['/XObject'].getObject()

                for obj in xObject:
                    if xObject[obj]['/Subtype'] == '/Image':
                        size = (xObject[obj]['/Width'], xObject[obj]['/Height'])
                        data = xObject[obj]._data  # xObject[obj].getData()
                        if xObject[obj]['/ColorSpace'] == '/DeviceRGB':
                            mode = "RGB"
                        else:
                            mode = "P"
                        if '/Filter' in xObject[obj]:
                            if xObject[obj]['/Filter'] == '/FlateDecode':
                                img = PILImage.frombytes(mode, size, data)
                                cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.png"
                                img.save(os.path.join(tmp_dir, cover_file_name))
                                return cover_file_name
                                # img.save(obj[1:] + ".png")
                            elif xObject[obj]['/Filter'] == '/DCTDecode':
                                # JPEG stream can be written out as-is
                                cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jpg"
                                img = open(cover_file_name, "wb")
                                img.write(data)
                                img.close()
                                return cover_file_name
                            elif xObject[obj]['/Filter'] == '/JPXDecode':
                                # JPEG 2000 stream can be written out as-is
                                cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jp2"
                                img = open(cover_file_name, "wb")
                                img.write(data)
                                img.close()
                                return cover_file_name
                        else:
                            img = PILImage.frombytes(mode, size, data)
                            cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.png"
                            img.save(os.path.join(tmp_dir, cover_file_name))
                            return cover_file_name
                            # img.save(obj[1:] + ".png")
            except Exception as ex:
                print(ex)

        # Fall back to rendering the first page with ImageMagick via Wand
        try:
            cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jpg"
            with Image(filename=tmp_file_path + "[0]", resolution=150) as img:
                img.compression_quality = 88
                img.save(filename=os.path.join(tmp_dir, cover_file_name))
            return cover_file_name
        except PolicyError as ex:
            log.warning('Pdf extraction forbidden by Imagemagick policy: %s', ex)
            return None
        except Exception as ex:
            log.warning('Cannot extract cover image, using default: %s', ex)
            return None


def get_versions():
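    """Report the versions of the optional libraries used here, or 'not installed'."""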
    if not use_generic_pdf_cover:
        IVersion = ImageVersion.MAGICK_VERSION
        WVersion = ImageVersion.VERSION
    else:
        IVersion = _(u'not installed')
        WVersion = _(u'not installed')
    if use_pdf_meta:
        PVersion = 'v' + PyPdfVersion
    else:
        PVersion = _(u'not installed')
    if lxmlversion:
        XVersion = 'v' + '.'.join(map(str, lxmlversion))
    else:
        XVersion = _(u'not installed')
    if use_PIL:
        PILVersion = 'v' + PILversion
    else:
        PILVersion = _(u'not installed')
    return {'Image Magick': IVersion,
            'PyPdf': PVersion,
            'lxml': XVersion,
            'Wand': WVersion,
            'Pillow': PILVersion}


def upload(uploadfile):
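    """Save an uploaded file to a temporary location and extract its metadata.

    ``uploadfile`` is expected to behave like a Werkzeug ``FileStorage``
    object from a Flask upload form, e.g. (field name assumed here for
    illustration only)::

        meta = upload(request.files['btn-upload'])
    """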
    tmp_dir = os.path.join(gettempdir(), 'calibre_web')

    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    filename = uploadfile.filename
    filename_root, file_extension = os.path.splitext(filename)
    md5 = hashlib.md5()
    md5.update(filename.encode('utf-8'))
    tmp_file_path = os.path.join(tmp_dir, md5.hexdigest())
    uploadfile.save(tmp_file_path)
    meta = process(tmp_file_path, filename_root, file_extension)
    return meta