diff --git a/.gitignore b/.gitignore index 4ed21c61..be017750 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,9 @@ cps/static/[0-9]* *.bak *.log.* tags + +settings.yaml +gdrive_credentials + +#kindlegen +vendor/kindlegen diff --git a/cps.py b/cps.py index fdc5bce7..180be500 100755 --- a/cps.py +++ b/cps.py @@ -6,20 +6,33 @@ import sys base_path = os.path.dirname(os.path.abspath(__file__)) # Insert local directories into path -sys.path.insert(0, os.path.join(base_path, 'vendor')) +sys.path.append(base_path) +sys.path.append(os.path.join(base_path, 'cps')) +sys.path.append(os.path.join(base_path, 'vendor')) from cps import web -from tornado.wsgi import WSGIContainer -from tornado.httpserver import HTTPServer -from tornado.ioloop import IOLoop +try: + from gevent.wsgi import WSGIServer + gevent_present = True +except ImportError: + from tornado.wsgi import WSGIContainer + from tornado.httpserver import HTTPServer + from tornado.ioloop import IOLoop + gevent_present = False if __name__ == '__main__': if web.ub.DEVELOPMENT: web.app.run(host="0.0.0.0", port=web.ub.config.config_port, debug=True) else: - http_server = HTTPServer(WSGIContainer(web.app)) - http_server.listen(web.ub.config.config_port) - IOLoop.instance().start() + if gevent_present: + web.app.logger.info('Attempting to start gevent') + web.start_gevent() + else: + web.app.logger.info('Falling back to Tornado') + http_server = HTTPServer(WSGIContainer(web.app)) + http_server.listen(web.ub.config.config_port) + IOLoop.instance().start() + IOLoop.instance().close(True) if web.helper.global_task == 0: web.app.logger.info("Performing restart of Calibre-web") diff --git a/cps/book_formats.py b/cps/book_formats.py index 64ec86e8..f7622ced 100644 --- a/cps/book_formats.py +++ b/cps/book_formats.py @@ -14,28 +14,28 @@ try: from wand.image import Image from wand import version as ImageVersion use_generic_pdf_cover = False -except ImportError, e: +except ImportError as e: logger.warning('cannot import Image, generating pdf covers for pdf uploads will not work: %s', e) use_generic_pdf_cover = True try: from PyPDF2 import PdfFileReader from PyPDF2 import __version__ as PyPdfVersion use_pdf_meta = True -except ImportError, e: +except ImportError as e: logger.warning('cannot import PyPDF2, extracting pdf metadata will not work: %s', e) use_pdf_meta = False try: import epub use_epub_meta = True -except ImportError, e: +except ImportError as e: logger.warning('cannot import epub, extracting epub metadata will not work: %s', e) use_epub_meta = False try: import fb2 use_fb2_meta = True -except ImportError, e: +except ImportError as e: logger.warning('cannot import fb2, extracting fb2 metadata will not work: %s', e) use_fb2_meta = False @@ -48,7 +48,7 @@ def process(tmp_file_path, original_file_name, original_file_extension): return epub.get_epub_info(tmp_file_path, original_file_name, original_file_extension) if ".FB2" == original_file_extension.upper() and use_fb2_meta is True: return fb2.get_fb2_info(tmp_file_path, original_file_extension) - except Exception, e: + except Exception as e: logger.warning('cannot parse metadata, using default: %s', e) return default_meta(tmp_file_path, original_file_name, original_file_extension) @@ -63,7 +63,8 @@ def default_meta(tmp_file_path, original_file_name, original_file_extension): description="", tags="", series="", - series_id="") + series_id="", + languages="") def pdf_meta(tmp_file_path, original_file_name, original_file_extension): @@ -91,7 +92,8 @@ def pdf_meta(tmp_file_path, original_file_name, 
original_file_extension): description=subject, tags="", series="", - series_id="") + series_id="", + languages="") def pdf_preview(tmp_file_path, tmp_dir): diff --git a/cps/db.py b/cps/db.py index f25848c7..ccc055e9 100755 --- a/cps/db.py +++ b/cps/db.py @@ -11,10 +11,8 @@ from ub import config import ub session = None -cc_exceptions = None +cc_exceptions = ['datetime', 'int', 'comments', 'float', 'composite', 'series'] cc_classes = None -cc_ids = None -books_custom_column_links = None engine = None @@ -247,7 +245,7 @@ class Books(Base): identifiers = relationship('Identifiers', backref='books') def __init__(self, title, sort, author_sort, timestamp, pubdate, series_index, last_modified, path, has_cover, - authors, tags): + authors, tags, languages = None): self.title = title self.sort = sort self.author_sort = author_sort @@ -283,22 +281,19 @@ class Custom_Columns(Base): def setup_db(): - global session - global cc_exceptions - global cc_classes - global cc_ids - global books_custom_column_links global engine + global session + global cc_classes if config.config_calibre_dir is None or config.config_calibre_dir == u'': return False dbpath = os.path.join(config.config_calibre_dir, "metadata.db") - engine = create_engine('sqlite:///{0}'.format(dbpath.encode('utf-8')), echo=False, isolation_level="SERIALIZABLE") + #engine = create_engine('sqlite:///{0}'.format(dbpath.encode('utf-8')), echo=False, isolation_level="SERIALIZABLE") + engine = create_engine('sqlite:///'+ dbpath, echo=False, isolation_level="SERIALIZABLE") try: conn = engine.connect() - - except: + except Exception as e: content = ub.session.query(ub.Settings).first() content.config_calibre_dir = None content.db_configured = False @@ -311,43 +306,43 @@ def setup_db(): config.loadSettings() conn.connection.create_function('title_sort', 1, title_sort) - cc = conn.execute("SELECT id, datatype FROM custom_columns") + if not cc_classes: + cc = conn.execute("SELECT id, datatype FROM custom_columns") - cc_ids = [] - cc_exceptions = ['datetime', 'int', 'comments', 'float', 'composite', 'series'] - books_custom_column_links = {} - cc_classes = {} - for row in cc: - if row.datatype not in cc_exceptions: - books_custom_column_links[row.id] = Table('books_custom_column_' + str(row.id) + '_link', Base.metadata, - Column('book', Integer, ForeignKey('books.id'), - primary_key=True), - Column('value', Integer, - ForeignKey('custom_column_' + str(row.id) + '.id'), - primary_key=True) - ) - cc_ids.append([row.id, row.datatype]) - if row.datatype == 'bool': - ccdict = {'__tablename__': 'custom_column_' + str(row.id), - 'id': Column(Integer, primary_key=True), - 'book': Column(Integer, ForeignKey('books.id')), - 'value': Column(Boolean)} + cc_ids = [] + books_custom_column_links = {} + cc_classes = {} + for row in cc: + if row.datatype not in cc_exceptions: + books_custom_column_links[row.id] = Table('books_custom_column_' + str(row.id) + '_link', Base.metadata, + Column('book', Integer, ForeignKey('books.id'), + primary_key=True), + Column('value', Integer, + ForeignKey('custom_column_' + str(row.id) + '.id'), + primary_key=True) + ) + cc_ids.append([row.id, row.datatype]) + if row.datatype == 'bool': + ccdict = {'__tablename__': 'custom_column_' + str(row.id), + 'id': Column(Integer, primary_key=True), + 'book': Column(Integer, ForeignKey('books.id')), + 'value': Column(Boolean)} + else: + ccdict = {'__tablename__': 'custom_column_' + str(row.id), + 'id': Column(Integer, primary_key=True), + 'value': Column(String)} + cc_classes[row.id] = 
type('Custom_Column_' + str(row.id), (Base,), ccdict)
+
+        for id in cc_ids:
+            if id[1] == 'bool':
+                setattr(Books, 'custom_column_' + str(id[0]), relationship(cc_classes[id[0]],
+                                                                           primaryjoin=(
+                                                                               Books.id == cc_classes[id[0]].book),
+                                                                           backref='books'))
             else:
-                ccdict = {'__tablename__': 'custom_column_' + str(row.id),
-                          'id': Column(Integer, primary_key=True),
-                          'value': Column(String)}
-            cc_classes[row.id] = type('Custom_Column_' + str(row.id), (Base,), ccdict)
-
-    for id in cc_ids:
-        if id[1] == 'bool':
-            setattr(Books, 'custom_column_' + str(id[0]), relationship(cc_classes[id[0]],
-                                                                       primaryjoin=(
-                                                                           Books.id == cc_classes[id[0]].book),
-                                                                       backref='books'))
-        else:
-            setattr(Books, 'custom_column_' + str(id[0]), relationship(cc_classes[id[0]],
-                                                                       secondary=books_custom_column_links[id[0]],
-                                                                       backref='books'))
+                setattr(Books, 'custom_column_' + str(id[0]), relationship(cc_classes[id[0]],
+                                                                           secondary=books_custom_column_links[id[0]],
+                                                                           backref='books'))
 
 # Base.metadata.create_all(engine)
 Session = sessionmaker()
diff --git a/cps/epub.py b/cps/epub.py
index f9d46362..446efe9b 100644
--- a/cps/epub.py
+++ b/cps/epub.py
@@ -5,7 +5,7 @@ import zipfile
 from lxml import etree
 import os
 import uploader
-
+from iso639 import languages as isoLanguages
 
 def extractCover(zip, coverFile, coverpath, tmp_file_name):
     if coverFile is None:
@@ -41,23 +41,53 @@ def get_epub_info(tmp_file_path, original_file_name, original_file_extension):
     p = tree.xpath('/pkg:package/pkg:metadata', namespaces=ns)[0]
 
     epub_metadata = {}
-    for s in ['title', 'description', 'creator']:
+
+    for s in ['title', 'description', 'creator', 'language']:
         tmp = p.xpath('dc:%s/text()' % s, namespaces=ns)
         if len(tmp) > 0:
             epub_metadata[s] = p.xpath('dc:%s/text()' % s, namespaces=ns)[0]
         else:
             epub_metadata[s] = "Unknown"
+
+    if epub_metadata['description'] == "Unknown":
+        description = tree.xpath("//*[local-name() = 'description']/text()")
+        if len(description) > 0:
+            epub_metadata['description'] = description[0]
+        else:
+            epub_metadata['description'] = ""
+
+    if epub_metadata['language'] == "Unknown":
+        epub_metadata['language'] = ""
+    else:
+        lang = epub_metadata['language'].split('-', 1)[0].lower()
+        if len(lang) == 2:
+            epub_metadata['language'] = isoLanguages.get(part1=lang).name
+        elif len(lang) == 3:
+            epub_metadata['language'] = isoLanguages.get(part3=lang).name
+        else:
+            epub_metadata['language'] = ""
+
     coversection = tree.xpath("/pkg:package/pkg:manifest/pkg:item[@id='cover-image']/@href", namespaces=ns)
+    coverfile = None
     if len(coversection) > 0:
         coverfile = extractCover(zip, coversection[0], coverpath, tmp_file_path)
     else:
-        coversection = tree.xpath("/pkg:package/pkg:manifest/pkg:item[@id='cover']/@href", namespaces=ns)
-        if len(coversection) > 0:
-            coverfile = extractCover(zip, coversection[0], coverpath, tmp_file_path)
-        else:
-            coverfile = None
-
+        meta_cover = tree.xpath("/pkg:package/pkg:metadata/pkg:meta[@name='cover']/@content", namespaces=ns)
+        if len(meta_cover) > 0:
+            coversection = tree.xpath("/pkg:package/pkg:manifest/pkg:item[@id='"+meta_cover[0]+"']/@href", namespaces=ns)
+            if len(coversection) > 0:
+                filetype = coversection[0].rsplit('.',1)[-1]
+                if filetype == "xhtml" or filetype == "html": #if cover is (x)html format
+                    markup = zip.read(os.path.join(coverpath,coversection[0]))
+                    markupTree = etree.fromstring(markup)
+                    #no matter xhtml or html with no namespace
+                    imgsrc = markupTree.xpath("//*[local-name() = 'img']/@src")
+                    #imgsrc may start with "../", so join to a full path, then take the path relative to cwd
+                    filename = os.path.relpath(os.path.join(os.path.dirname(os.path.join(coverpath, coversection[0])), imgsrc[0]))
+                    coverfile = extractCover(zip, filename, "", tmp_file_path)
+                else:
+                    coverfile = extractCover(zip, coversection[0], coverpath, tmp_file_path)
+
     if epub_metadata['title'] is None:
         title = original_file_name
     else:
@@ -72,4 +102,5 @@ def get_epub_info(tmp_file_path, original_file_name, original_file_extension):
                              description=epub_metadata['description'],
                              tags="",
                              series="",
-                             series_id="")
+                             series_id="",
+                             languages=epub_metadata['language'])
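The epub language handling above reduces a BCP-47 tag like `en-US` to its primary subtag, then asks `iso639` for the full language name, trying two-letter (part1) codes first and three-letter (part3) codes second. A quick sanity check of those lookups (a sketch against the same `iso639` package; the sample tags are arbitrary):

```python
from iso639 import languages as isoLanguages

# 'en-US' is reduced to 'en' before lookup, as in get_epub_info above
lang = 'en-US'.split('-', 1)[0].lower()
print(isoLanguages.get(part1=lang).name)   # -> English
print(isoLanguages.get(part3='deu').name)  # -> German
```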
+ os.sep), "gdrive.db") +engine = create_engine('sqlite:///{0}'.format(dbpath), echo=False) +Base = declarative_base() + +# Open session for database connection +Session = sessionmaker() +Session.configure(bind=engine) +session = scoped_session(Session) + +class GdriveId(Base): + __tablename__='gdrive_ids' + + id = Column(Integer, primary_key=True) + gdrive_id = Column(Integer, unique=True) + path = Column(String) + __table_args__ = (UniqueConstraint('gdrive_id', 'path', name='_gdrive_path_uc'),) + + def __repr__(self): + return str(self.path) + +class PermissionAdded(Base): + __tablename__='permissions_added' + + id = Column(Integer, primary_key=True) + gdrive_id = Column(Integer, unique=True) + + def __repr__(self): + return str(self.gdrive_id) + +def migrate(): + if not engine.dialect.has_table(engine.connect(), "permissions_added"): + PermissionAdded.__table__.create(bind = engine) + for sql in session.execute("select sql from sqlite_master where type='table'"): + if 'CREATE TABLE gdrive_ids' in sql[0]: + currUniqueConstraint='UNIQUE (gdrive_id)' + if currUniqueConstraint in sql[0]: + sql=sql[0].replace(currUniqueConstraint, 'UNIQUE (gdrive_id, path)') + sql=sql.replace(GdriveId.__tablename__, GdriveId.__tablename__+ '2') + session.execute(sql) + session.execute('INSERT INTO gdrive_ids2 (id, gdrive_id, path) SELECT id, gdrive_id, path FROM gdrive_ids;') + session.commit() + session.execute('DROP TABLE %s' % 'gdrive_ids') + session.execute('ALTER TABLE gdrive_ids2 RENAME to gdrive_ids') + break + +if not os.path.exists(dbpath): + try: + Base.metadata.create_all(engine) + except Exception: + raise + +migrate() + +def getDrive(gauth=None): + if not gauth: + gauth=GoogleAuth(settings_file='settings.yaml') + # Try to load saved client credentials + gauth.LoadCredentialsFile("gdrive_credentials") + if gauth.access_token_expired: + # Refresh them if expired + gauth.Refresh() + else: + # Initialize the saved creds + gauth.Authorize() + # Save the current credentials to a file + return GoogleDrive(gauth) + +def getEbooksFolder(drive=None): + if not drive: + drive = getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + ebooksFolder= "title = '%s' and 'root' in parents and mimeType = 'application/vnd.google-apps.folder' and trashed = false" % config.config_google_drive_folder + + fileList = drive.ListFile({'q': ebooksFolder}).GetList() + return fileList[0] + +def getEbooksFolderId(drive=None): + storedPathName=session.query(GdriveId).filter(GdriveId.path == '/').first() + if storedPathName: + return storedPathName.gdrive_id + else: + gDriveId=GdriveId() + gDriveId.gdrive_id=getEbooksFolder(drive)['id'] + gDriveId.path='/' + session.merge(gDriveId) + session.commit() + return + +def getFolderInFolder(parentId, folderName, drive=None): + if not drive: + drive = getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + folder= "title = '%s' and '%s' in parents and mimeType = 'application/vnd.google-apps.folder' and trashed = false" % (folderName.replace("'", "\\'"), parentId) + fileList = drive.ListFile({'q': folder}).GetList() + return fileList[0] + +def getFile(pathId, fileName, drive=None): + if not drive: + drive = getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + metaDataFile="'%s' in parents and trashed = false and title = '%s'" % (pathId, fileName.replace("'", "\\'")) + + fileList = drive.ListFile({'q': metaDataFile}).GetList() + return fileList[0] + +def getFolderId(path, drive=None): + if not drive: + drive=getDrive() + if 
drive.auth.access_token_expired: + drive.auth.Refresh() + currentFolderId=getEbooksFolderId(drive) + sqlCheckPath=path if path[-1] =='/' else path + '/' + storedPathName=session.query(GdriveId).filter(GdriveId.path == sqlCheckPath).first() + + if not storedPathName: + dbChange=False + s=path.split('/') + for i, x in enumerate(s): + if len(x) > 0: + currentPath="/".join(s[:i+1]) + if currentPath[-1] != '/': + currentPath = currentPath + '/' + storedPathName=session.query(GdriveId).filter(GdriveId.path == currentPath).first() + if storedPathName: + currentFolderId=storedPathName.gdrive_id + else: + currentFolderId=getFolderInFolder(currentFolderId, x, drive)['id'] + gDriveId=GdriveId() + gDriveId.gdrive_id=currentFolderId + gDriveId.path=currentPath + session.merge(gDriveId) + dbChange=True + if dbChange: + session.commit() + else: + currentFolderId=storedPathName.gdrive_id + return currentFolderId + + +def getFileFromEbooksFolder(drive, path, fileName): + if not drive: + drive=getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + if path: + sqlCheckPath=path if path[-1] =='/' else path + '/' + folderId=getFolderId(path, drive) + else: + folderId=getEbooksFolderId(drive) + + return getFile(folderId, fileName, drive) + +def copyDriveFileRemote(drive, origin_file_id, copy_title): + if not drive: + drive=getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + copied_file = {'title': copy_title} + try: + file_data = drive.auth.service.files().copy( + fileId=origin_file_id, body=copied_file).execute() + return drive.CreateFile({'id': file_data['id']}) + except errors.HttpError as error: + print ('An error occurred: %s' % error) + return None + +def downloadFile(drive, path, filename, output): + if not drive: + drive=getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + f=getFileFromEbooksFolder(drive, path, filename) + f.GetContentFile(output) + +def backupCalibreDbAndOptionalDownload(drive, f=None): + pass + if not drive: + drive=getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + metaDataFile="'%s' in parents and title = 'metadata.db' and trashed = false" % getEbooksFolderId() + + fileList = drive.ListFile({'q': metaDataFile}).GetList() + + databaseFile=fileList[0] + + if f: + databaseFile.GetContentFile(f) + +def copyToDrive(drive, uploadFile, createRoot, replaceFiles, + ignoreFiles=[], + parent=None, prevDir=''): + if not drive: + drive=getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + isInitial=not bool(parent) + if not parent: + parent=getEbooksFolder(drive) + if os.path.isdir(os.path.join(prevDir,uploadFile)): + existingFolder=drive.ListFile({'q' : "title = '%s' and '%s' in parents and trashed = false" % (os.path.basename(uploadFile), parent['id'])}).GetList() + if len(existingFolder) == 0 and (not isInitial or createRoot): + parent = drive.CreateFile({'title': os.path.basename(uploadFile), 'parents' : [{"kind": "drive#fileLink", 'id' : parent['id']}], + "mimeType": "application/vnd.google-apps.folder" }) + parent.Upload() + else: + if (not isInitial or createRoot) and len(existingFolder) > 0: + parent=existingFolder[0] + for f in os.listdir(os.path.join(prevDir,uploadFile)): + if f not in ignoreFiles: + copyToDrive(drive, f, True, replaceFiles, ignoreFiles, parent, os.path.join(prevDir,uploadFile)) + else: + if os.path.basename(uploadFile) not in ignoreFiles: + existingFiles=drive.ListFile({'q' : "title = '%s' and '%s' in parents and trashed = false" % (os.path.basename(uploadFile), 
parent['id'])}).GetList() + if len(existingFiles) > 0: + driveFile=existingFiles[0] + else: + driveFile = drive.CreateFile({'title': os.path.basename(uploadFile), 'parents' : [{"kind": "drive#fileLink", 'id' : parent['id']}], }) + driveFile.SetContentFile(os.path.join(prevDir,uploadFile)) + driveFile.Upload() + +def uploadFileToEbooksFolder(drive, destFile, f): + if not drive: + drive=getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + parent=getEbooksFolder(drive) + splitDir=destFile.split('/') + for i, x in enumerate(splitDir): + if i == len(splitDir)-1: + existingFiles=drive.ListFile({'q' : "title = '%s' and '%s' in parents and trashed = false" % (x, parent['id'])}).GetList() + if len(existingFiles) > 0: + driveFile=existingFiles[0] + else: + driveFile = drive.CreateFile({'title': x, 'parents' : [{"kind": "drive#fileLink", 'id' : parent['id']}], }) + driveFile.SetContentFile(f) + driveFile.Upload() + else: + existingFolder=drive.ListFile({'q' : "title = '%s' and '%s' in parents and trashed = false" % (x, parent['id'])}).GetList() + if len(existingFolder) == 0: + parent = drive.CreateFile({'title': x, 'parents' : [{"kind": "drive#fileLink", 'id' : parent['id']}], + "mimeType": "application/vnd.google-apps.folder" }) + parent.Upload() + else: + parent=existingFolder[0] + + +def watchChange(drive, channel_id, channel_type, channel_address, + channel_token=None, expiration=None): + if not drive: + drive=getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + """Watch for all changes to a user's Drive. + Args: + service: Drive API service instance. + channel_id: Unique string that identifies this channel. + channel_type: Type of delivery mechanism used for this channel. + channel_address: Address where notifications are delivered. + channel_token: An arbitrary string delivered to the target address with + each notification delivered over this channel. Optional. + channel_address: Address where notifications are delivered. Optional. + Returns: + The created channel if successful + Raises: + apiclient.errors.HttpError: if http request to create channel fails. + """ + body = { + 'id': channel_id, + 'type': channel_type, + 'address': channel_address + } + if channel_token: + body['token'] = channel_token + if expiration: + body['expiration'] = expiration + return drive.auth.service.changes().watch(body=body).execute() + +def watchFile(drive, file_id, channel_id, channel_type, channel_address, + channel_token=None, expiration=None): + """Watch for any changes to a specific file. + Args: + service: Drive API service instance. + file_id: ID of the file to watch. + channel_id: Unique string that identifies this channel. + channel_type: Type of delivery mechanism used for this channel. + channel_address: Address where notifications are delivered. + channel_token: An arbitrary string delivered to the target address with + each notification delivered over this channel. Optional. + channel_address: Address where notifications are delivered. Optional. + Returns: + The created channel if successful + Raises: + apiclient.errors.HttpError: if http request to create channel fails. 
+ """ + if not drive: + drive=getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + + body = { + 'id': channel_id, + 'type': channel_type, + 'address': channel_address + } + if channel_token: + body['token'] = channel_token + if expiration: + body['expiration'] = expiration + return drive.auth.service.files().watch(fileId=file_id, body=body).execute() + +def stopChannel(drive, channel_id, resource_id): + """Stop watching to a specific channel. + Args: + service: Drive API service instance. + channel_id: ID of the channel to stop. + resource_id: Resource ID of the channel to stop. + Raises: + apiclient.errors.HttpError: if http request to create channel fails. + """ + if not drive: + drive=getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + service=drive.auth.service + body = { + 'id': channel_id, + 'resourceId': resource_id + } + return drive.auth.service.channels().stop(body=body).execute() + +def getChangeById (drive, change_id): + if not drive: + drive=getDrive() + if drive.auth.access_token_expired: + drive.auth.Refresh() + """Print a single Change resource information. + + Args: + service: Drive API service instance. + change_id: ID of the Change resource to retrieve. + """ + try: + change = drive.auth.service.changes().get(changeId=change_id).execute() + return change + except errors.HttpError, error: + web.app.logger.exception(error) + return None diff --git a/cps/helper.py b/cps/helper.py index 54fa1946..1a89c86c 100755 --- a/cps/helper.py +++ b/cps/helper.py @@ -13,11 +13,18 @@ import os import traceback import re import unicodedata -from StringIO import StringIO +try: + from StringIO import StringIO + from email.MIMEBase import MIMEBase + from email.MIMEMultipart import MIMEMultipart + from email.MIMEText import MIMEText +except ImportError as e: + from io import StringIO + from email.mime.base import MIMEBase + from email.mime.multipart import MIMEMultipart + from email.mime.text import MIMEText + from email import encoders -from email.MIMEBase import MIMEBase -from email.MIMEMultipart import MIMEMultipart -from email.MIMEText import MIMEText from email.generator import Generator from email.utils import formatdate from email.utils import make_msgid @@ -28,11 +35,16 @@ import shutil import requests import zipfile from tornado.ioloop import IOLoop +try: + import gdriveutils as gd +except ImportError: + pass +import web try: import unidecode use_unidecode=True -except: +except Exception as e: use_unidecode=False # Global variables @@ -147,7 +159,7 @@ def send_raw_email(kindle_mail, msg): smtplib.stderr = org_stderr - except (socket.error, smtplib.SMTPRecipientsRefused, smtplib.SMTPException), e: + except (socket.error, smtplib.SMTPRecipientsRefused, smtplib.SMTPException) as e: app.logger.error(traceback.print_exc()) return _("Failed to send mail: %s" % str(e)) @@ -239,7 +251,10 @@ def get_valid_filename(value, replace_whitespace=True): value=value.replace(u'ß',u'ss') value = unicodedata.normalize('NFKD', value) re_slugify = re.compile('[\W\s-]', re.UNICODE) - value = unicode(re_slugify.sub('', value).strip()) + if type(value) is str: #Python3 str, Python2 unicode + value = re_slugify.sub('', value).strip() + else: + value = unicode(re_slugify.sub('', value).strip()) if replace_whitespace: #*+:\"/<>? 
diff --git a/cps/helper.py b/cps/helper.py
index 54fa1946..1a89c86c 100755
--- a/cps/helper.py
+++ b/cps/helper.py
@@ -13,11 +13,18 @@ import os
 import traceback
 import re
 import unicodedata
-from StringIO import StringIO
+try:
+    from StringIO import StringIO
+    from email.MIMEBase import MIMEBase
+    from email.MIMEMultipart import MIMEMultipart
+    from email.MIMEText import MIMEText
+except ImportError as e:
+    from io import StringIO
+    from email.mime.base import MIMEBase
+    from email.mime.multipart import MIMEMultipart
+    from email.mime.text import MIMEText
+    from email import encoders
 
-from email.MIMEBase import MIMEBase
-from email.MIMEMultipart import MIMEMultipart
-from email.MIMEText import MIMEText
 from email.generator import Generator
 from email.utils import formatdate
 from email.utils import make_msgid
@@ -28,11 +35,16 @@ import shutil
 import requests
 import zipfile
 from tornado.ioloop import IOLoop
+try:
+    import gdriveutils as gd
+except ImportError:
+    pass
+import web
 
 try:
     import unidecode
     use_unidecode=True
-except:
+except Exception as e:
     use_unidecode=False
 
 # Global variables
@@ -147,7 +159,7 @@ def send_raw_email(kindle_mail, msg):
 
         smtplib.stderr = org_stderr
 
-    except (socket.error, smtplib.SMTPRecipientsRefused, smtplib.SMTPException), e:
+    except (socket.error, smtplib.SMTPRecipientsRefused, smtplib.SMTPException) as e:
         app.logger.error(traceback.print_exc())
         return _("Failed to send mail: %s" % str(e))
 
@@ -239,7 +251,10 @@ def get_valid_filename(value, replace_whitespace=True):
     value=value.replace(u'ß',u'ss')
     value = unicodedata.normalize('NFKD', value)
     re_slugify = re.compile('[\W\s-]', re.UNICODE)
-    value = unicode(re_slugify.sub('', value).strip())
+    if type(value) is str: #Python3 str, Python2 unicode
+        value = re_slugify.sub('', value).strip()
+    else:
+        value = unicode(re_slugify.sub('', value).strip())
     if replace_whitespace:
         #*+:\"/<>? are replaced by _
         value = re.sub('[\*\+:\\\"/<>\?]+', u'_', value, flags=re.U)
@@ -280,6 +295,30 @@ def update_dir_stucture(book_id, calibrepath):
         book.path = new_authordir + '/' + book.path.split('/')[1]
     db.session.commit()
 
+def update_dir_structure_gdrive(book_id):
+    db.session.connection().connection.connection.create_function("title_sort", 1, db.title_sort)
+    book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
+
+    authordir = book.path.split('/')[0]
+    new_authordir = get_valid_filename(book.authors[0].name)
+    titledir = book.path.split('/')[1]
+    new_titledir = get_valid_filename(book.title) + " (" + str(book_id) + ")"
+
+    if titledir != new_titledir:
+        print (titledir)
+        gFile = gd.getFileFromEbooksFolder(web.Gdrive.Instance().drive, os.path.dirname(book.path), titledir)
+        gFile['title'] = new_titledir
+        gFile.Upload()
+        book.path = book.path.split('/')[0] + '/' + new_titledir
+
+    if authordir != new_authordir:
+        gFile = gd.getFileFromEbooksFolder(web.Gdrive.Instance().drive, None, authordir)
+        gFile['title'] = new_authordir
+        gFile.Upload()
+        book.path = new_authordir + '/' + book.path.split('/')[1]
+
+    db.session.commit()
+
 
 class Updater(threading.Thread):
     def __init__(self):
@@ -305,9 +344,13 @@ class Updater(threading.Thread):
         ub.session.close()
         ub.engine.dispose()
         self.status=6
-        # stop tornado server
-        server = IOLoop.instance()
-        server.add_callback(server.stop)
+
+        if web.gevent_server:
+            web.gevent_server.stop()
+        else:
+            # stop tornado server
+            server = IOLoop.instance()
+            server.add_callback(server.stop)
         self.status=7
 
     def get_update_status(self):
@@ -379,7 +422,7 @@ class Updater(threading.Thread):
             try:
                 os.chown(dst_file, permission.st_uid, permission.st_uid)
                 # print('Permissions: User '+str(new_permissions.st_uid)+' Group '+str(new_permissions.st_uid))
-            except:
+            except Exception as e:
                 e = sys.exc_info()
                 logging.getLogger('cps.web').debug('Fail '+str(dst_file)+' error: '+str(e))
         return
@@ -421,7 +464,7 @@ class Updater(threading.Thread):
                 logging.getLogger('cps.web').debug("Delete file " + item_path)
                 log_from_thread("Delete file " + item_path)
                 os.remove(item_path)
-            except:
+            except Exception as e:
                 logging.getLogger('cps.web').debug("Could not remove:" + item_path)
         shutil.rmtree(source, ignore_errors=True)
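The first hunk above swaps the flat `email.MIME*` imports for a try/except shim so the module loads on both Python 2 and 3. For orientation, this is the style of message helper.py assembles with those classes under the Python 3 names (addresses and subject are placeholders):

```python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, make_msgid

msg = MIMEMultipart()
msg['From'] = 'calibre-web@example.com'   # placeholder sender
msg['To'] = 'someone@kindle.com'          # placeholder recipient
msg['Date'] = formatdate(localtime=True)
msg['Message-Id'] = make_msgid('calibre-web')
msg['Subject'] = 'Send to Kindle'
msg.attach(MIMEText('This e-mail has been sent via calibre web.'))
```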
diff --git a/cps/static/js/get_meta.js b/cps/static/js/get_meta.js
new file mode 100644
index 00000000..2cec1252
--- /dev/null
+++ b/cps/static/js/get_meta.js
@@ -0,0 +1,180 @@
+/*
+ * Get Metadata from Douban Books api and Google Books api
+ * Created by idalin
+ * Google Books api document: https://developers.google.com/books/docs/v1/using
+ * Douban Books api document: https://developers.douban.com/wiki/?title=book_v2 (Chinese Only)
+ */
+
+$(document).ready(function () {
+    var msg = i18n_msg;
+    var douban = 'https://api.douban.com';
+    var db_search = '/v2/book/search';
+    var db_get_info = '/v2/book/';
+    var db_get_info_by_isbn = '/v2/book/isbn/ ';
+    var db_done = false;
+
+    var google = 'https://www.googleapis.com/';
+    var gg_search = '/books/v1/volumes';
+    var gg_get_info = '/books/v1/volumes/';
+    var gg_done = false;
+
+    var db_results = [];
+    var gg_results = [];
+    var show_flag = 0;
+    String.prototype.replaceAll = function (s1, s2) {
+        return this.replace(new RegExp(s1, "gm"), s2);
+    }
+
+    gg_search_book = function (title) {
+        title = title.replaceAll(/\s+/, '+');
+        var url = google + gg_search + '?q=' + title;
+        $.ajax({
+            url: url,
+            type: "GET",
+            dataType: "jsonp",
+            jsonp: 'callback',
+            success: function (data) {
+                gg_results = data.items;
+            },
+            complete: function () {
+                gg_done = true;
+                show_result();
+            }
+        });
+    }
+
+    get_meta = function (source, id) {
+        var meta;
+        if (source == 'google') {
+            meta = gg_results[id];
+            $('#description').val(meta.volumeInfo.description);
+            $('#bookAuthor').val(meta.volumeInfo.authors.join(' & '));
+            $('#book_title').val(meta.volumeInfo.title);
+            if (meta.volumeInfo.categories) {
+                var tags = meta.volumeInfo.categories.join(',');
+                $('#tags').val(tags);
+            }
+            if (meta.volumeInfo.averageRating) {
+                $('#rating').val(Math.round(meta.volumeInfo.averageRating));
+            }
+            return;
+        }
+        if (source == 'douban') {
+            meta = db_results[id];
+            $('#description').val(meta.summary);
+            $('#bookAuthor').val(meta.author.join(' & '));
+            $('#book_title').val(meta.title);
+            var tags = '';
+            for (var i = 0; i < meta.tags.length; i++) {
+                tags = tags + meta.tags[i].title + ',';
+            }
+            $('#tags').val(tags);
+            $('#rating').val(Math.round(meta.rating.average / 2));
+            return;
+        }
+    }
+    do_search = function (keyword) {
+        show_flag = 0;
+        $('#meta-info').text(msg.loading);
+        var keyword = $('#keyword').val();
+        if (keyword) {
+            db_search_book(keyword);
+            gg_search_book(keyword);
+        }
+    }
+
+    db_search_book = function (title) {
+        var url = douban + db_search + '?q=' + title + '&fields=all&count=10';
+        $.ajax({
+            url: url,
+            type: "GET",
+            dataType: "jsonp",
+            jsonp: 'callback',
+            success: function (data) {
+                db_results = data.books;
+            },
+            error: function () {
+                $('#meta-info').html('<p class="text-danger">'+ msg.search_error+'!</p>');
+            },
+            complete: function () {
+                db_done = true;
+                show_result();
+            }
+        });
+    }
+
+    show_result = function () {
+        show_flag++;
+        if (show_flag == 1) {
+            $('#meta-info').html('<ul id="book-list" class="media-list"></ul>');
+        }
+        if (gg_done && db_done) {
+            if (!gg_results && !db_results) {
+                $('#meta-info').html('<p class="text-danger">'+ msg.no_result +'</p>');
+                return;
+            }
+        }
+        if (gg_done && gg_results.length > 0) {
+            for (var i = 0; i < gg_results.length; i++) {
+                var book = gg_results[i];
+                var book_cover;
+                if (book.volumeInfo.imageLinks) {
+                    book_cover = book.volumeInfo.imageLinks.thumbnail;
+                } else {
+                    book_cover = '/static/generic_cover.jpg';
+                }
+                var book_html = '<li class="media" onclick="javascript:get_meta(\'google\',' + i + ')">' +
+                    '<img class="media-object pull-left" src="' + book_cover + '" alt="Cover">' +
+                    '<div class="media-body">' +
+                    '<h4 class="media-heading">' + book.volumeInfo.title + '</h4>' +
+                    '<p>'+ msg.author +':' + book.volumeInfo.authors + '</p>' +
+                    '<p>'+ msg.publisher + ':' + book.volumeInfo.publisher + '</p>' +
+                    '<p>'+ msg.description + ':' + book.volumeInfo.description + '</p>' +
+                    '<p>'+ msg.source + ':Google Books</p>' +
+                    '</div>' +
+                    '</li>';
+                $("#book-list").append(book_html);
+            }
+            gg_done = false;
+        }
+        if (db_done && db_results.length > 0) {
+            for (var i = 0; i < db_results.length; i++) {
+                var book = db_results[i];
+                var book_html = '<li class="media" onclick="javascript:get_meta(\'douban\',' + i + ')">' +
+                    '<img class="media-object pull-left" src="' + book.image + '" alt="Cover">' +
+                    '<div class="media-body">' +
+                    '<h4 class="media-heading">' + book.title + '</h4>' +
+                    '<p>' + msg.author + ':' + book.author + '</p>' +
+                    '<p>' + msg.publisher + ':' + book.publisher + '</p>' +
+                    '<p>' + msg.description + ':' + book.summary + '</p>' +
+                    '<p>' + msg.source + ':Douban Books</p>' +
+                    '</div>' +
+                    '</li>';
+                $("#book-list").append(book_html);
+            }
+            db_done = false;
+        }
+    }
+
+    $('#do-search').click(function () {
+        var keyword = $('#keyword').val();
+        if (keyword) {
+            do_search(keyword);
+        }
+    });
+
+    $('#get_meta').click(function () {
+        var book_title = $('#book_title').val();
+        if (book_title) {
+            $('#keyword').val(book_title);
+            do_search(book_title);
+        }
+    });
+
+});
\ No newline at end of file
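get_meta.js issues both searches from the browser via JSONP. For reference, the same Google Books volumes query can be reproduced server-side with `requests` (already imported by helper.py); the search term is just an example:

```python
import requests

resp = requests.get('https://www.googleapis.com/books/v1/volumes',
                    params={'q': 'Moby Dick'})
for item in resp.json().get('items', []):
    info = item['volumeInfo']
    # the same fields get_meta.js copies into the edit form
    print(info.get('title'), info.get('authors'), info.get('averageRating'))
```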
diff --git a/cps/static/js/libs/bootstrap-rating-input.min.js b/cps/static/js/libs/bootstrap-rating-input.min.js
new file mode 100644
index 00000000..0398742e
--- /dev/null
+++ b/cps/static/js/libs/bootstrap-rating-input.min.js
@@ -0,0 +1 @@
+!function(a){"use strict";function b(a){return"[data-value"+(a?"="+a:"")+"]"}function c(a,b,c){var d=c.activeIcon,e=c.inactiveIcon;a.removeClass(b?e:d).addClass(b?d:e)}function d(b,c){var d=a.extend({},i,b.data(),c);return d.inline=""===d.inline||d.inline,d.readonly=""===d.readonly||d.readonly,d.clearable===!1?d.clearableLabel="":d.clearableLabel=d.clearable,d.clearable=""===d.clearable||d.clearable,d}function e(b,c){if(c.inline)var d=a('<span class="rating-input"></span>');else var d=a('<div class="rating-input"></div>');d.addClass(b.attr("class")),d.removeClass("rating");for(var e=c.min;e<=c.max;e++)d.append('<i class="'+c.iconLib+'" data-value="'+e+'"></i>');return c.clearable&&!c.readonly&&d.append("&nbsp;").append('<a class="'+f+'"><i class="'+c.iconLib+" "+c.clearableIcon+'"/>'+c.clearableLabel+"</a>"),d}var f="rating-clear",g="."+f,h="hidden",i={min:1,max:5,"empty-value":0,iconLib:"glyphicon",activeIcon:"glyphicon-star",inactiveIcon:"glyphicon-star-empty",clearable:!1,clearableIcon:"glyphicon-remove",clearableRemain:!1,inline:!1,readonly:!1},j=function(a,b){var c=this.$input=a;this.options=d(c,b);var f=this.$el=e(c,this.options);c.addClass(h).before(f),c.attr("type","hidden"),this.highlight(c.val())};j.VERSION="0.4.0",j.DEFAULTS=i,j.prototype={clear:function(){this.setValue(this.options["empty-value"])},setValue:function(a){this.highlight(a),this.updateInput(a)},highlight:function(a,d){var e=this.options,f=this.$el;if(a>=this.options.min&&a<=this.options.max){var i=f.find(b(a));c(i.prevAll("i").andSelf(),!0,e),c(i.nextAll("i"),!1,e)}else c(f.find(b()),!1,e);d||(this.options.clearableRemain?f.find(g).removeClass(h):a&&a!=this.options["empty-value"]?f.find(g).removeClass(h):f.find(g).addClass(h))},updateInput:function(a){var b=this.$input;b.val()!=a&&b.val(a).change()}};var k=a.fn.rating=function(c){return this.filter("input[type=number]").each(function(){var d=a(this),e="object"==typeof c&&c||{},f=new j(d,e);f.options.readonly||f.$el.on("mouseenter",b(),function(){f.highlight(a(this).data("value"),!0)}).on("mouseleave",b(),function(){f.highlight(d.val(),!0)}).on("click",b(),function(){f.setValue(a(this).data("value"))}).on("click",g,function(){f.clear()})})};k.Constructor=j,a(function(){a("input.rating[type=number]").each(function(){a(this).rating()})})}(jQuery);
\ No newline at end of file
diff --git a/cps/static/js/libs/jquery.form.js b/cps/static/js/libs/jquery.form.js
new file mode 100644
index 00000000..591ad6f1
--- /dev/null
+++ b/cps/static/js/libs/jquery.form.js
@@ -0,0 +1,1277 @@
+/*!
+ * jQuery Form Plugin
+ * version: 3.51.0-2014.06.20
+ * Requires jQuery v1.5 or later
+ * Copyright (c) 2014 M. Alsup
+ * Examples and documentation at: http://malsup.com/jquery/form/
+ * Project repository: https://github.com/malsup/form
+ * Dual licensed under the MIT and GPL licenses.
+ * https://github.com/malsup/form#copyright-and-license
+ */
+/*global ActiveXObject */
+
+// AMD support
+(function (factory) {
+    "use strict";
+    if (typeof define === 'function' && define.amd) {
+        // using AMD; register as anon module
+        define(['jquery'], factory);
+    } else {
+        // no AMD; invoke directly
+        factory( (typeof(jQuery) != 'undefined') ? jQuery : window.Zepto );
+    }
+}
+
+(function($) {
+"use strict";
+
+/*
+    Usage Note:
+    -----------
+    Do not use both ajaxSubmit and ajaxForm on the same form. These
+    functions are mutually exclusive. Use ajaxSubmit if you want
+    to bind your own submit handler to the form. For example,
+
+    $(document).ready(function() {
+        $('#myForm').on('submit', function(e) {
+            e.preventDefault(); // <-- important
+            $(this).ajaxSubmit({
+                target: '#output'
+            });
+        });
+    });
+
+    Use ajaxForm when you want the plugin to manage all the event binding
+    for you. For example,
+
+    $(document).ready(function() {
+        $('#myForm').ajaxForm({
+            target: '#output'
+        });
+    });
+
+    You can also use ajaxForm with delegation (requires jQuery v1.7+), so the
+    form does not have to exist when you invoke ajaxForm:
+
+    $('#myForm').ajaxForm({
+        delegation: true,
+        target: '#output'
+    });
+
+    When using ajaxForm, the ajaxSubmit function will be invoked for you
+    at the appropriate time.
+*/
+
+/**
+ * Feature detection
+ */
+var feature = {};
+feature.fileapi = $("<input type='file'/>").get(0).files !== undefined;
+feature.formdata = window.FormData !== undefined;
+
+var hasProp = !!$.fn.prop;
+
+// attr2 uses prop when it can but checks the return type for
+// an expected string. this accounts for the case where a form
+// contains inputs with names like "action" or "method"; in those
+// cases "prop" returns the element
+$.fn.attr2 = function() {
+    if ( ! hasProp ) {
+        return this.attr.apply(this, arguments);
+    }
+    var val = this.prop.apply(this, arguments);
+    if ( ( val && val.jquery ) || typeof val === 'string' ) {
+        return val;
+    }
+    return this.attr.apply(this, arguments);
+};
+
+/**
+ * ajaxSubmit() provides a mechanism for immediately submitting
+ * an HTML form using AJAX.
+ */
+$.fn.ajaxSubmit = function(options) {
+    /*jshint scripturl:true */
+
+    // fast fail if nothing selected (http://dev.jquery.com/ticket/2752)
+    if (!this.length) {
+        log('ajaxSubmit: skipping submit process - no element selected');
+        return this;
+    }
+
+    var method, action, url, $form = this;
+
+    if (typeof options == 'function') {
+        options = { success: options };
+    }
+    else if ( options === undefined ) {
+        options = {};
+    }
+
+    method = options.type || this.attr2('method');
+    action = options.url || this.attr2('action');
+
+    url = (typeof action === 'string') ? $.trim(action) : '';
+    url = url || window.location.href || '';
+    if (url) {
+        // clean url (don't include hash value)
+        url = (url.match(/^([^#]+)/)||[])[1];
+    }
+
+    options = $.extend(true, {
+        url:  url,
+        success: $.ajaxSettings.success,
+        type: method || $.ajaxSettings.type,
+        iframeSrc: /^https/i.test(window.location.href || '') ? 'javascript:false' : 'about:blank'
+    }, options);
+
+    // hook for manipulating the form data before it is extracted;
+    // convenient for use with rich editors like tinyMCE or FCKEditor
+    var veto = {};
+    this.trigger('form-pre-serialize', [this, options, veto]);
+    if (veto.veto) {
+        log('ajaxSubmit: submit vetoed via form-pre-serialize trigger');
+        return this;
+    }
+
+    // provide opportunity to alter form data before it is serialized
+    if (options.beforeSerialize && options.beforeSerialize(this, options) === false) {
+        log('ajaxSubmit: submit aborted via beforeSerialize callback');
+        return this;
+    }
+
+    var traditional = options.traditional;
+    if ( traditional === undefined ) {
+        traditional = $.ajaxSettings.traditional;
+    }
+
+    var elements = [];
+    var qx, a = this.formToArray(options.semantic, elements);
+    if (options.data) {
+        options.extraData = options.data;
+        qx = $.param(options.data, traditional);
+    }
+
+    // give pre-submit callback an opportunity to abort the submit
+    if (options.beforeSubmit && options.beforeSubmit(a, this, options) === false) {
+        log('ajaxSubmit: submit aborted via beforeSubmit callback');
+        return this;
+    }
+
+    // fire vetoable 'validate' event
+    this.trigger('form-submit-validate', [a, this, options, veto]);
+    if (veto.veto) {
+        log('ajaxSubmit: submit vetoed via form-submit-validate trigger');
+        return this;
+    }
+
+    var q = $.param(a, traditional);
+    if (qx) {
+        q = ( q ? (q + '&' + qx) : qx );
+    }
+    if (options.type.toUpperCase() == 'GET') {
+        options.url += (options.url.indexOf('?') >= 0 ?
'&' : '?') + q; + options.data = null; // data is null for 'get' + } + else { + options.data = q; // data is the query string for 'post' + } + + var callbacks = []; + if (options.resetForm) { + callbacks.push(function() { $form.resetForm(); }); + } + if (options.clearForm) { + callbacks.push(function() { $form.clearForm(options.includeHidden); }); + } + + // perform a load on the target only if dataType is not provided + if (!options.dataType && options.target) { + var oldSuccess = options.success || function(){}; + callbacks.push(function(data) { + var fn = options.replaceTarget ? 'replaceWith' : 'html'; + $(options.target)[fn](data).each(oldSuccess, arguments); + }); + } + else if (options.success) { + callbacks.push(options.success); + } + + options.success = function(data, status, xhr) { // jQuery 1.4+ passes xhr as 3rd arg + var context = options.context || this ; // jQuery 1.4+ supports scope context + for (var i=0, max=callbacks.length; i < max; i++) { + callbacks[i].apply(context, [data, status, xhr || $form, $form]); + } + }; + + if (options.error) { + var oldError = options.error; + options.error = function(xhr, status, error) { + var context = options.context || this; + oldError.apply(context, [xhr, status, error, $form]); + }; + } + + if (options.complete) { + var oldComplete = options.complete; + options.complete = function(xhr, status) { + var context = options.context || this; + oldComplete.apply(context, [xhr, status, $form]); + }; + } + + // are there files to upload? + + // [value] (issue #113), also see comment: + // https://github.com/malsup/form/commit/588306aedba1de01388032d5f42a60159eea9228#commitcomment-2180219 + var fileInputs = $('input[type=file]:enabled', this).filter(function() { return $(this).val() !== ''; }); + + var hasFileInputs = fileInputs.length > 0; + var mp = 'multipart/form-data'; + var multipart = ($form.attr('enctype') == mp || $form.attr('encoding') == mp); + + var fileAPI = feature.fileapi && feature.formdata; + log("fileAPI :" + fileAPI); + var shouldUseFrame = (hasFileInputs || multipart) && !fileAPI; + + var jqxhr; + + // options.iframe allows user to force iframe mode + // 06-NOV-09: now defaulting to iframe mode if file input is detected + if (options.iframe !== false && (options.iframe || shouldUseFrame)) { + // hack to fix Safari hang (thanks to Tim Molendijk for this) + // see: http://groups.google.com/group/jquery-dev/browse_thread/thread/36395b7ab510dd5d + if (options.closeKeepAlive) { + $.get(options.closeKeepAlive, function() { + jqxhr = fileUploadIframe(a); + }); + } + else { + jqxhr = fileUploadIframe(a); + } + } + else if ((hasFileInputs || multipart) && fileAPI) { + jqxhr = fileUploadXhr(a); + } + else { + jqxhr = $.ajax(options); + } + + $form.removeData('jqxhr').data('jqxhr', jqxhr); + + // clear element array + for (var k=0; k < elements.length; k++) { + elements[k] = null; + } + + // fire 'notify' event + this.trigger('form-submit-notify', [this, options]); + return this; + + // utility fn for deep serialization + function deepSerialize(extraData){ + var serialized = $.param(extraData, options.traditional).split('&'); + var len = serialized.length; + var result = []; + var i, part; + for (i=0; i < len; i++) { + // #252; undo param space replacement + serialized[i] = serialized[i].replace(/\+/g,' '); + part = serialized[i].split('='); + // #278; use array instead of object storage, favoring array serializations + result.push([decodeURIComponent(part[0]), decodeURIComponent(part[1])]); + } + return result; + } + + // 
XMLHttpRequest Level 2 file uploads (big hat tip to francois2metz) + function fileUploadXhr(a) { + var formdata = new FormData(); + + for (var i=0; i < a.length; i++) { + formdata.append(a[i].name, a[i].value); + } + + if (options.extraData) { + var serializedData = deepSerialize(options.extraData); + for (i=0; i < serializedData.length; i++) { + if (serializedData[i]) { + formdata.append(serializedData[i][0], serializedData[i][1]); + } + } + } + + options.data = null; + + var s = $.extend(true, {}, $.ajaxSettings, options, { + contentType: false, + processData: false, + cache: false, + type: method || 'POST' + }); + + if (options.uploadProgress) { + // workaround because jqXHR does not expose upload property + s.xhr = function() { + var xhr = $.ajaxSettings.xhr(); + if (xhr.upload) { + xhr.upload.addEventListener('progress', function(event) { + var percent = 0; + var position = event.loaded || event.position; /*event.position is deprecated*/ + var total = event.total; + if (event.lengthComputable) { + percent = Math.ceil(position / total * 100); + } + options.uploadProgress(event, position, total, percent); + }, false); + } + return xhr; + }; + } + + s.data = null; + var beforeSend = s.beforeSend; + s.beforeSend = function(xhr, o) { + //Send FormData() provided by user + if (options.formData) { + o.data = options.formData; + } + else { + o.data = formdata; + } + if(beforeSend) { + beforeSend.call(this, xhr, o); + } + }; + return $.ajax(s); + } + + // private function for handling file uploads (hat tip to YAHOO!) + function fileUploadIframe(a) { + var form = $form[0], el, i, s, g, id, $io, io, xhr, sub, n, timedOut, timeoutHandle; + var deferred = $.Deferred(); + + // #341 + deferred.abort = function(status) { + xhr.abort(status); + }; + + if (a) { + // ensure that every serialized input is still enabled + for (i=0; i < elements.length; i++) { + el = $(elements[i]); + if ( hasProp ) { + el.prop('disabled', false); + } + else { + el.removeAttr('disabled'); + } + } + } + + s = $.extend(true, {}, $.ajaxSettings, options); + s.context = s.context || s; + id = 'jqFormIO' + (new Date().getTime()); + if (s.iframeTarget) { + $io = $(s.iframeTarget); + n = $io.attr2('name'); + if (!n) { + $io.attr2('name', id); + } + else { + id = n; + } + } + else { + $io = $('