From b867dc9be2a68c7ced47333a243b16498b441034 Mon Sep 17 00:00:00 2001
From: Giovanni Harting <539@idlegandalf.com>
Date: Fri, 5 Feb 2021 16:30:46 +0100
Subject: [PATCH] Rename; Removed deps from repo

---
 .dockerignore | 11 -
 .github/FUNDING.yml | 3 -
 .github/pull_request_template.md | 20 -
 .github/workflows/publish-docker.yml | 115 -
 .github/workflows/publish-installers.yml | 194 -
 .github/workflows/publish-snap.yml | 94 -
 .github/workflows/pull-requests.yml | 28 -
 .gitignore | 343 +-
 Dockerfile | 26 -
 Tautulli.py => JellyPy.py | 191 +-
 PlexPy.py | 24 -
 README.md | 10 +-
 data/interfaces/default/settings.html | 10 +-
 {plexpy => jellypy}/__init__.py | 118 +-
 {plexpy => jellypy}/activity_handler.py | 84 +-
 {plexpy => jellypy}/activity_pinger.py | 70 +-
 {plexpy => jellypy}/activity_processor.py | 32 +-
 {plexpy => jellypy}/api2.py | 90 +-
 {plexpy => jellypy}/classes.py | 6 +-
 {plexpy => jellypy}/common.py | 6 +-
 {plexpy => jellypy}/config.py | 32 +-
 {plexpy => jellypy}/database.py | 24 +-
 {plexpy => jellypy}/datafactory.py | 40 +-
 {plexpy => jellypy}/datatables.py | 10 +-
 {plexpy => jellypy}/exceptions.py | 0
 {plexpy => jellypy}/exporter.py | 26 +-
 {plexpy => jellypy}/graphs.py | 38 +-
 {plexpy => jellypy}/helpers.py | 84 +-
 {plexpy => jellypy}/http_handler.py | 24 +-
 {plexpy => jellypy}/libraries.py | 86 +-
 {plexpy => jellypy}/lock.py | 6 +-
 {plexpy => jellypy}/log_reader.py | 14 +-
 {plexpy => jellypy}/logger.py | 14 +-
 {plexpy => jellypy}/macos.py | 56 +-
 {plexpy => jellypy}/mobile_app.py | 10 +-
 {plexpy => jellypy}/newsletter_handler.py | 16 +-
 {plexpy => jellypy}/newsletters.py | 58 +-
 {plexpy => jellypy}/notification_handler.py | 126 +-
 {plexpy => jellypy}/notifiers.py | 86 +-
 {plexpy => jellypy}/plex.py | 6 +-
 {plexpy => jellypy}/plexivity_import.py | 14 +-
 {plexpy => jellypy}/plextv.py | 86 +-
 {plexpy => jellypy}/plexwatch_import.py | 14 +-
 {plexpy => jellypy}/pmsconnect.py | 68 +-
 {plexpy => jellypy}/request.py | 18 +-
 {plexpy => jellypy}/session.py | 10 +-
 {plexpy => jellypy}/users.py | 28 +-
 {plexpy => jellypy}/version.py | 0
 {plexpy => jellypy}/versioncheck.py | 248 +-
 {plexpy => jellypy}/web_socket.py | 84 +-
 {plexpy => jellypy}/webauth.py | 68 +-
 {plexpy => jellypy}/webserve.py | 618 +-
 {plexpy => jellypy}/webstart.py | 66 +-
 {plexpy => jellypy}/windows.py | 54 +-
 lib/IPy.py | 1652 ----
 lib/UniversalAnalytics/HTTPLog.py | 121 -
 lib/UniversalAnalytics/Tracker.py | 424 -
 lib/UniversalAnalytics/__init__.py | 1 -
 lib/appdirs.py | 608 --
 lib/apscheduler/__init__.py | 10 -
 lib/apscheduler/events.py | 94 -
 lib/apscheduler/executors/__init__.py | 0
 lib/apscheduler/executors/asyncio.py | 59 -
 lib/apscheduler/executors/base.py | 146 -
 lib/apscheduler/executors/base_py3.py | 41 -
 lib/apscheduler/executors/debug.py | 20 -
 lib/apscheduler/executors/gevent.py | 30 -
 lib/apscheduler/executors/pool.py | 54 -
 lib/apscheduler/executors/tornado.py | 54 -
 lib/apscheduler/executors/twisted.py | 25 -
 lib/apscheduler/job.py | 301 -
 lib/apscheduler/jobstores/__init__.py | 0
 lib/apscheduler/jobstores/base.py | 143 -
 lib/apscheduler/jobstores/memory.py | 108 -
 lib/apscheduler/jobstores/mongodb.py | 141 -
 lib/apscheduler/jobstores/redis.py | 150 -
 lib/apscheduler/jobstores/rethinkdb.py | 155 -
 lib/apscheduler/jobstores/sqlalchemy.py | 154 -
 lib/apscheduler/jobstores/zookeeper.py | 179 -
 lib/apscheduler/schedulers/__init__.py | 12 -
 lib/apscheduler/schedulers/asyncio.py | 68 -
 lib/apscheduler/schedulers/background.py | 41 -
 lib/apscheduler/schedulers/base.py | 1022 ---
 lib/apscheduler/schedulers/blocking.py | 33 -
 lib/apscheduler/schedulers/gevent.py | 35 -
 lib/apscheduler/schedulers/qt.py | 43 -
 lib/apscheduler/schedulers/tornado.py | 63 -
 lib/apscheduler/schedulers/twisted.py | 62 -
 lib/apscheduler/triggers/__init__.py | 0
 lib/apscheduler/triggers/base.py | 48 -
 lib/apscheduler/triggers/combining.py | 95 -
 lib/apscheduler/triggers/cron/__init__.py | 238 -
 lib/apscheduler/triggers/cron/expressions.py | 251 -
 lib/apscheduler/triggers/cron/fields.py | 111 -
 lib/apscheduler/triggers/date.py | 51 -
 lib/apscheduler/triggers/interval.py | 106 -
 lib/apscheduler/util.py | 429 -
 lib/argparse.py | 2392 ------
 lib/arrow/__init__.py | 8 -
 lib/arrow/api.py | 55 -
 lib/arrow/arrow.py | 948 --
 lib/arrow/factory.py | 254 -
 lib/arrow/formatter.py | 105 -
 lib/arrow/locales.py | 2011 -----
 lib/arrow/parser.py | 328 -
 lib/arrow/util.py | 47 -
 lib/backports/__init__.py | 1 -
 lib/backports/csv.py | 979 ---
 lib/backports/functools_lru_cache.py | 196 -
 lib/bleach/__init__.py | 401 -
 lib/bleach/callbacks.py | 20 -
 lib/bleach/encoding.py | 62 -
 lib/bleach/sanitizer.py | 148 -
 lib/bs4/__init__.py | 655 --
 lib/bs4/builder/__init__.py | 392 -
 lib/bs4/builder/_html5lib.py | 455 -
 lib/bs4/builder/_htmlparser.py | 358 -
 lib/bs4/builder/_lxml.py | 302 -
 lib/bs4/check_block.py | 4 -
 lib/bs4/dammit.py | 872 --
 lib/bs4/diagnose.py | 224 -
 lib/bs4/element.py | 1602 ----
 lib/bs4/formatter.py | 99 -
 lib/bs4/testing.py | 992 ---
 lib/bs4/tests/__init__.py | 1 -
 lib/bs4/tests/test_builder_registry.py | 147 -
 lib/bs4/tests/test_docs.py | 36 -
 lib/bs4/tests/test_html5lib.py | 184 -
 lib/bs4/tests/test_htmlparser.py | 61 -
 lib/bs4/tests/test_lxml.py | 115 -
 lib/bs4/tests/test_soup.py | 682 --
 lib/bs4/tests/test_tree.py | 2254 -----
 lib/certgen.py | 106 -
 lib/certifi/__init__.py | 3 -
 lib/certifi/__main__.py | 12 -
 lib/certifi/cacert.pem | 4606 ----------
 lib/certifi/core.py | 60 -
 lib/chardet/__init__.py | 39 -
 lib/chardet/big5freq.py | 386 -
 lib/chardet/big5prober.py | 47 -
 lib/chardet/chardistribution.py | 233 -
 lib/chardet/charsetgroupprober.py | 106 -
 lib/chardet/charsetprober.py | 145 -
 lib/chardet/cli/__init__.py | 1 -
 lib/chardet/cli/chardetect.py | 85 -
 lib/chardet/codingstatemachine.py | 88 -
 lib/chardet/compat.py | 34 -
 lib/chardet/cp949prober.py | 49 -
 lib/chardet/enums.py | 76 -
 lib/chardet/escprober.py | 101 -
 lib/chardet/escsm.py | 246 -
 lib/chardet/eucjpprober.py | 92 -
 lib/chardet/euckrfreq.py | 195 -
 lib/chardet/euckrprober.py | 47 -
 lib/chardet/euctwfreq.py | 387 -
 lib/chardet/euctwprober.py | 46 -
 lib/chardet/gb2312freq.py | 283 -
 lib/chardet/gb2312prober.py | 46 -
 lib/chardet/hebrewprober.py | 292 -
 lib/chardet/jisfreq.py | 325 -
 lib/chardet/jpcntx.py | 233 -
 lib/chardet/langbulgarianmodel.py | 228 -
 lib/chardet/langcyrillicmodel.py | 333 -
 lib/chardet/langgreekmodel.py | 225 -
 lib/chardet/langhebrewmodel.py | 200 -
 lib/chardet/langhungarianmodel.py | 225 -
 lib/chardet/langthaimodel.py | 199 -
 lib/chardet/langturkishmodel.py | 193 -
 lib/chardet/latin1prober.py | 145 -
 lib/chardet/mbcharsetprober.py | 91 -
 lib/chardet/mbcsgroupprober.py | 54 -
 lib/chardet/mbcssm.py | 572 --
 lib/chardet/sbcharsetprober.py | 132 -
 lib/chardet/sbcsgroupprober.py | 73 -
 lib/chardet/sjisprober.py | 92 -
 lib/chardet/universaldetector.py | 286 -
 lib/chardet/utf8prober.py | 82 -
 lib/chardet/version.py | 9 -
 lib/cheroot/__init__.py | 15 -
 lib/cheroot/__main__.py | 6 -
 lib/cheroot/_compat.py | 110 -
 lib/cheroot/cli.py | 234 -
 lib/cheroot/connections.py | 279 -
 lib/cheroot/errors.py | 58 -
 lib/cheroot/makefile.py | 447 -
 lib/cheroot/server.py | 2114 -----
 lib/cheroot/ssl/__init__.py | 52 -
 lib/cheroot/ssl/builtin.py | 210 -
 lib/cheroot/ssl/pyopenssl.py | 343 -
 lib/cheroot/test/__init__.py | 1 -
 lib/cheroot/test/conftest.py | 69 -
 lib/cheroot/test/helper.py | 168 -
 lib/cheroot/test/test__compat.py | 62 -
 lib/cheroot/test/test_conn.py | 980 ---
 lib/cheroot/test/test_core.py | 415 -
 lib/cheroot/test/test_dispatch.py | 55 -
 lib/cheroot/test/test_errors.py | 30 -
 lib/cheroot/test/test_makefile.py | 52 -
 lib/cheroot/test/test_server.py | 235 -
 lib/cheroot/test/test_ssl.py | 474 -
 lib/cheroot/test/webtest.py | 605 --
 lib/cheroot/testing.py | 153 -
 lib/cheroot/workers/__init__.py | 1 -
 lib/cheroot/workers/threadpool.py | 323 -
 lib/cheroot/wsgi.py | 434 -
 lib/cherrypy/__init__.py | 370 -
 lib/cherrypy/__main__.py | 5 -
 lib/cherrypy/_cpchecker.py | 325 -
 lib/cherrypy/_cpcompat.py | 162 -
 lib/cherrypy/_cpconfig.py | 296 -
 lib/cherrypy/_cpdispatch.py | 686 --
 lib/cherrypy/_cperror.py | 619 --
 lib/cherrypy/_cplogging.py | 482 --
 lib/cherrypy/_cpmodpy.py | 356 -
 lib/cherrypy/_cpnative_server.py | 168 -
 lib/cherrypy/_cpreqbody.py | 1000 ---
 lib/cherrypy/_cprequest.py | 930 --
 lib/cherrypy/_cpserver.py | 252 -
 lib/cherrypy/_cptools.py | 509 --
 lib/cherrypy/_cptree.py | 313 -
 lib/cherrypy/_cpwsgi.py | 467 -
 lib/cherrypy/_cpwsgi_server.py | 110 -
 lib/cherrypy/_helper.py | 344 -
 lib/cherrypy/daemon.py | 107 -
 lib/cherrypy/favicon.ico | Bin 1406 -> 0 bytes
 lib/cherrypy/lib/__init__.py | 96 -
 lib/cherrypy/lib/auth_basic.py | 120 -
 lib/cherrypy/lib/auth_digest.py | 464 -
 lib/cherrypy/lib/caching.py | 482 --
 lib/cherrypy/lib/covercp.py | 391 -
 lib/cherrypy/lib/cpstats.py | 696 --
 lib/cherrypy/lib/cptools.py | 640 --
 lib/cherrypy/lib/encoding.py | 436 -
 lib/cherrypy/lib/gctools.py | 218 -
 lib/cherrypy/lib/httputil.py | 582 --
 lib/cherrypy/lib/jsontools.py | 88 -
 lib/cherrypy/lib/locking.py | 47 -
 lib/cherrypy/lib/profiler.py | 221 -
 lib/cherrypy/lib/reprconf.py | 516 --
 lib/cherrypy/lib/sessions.py | 919 --
 lib/cherrypy/lib/static.py | 390 -
 lib/cherrypy/lib/xmlrpcutil.py | 61 -
 lib/cherrypy/process/__init__.py | 17 -
 lib/cherrypy/process/plugins.py | 752 --
 lib/cherrypy/process/servers.py | 416 -
 lib/cherrypy/process/win32.py | 183 -
 lib/cherrypy/process/wspbus.py | 590 --
 lib/cherrypy/scaffold/__init__.py | 63 -
 lib/cherrypy/scaffold/apache-fcgi.conf | 22 -
 lib/cherrypy/scaffold/example.conf | 3 -
 lib/cherrypy/scaffold/site.conf | 14 -
 .../static/made_with_cherrypy_small.png | Bin 6347 -> 0 bytes
 lib/cherrypy/test/__init__.py | 24 -
 lib/cherrypy/test/_test_decorators.py | 39 -
 lib/cherrypy/test/_test_states_demo.py | 69 -
 lib/cherrypy/test/benchmark.py | 425 -
 lib/cherrypy/test/checkerdemo.py | 49 -
 lib/cherrypy/test/fastcgi.conf | 18 -
 lib/cherrypy/test/fcgi.conf | 14 -
 lib/cherrypy/test/helper.py | 542 --
 lib/cherrypy/test/logtest.py | 228 -
 lib/cherrypy/test/modfastcgi.py | 136 -
 lib/cherrypy/test/modfcgid.py | 124 -
 lib/cherrypy/test/modpy.py | 164 -
 lib/cherrypy/test/modwsgi.py | 154 -
 lib/cherrypy/test/sessiondemo.py | 161 -
 lib/cherrypy/test/static/404.html | 5 -
 lib/cherrypy/test/static/dirback.jpg | Bin 16585 -> 0 bytes
 lib/cherrypy/test/static/index.html | 1 -
 lib/cherrypy/test/style.css | 1 -
 lib/cherrypy/test/test.pem | 38 -
 lib/cherrypy/test/test_auth_basic.py | 135 -
 lib/cherrypy/test/test_auth_digest.py | 134 -
 lib/cherrypy/test/test_bus.py | 274 -
 lib/cherrypy/test/test_caching.py | 392 -
 lib/cherrypy/test/test_compat.py | 34 -
 lib/cherrypy/test/test_config.py | 303 -
 lib/cherrypy/test/test_config_server.py | 126 -
 lib/cherrypy/test/test_conn.py | 873 --
 lib/cherrypy/test/test_core.py | 823 --
 .../test/test_dynamicobjectmapping.py | 424 -
 lib/cherrypy/test/test_encoding.py | 433 -
 lib/cherrypy/test/test_etags.py | 84 -
 lib/cherrypy/test/test_http.py | 307 -
 lib/cherrypy/test/test_httputil.py | 80 -
 lib/cherrypy/test/test_iterator.py | 196 -
 lib/cherrypy/test/test_json.py | 102 -
 lib/cherrypy/test/test_logging.py | 209 -
 lib/cherrypy/test/test_mime.py | 134 -
 lib/cherrypy/test/test_misc_tools.py | 210 -
 lib/cherrypy/test/test_native.py | 38 -
 lib/cherrypy/test/test_objectmapping.py | 430 -
 lib/cherrypy/test/test_params.py | 61 -
 lib/cherrypy/test/test_plugins.py | 14 -
 lib/cherrypy/test/test_proxy.py | 154 -
 lib/cherrypy/test/test_refleaks.py | 66 -
 lib/cherrypy/test/test_request_obj.py | 932 --
 lib/cherrypy/test/test_routes.py | 80 -
 lib/cherrypy/test/test_session.py | 512 --
 lib/cherrypy/test/test_sessionauthenticate.py | 61 -
 lib/cherrypy/test/test_states.py | 473 -
 lib/cherrypy/test/test_static.py | 438 -
 lib/cherrypy/test/test_tools.py | 468 -
 lib/cherrypy/test/test_tutorials.py | 210 -
 lib/cherrypy/test/test_virtualhost.py | 113 -
 lib/cherrypy/test/test_wsgi_ns.py | 93 -
 lib/cherrypy/test/test_wsgi_unix_socket.py | 93 -
 lib/cherrypy/test/test_wsgi_vhost.py | 35 -
 lib/cherrypy/test/test_wsgiapps.py | 120 -
 lib/cherrypy/test/test_xmlrpc.py | 183 -
 lib/cherrypy/test/webtest.py | 11 -
 lib/cherrypy/tutorial/README.rst | 16 -
 lib/cherrypy/tutorial/__init__.py | 3 -
 lib/cherrypy/tutorial/custom_error.html | 14 -
 lib/cherrypy/tutorial/pdf_file.pdf | Bin 85698 -> 0 bytes
 lib/cherrypy/tutorial/tut01_helloworld.py | 34 -
 lib/cherrypy/tutorial/tut02_expose_methods.py | 32 -
 lib/cherrypy/tutorial/tut03_get_and_post.py | 51 -
 lib/cherrypy/tutorial/tut04_complex_site.py | 103 -
 .../tutorial/tut05_derived_objects.py | 80 -
 lib/cherrypy/tutorial/tut06_default_method.py | 61 -
 lib/cherrypy/tutorial/tut07_sessions.py | 41 -
 .../tutorial/tut08_generators_and_yield.py | 44 -
 lib/cherrypy/tutorial/tut09_files.py | 105 -
 lib/cherrypy/tutorial/tut10_http_errors.py | 84 -
 lib/cherrypy/tutorial/tutorial.conf | 4 -
 lib/cloudinary/__init__.py | 793 --
 lib/cloudinary/api.py | 653 --
 lib/cloudinary/auth_token.py | 49 -
 lib/cloudinary/cache/__init__.py | 0
 lib/cloudinary/cache/adapter/__init__.py | 0
 lib/cloudinary/cache/adapter/cache_adapter.py | 63 -
 .../cache/adapter/key_value_cache_adapter.py | 61 -
 .../cache/responsive_breakpoints_cache.py | 124 -
 lib/cloudinary/cache/storage/__init__.py | 0
 .../storage/file_system_key_value_storage.py | 79 -
 .../cache/storage/key_value_storage.py | 51 -
 lib/cloudinary/compat.py | 35 -
 lib/cloudinary/exceptions.py | 33 -
 lib/cloudinary/forms.py | 142 -
 lib/cloudinary/http_client.py | 43 -
 lib/cloudinary/models.py | 133 -
 lib/cloudinary/poster/__init__.py | 31 -
 lib/cloudinary/poster/encode.py | 456 -
 lib/cloudinary/poster/streaminghttp.py | 209 -
 lib/cloudinary/search.py | 60 -
 .../static/html/cloudinary_cors.html | 43 -
 .../static/js/canvas-to-blob.min.js | 2 -
 lib/cloudinary/static/js/jquery.cloudinary.js | 4752 ----------
 .../static/js/jquery.fileupload-image.js | 326 -
 .../static/js/jquery.fileupload-process.js | 178 -
 .../static/js/jquery.fileupload-validate.js | 125 -
 lib/cloudinary/static/js/jquery.fileupload.js | 1502 ----
 .../static/js/jquery.iframe-transport.js | 224 -
 lib/cloudinary/static/js/jquery.ui.widget.js | 752 --
 .../static/js/load-image.all.min.js | 2 -
 .../templates/cloudinary_direct_upload.html | 12 -
 .../templates/cloudinary_includes.html | 14 -
 .../templates/cloudinary_js_config.html | 3 -
 lib/cloudinary/templatetags/__init__.py | 1 -
 lib/cloudinary/templatetags/cloudinary.py | 86 -
 lib/cloudinary/uploader.py | 426 -
 lib/cloudinary/utils.py | 1334 ---
 lib/concurrent/LICENSE | 21 -
 lib/concurrent/__init__.py | 3 -
 lib/concurrent/futures/__init__.py | 23 -
 lib/concurrent/futures/_base.py | 605 --
 lib/concurrent/futures/_compat.py | 111 -
 lib/concurrent/futures/process.py | 363 -
 lib/concurrent/futures/thread.py | 138 -
 lib/configobj/__init__.py | 2453 ------
 lib/configobj/_version.py | 2 -
 lib/configobj/validate.py | 1474 ----
 lib/contextlib2.py | 518 --
 lib/dateutil/__init__.py | 2 -
 lib/dateutil/easter.py | 89 -
 lib/dateutil/parser.py | 1205 ---
 lib/dateutil/relativedelta.py | 450 -
 lib/dateutil/rrule.py | 1375 ---
 lib/dateutil/tz.py | 986 ---
 lib/dateutil/tzwin.py | 184 -
 lib/distro.py | 1216 ---
 lib/dns/__init__.py | 54 -
 lib/dns/_compat.py | 21 -
 lib/dns/dnssec.py | 457 -
 lib/dns/e164.py | 84 -
 lib/dns/edns.py | 150 -
 lib/dns/entropy.py | 127 -
 lib/dns/exception.py | 124 -
 lib/dns/flags.py | 112 -
 lib/dns/grange.py | 65 -
 lib/dns/hash.py | 32 -
 lib/dns/inet.py | 111 -
 lib/dns/ipv4.py | 59 -
 lib/dns/ipv6.py | 172 -
 lib/dns/message.py | 1153 ---
 lib/dns/name.py | 763 --
 lib/dns/namedict.py | 104 -
 lib/dns/node.py | 178 -
 lib/dns/opcode.py | 109 -
 lib/dns/query.py | 536 --
 lib/dns/rcode.py | 125 -
 lib/dns/rdata.py | 464 -
 lib/dns/rdataclass.py | 118 -
 lib/dns/rdataset.py | 338 -
 lib/dns/rdatatype.py | 253 -
 lib/dns/rdtypes/ANY/AFSDB.py | 53 -
 lib/dns/rdtypes/ANY/CAA.py | 74 -
 lib/dns/rdtypes/ANY/CDNSKEY.py | 25 -
 lib/dns/rdtypes/ANY/CDS.py | 21 -
 lib/dns/rdtypes/ANY/CERT.py | 122 -
 lib/dns/rdtypes/ANY/CNAME.py | 25 -
 lib/dns/rdtypes/ANY/CSYNC.py | 124 -
 lib/dns/rdtypes/ANY/DLV.py | 21 -
 lib/dns/rdtypes/ANY/DNAME.py | 24 -
 lib/dns/rdtypes/ANY/DNSKEY.py | 25 -
 lib/dns/rdtypes/ANY/DS.py | 21 -
 lib/dns/rdtypes/ANY/EUI48.py | 29 -
 lib/dns/rdtypes/ANY/EUI64.py | 29 -
 lib/dns/rdtypes/ANY/GPOS.py | 160 -
 lib/dns/rdtypes/ANY/HINFO.py | 85 -
 lib/dns/rdtypes/ANY/HIP.py | 113 -
 lib/dns/rdtypes/ANY/ISDN.py | 98 -
 lib/dns/rdtypes/ANY/LOC.py | 327 -
 lib/dns/rdtypes/ANY/MX.py | 21 -
 lib/dns/rdtypes/ANY/NS.py | 21 -
 lib/dns/rdtypes/ANY/NSEC.py | 126 -
 lib/dns/rdtypes/ANY/NSEC3.py | 192 -
 lib/dns/rdtypes/ANY/NSEC3PARAM.py | 89 -
 lib/dns/rdtypes/ANY/PTR.py | 21 -
 lib/dns/rdtypes/ANY/RP.py | 80 -
 lib/dns/rdtypes/ANY/RRSIG.py | 156 -
 lib/dns/rdtypes/ANY/RT.py | 21 -
 lib/dns/rdtypes/ANY/SOA.py | 114 -
 lib/dns/rdtypes/ANY/SPF.py | 23 -
 lib/dns/rdtypes/ANY/SSHFP.py | 78 -
 lib/dns/rdtypes/ANY/TLSA.py | 83 -
 lib/dns/rdtypes/ANY/TXT.py | 21 -
 lib/dns/rdtypes/ANY/URI.py | 81 -
 lib/dns/rdtypes/ANY/X25.py | 65 -
 lib/dns/rdtypes/ANY/__init__.py | 50 -
 lib/dns/rdtypes/IN/A.py | 53 -
 lib/dns/rdtypes/IN/AAAA.py | 54 -
 lib/dns/rdtypes/IN/APL.py | 162 -
 lib/dns/rdtypes/IN/DHCID.py | 60 -
 lib/dns/rdtypes/IN/IPSECKEY.py | 149 -
 lib/dns/rdtypes/IN/KX.py | 21 -
 lib/dns/rdtypes/IN/NAPTR.py | 125 -
 lib/dns/rdtypes/IN/NSAP.py | 59 -
 lib/dns/rdtypes/IN/NSAP_PTR.py | 21 -
 lib/dns/rdtypes/IN/PX.py | 87 -
 lib/dns/rdtypes/IN/SRV.py | 81 -
 lib/dns/rdtypes/IN/WKS.py | 106 -
 lib/dns/rdtypes/IN/__init__.py | 30 -
 lib/dns/rdtypes/__init__.py | 24 -
 lib/dns/rdtypes/dnskeybase.py | 136 -
 lib/dns/rdtypes/dsbase.py | 84 -
 lib/dns/rdtypes/euibase.py | 71 -
 lib/dns/rdtypes/mxbase.py | 101 -
 lib/dns/rdtypes/nsbase.py | 81 -
 lib/dns/rdtypes/txtbase.py | 91 -
 lib/dns/renderer.py | 330 -
 lib/dns/resolver.py | 1343 ---
 lib/dns/reversename.py | 89 -
 lib/dns/rrset.py | 181 -
 lib/dns/set.py | 265 -
 lib/dns/tokenizer.py | 564 --
 lib/dns/tsig.py | 233 -
 lib/dns/tsigkeyring.py | 46 -
 lib/dns/ttl.py | 68 -
 lib/dns/update.py | 249 -
 lib/dns/version.py | 34 -
 lib/dns/wiredata.py | 84 -
 lib/dns/zone.py | 1064 ---
 lib/facebook/__init__.py | 478 --
 lib/facebook/version.py | 17 -
 lib/funcsigs/__init__.py | 829 --
 lib/funcsigs/version.py | 1 -
 lib/future/__init__.py | 93 -
 lib/future/backports/__init__.py | 26 -
 lib/future/backports/_markupbase.py | 422 -
 lib/future/backports/datetime.py | 2152 -----
 lib/future/backports/email/__init__.py | 78 -
 lib/future/backports/email/_encoded_words.py | 232 -
 .../backports/email/_header_value_parser.py | 2965 -------
 lib/future/backports/email/_parseaddr.py | 546 --
 lib/future/backports/email/_policybase.py | 365 -
 lib/future/backports/email/base64mime.py | 120 -
 lib/future/backports/email/charset.py | 409 -
 lib/future/backports/email/encoders.py | 90 -
 lib/future/backports/email/errors.py | 111 -
 lib/future/backports/email/feedparser.py | 525 --
 lib/future/backports/email/generator.py | 498 --
 lib/future/backports/email/header.py | 581 --
 lib/future/backports/email/headerregistry.py | 592 --
 lib/future/backports/email/iterators.py | 74 -
 lib/future/backports/email/message.py | 882 --
 lib/future/backports/email/mime/__init__.py | 0
 .../backports/email/mime/application.py | 39 -
 lib/future/backports/email/mime/audio.py | 74 -
 lib/future/backports/email/mime/base.py | 25 -
 lib/future/backports/email/mime/image.py | 48 -
 lib/future/backports/email/mime/message.py | 36 -
 lib/future/backports/email/mime/multipart.py | 49 -
 .../backports/email/mime/nonmultipart.py | 24 -
 lib/future/backports/email/mime/text.py | 44 -
 lib/future/backports/email/parser.py | 135 -
 lib/future/backports/email/policy.py | 193 -
 lib/future/backports/email/quoprimime.py | 326 -
 lib/future/backports/email/utils.py | 400 -
 lib/future/backports/html/__init__.py | 27 -
 lib/future/backports/html/entities.py | 2514 ------
 lib/future/backports/html/parser.py | 536 --
 lib/future/backports/http/__init__.py | 0
 lib/future/backports/http/client.py | 1346 ---
 lib/future/backports/http/cookiejar.py | 2110 -----
 lib/future/backports/http/cookies.py | 598 --
 lib/future/backports/http/server.py | 1226 ---
 lib/future/backports/misc.py | 944 --
 lib/future/backports/socket.py | 454 -
 lib/future/backports/socketserver.py | 747 --
 lib/future/backports/test/__init__.py | 9 -
 lib/future/backports/test/badcert.pem | 36 -
 lib/future/backports/test/badkey.pem | 40 -
 lib/future/backports/test/dh512.pem | 9 -
 .../test/https_svn_python_org_root.pem | 41 -
 lib/future/backports/test/keycert.passwd.pem | 33 -
 lib/future/backports/test/keycert.pem | 31 -
 lib/future/backports/test/keycert2.pem | 31 -
 lib/future/backports/test/nokia.pem | 31 -
 lib/future/backports/test/nullbytecert.pem | 90 -
 lib/future/backports/test/nullcert.pem | 0
 lib/future/backports/test/pystone.py | 272 -
 lib/future/backports/test/sha256.pem | 128 -
 lib/future/backports/test/ssl_cert.pem | 15 -
 lib/future/backports/test/ssl_key.passwd.pem | 18 -
 lib/future/backports/test/ssl_key.pem | 16 -
 lib/future/backports/test/ssl_servers.py | 207 -
 lib/future/backports/test/support.py | 2048 -----
 lib/future/backports/total_ordering.py | 38 -
 lib/future/backports/urllib/__init__.py | 0
 lib/future/backports/urllib/error.py | 75 -
 lib/future/backports/urllib/parse.py | 991 ---
 lib/future/backports/urllib/request.py | 2647 ------
 lib/future/backports/urllib/response.py | 103 -
 lib/future/backports/urllib/robotparser.py | 211 -
 lib/future/backports/xmlrpc/__init__.py | 1 -
 lib/future/backports/xmlrpc/client.py | 1496 ----
 lib/future/backports/xmlrpc/server.py | 999 ---
 lib/future/builtins/__init__.py | 51 -
 lib/future/builtins/disabled.py | 66 -
 lib/future/builtins/iterators.py | 52 -
 lib/future/builtins/misc.py | 135 -
 lib/future/builtins/new_min_max.py | 59 -
 lib/future/builtins/newnext.py | 70 -
 lib/future/builtins/newround.py | 102 -
 lib/future/builtins/newsuper.py | 114 -
 lib/future/moves/__init__.py | 8 -
 lib/future/moves/_dummy_thread.py | 8 -
 lib/future/moves/_markupbase.py | 8 -
 lib/future/moves/_thread.py | 8 -
 lib/future/moves/builtins.py | 10 -
 lib/future/moves/collections.py | 18 -
 lib/future/moves/configparser.py | 8 -
 lib/future/moves/copyreg.py | 12 -
 lib/future/moves/dbm/__init__.py | 20 -
 lib/future/moves/dbm/dumb.py | 9 -
 lib/future/moves/dbm/gnu.py | 9 -
 lib/future/moves/dbm/ndbm.py | 9 -
 lib/future/moves/html/__init__.py | 31 -
 lib/future/moves/html/entities.py | 8 -
 lib/future/moves/html/parser.py | 8 -
 lib/future/moves/http/__init__.py | 4 -
 lib/future/moves/http/client.py | 8 -
 lib/future/moves/http/cookiejar.py | 8 -
 lib/future/moves/http/cookies.py | 9 -
 lib/future/moves/http/server.py | 20 -
 lib/future/moves/itertools.py | 8 -
 lib/future/moves/pickle.py | 11 -
 lib/future/moves/queue.py | 8 -
 lib/future/moves/reprlib.py | 8 -
 lib/future/moves/socketserver.py | 8 -
 lib/future/moves/subprocess.py | 11 -
 lib/future/moves/sys.py | 8 -
 lib/future/moves/test/__init__.py | 5 -
 lib/future/moves/test/support.py | 10 -
 lib/future/moves/tkinter/__init__.py | 27 -
 lib/future/moves/tkinter/colorchooser.py | 12 -
 lib/future/moves/tkinter/commondialog.py | 12 -
 lib/future/moves/tkinter/constants.py | 12 -
 lib/future/moves/tkinter/dialog.py | 12 -
 lib/future/moves/tkinter/dnd.py | 12 -
 lib/future/moves/tkinter/filedialog.py | 12 -
 lib/future/moves/tkinter/font.py | 12 -
 lib/future/moves/tkinter/messagebox.py | 12 -
 lib/future/moves/tkinter/scrolledtext.py | 12 -
 lib/future/moves/tkinter/simpledialog.py | 12 -
 lib/future/moves/tkinter/tix.py | 12 -
 lib/future/moves/tkinter/ttk.py | 12 -
 lib/future/moves/urllib/__init__.py | 5 -
 lib/future/moves/urllib/error.py | 16 -
 lib/future/moves/urllib/parse.py | 28 -
 lib/future/moves/urllib/request.py | 94 -
 lib/future/moves/urllib/response.py | 12 -
 lib/future/moves/urllib/robotparser.py | 8 -
 lib/future/moves/winreg.py | 8 -
 lib/future/moves/xmlrpc/__init__.py | 0
 lib/future/moves/xmlrpc/client.py | 7 -
 lib/future/moves/xmlrpc/server.py | 7 -
 lib/future/standard_library/__init__.py | 815 --
 lib/future/tests/__init__.py | 0
 lib/future/tests/base.py | 539 --
 lib/future/types/__init__.py | 257 -
 lib/future/types/newbytes.py | 460 -
 lib/future/types/newdict.py | 111 -
 lib/future/types/newint.py | 381 -
 lib/future/types/newlist.py | 95 -
 lib/future/types/newmemoryview.py | 29 -
 lib/future/types/newobject.py | 117 -
 lib/future/types/newopen.py | 32 -
 lib/future/types/newrange.py | 170 -
 lib/future/types/newstr.py | 426 -
 lib/future/utils/__init__.py | 767 --
 lib/future/utils/surrogateescape.py | 198 -
 lib/future_fstrings.py | 297 -
 lib/gntp/LICENSE | 20 -
 lib/gntp/__init__.py | 0
 lib/gntp/cli.py | 141 -
 lib/gntp/config.py | 77 -
 lib/gntp/core.py | 511 --
 lib/gntp/errors.py | 25 -
 lib/gntp/notifier.py | 265 -
 lib/gntp/shim.py | 45 -
 lib/gntp/version.py | 4 -
 lib/hashing_passwords.py | 61 -
 lib/html5lib/__init__.py | 23 -
 lib/html5lib/constants.py | 3104 -------
 lib/html5lib/filters/__init__.py | 0
 lib/html5lib/filters/_base.py | 12 -
 .../filters/alphabeticalattributes.py | 20 -
 lib/html5lib/filters/inject_meta_charset.py | 65 -
 lib/html5lib/filters/lint.py | 93 -
 lib/html5lib/filters/optionaltags.py | 205 -
 lib/html5lib/filters/sanitizer.py | 12 -
 lib/html5lib/filters/whitespace.py | 38 -
 lib/html5lib/html5parser.py | 2713 ------
 lib/html5lib/ihatexml.py | 285 -
 lib/html5lib/inputstream.py | 886 --
 lib/html5lib/sanitizer.py | 271 -
 lib/html5lib/serializer/__init__.py | 16 -
 lib/html5lib/serializer/htmlserializer.py | 320 -
 lib/html5lib/tokenizer.py | 1731 ----
 lib/html5lib/treeadapters/__init__.py | 0
 lib/html5lib/treeadapters/sax.py | 44 -
 lib/html5lib/treebuilders/__init__.py | 76 -
 lib/html5lib/treebuilders/_base.py | 377 -
 lib/html5lib/treebuilders/dom.py | 227 -
 lib/html5lib/treebuilders/etree.py | 337 -
 lib/html5lib/treebuilders/etree_lxml.py | 369 -
 lib/html5lib/treewalkers/__init__.py | 57 -
 lib/html5lib/treewalkers/_base.py | 200 -
 lib/html5lib/treewalkers/dom.py | 46 -
 lib/html5lib/treewalkers/etree.py | 138 -
 lib/html5lib/treewalkers/genshistream.py | 69 -
 lib/html5lib/treewalkers/lxmletree.py | 204 -
 lib/html5lib/treewalkers/pulldom.py | 63 -
 lib/html5lib/trie/__init__.py | 12 -
 lib/html5lib/trie/_base.py | 37 -
 lib/html5lib/trie/datrie.py | 44 -
 lib/html5lib/trie/py.py | 67 -
 lib/html5lib/utils.py | 82 -
 lib/httpagentparser/__init__.py | 675 --
 lib/httpagentparser/more.py | 29 -
 lib/idna/__init__.py | 2 -
 lib/idna/codec.py | 118 -
 lib/idna/compat.py | 12 -
 lib/idna/core.py | 387 -
 lib/idna/idnadata.py | 1585 ----
 lib/idna/intranges.py | 53 -
 lib/idna/package_data.py | 2 -
 lib/idna/uts46data.py | 7634 -----------------
 lib/ipaddr.py | 1928 -----
 lib/ipaddress.py | 2417 ------
 lib/ipwhois/__init__.py | 29 -
 lib/ipwhois/asn.py | 956 ---
 lib/ipwhois/data/iso_3166-1.csv | 252 -
 lib/ipwhois/data/iso_3166-1_list_en.xml | 1003 ---
 lib/ipwhois/exceptions.py | 127 -
 lib/ipwhois/experimental.py | 457 -
 lib/ipwhois/hr.py | 509 --
 lib/ipwhois/ipwhois.py | 346 -
 lib/ipwhois/net.py | 936 --
 lib/ipwhois/nir.py | 682 --
 lib/ipwhois/rdap.py | 898 --
 lib/ipwhois/scripts/ipwhois_cli.py | 1499 ----
 lib/ipwhois/scripts/ipwhois_utils_cli.py | 269 -
 lib/ipwhois/utils.py | 631 --
 lib/ipwhois/whois.py | 809 --
 lib/jaraco/__init__.py | 1 -
 lib/jaraco/functools.py | 467 -
 lib/jwt/__init__.py | 29 -
 lib/jwt/__main__.py | 135 -
 lib/jwt/algorithms.py | 290 -
 lib/jwt/api_jws.py | 189 -
 lib/jwt/api_jwt.py | 187 -
 lib/jwt/compat.py | 52 -
 lib/jwt/contrib/__init__.py | 0
 lib/jwt/contrib/algorithms/__init__.py | 0
 lib/jwt/contrib/algorithms/py_ecdsa.py | 60 -
 lib/jwt/contrib/algorithms/pycrypto.py | 47 -
 lib/jwt/exceptions.py | 48 -
 lib/jwt/utils.py | 67 -
 lib/libfuturize/__init__.py | 1 -
 lib/libfuturize/fixer_util.py | 520 --
 lib/libfuturize/fixes/__init__.py | 97 -
 lib/libfuturize/fixes/fix_UserDict.py | 102 -
 lib/libfuturize/fixes/fix_absolute_import.py | 91 -
 ...future__imports_except_unicode_literals.py | 26 -
 lib/libfuturize/fixes/fix_basestring.py | 17 -
 lib/libfuturize/fixes/fix_bytes.py | 24 -
 lib/libfuturize/fixes/fix_cmp.py | 33 -
 lib/libfuturize/fixes/fix_division.py | 12 -
 lib/libfuturize/fixes/fix_division_safe.py | 104 -
 lib/libfuturize/fixes/fix_execfile.py | 37 -
 lib/libfuturize/fixes/fix_future_builtins.py | 59 -
 .../fixes/fix_future_standard_library.py | 24 -
 .../fix_future_standard_library_urllib.py | 28 -
 lib/libfuturize/fixes/fix_input.py | 32 -
 lib/libfuturize/fixes/fix_metaclass.py | 262 -
 lib/libfuturize/fixes/fix_next_call.py | 104 -
 lib/libfuturize/fixes/fix_object.py | 17 -
 lib/libfuturize/fixes/fix_oldstr_wrap.py | 39 -
 .../fixes/fix_order___future__imports.py | 36 -
 lib/libfuturize/fixes/fix_print.py | 94 -
 .../fixes/fix_print_with_import.py | 22 -
 lib/libfuturize/fixes/fix_raise.py | 107 -
 .../fixes/fix_remove_old__future__imports.py | 26 -
 lib/libfuturize/fixes/fix_unicode_keep_u.py | 24 -
 .../fixes/fix_unicode_literals_import.py | 18 -
 .../fixes/fix_xrange_with_import.py | 20 -
 lib/libfuturize/main.py | 322 -
 lib/libpasteurize/__init__.py | 1 -
 lib/libpasteurize/fixes/__init__.py | 54 -
 lib/libpasteurize/fixes/feature_base.py | 57 -
 .../fixes/fix_add_all__future__imports.py | 24 -
 .../fixes/fix_add_all_future_builtins.py | 37 -
 .../fix_add_future_standard_library_import.py | 23 -
 lib/libpasteurize/fixes/fix_annotations.py | 48 -
 lib/libpasteurize/fixes/fix_division.py | 28 -
 lib/libpasteurize/fixes/fix_features.py | 86 -
 lib/libpasteurize/fixes/fix_fullargspec.py | 16 -
 .../fixes/fix_future_builtins.py | 46 -
 lib/libpasteurize/fixes/fix_getcwd.py | 26 -
 lib/libpasteurize/fixes/fix_imports.py | 112 -
 lib/libpasteurize/fixes/fix_imports2.py | 174 -
 lib/libpasteurize/fixes/fix_kwargs.py | 147 -
 lib/libpasteurize/fixes/fix_memoryview.py | 21 -
 lib/libpasteurize/fixes/fix_metaclass.py | 78 -
 lib/libpasteurize/fixes/fix_newstyle.py | 33 -
 lib/libpasteurize/fixes/fix_next.py | 43 -
 lib/libpasteurize/fixes/fix_printfunction.py | 17 -
 lib/libpasteurize/fixes/fix_raise.py | 25 -
 lib/libpasteurize/fixes/fix_raise_.py | 35 -
 lib/libpasteurize/fixes/fix_throw.py | 23 -
 lib/libpasteurize/fixes/fix_unpacking.py | 120 -
 lib/libpasteurize/main.py | 204 -
 lib/logutils/__init__.py | 195 -
 lib/logutils/adapter.py | 116 -
 lib/logutils/colorize.py | 194 -
 lib/logutils/dictconfig.py | 573 --
 lib/logutils/http.py | 90 -
 lib/logutils/queue.py | 225 -
 lib/logutils/redis.py | 75 -
 lib/logutils/testing.py | 156 -
 lib/mako/__init__.py | 8 -
 lib/mako/_ast_util.py | 716 --
 lib/mako/ast.py | 205 -
 lib/mako/cache.py | 240 -
 lib/mako/cmd.py | 103 -
 lib/mako/codegen.py | 1318 ---
 lib/mako/compat.py | 166 -
 lib/mako/exceptions.py | 430 -
 lib/mako/ext/__init__.py | 0
 lib/mako/ext/autohandler.py | 70 -
 lib/mako/ext/babelplugin.py | 58 -
 lib/mako/ext/beaker_cache.py | 82 -
 lib/mako/ext/extract.py | 125 -
 lib/mako/ext/linguaplugin.py | 57 -
 lib/mako/ext/preprocessors.py | 20 -
 lib/mako/ext/pygmentplugin.py | 157 -
 lib/mako/ext/turbogears.py | 61 -
 lib/mako/filters.py | 219 -
 lib/mako/lexer.py | 490 --
 lib/mako/lookup.py | 372 -
 lib/mako/parsetree.py | 665 --
 lib/mako/pygen.py | 305 -
 lib/mako/pyparser.py | 242 -
 lib/mako/runtime.py | 970 ---
 lib/mako/template.py | 780 --
 lib/mako/util.py | 400 -
 lib/more_itertools/__init__.py | 2 -
 lib/more_itertools/more.py | 2333 -----
 lib/more_itertools/recipes.py | 577 --
 lib/more_itertools/tests/__init__.py | 0
 lib/more_itertools/tests/test_more.py | 2313 -----
 lib/more_itertools/tests/test_recipes.py | 616 --
 lib/musicbrainzngs/__init__.py | 2 -
 lib/musicbrainzngs/caa.py | 187 -
 lib/musicbrainzngs/compat.py | 61 -
 lib/musicbrainzngs/mbxml.py | 817 --
 lib/musicbrainzngs/musicbrainz.py | 1312 ---
 lib/musicbrainzngs/util.py | 44 -
 lib/oauthlib/__init__.py | 25 -
 lib/oauthlib/common.py | 443 -
 lib/oauthlib/oauth1/__init__.py | 19 -
 lib/oauthlib/oauth1/rfc5849/__init__.py | 327 -
 .../oauth1/rfc5849/endpoints/__init__.py | 9 -
 .../oauth1/rfc5849/endpoints/access_token.py | 215 -
 .../oauth1/rfc5849/endpoints/authorization.py | 161 -
 lib/oauthlib/oauth1/rfc5849/endpoints/base.py | 216 -
 .../rfc5849/endpoints/pre_configured.py | 14 -
 .../oauth1/rfc5849/endpoints/request_token.py | 209 -
 .../oauth1/rfc5849/endpoints/resource.py | 165 -
 .../rfc5849/endpoints/signature_only.py | 79 -
 lib/oauthlib/oauth1/rfc5849/errors.py | 79 -
 lib/oauthlib/oauth1/rfc5849/parameters.py | 137 -
 .../oauth1/rfc5849/request_validator.py | 823 --
 lib/oauthlib/oauth1/rfc5849/signature.py | 611 --
 lib/oauthlib/oauth1/rfc5849/utils.py | 89 -
 lib/oauthlib/oauth2/__init__.py | 34 -
 lib/oauthlib/oauth2/rfc6749/__init__.py | 66 -
 .../oauth2/rfc6749/clients/__init__.py | 16 -
 .../rfc6749/clients/backend_application.py | 61 -
 lib/oauthlib/oauth2/rfc6749/clients/base.py | 490 --
 .../rfc6749/clients/legacy_application.py | 73 -
 .../rfc6749/clients/mobile_application.py | 173 -
 .../rfc6749/clients/service_application.py | 176 -
 .../oauth2/rfc6749/clients/web_application.py | 176 -
 .../oauth2/rfc6749/endpoints/__init__.py | 19 -
 .../oauth2/rfc6749/endpoints/authorization.py | 114 -
 lib/oauthlib/oauth2/rfc6749/endpoints/base.py | 65 -
 .../rfc6749/endpoints/pre_configured.py | 209 -
 .../oauth2/rfc6749/endpoints/resource.py | 87 -
 .../oauth2/rfc6749/endpoints/revocation.py | 130 -
 .../oauth2/rfc6749/endpoints/token.py | 100 -
 lib/oauthlib/oauth2/rfc6749/errors.py | 271 -
 .../oauth2/rfc6749/grant_types/__init__.py | 12 -
 .../rfc6749/grant_types/authorization_code.py | 402 -
 .../oauth2/rfc6749/grant_types/base.py | 41 -
 .../rfc6749/grant_types/client_credentials.py | 112 -
 .../oauth2/rfc6749/grant_types/implicit.py | 345 -
 .../rfc6749/grant_types/refresh_token.py | 116 -
 .../resource_owner_password_credentials.py | 194 -
 lib/oauthlib/oauth2/rfc6749/parameters.py | 406 -
 .../oauth2/rfc6749/request_validator.py | 465 -
 lib/oauthlib/oauth2/rfc6749/tokens.py | 298 -
 lib/oauthlib/oauth2/rfc6749/utils.py | 92 -
 lib/oauthlib/signals.py | 41 -
 lib/oauthlib/uri_validate.py | 215 -
 lib/osxnotify/__init__.py | 0
 lib/osxnotify/appIcon.icns | Bin 860162 -> 0 bytes
 lib/osxnotify/registerapp.py | 133 -
 lib/paho/__init__.py | 0
 lib/paho/mqtt/__init__.py | 1 -
 lib/paho/mqtt/client.py | 2337 -----
 lib/paho/mqtt/publish.py | 217 -
 lib/past/__init__.py | 90 -
 lib/past/builtins/__init__.py | 72 -
 lib/past/builtins/misc.py | 94 -
 lib/past/builtins/noniterators.py | 272 -
 lib/past/translation/__init__.py | 485 --
 lib/past/types/__init__.py | 29 -
 lib/past/types/basestring.py | 39 -
 lib/past/types/olddict.py | 96 -
 lib/past/types/oldstr.py | 135 -
 lib/past/utils/__init__.py | 97 -
 lib/plexapi/__init__.py | 51 -
 lib/plexapi/alert.py | 87 -
 lib/plexapi/audio.py | 380 -
 lib/plexapi/base.py | 825 --
 lib/plexapi/client.py | 567 --
 lib/plexapi/compat.py | 118 -
 lib/plexapi/config.py | 64 -
 lib/plexapi/exceptions.py | 31 -
 lib/plexapi/gdm.py | 148 -
 lib/plexapi/library.py | 1350 ---
 lib/plexapi/media.py | 929 --
 lib/plexapi/myplex.py | 1296 ---
 lib/plexapi/photo.py | 203 -
 lib/plexapi/playlist.py | 308 -
 lib/plexapi/playqueue.py | 75 -
 lib/plexapi/server.py | 653 --
 lib/plexapi/settings.py | 160 -
 lib/plexapi/sonos.py | 116 -
 lib/plexapi/sync.py | 312 -
 lib/plexapi/utils.py | 406 -
 lib/plexapi/video.py | 782 --
 lib/portend.py | 222 -
 lib/profilehooks.py | 852 --
 lib/pytz/__init__.py | 1551 ----
 lib/pytz/exceptions.py | 48 -
 lib/pytz/lazy.py | 172 -
 lib/pytz/reference.py | 140 -
 lib/pytz/tests/test_docs.py | 34 -
 lib/pytz/tests/test_lazy.py | 315 -
 lib/pytz/tests/test_tzinfo.py | 869 --
 lib/pytz/tzfile.py | 134 -
 lib/pytz/tzinfo.py | 577 --
 lib/pytz/zoneinfo/Africa/Abidjan | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Accra | Bin 816 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Addis_Ababa | Bin 251 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Algiers | Bin 735 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Asmara | Bin 251 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Asmera | Bin 251 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Bamako | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Bangui | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Banjul | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Bissau | Bin 194 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Blantyre | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Brazzaville | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Bujumbura | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Cairo | Bin 1955 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Casablanca | Bin 2429 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Ceuta | Bin 2036 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Conakry | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Dakar | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Dar_es_Salaam | Bin 251 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Djibouti | Bin 251 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Douala | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/El_Aaiun | Bin 2295 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Freetown | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Gaborone | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Harare | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Johannesburg | Bin 246 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Juba | Bin 653 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Kampala | Bin 251 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Khartoum | Bin 679 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Kigali | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Kinshasa | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Lagos | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Libreville | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Lome | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Luanda | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Lubumbashi | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Lusaka | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Malabo | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Maputo | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Maseru | Bin 246 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Mbabane | Bin 246 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Mogadishu | Bin 251 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Monrovia | Bin 208 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Nairobi | Bin 251 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Ndjamena | Bin 199 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Niamey | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Nouakchott | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Ouagadougou | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Porto-Novo | Bin 149 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Sao_Tome | Bin 254 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Timbuktu | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Tripoli | Bin 625 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Tunis | Bin 689 -> 0 bytes
 lib/pytz/zoneinfo/Africa/Windhoek | Bin 955 -> 0 bytes
 lib/pytz/zoneinfo/America/Adak | Bin 2356 -> 0 bytes
 lib/pytz/zoneinfo/America/Anchorage | Bin 2371 -> 0 bytes
 lib/pytz/zoneinfo/America/Anguilla | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/Antigua | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/Araguaina | Bin 884 -> 0 bytes
 .../zoneinfo/America/Argentina/Buenos_Aires | Bin 1076 -> 0 bytes
 lib/pytz/zoneinfo/America/Argentina/Catamarca | Bin 1076 -> 0 bytes
 .../zoneinfo/America/Argentina/ComodRivadavia | Bin 1076 -> 0 bytes
 lib/pytz/zoneinfo/America/Argentina/Cordoba | Bin 1076 -> 0 bytes
 lib/pytz/zoneinfo/America/Argentina/Jujuy | Bin 1048 -> 0 bytes
 lib/pytz/zoneinfo/America/Argentina/La_Rioja | Bin 1090 -> 0 bytes
 lib/pytz/zoneinfo/America/Argentina/Mendoza | Bin 1076 -> 0 bytes
 .../zoneinfo/America/Argentina/Rio_Gallegos | Bin 1076 -> 0 bytes
 lib/pytz/zoneinfo/America/Argentina/Salta | Bin 1048 -> 0 bytes
 lib/pytz/zoneinfo/America/Argentina/San_Juan | Bin 1090 -> 0 bytes
 lib/pytz/zoneinfo/America/Argentina/San_Luis | Bin 1102 -> 0 bytes
 lib/pytz/zoneinfo/America/Argentina/Tucuman | Bin 1104 -> 0 bytes
 lib/pytz/zoneinfo/America/Argentina/Ushuaia | Bin 1076 -> 0 bytes
 lib/pytz/zoneinfo/America/Aruba | Bin 186 -> 0 bytes
 lib/pytz/zoneinfo/America/Asuncion | Bin 2044 -> 0 bytes
 lib/pytz/zoneinfo/America/Atikokan | Bin 336 -> 0 bytes
 lib/pytz/zoneinfo/America/Atka | Bin 2356 -> 0 bytes
 lib/pytz/zoneinfo/America/Bahia | Bin 1024 -> 0 bytes
 lib/pytz/zoneinfo/America/Bahia_Banderas | Bin 1546 -> 0 bytes
 lib/pytz/zoneinfo/America/Barbados | Bin 314 -> 0 bytes
 lib/pytz/zoneinfo/America/Belem | Bin 576 -> 0 bytes
 lib/pytz/zoneinfo/America/Belize | Bin 948 -> 0 bytes
 lib/pytz/zoneinfo/America/Blanc-Sablon | Bin 298 -> 0 bytes
 lib/pytz/zoneinfo/America/Boa_Vista | Bin 632 -> 0 bytes
 lib/pytz/zoneinfo/America/Bogota | Bin 246 -> 0 bytes
 lib/pytz/zoneinfo/America/Boise | Bin 2394 -> 0 bytes
 lib/pytz/zoneinfo/America/Buenos_Aires | Bin 1076 -> 0 bytes
 lib/pytz/zoneinfo/America/Cambridge_Bay | Bin 2084 -> 0 bytes
 lib/pytz/zoneinfo/America/Campo_Grande | Bin 1444 -> 0 bytes
 lib/pytz/zoneinfo/America/Cancun | Bin 782 -> 0 bytes
 lib/pytz/zoneinfo/America/Caracas | Bin 264 -> 0 bytes
 lib/pytz/zoneinfo/America/Catamarca | Bin 1076 -> 0 bytes
 lib/pytz/zoneinfo/America/Cayenne | Bin 198 -> 0 bytes
 lib/pytz/zoneinfo/America/Cayman | Bin 182 -> 0 bytes
 lib/pytz/zoneinfo/America/Chicago | Bin 3576 -> 0 bytes
 lib/pytz/zoneinfo/America/Chihuahua | Bin 1484 -> 0 bytes
 lib/pytz/zoneinfo/America/Coral_Harbour | Bin 336 -> 0 bytes
 lib/pytz/zoneinfo/America/Cordoba | Bin 1076 -> 0 bytes
 lib/pytz/zoneinfo/America/Costa_Rica | Bin 316 -> 0 bytes
 lib/pytz/zoneinfo/America/Creston | Bin 208 -> 0 bytes
 lib/pytz/zoneinfo/America/Cuiaba | Bin 1416 -> 0 bytes
 lib/pytz/zoneinfo/America/Curacao | Bin 186 -> 0 bytes
 lib/pytz/zoneinfo/America/Danmarkshavn | Bin 698 -> 0 bytes
 lib/pytz/zoneinfo/America/Dawson | Bin 2084 -> 0 bytes
 lib/pytz/zoneinfo/America/Dawson_Creek | Bin 1050 -> 0 bytes
 lib/pytz/zoneinfo/America/Denver | Bin 2444 -> 0 bytes
 lib/pytz/zoneinfo/America/Detroit | Bin 2230 -> 0 bytes
 lib/pytz/zoneinfo/America/Dominica | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/Edmonton | Bin 2332 -> 0 bytes
 lib/pytz/zoneinfo/America/Eirunepe | Bin 656 -> 0 bytes
 lib/pytz/zoneinfo/America/El_Salvador | Bin 224 -> 0 bytes
 lib/pytz/zoneinfo/America/Ensenada | Bin 2342 -> 0 bytes
 lib/pytz/zoneinfo/America/Fort_Nelson | Bin 2240 -> 0 bytes
 lib/pytz/zoneinfo/America/Fort_Wayne | Bin 1666 -> 0 bytes
 lib/pytz/zoneinfo/America/Fortaleza | Bin 716 -> 0 bytes
 lib/pytz/zoneinfo/America/Glace_Bay | Bin 2192 -> 0 bytes
 lib/pytz/zoneinfo/America/Godthab | Bin 1878 -> 0 bytes
 lib/pytz/zoneinfo/America/Goose_Bay | Bin 3210 -> 0 bytes
 lib/pytz/zoneinfo/America/Grand_Turk | Bin 1848 -> 0 bytes
 lib/pytz/zoneinfo/America/Grenada | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/Guadeloupe | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/Guatemala | Bin 280 -> 0 bytes
 lib/pytz/zoneinfo/America/Guayaquil | Bin 246 -> 0 bytes
 lib/pytz/zoneinfo/America/Guyana | Bin 236 -> 0 bytes
 lib/pytz/zoneinfo/America/Halifax | Bin 3424 -> 0 bytes
 lib/pytz/zoneinfo/America/Havana | Bin 2416 -> 0 bytes
 lib/pytz/zoneinfo/America/Hermosillo | Bin 416 -> 0 bytes
 .../zoneinfo/America/Indiana/Indianapolis | Bin 1666 -> 0 bytes
 lib/pytz/zoneinfo/America/Indiana/Knox | Bin 2428 -> 0 bytes
 lib/pytz/zoneinfo/America/Indiana/Marengo | Bin 1722 -> 0 bytes
 lib/pytz/zoneinfo/America/Indiana/Petersburg | Bin 1904 -> 0 bytes
 lib/pytz/zoneinfo/America/Indiana/Tell_City | Bin 1684 -> 0 bytes
 lib/pytz/zoneinfo/America/Indiana/Vevay | Bin 1414 -> 0 bytes
 lib/pytz/zoneinfo/America/Indiana/Vincennes | Bin 1694 -> 0 bytes
 lib/pytz/zoneinfo/America/Indiana/Winamac | Bin 1778 -> 0 bytes
 lib/pytz/zoneinfo/America/Indianapolis | Bin 1666 -> 0 bytes
 lib/pytz/zoneinfo/America/Inuvik | Bin 1894 -> 0 bytes
 lib/pytz/zoneinfo/America/Iqaluit | Bin 2032 -> 0 bytes
 lib/pytz/zoneinfo/America/Jamaica | Bin 482 -> 0 bytes
 lib/pytz/zoneinfo/America/Jujuy | Bin 1048 -> 0 bytes
 lib/pytz/zoneinfo/America/Juneau | Bin 2353 -> 0 bytes
 lib/pytz/zoneinfo/America/Kentucky/Louisville | Bin 2772 -> 0 bytes
 lib/pytz/zoneinfo/America/Kentucky/Monticello | Bin 2352 -> 0 bytes
 lib/pytz/zoneinfo/America/Knox_IN | Bin 2428 -> 0 bytes
 lib/pytz/zoneinfo/America/Kralendijk | Bin 186 -> 0 bytes
 lib/pytz/zoneinfo/America/La_Paz | Bin 232 -> 0 bytes
 lib/pytz/zoneinfo/America/Lima | Bin 406 -> 0 bytes
 lib/pytz/zoneinfo/America/Los_Angeles | Bin 2836 -> 0 bytes
 lib/pytz/zoneinfo/America/Louisville | Bin 2772 -> 0 bytes
 lib/pytz/zoneinfo/America/Lower_Princes | Bin 186 -> 0 bytes
 lib/pytz/zoneinfo/America/Maceio | Bin 744 -> 0 bytes
 lib/pytz/zoneinfo/America/Managua | Bin 430 -> 0 bytes
 lib/pytz/zoneinfo/America/Manaus | Bin 604 -> 0 bytes
 lib/pytz/zoneinfo/America/Marigot | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/Martinique | Bin 232 -> 0 bytes
 lib/pytz/zoneinfo/America/Matamoros | Bin 1390 -> 0 bytes
 lib/pytz/zoneinfo/America/Mazatlan | Bin 1526 -> 0 bytes
 lib/pytz/zoneinfo/America/Mendoza | Bin 1076 -> 0 bytes
 lib/pytz/zoneinfo/America/Menominee | Bin 2274 -> 0 bytes
 lib/pytz/zoneinfo/America/Merida | Bin 1422 -> 0 bytes
 lib/pytz/zoneinfo/America/Metlakatla | Bin 1423 -> 0 bytes
 lib/pytz/zoneinfo/America/Mexico_City | Bin 1584 -> 0 bytes
 lib/pytz/zoneinfo/America/Miquelon | Bin 1666 -> 0 bytes
 lib/pytz/zoneinfo/America/Moncton | Bin 3154 -> 0 bytes
 lib/pytz/zoneinfo/America/Monterrey | Bin 1390 -> 0 bytes
 lib/pytz/zoneinfo/America/Montevideo | Bin 1510 -> 0 bytes
 lib/pytz/zoneinfo/America/Montreal | Bin 3494 -> 0 bytes
 lib/pytz/zoneinfo/America/Montserrat | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/Nassau | Bin 2258 -> 0 bytes
 lib/pytz/zoneinfo/America/New_York | Bin 3536 -> 0 bytes
 lib/pytz/zoneinfo/America/Nipigon | Bin 2122 -> 0 bytes
 lib/pytz/zoneinfo/America/Nome | Bin 2367 -> 0 bytes
 lib/pytz/zoneinfo/America/Noronha | Bin 716 -> 0 bytes
 lib/pytz/zoneinfo/America/North_Dakota/Beulah | Bin 2380 -> 0 bytes
 lib/pytz/zoneinfo/America/North_Dakota/Center | Bin 2380 -> 0 bytes
 .../zoneinfo/America/North_Dakota/New_Salem | Bin 2380 -> 0 bytes
 lib/pytz/zoneinfo/America/Ojinaga | Bin 1484 -> 0 bytes
 lib/pytz/zoneinfo/America/Panama | Bin 182 -> 0 bytes
 lib/pytz/zoneinfo/America/Pangnirtung | Bin 2094 -> 0 bytes
 lib/pytz/zoneinfo/America/Paramaribo | Bin 262 -> 0 bytes
 lib/pytz/zoneinfo/America/Phoenix | Bin 328 -> 0 bytes
 lib/pytz/zoneinfo/America/Port-au-Prince | Bin 1434 -> 0 bytes
 lib/pytz/zoneinfo/America/Port_of_Spain | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/Porto_Acre | Bin 628 -> 0 bytes
 lib/pytz/zoneinfo/America/Porto_Velho | Bin 576 -> 0 bytes
 lib/pytz/zoneinfo/America/Puerto_Rico | Bin 246 -> 0 bytes
 lib/pytz/zoneinfo/America/Punta_Arenas | Bin 1902 -> 0 bytes
 lib/pytz/zoneinfo/America/Rainy_River | Bin 2122 -> 0 bytes
 lib/pytz/zoneinfo/America/Rankin_Inlet | Bin 1892 -> 0 bytes
 lib/pytz/zoneinfo/America/Recife | Bin 716 -> 0 bytes
 lib/pytz/zoneinfo/America/Regina | Bin 980 -> 0 bytes
 lib/pytz/zoneinfo/America/Resolute | Bin 1892 -> 0 bytes
 lib/pytz/zoneinfo/America/Rio_Branco | Bin 628 -> 0 bytes
 lib/pytz/zoneinfo/America/Rosario | Bin 1076 -> 0 bytes
 lib/pytz/zoneinfo/America/Santa_Isabel | Bin 2342 -> 0 bytes
 lib/pytz/zoneinfo/America/Santarem | Bin 602 -> 0 bytes
 lib/pytz/zoneinfo/America/Santiago | Bin 2529 -> 0 bytes
 lib/pytz/zoneinfo/America/Santo_Domingo | Bin 458 -> 0 bytes
 lib/pytz/zoneinfo/America/Sao_Paulo | Bin 1444 -> 0 bytes
 lib/pytz/zoneinfo/America/Scoresbysund | Bin 1916 -> 0 bytes
 lib/pytz/zoneinfo/America/Shiprock | Bin 2444 -> 0 bytes
 lib/pytz/zoneinfo/America/Sitka | Bin 2329 -> 0 bytes
 lib/pytz/zoneinfo/America/St_Barthelemy | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/St_Johns | Bin 3655 -> 0 bytes
 lib/pytz/zoneinfo/America/St_Kitts | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/St_Lucia | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/St_Thomas | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/St_Vincent | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/Swift_Current | Bin 560 -> 0 bytes
 lib/pytz/zoneinfo/America/Tegucigalpa | Bin 252 -> 0 bytes
 lib/pytz/zoneinfo/America/Thule | Bin 1502 -> 0 bytes
 lib/pytz/zoneinfo/America/Thunder_Bay | Bin 2202 -> 0 bytes
 lib/pytz/zoneinfo/America/Tijuana | Bin 2342 -> 0 bytes
 lib/pytz/zoneinfo/America/Toronto | Bin 3494 -> 0 bytes
 lib/pytz/zoneinfo/America/Tortola | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/Vancouver | Bin 2892 -> 0 bytes
 lib/pytz/zoneinfo/America/Virgin | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/America/Whitehorse | Bin 2084 -> 0 bytes
 lib/pytz/zoneinfo/America/Winnipeg | Bin 2868 -> 0 bytes
 lib/pytz/zoneinfo/America/Yakutat | Bin 2305 -> 0 bytes
 lib/pytz/zoneinfo/America/Yellowknife | Bin 1966 -> 0 bytes
 lib/pytz/zoneinfo/Antarctica/Casey | Bin 297 -> 0 bytes
 lib/pytz/zoneinfo/Antarctica/Davis | Bin 297 -> 0 bytes
 lib/pytz/zoneinfo/Antarctica/DumontDUrville | Bin 194 -> 0 bytes
 lib/pytz/zoneinfo/Antarctica/Macquarie | Bin 1520 -> 0 bytes
 lib/pytz/zoneinfo/Antarctica/Mawson | Bin 199 -> 0 bytes
 lib/pytz/zoneinfo/Antarctica/McMurdo | Bin 2437 -> 0 bytes
 lib/pytz/zoneinfo/Antarctica/Palmer | Bin 1418 -> 0 bytes
 lib/pytz/zoneinfo/Antarctica/Rothera | Bin 164 -> 0 bytes
 lib/pytz/zoneinfo/Antarctica/South_Pole | Bin 2437 -> 0 bytes
 lib/pytz/zoneinfo/Antarctica/Syowa | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Antarctica/Troll | Bin 1162 -> 0 bytes
 lib/pytz/zoneinfo/Antarctica/Vostok | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Arctic/Longyearbyen | Bin 2228 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Aden | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Almaty | Bin 997 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Amman | Bin 1853 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Anadyr | Bin 1188 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Aqtau | Bin 983 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Aqtobe | Bin 1011 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Ashgabat | Bin 619 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Ashkhabad | Bin 619 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Atyrau | Bin 991 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Baghdad | Bin 983 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Bahrain | Bin 199 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Baku | Bin 1227 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Bangkok | Bin 199 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Barnaul | Bin 1221 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Beirut | Bin 2154 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Bishkek | Bin 983 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Brunei | Bin 203 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Calcutta | Bin 285 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Chita | Bin 1221 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Choibalsan | Bin 949 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Chongqing | Bin 533 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Chungking | Bin 533 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Colombo | Bin 372 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Dacca | Bin 337 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Damascus | Bin 2294 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Dhaka | Bin 337 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Dili | Bin 227 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Dubai | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Dushanbe | Bin 591 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Famagusta | Bin 2028 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Gaza | Bin 2316 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Harbin | Bin 533 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Hebron | Bin 2344 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Ho_Chi_Minh | Bin 351 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Hong_Kong | Bin 1203 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Hovd | Bin 891 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Irkutsk | Bin 1243 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Istanbul | Bin 1947 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Jakarta | Bin 355 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Jayapura | Bin 221 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Jerusalem | Bin 2288 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Kabul | Bin 208 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Kamchatka | Bin 1166 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Karachi | Bin 379 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Kashgar | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Kathmandu | Bin 212 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Katmandu | Bin 212 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Khandyga | Bin 1271 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Kolkata | Bin 285 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Krasnoyarsk | Bin 1207 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Kuala_Lumpur | Bin 383 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Kuching | Bin 483 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Kuwait | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Macao | Bin 1227 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Macau | Bin 1227 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Magadan | Bin 1222 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Makassar | Bin 254 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Manila | Bin 328 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Muscat | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Nicosia | Bin 2002 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Novokuznetsk | Bin 1165 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Novosibirsk | Bin 1221 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Omsk | Bin 1207 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Oral | Bin 1005 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Phnom_Penh | Bin 199 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Pontianak | Bin 353 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Pyongyang | Bin 237 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Qatar | Bin 199 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Qostanay | Bin 1011 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Qyzylorda | Bin 1025 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Rangoon | Bin 268 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Riyadh | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Saigon | Bin 351 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Sakhalin | Bin 1202 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Samarkand | Bin 577 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Seoul | Bin 617 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Shanghai | Bin 533 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Singapore | Bin 383 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Srednekolymsk | Bin 1208 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Taipei | Bin 761 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Tashkent | Bin 591 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Tbilisi | Bin 1035 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Tehran | Bin 2582 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Tel_Aviv | Bin 2288 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Thimbu | Bin 203 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Thimphu | Bin 203 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Tokyo | Bin 309 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Tomsk | Bin 1221 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Ujung_Pandang | Bin 254 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Ulaanbaatar | Bin 891 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Ulan_Bator | Bin 891 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Urumqi | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Ust-Nera | Bin 1252 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Vientiane | Bin 199 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Vladivostok | Bin 1208 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Yakutsk | Bin 1207 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Yangon | Bin 268 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Yekaterinburg | Bin 1243 -> 0 bytes
 lib/pytz/zoneinfo/Asia/Yerevan | Bin 1151 -> 0 bytes
 lib/pytz/zoneinfo/Atlantic/Azores | Bin 3484 -> 0 bytes
 lib/pytz/zoneinfo/Atlantic/Bermuda | Bin 1978 -> 0 bytes
 lib/pytz/zoneinfo/Atlantic/Canary | Bin 1897 -> 0 bytes
 lib/pytz/zoneinfo/Atlantic/Cape_Verde | Bin 270 -> 0 bytes
 lib/pytz/zoneinfo/Atlantic/Faeroe | Bin 1815 -> 0 bytes
 lib/pytz/zoneinfo/Atlantic/Faroe | Bin 1815 -> 0 bytes
 lib/pytz/zoneinfo/Atlantic/Jan_Mayen | Bin 2228 -> 0 bytes
 lib/pytz/zoneinfo/Atlantic/Madeira | Bin 3475 -> 0 bytes
 lib/pytz/zoneinfo/Atlantic/Reykjavik | Bin 1162 -> 0 bytes
 lib/pytz/zoneinfo/Atlantic/South_Georgia | Bin 164 -> 0 bytes
 lib/pytz/zoneinfo/Atlantic/St_Helena | Bin 148 -> 0 bytes
 lib/pytz/zoneinfo/Atlantic/Stanley | Bin 1214 -> 0 bytes
 lib/pytz/zoneinfo/Australia/ACT | Bin 2204 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Adelaide | Bin 2222 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Brisbane | Bin 433 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Broken_Hill | Bin 2243 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Canberra | Bin 2204 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Currie | Bin 2204 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Darwin | Bin 304 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Eucla | Bin 484 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Hobart | Bin 2316 -> 0 bytes
 lib/pytz/zoneinfo/Australia/LHI | Bin 1860 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Lindeman | Bin 489 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Lord_Howe | Bin 1860 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Melbourne | Bin 2204 -> 0 bytes
 lib/pytz/zoneinfo/Australia/NSW | Bin 2204 -> 0 bytes
 lib/pytz/zoneinfo/Australia/North | Bin 304 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Perth | Bin 460 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Queensland | Bin 433 -> 0 bytes
 lib/pytz/zoneinfo/Australia/South | Bin 2222 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Sydney | Bin 2204 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Tasmania | Bin 2316 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Victoria | Bin 2204 -> 0 bytes
 lib/pytz/zoneinfo/Australia/West | Bin 460 -> 0 bytes
 lib/pytz/zoneinfo/Australia/Yancowinna | Bin 2243 -> 0 bytes
 lib/pytz/zoneinfo/Brazil/Acre | Bin 628 -> 0 bytes
 lib/pytz/zoneinfo/Brazil/DeNoronha | Bin 716 -> 0 bytes
 lib/pytz/zoneinfo/Brazil/East | Bin 1444 -> 0 bytes
 lib/pytz/zoneinfo/Brazil/West | Bin 604 -> 0 bytes
 lib/pytz/zoneinfo/CET | Bin 2094 -> 0 bytes
 lib/pytz/zoneinfo/CST6CDT | Bin 2310 -> 0 bytes
 lib/pytz/zoneinfo/Canada/Atlantic | Bin 3424 -> 0 bytes
 lib/pytz/zoneinfo/Canada/Central | Bin 2868 -> 0 bytes
 lib/pytz/zoneinfo/Canada/Eastern | Bin 3494 -> 0 bytes
 lib/pytz/zoneinfo/Canada/Mountain | Bin 2332 -> 0 bytes
 lib/pytz/zoneinfo/Canada/Newfoundland | Bin 3655 -> 0 bytes
 lib/pytz/zoneinfo/Canada/Pacific | Bin 2892 -> 0 bytes
 lib/pytz/zoneinfo/Canada/Saskatchewan | Bin 980 -> 0 bytes
 lib/pytz/zoneinfo/Canada/Yukon | Bin 2084 -> 0 bytes
 lib/pytz/zoneinfo/Chile/Continental | Bin 2529 -> 0 bytes
 lib/pytz/zoneinfo/Chile/EasterIsland | Bin 2233 -> 0 bytes
 lib/pytz/zoneinfo/Cuba | Bin 2416 -> 0 bytes
 lib/pytz/zoneinfo/EET | Bin 1908 -> 0 bytes
 lib/pytz/zoneinfo/EST | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/EST5EDT | Bin 2310 -> 0 bytes
 lib/pytz/zoneinfo/Egypt | Bin 1955 -> 0 bytes
 lib/pytz/zoneinfo/Eire | Bin 3492 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+0 | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+1 | Bin 116 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+10 | Bin 117 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+11 | Bin 117 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+12 | Bin 117 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+2 | Bin 116 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+3 | Bin 116 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+4 | Bin 116 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+5 | Bin 116 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+6 | Bin 116 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+7 | Bin 116 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+8 | Bin 116 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT+9 | Bin 116 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-0 | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-1 | Bin 117 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-10 | Bin 118 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-11 | Bin 118 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-12 | Bin 118 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-13 | Bin 118 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-14 | Bin 118 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-2 | Bin 117 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-3 | Bin 117 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-4 | Bin 117 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-5 | Bin 117 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-6 | Bin 117 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-7 | Bin 117 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-8 | Bin 117 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT-9 | Bin 117 -> 0 bytes
 lib/pytz/zoneinfo/Etc/GMT0 | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/Etc/Greenwich | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/Etc/UCT | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/Etc/UTC | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/Etc/Universal | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/Etc/Zulu | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Amsterdam | Bin 2910 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Andorra | Bin 1742 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Astrakhan | Bin 1165 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Athens | Bin 2262 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Belfast | Bin 3648 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Belgrade | Bin 1920 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Berlin | Bin 2298 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Bratislava | Bin 2301 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Brussels | Bin 2933 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Bucharest | Bin 2184 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Budapest | Bin 2368 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Busingen | Bin 1909 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Chisinau | Bin 2390 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Copenhagen | Bin 2137 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Dublin | Bin 3492 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Gibraltar | Bin 3052 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Guernsey | Bin 3648 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Helsinki | Bin 1900 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Isle_of_Man | Bin 3648 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Istanbul | Bin 1947 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Jersey | Bin 3648 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Kaliningrad | Bin 1493 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Kiev | Bin 2088 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Kirov | Bin 1153 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Lisbon | Bin 3469 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Ljubljana | Bin 1920 -> 0 bytes
 lib/pytz/zoneinfo/Europe/London | Bin 3648 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Luxembourg | Bin 2946 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Madrid | Bin 2614 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Malta | Bin 2620 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Mariehamn | Bin 1900 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Minsk | Bin 1321 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Monaco | Bin 2944 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Moscow | Bin 1535 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Nicosia | Bin 2002 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Oslo | Bin 2228 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Paris | Bin 2962 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Podgorica | Bin 1920 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Prague | Bin 2301 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Riga | Bin 2198 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Rome | Bin 2641 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Samara | Bin 1215 -> 0 bytes
 lib/pytz/zoneinfo/Europe/San_Marino | Bin 2641 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Sarajevo | Bin 1920 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Saratov | Bin 1183 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Simferopol | Bin 1453 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Skopje | Bin 1920 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Sofia | Bin 2077 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Stockholm | Bin 1909 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Tallinn | Bin 2148 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Tirane | Bin 2084 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Tiraspol | Bin 2390 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Ulyanovsk | Bin 1267 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Uzhgorod | Bin 2050 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Vaduz | Bin 1909 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Vatican | Bin 2641 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Vienna | Bin 2200 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Vilnius | Bin 2162 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Volgograd | Bin 1165 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Warsaw | Bin 2654 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Zagreb | Bin 1920 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Zaporozhye | Bin 2106 -> 0 bytes
 lib/pytz/zoneinfo/Europe/Zurich | Bin 1909 -> 0 bytes
 lib/pytz/zoneinfo/Factory | Bin 116 -> 0 bytes
 lib/pytz/zoneinfo/GB | Bin 3648 -> 0 bytes
 lib/pytz/zoneinfo/GB-Eire | Bin 3648 -> 0 bytes
 lib/pytz/zoneinfo/GMT | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/GMT+0 | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/GMT-0 | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/GMT0 | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/Greenwich | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/HST | Bin 115 -> 0 bytes
 lib/pytz/zoneinfo/Hongkong | Bin 1203 -> 0 bytes
 lib/pytz/zoneinfo/Iceland | Bin 1162 -> 0 bytes
 lib/pytz/zoneinfo/Indian/Antananarivo | Bin 251 -> 0 bytes
 lib/pytz/zoneinfo/Indian/Chagos | Bin 199 -> 0 bytes
 lib/pytz/zoneinfo/Indian/Christmas | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Indian/Cocos | Bin 174 -> 0 bytes
 lib/pytz/zoneinfo/Indian/Comoro | Bin 251 -> 0 bytes
 lib/pytz/zoneinfo/Indian/Kerguelen | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Indian/Mahe | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Indian/Maldives | Bin 199 -> 0 bytes
 lib/pytz/zoneinfo/Indian/Mauritius | Bin 241 -> 0 bytes
 lib/pytz/zoneinfo/Indian/Mayotte | Bin 251 -> 0 bytes
 lib/pytz/zoneinfo/Indian/Reunion | Bin 165 -> 0 bytes
 lib/pytz/zoneinfo/Iran | Bin 2582 -> 0 bytes
 lib/pytz/zoneinfo/Israel | Bin 2288 -> 0 bytes
 lib/pytz/zoneinfo/Jamaica | Bin 482 -> 0 bytes
 lib/pytz/zoneinfo/Japan | Bin 309 -> 0 bytes
 lib/pytz/zoneinfo/Kwajalein | Bin 316 -> 0 bytes
 lib/pytz/zoneinfo/Libya | Bin 625 -> 0 bytes
 lib/pytz/zoneinfo/MET | Bin 2094 -> 0 bytes
 lib/pytz/zoneinfo/MST | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/MST7MDT | Bin 2310
-> 0 bytes lib/pytz/zoneinfo/Mexico/BajaNorte | Bin 2342 -> 0 bytes lib/pytz/zoneinfo/Mexico/BajaSur | Bin 1526 -> 0 bytes lib/pytz/zoneinfo/Mexico/General | Bin 1584 -> 0 bytes lib/pytz/zoneinfo/NZ | Bin 2437 -> 0 bytes lib/pytz/zoneinfo/NZ-CHAT | Bin 2068 -> 0 bytes lib/pytz/zoneinfo/Navajo | Bin 2444 -> 0 bytes lib/pytz/zoneinfo/PRC | Bin 533 -> 0 bytes lib/pytz/zoneinfo/PST8PDT | Bin 2310 -> 0 bytes lib/pytz/zoneinfo/Pacific/Apia | Bin 1097 -> 0 bytes lib/pytz/zoneinfo/Pacific/Auckland | Bin 2437 -> 0 bytes lib/pytz/zoneinfo/Pacific/Bougainville | Bin 268 -> 0 bytes lib/pytz/zoneinfo/Pacific/Chatham | Bin 2068 -> 0 bytes lib/pytz/zoneinfo/Pacific/Chuuk | Bin 269 -> 0 bytes lib/pytz/zoneinfo/Pacific/Easter | Bin 2233 -> 0 bytes lib/pytz/zoneinfo/Pacific/Efate | Bin 466 -> 0 bytes lib/pytz/zoneinfo/Pacific/Enderbury | Bin 234 -> 0 bytes lib/pytz/zoneinfo/Pacific/Fakaofo | Bin 200 -> 0 bytes lib/pytz/zoneinfo/Pacific/Fiji | Bin 1077 -> 0 bytes lib/pytz/zoneinfo/Pacific/Funafuti | Bin 166 -> 0 bytes lib/pytz/zoneinfo/Pacific/Galapagos | Bin 238 -> 0 bytes lib/pytz/zoneinfo/Pacific/Gambier | Bin 164 -> 0 bytes lib/pytz/zoneinfo/Pacific/Guadalcanal | Bin 166 -> 0 bytes lib/pytz/zoneinfo/Pacific/Guam | Bin 494 -> 0 bytes lib/pytz/zoneinfo/Pacific/Honolulu | Bin 329 -> 0 bytes lib/pytz/zoneinfo/Pacific/Johnston | Bin 329 -> 0 bytes lib/pytz/zoneinfo/Pacific/Kiritimati | Bin 238 -> 0 bytes lib/pytz/zoneinfo/Pacific/Kosrae | Bin 351 -> 0 bytes lib/pytz/zoneinfo/Pacific/Kwajalein | Bin 316 -> 0 bytes lib/pytz/zoneinfo/Pacific/Majuro | Bin 310 -> 0 bytes lib/pytz/zoneinfo/Pacific/Marquesas | Bin 173 -> 0 bytes lib/pytz/zoneinfo/Pacific/Midway | Bin 175 -> 0 bytes lib/pytz/zoneinfo/Pacific/Nauru | Bin 252 -> 0 bytes lib/pytz/zoneinfo/Pacific/Niue | Bin 241 -> 0 bytes lib/pytz/zoneinfo/Pacific/Norfolk | Bin 880 -> 0 bytes lib/pytz/zoneinfo/Pacific/Noumea | Bin 304 -> 0 bytes lib/pytz/zoneinfo/Pacific/Pago_Pago | Bin 175 -> 0 bytes lib/pytz/zoneinfo/Pacific/Palau | Bin 180 -> 0 bytes lib/pytz/zoneinfo/Pacific/Pitcairn | Bin 202 -> 0 bytes lib/pytz/zoneinfo/Pacific/Pohnpei | Bin 303 -> 0 bytes lib/pytz/zoneinfo/Pacific/Ponape | Bin 303 -> 0 bytes lib/pytz/zoneinfo/Pacific/Port_Moresby | Bin 186 -> 0 bytes lib/pytz/zoneinfo/Pacific/Rarotonga | Bin 577 -> 0 bytes lib/pytz/zoneinfo/Pacific/Saipan | Bin 494 -> 0 bytes lib/pytz/zoneinfo/Pacific/Samoa | Bin 175 -> 0 bytes lib/pytz/zoneinfo/Pacific/Tahiti | Bin 165 -> 0 bytes lib/pytz/zoneinfo/Pacific/Tarawa | Bin 166 -> 0 bytes lib/pytz/zoneinfo/Pacific/Tongatapu | Bin 372 -> 0 bytes lib/pytz/zoneinfo/Pacific/Truk | Bin 269 -> 0 bytes lib/pytz/zoneinfo/Pacific/Wake | Bin 166 -> 0 bytes lib/pytz/zoneinfo/Pacific/Wallis | Bin 166 -> 0 bytes lib/pytz/zoneinfo/Pacific/Yap | Bin 269 -> 0 bytes lib/pytz/zoneinfo/Poland | Bin 2654 -> 0 bytes lib/pytz/zoneinfo/Portugal | Bin 3469 -> 0 bytes lib/pytz/zoneinfo/ROC | Bin 761 -> 0 bytes lib/pytz/zoneinfo/ROK | Bin 617 -> 0 bytes lib/pytz/zoneinfo/Singapore | Bin 383 -> 0 bytes lib/pytz/zoneinfo/Turkey | Bin 1947 -> 0 bytes lib/pytz/zoneinfo/UCT | Bin 114 -> 0 bytes lib/pytz/zoneinfo/US/Alaska | Bin 2371 -> 0 bytes lib/pytz/zoneinfo/US/Aleutian | Bin 2356 -> 0 bytes lib/pytz/zoneinfo/US/Arizona | Bin 328 -> 0 bytes lib/pytz/zoneinfo/US/Central | Bin 3576 -> 0 bytes lib/pytz/zoneinfo/US/East-Indiana | Bin 1666 -> 0 bytes lib/pytz/zoneinfo/US/Eastern | Bin 3536 -> 0 bytes lib/pytz/zoneinfo/US/Hawaii | Bin 329 -> 0 bytes lib/pytz/zoneinfo/US/Indiana-Starke | Bin 2428 -> 0 bytes lib/pytz/zoneinfo/US/Michigan | Bin 2230 -> 0 
 lib/pytz/zoneinfo/US/Mountain | Bin 2444 -> 0 bytes
 lib/pytz/zoneinfo/US/Pacific | Bin 2836 -> 0 bytes
 lib/pytz/zoneinfo/US/Samoa | Bin 175 -> 0 bytes
 lib/pytz/zoneinfo/UTC | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/Universal | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/W-SU | Bin 1535 -> 0 bytes
 lib/pytz/zoneinfo/WET | Bin 1905 -> 0 bytes
 lib/pytz/zoneinfo/Zulu | Bin 114 -> 0 bytes
 lib/pytz/zoneinfo/iso3166.tab | 274 -
 lib/pytz/zoneinfo/leapseconds | 72 -
 lib/pytz/zoneinfo/posixrules | Bin 3536 -> 0 bytes
 lib/pytz/zoneinfo/tzdata.zi | 4405 ----------
 lib/pytz/zoneinfo/zone.tab | 452 -
 lib/pytz/zoneinfo/zone1970.tab | 384 -
 lib/requests/__init__.py | 121 -
 lib/requests/__version__.py | 14 -
 lib/requests/_internal_utils.py | 42 -
 lib/requests/adapters.py | 525 --
 lib/requests/api.py | 152 -
 lib/requests/auth.py | 293 -
 lib/requests/certs.py | 18 -
 lib/requests/compat.py | 69 -
 lib/requests/cookies.py | 542 --
 lib/requests/exceptions.py | 122 -
 lib/requests/help.py | 120 -
 lib/requests/hooks.py | 34 -
 lib/requests/models.py | 948 --
 lib/requests/packages.py | 14 -
 lib/requests/sessions.py | 737 --
 lib/requests/status_codes.py | 91 -
 lib/requests/structures.py | 105 -
 lib/requests/utils.py | 904 --
 lib/requests_oauthlib/__init__.py | 22 -
 .../compliance_fixes/__init__.py | 7 -
 .../compliance_fixes/douban.py | 18 -
 .../compliance_fixes/facebook.py | 33 -
 .../compliance_fixes/linkedin.py | 24 -
 .../compliance_fixes/mailchimp.py | 22 -
 .../compliance_fixes/slack.py | 37 -
 .../compliance_fixes/weibo.py | 17 -
 lib/requests_oauthlib/oauth1_auth.py | 95 -
 lib/requests_oauthlib/oauth1_session.py | 378 -
 lib/requests_oauthlib/oauth2_auth.py | 36 -
 lib/requests_oauthlib/oauth2_session.py | 359 -
 lib/rumps/__init__.py | 26 -
 lib/rumps/compat.py | 17 -
 lib/rumps/notifications.py | 10 -
 lib/rumps/packages/__init__.py | 0
 lib/rumps/packages/ordereddict.py | 262 -
 lib/rumps/rumps.py | 1329 ---
 lib/rumps/utils.py | 27 -
 lib/sgmllib3.py | 547 --
 lib/simplejson/__init__.py | 584 --
 lib/simplejson/_speedups.c | 3384 --------
 lib/simplejson/compat.py | 34 -
 lib/simplejson/decoder.py | 400 -
 lib/simplejson/encoder.py | 722 --
 lib/simplejson/errors.py | 53 -
 lib/simplejson/ordered_dict.py | 103 -
 lib/simplejson/raw_json.py | 9 -
 lib/simplejson/scanner.py | 85 -
 lib/simplejson/tests/__init__.py | 74 -
 lib/simplejson/tests/test_bigint_as_string.py | 67 -
 .../tests/test_bitsize_int_as_string.py | 73 -
 lib/simplejson/tests/test_check_circular.py | 30 -
 lib/simplejson/tests/test_decimal.py | 71 -
 lib/simplejson/tests/test_decode.py | 119 -
 lib/simplejson/tests/test_default.py | 9 -
 lib/simplejson/tests/test_dump.py | 249 -
 .../tests/test_encode_basestring_ascii.py | 47 -
 lib/simplejson/tests/test_encode_for_html.py | 38 -
 lib/simplejson/tests/test_errors.py | 68 -
 lib/simplejson/tests/test_fail.py | 176 -
 lib/simplejson/tests/test_float.py | 35 -
 lib/simplejson/tests/test_for_json.py | 97 -
 lib/simplejson/tests/test_indent.py | 86 -
 lib/simplejson/tests/test_item_sort_key.py | 27 -
 lib/simplejson/tests/test_iterable.py | 31 -
 lib/simplejson/tests/test_namedtuple.py | 122 -
 lib/simplejson/tests/test_pass1.py | 71 -
 lib/simplejson/tests/test_pass2.py | 14 -
 lib/simplejson/tests/test_pass3.py | 20 -
 lib/simplejson/tests/test_raw_json.py | 47 -
 lib/simplejson/tests/test_recursion.py | 67 -
 lib/simplejson/tests/test_scanstring.py | 196 -
 lib/simplejson/tests/test_separators.py | 42 -
 lib/simplejson/tests/test_speedups.py | 114 -
 lib/simplejson/tests/test_str_subclass.py | 21 -
 lib/simplejson/tests/test_subclass.py | 37 -
 lib/simplejson/tests/test_tool.py | 114 -
 lib/simplejson/tests/test_tuple.py | 47 -
 lib/simplejson/tests/test_unicode.py | 154 -
 lib/simplejson/tool.py | 42 -
 lib/six.py | 963 ---
 lib/soupsieve/__init__.py | 127 -
 lib/soupsieve/__meta__.py | 190 -
 lib/soupsieve/css_match.py | 1542 ----
 lib/soupsieve/css_parser.py | 1213 ---
 lib/soupsieve/css_types.py | 345 -
 lib/soupsieve/util.py | 170 -
 lib/systray/__init__.py | 2 -
 lib/systray/traybar.py | 322 -
 lib/systray/win32_adapter.py | 199 -
 lib/tempora/__init__.py | 506 --
 lib/tempora/schedule.py | 202 -
 lib/tempora/tests/test_schedule.py | 118 -
 lib/tempora/timing.py | 258 -
 lib/tempora/utc.py | 39 -
 lib/tokenize_rt.py | 217 -
 lib/twitter/__init__.py | 56 -
 lib/twitter/_file_cache.py | 102 -
 lib/twitter/api.py | 5015 -----------
 lib/twitter/error.py | 25 -
 lib/twitter/models.py | 539 --
 lib/twitter/parse_tweet.py | 100 -
 lib/twitter/ratelimit.py | 191 -
 lib/twitter/twitter_utils.py | 314 -
 lib/tzlocal/CHANGES.txt | 189 -
 lib/tzlocal/LICENSE.txt | 19 -
 lib/tzlocal/__init__.py | 5 -
 lib/tzlocal/unix.py | 174 -
 lib/tzlocal/utils.py | 46 -
 lib/tzlocal/win32.py | 104 -
 lib/tzlocal/windows_tz.py | 697 --
 lib/urllib3/__init__.py | 97 -
 lib/urllib3/_collections.py | 319 -
 lib/urllib3/connection.py | 373 -
 lib/urllib3/connectionpool.py | 905 --
 lib/urllib3/contrib/__init__.py | 0
 .../contrib/_securetransport/__init__.py | 0
 .../contrib/_securetransport/bindings.py | 593 --
 .../contrib/_securetransport/low_level.py | 343 -
 lib/urllib3/contrib/appengine.py | 296 -
 lib/urllib3/contrib/ntlmpool.py | 112 -
 lib/urllib3/contrib/pyopenssl.py | 455 -
 lib/urllib3/contrib/securetransport.py | 810 --
 lib/urllib3/contrib/socks.py | 188 -
 lib/urllib3/exceptions.py | 246 -
 lib/urllib3/fields.py | 178 -
 lib/urllib3/filepost.py | 94 -
 lib/urllib3/packages/__init__.py | 5 -
 lib/urllib3/packages/backports/__init__.py | 0
 lib/urllib3/packages/backports/makefile.py | 53 -
 lib/urllib3/packages/ordered_dict.py | 259 -
 lib/urllib3/packages/six.py | 868 --
 .../packages/ssl_match_hostname/__init__.py | 19 -
 .../ssl_match_hostname/_implementation.py | 157 -
 lib/urllib3/poolmanager.py | 440 -
 lib/urllib3/request.py | 148 -
 lib/urllib3/response.py | 626 --
 lib/urllib3/util/__init__.py | 54 -
 lib/urllib3/util/connection.py | 130 -
 lib/urllib3/util/request.py | 118 -
 lib/urllib3/util/response.py | 81 -
 lib/urllib3/util/retry.py | 401 -
 lib/urllib3/util/selectors.py | 581 --
 lib/urllib3/util/ssl_.py | 341 -
 lib/urllib3/util/timeout.py | 242 -
 lib/urllib3/util/url.py | 230 -
 lib/urllib3/util/wait.py | 40 -
 lib/websocket/__init__.py | 29 -
 lib/websocket/_abnf.py | 447 -
 lib/websocket/_app.py | 352 -
 lib/websocket/_cookiejar.py | 52 -
 lib/websocket/_core.py | 516 --
 lib/websocket/_exceptions.py | 88 -
 lib/websocket/_handshake.py | 211 -
 lib/websocket/_http.py | 330 -
 lib/websocket/_logging.py | 84 -
 lib/websocket/_socket.py | 166 -
 lib/websocket/_ssl_compat.py | 54 -
 lib/websocket/_url.py | 164 -
 lib/websocket/_utils.py | 111 -
 lib/websocket/tests/__init__.py | 0
 lib/websocket/tests/data/header01.txt | 6 -
 lib/websocket/tests/data/header02.txt | 6 -
 lib/websocket/tests/test_cookiejar.py | 98 -
 lib/websocket/tests/test_websocket.py | 665 --
 lib/xmltodict.py | 365 -
 lib/zc/__init__.py | 1 -
 lib/zc/lockfile/README.txt | 70 -
 lib/zc/lockfile/__init__.py | 125 -
 lib/zc/lockfile/tests.py | 201 -
 pylintrc | 284 -
 snap/snapcraft.yaml | 66 -
 start.bat | 1 -
 start.sh | 28 -
 1680 files changed, 1603 insertions(+), 290357 deletions(-)
 delete mode 100644 .dockerignore
 delete mode 100644 .github/FUNDING.yml
 delete mode 100644 .github/pull_request_template.md
 delete mode 100644 .github/workflows/publish-docker.yml
 delete mode 100644 .github/workflows/publish-installers.yml
 delete mode 100644 .github/workflows/publish-snap.yml
 delete mode 100644 .github/workflows/pull-requests.yml
 delete mode 100644 Dockerfile
 rename Tautulli.py => JellyPy.py (59%)
 delete mode 100755 PlexPy.py
 rename {plexpy => jellypy}/__init__.py (97%)
 rename {plexpy => jellypy}/activity_handler.py (89%)
 rename {plexpy => jellypy}/activity_pinger.py (85%)
 rename {plexpy => jellypy}/activity_processor.py (97%)
 rename {plexpy => jellypy}/api2.py (92%)
 rename {plexpy => jellypy}/classes.py (96%)
 rename {plexpy => jellypy}/common.py (99%)
 rename {plexpy => jellypy}/config.py (97%)
 rename {plexpy => jellypy}/database.py (96%)
 rename {plexpy => jellypy}/datafactory.py (98%)
 rename {plexpy => jellypy}/datatables.py (98%)
 rename {plexpy => jellypy}/exceptions.py (100%)
 rename {plexpy => jellypy}/exporter.py (99%)
 rename {plexpy => jellypy}/graphs.py (98%)
 rename {plexpy => jellypy}/helpers.py (94%)
 rename {plexpy => jellypy}/http_handler.py (91%)
 rename {plexpy => jellypy}/libraries.py (94%)
 rename {plexpy => jellypy}/lock.py (98%)
 rename {plexpy => jellypy}/log_reader.py (90%)
 rename {plexpy => jellypy}/logger.py (98%)
 rename {plexpy => jellypy}/macos.py (81%)
 rename {plexpy => jellypy}/mobile_app.py (97%)
 rename {plexpy => jellypy}/newsletter_handler.py (96%)
 rename {plexpy => jellypy}/newsletters.py (95%)
 rename {plexpy => jellypy}/notification_handler.py (95%)
 rename {plexpy => jellypy}/notifiers.py (98%)
 rename {plexpy => jellypy}/plex.py (94%)
 rename {plexpy => jellypy}/plexivity_import.py (99%)
 rename {plexpy => jellypy}/plextv.py (94%)
 rename {plexpy => jellypy}/plexwatch_import.py (99%)
 rename {plexpy => jellypy}/pmsconnect.py (99%)
 rename {plexpy => jellypy}/request.py (97%)
 rename {plexpy => jellypy}/session.py (98%)
 rename {plexpy => jellypy}/users.py (98%)
 rename {plexpy => jellypy}/version.py (100%)
 rename {plexpy => jellypy}/versioncheck.py (70%)
 rename {plexpy => jellypy}/web_socket.py (79%)
 rename {plexpy => jellypy}/webauth.py (86%)
 rename {plexpy => jellypy}/webserve.py (92%)
 rename {plexpy => jellypy}/webstart.py (85%)
 rename {plexpy => jellypy}/windows.py (83%)
 delete mode 100644 lib/IPy.py
 delete mode 100644 lib/UniversalAnalytics/HTTPLog.py
 delete mode 100644 lib/UniversalAnalytics/Tracker.py
 delete mode 100644 lib/UniversalAnalytics/__init__.py
 delete mode 100644 lib/appdirs.py
 delete mode 100644 lib/apscheduler/__init__.py
 delete mode 100644 lib/apscheduler/events.py
 delete mode 100644 lib/apscheduler/executors/__init__.py
 delete mode 100644 lib/apscheduler/executors/asyncio.py
 delete mode 100644 lib/apscheduler/executors/base.py
 delete mode 100644 lib/apscheduler/executors/base_py3.py
 delete mode 100644 lib/apscheduler/executors/debug.py
 delete mode 100644 lib/apscheduler/executors/gevent.py
 delete mode 100644 lib/apscheduler/executors/pool.py
 delete mode 100644 lib/apscheduler/executors/tornado.py
 delete mode 100644 lib/apscheduler/executors/twisted.py
 delete mode 100644 lib/apscheduler/job.py
 delete mode 100644 lib/apscheduler/jobstores/__init__.py
 delete mode 100644 lib/apscheduler/jobstores/base.py
 delete mode 100644 lib/apscheduler/jobstores/memory.py
 delete mode 100644 lib/apscheduler/jobstores/mongodb.py
 delete mode 100644 lib/apscheduler/jobstores/redis.py
 delete mode 100644 lib/apscheduler/jobstores/rethinkdb.py
 delete mode 100644 lib/apscheduler/jobstores/sqlalchemy.py
 delete mode 100644 lib/apscheduler/jobstores/zookeeper.py
 delete mode 100644 lib/apscheduler/schedulers/__init__.py
 delete mode 100644 lib/apscheduler/schedulers/asyncio.py
 delete mode 100644 lib/apscheduler/schedulers/background.py
 delete mode 100644 lib/apscheduler/schedulers/base.py
 delete mode 100644 lib/apscheduler/schedulers/blocking.py
 delete mode 100644 lib/apscheduler/schedulers/gevent.py
 delete mode 100644 lib/apscheduler/schedulers/qt.py
 delete mode 100644 lib/apscheduler/schedulers/tornado.py
 delete mode 100644 lib/apscheduler/schedulers/twisted.py
 delete mode 100644 lib/apscheduler/triggers/__init__.py
 delete mode 100644 lib/apscheduler/triggers/base.py
 delete mode 100644 lib/apscheduler/triggers/combining.py
 delete mode 100644 lib/apscheduler/triggers/cron/__init__.py
 delete mode 100644 lib/apscheduler/triggers/cron/expressions.py
 delete mode 100644 lib/apscheduler/triggers/cron/fields.py
 delete mode 100644 lib/apscheduler/triggers/date.py
 delete mode 100644 lib/apscheduler/triggers/interval.py
 delete mode 100644 lib/apscheduler/util.py
 delete mode 100644 lib/argparse.py
 delete mode 100644 lib/arrow/__init__.py
 delete mode 100644 lib/arrow/api.py
 delete mode 100644 lib/arrow/arrow.py
 delete mode 100644 lib/arrow/factory.py
 delete mode 100644 lib/arrow/formatter.py
 delete mode 100644 lib/arrow/locales.py
 delete mode 100644 lib/arrow/parser.py
 delete mode 100644 lib/arrow/util.py
 delete mode 100644 lib/backports/__init__.py
 delete mode 100644 lib/backports/csv.py
 delete mode 100644 lib/backports/functools_lru_cache.py
 delete mode 100644 lib/bleach/__init__.py
 delete mode 100644 lib/bleach/callbacks.py
 delete mode 100644 lib/bleach/encoding.py
 delete mode 100644 lib/bleach/sanitizer.py
 delete mode 100644 lib/bs4/__init__.py
 delete mode 100644 lib/bs4/builder/__init__.py
 delete mode 100644 lib/bs4/builder/_html5lib.py
 delete mode 100644 lib/bs4/builder/_htmlparser.py
 delete mode 100644 lib/bs4/builder/_lxml.py
 delete mode 100644 lib/bs4/check_block.py
 delete mode 100644 lib/bs4/dammit.py
 delete mode 100644 lib/bs4/diagnose.py
 delete mode 100644 lib/bs4/element.py
 delete mode 100644 lib/bs4/formatter.py
 delete mode 100644 lib/bs4/testing.py
 delete mode 100644 lib/bs4/tests/__init__.py
 delete mode 100644 lib/bs4/tests/test_builder_registry.py
 delete mode 100644 lib/bs4/tests/test_docs.py
 delete mode 100644 lib/bs4/tests/test_html5lib.py
 delete mode 100644 lib/bs4/tests/test_htmlparser.py
 delete mode 100644 lib/bs4/tests/test_lxml.py
 delete mode 100644 lib/bs4/tests/test_soup.py
 delete mode 100644 lib/bs4/tests/test_tree.py
 delete mode 100644 lib/certgen.py
 delete mode 100644 lib/certifi/__init__.py
 delete mode 100644 lib/certifi/__main__.py
 delete mode 100644 lib/certifi/cacert.pem
 delete mode 100644 lib/certifi/core.py
 delete mode 100644 lib/chardet/__init__.py
 delete mode 100644 lib/chardet/big5freq.py
 delete mode 100644 lib/chardet/big5prober.py
 delete mode 100644 lib/chardet/chardistribution.py
 delete mode 100644 lib/chardet/charsetgroupprober.py
 delete mode 100644 lib/chardet/charsetprober.py
 delete mode 100644 lib/chardet/cli/__init__.py
 delete mode 100644 lib/chardet/cli/chardetect.py
 delete mode 100644 lib/chardet/codingstatemachine.py
 delete mode 100644 lib/chardet/compat.py
 delete mode 100644 lib/chardet/cp949prober.py
 delete mode 100644 lib/chardet/enums.py
 delete mode 100644 lib/chardet/escprober.py
 delete mode 100644 lib/chardet/escsm.py
 delete mode 100644 lib/chardet/eucjpprober.py
 delete mode 100644 lib/chardet/euckrfreq.py
 delete mode 100644 lib/chardet/euckrprober.py
 delete mode 100644 lib/chardet/euctwfreq.py
 delete mode 100644 lib/chardet/euctwprober.py
 delete mode 100644 lib/chardet/gb2312freq.py
 delete mode 100644 lib/chardet/gb2312prober.py
 delete mode 100644 lib/chardet/hebrewprober.py
 delete mode 100644 lib/chardet/jisfreq.py
 delete mode 100644 lib/chardet/jpcntx.py
 delete mode 100644 lib/chardet/langbulgarianmodel.py
 delete mode 100644 lib/chardet/langcyrillicmodel.py
 delete mode 100644 lib/chardet/langgreekmodel.py
 delete mode 100644 lib/chardet/langhebrewmodel.py
 delete mode 100644 lib/chardet/langhungarianmodel.py
 delete mode 100644 lib/chardet/langthaimodel.py
 delete mode 100644 lib/chardet/langturkishmodel.py
 delete mode 100644 lib/chardet/latin1prober.py
 delete mode 100644 lib/chardet/mbcharsetprober.py
 delete mode 100644 lib/chardet/mbcsgroupprober.py
 delete mode 100644 lib/chardet/mbcssm.py
 delete mode 100644 lib/chardet/sbcharsetprober.py
 delete mode 100644 lib/chardet/sbcsgroupprober.py
 delete mode 100644 lib/chardet/sjisprober.py
 delete mode 100644 lib/chardet/universaldetector.py
 delete mode 100644 lib/chardet/utf8prober.py
 delete mode 100644 lib/chardet/version.py
 delete mode 100644 lib/cheroot/__init__.py
 delete mode 100644 lib/cheroot/__main__.py
 delete mode 100644 lib/cheroot/_compat.py
 delete mode 100644 lib/cheroot/cli.py
 delete mode 100644 lib/cheroot/connections.py
 delete mode 100644 lib/cheroot/errors.py
 delete mode 100644 lib/cheroot/makefile.py
 delete mode 100644 lib/cheroot/server.py
 delete mode 100644 lib/cheroot/ssl/__init__.py
 delete mode 100644 lib/cheroot/ssl/builtin.py
 delete mode 100644 lib/cheroot/ssl/pyopenssl.py
 delete mode 100644 lib/cheroot/test/__init__.py
 delete mode 100644 lib/cheroot/test/conftest.py
 delete mode 100644 lib/cheroot/test/helper.py
 delete mode 100644 lib/cheroot/test/test__compat.py
 delete mode 100644 lib/cheroot/test/test_conn.py
 delete mode 100644 lib/cheroot/test/test_core.py
 delete mode 100644 lib/cheroot/test/test_dispatch.py
 delete mode 100644 lib/cheroot/test/test_errors.py
 delete mode 100644 lib/cheroot/test/test_makefile.py
 delete mode 100644 lib/cheroot/test/test_server.py
 delete mode 100644 lib/cheroot/test/test_ssl.py
 delete mode 100644 lib/cheroot/test/webtest.py
 delete mode 100644 lib/cheroot/testing.py
 delete mode 100644 lib/cheroot/workers/__init__.py
 delete mode 100644 lib/cheroot/workers/threadpool.py
 delete mode 100644 lib/cheroot/wsgi.py
 delete mode 100644 lib/cherrypy/__init__.py
 delete mode 100644 lib/cherrypy/__main__.py
 delete mode 100644 lib/cherrypy/_cpchecker.py
 delete mode 100644 lib/cherrypy/_cpcompat.py
 delete mode 100644 lib/cherrypy/_cpconfig.py
 delete mode 100644 lib/cherrypy/_cpdispatch.py
 delete mode 100644 lib/cherrypy/_cperror.py
 delete mode 100644 lib/cherrypy/_cplogging.py
 delete mode 100644 lib/cherrypy/_cpmodpy.py
 delete mode 100644 lib/cherrypy/_cpnative_server.py
 delete mode 100644 lib/cherrypy/_cpreqbody.py
 delete mode 100644 lib/cherrypy/_cprequest.py
 delete mode 100644 lib/cherrypy/_cpserver.py
 delete mode 100644 lib/cherrypy/_cptools.py
 delete mode 100644 lib/cherrypy/_cptree.py
 delete mode 100644 lib/cherrypy/_cpwsgi.py
 delete mode 100644 lib/cherrypy/_cpwsgi_server.py
 delete mode 100644 lib/cherrypy/_helper.py
 delete mode 100644 lib/cherrypy/daemon.py
 delete mode 100644 lib/cherrypy/favicon.ico
 delete mode 100644 lib/cherrypy/lib/__init__.py
 delete mode 100644 lib/cherrypy/lib/auth_basic.py
 delete mode 100644 lib/cherrypy/lib/auth_digest.py
 delete mode 100644 lib/cherrypy/lib/caching.py
 delete mode 100644 lib/cherrypy/lib/covercp.py
 delete mode 100644 lib/cherrypy/lib/cpstats.py
 delete mode 100644 lib/cherrypy/lib/cptools.py
 delete mode 100644 lib/cherrypy/lib/encoding.py
 delete mode 100644 lib/cherrypy/lib/gctools.py
 delete mode 100644 lib/cherrypy/lib/httputil.py
 delete mode 100644 lib/cherrypy/lib/jsontools.py
 delete mode 100644 lib/cherrypy/lib/locking.py
 delete mode 100644 lib/cherrypy/lib/profiler.py
 delete mode 100644 lib/cherrypy/lib/reprconf.py
 delete mode 100644 lib/cherrypy/lib/sessions.py
 delete mode 100644 lib/cherrypy/lib/static.py
 delete mode 100644 lib/cherrypy/lib/xmlrpcutil.py
 delete mode 100644 lib/cherrypy/process/__init__.py
 delete mode 100644 lib/cherrypy/process/plugins.py
 delete mode 100644 lib/cherrypy/process/servers.py
 delete mode 100644 lib/cherrypy/process/win32.py
 delete mode 100644 lib/cherrypy/process/wspbus.py
 delete mode 100644 lib/cherrypy/scaffold/__init__.py
 delete mode 100644 lib/cherrypy/scaffold/apache-fcgi.conf
 delete mode 100644 lib/cherrypy/scaffold/example.conf
 delete mode 100644 lib/cherrypy/scaffold/site.conf
 delete mode 100644 lib/cherrypy/scaffold/static/made_with_cherrypy_small.png
 delete mode 100644 lib/cherrypy/test/__init__.py
 delete mode 100644 lib/cherrypy/test/_test_decorators.py
 delete mode 100644 lib/cherrypy/test/_test_states_demo.py
 delete mode 100644 lib/cherrypy/test/benchmark.py
 delete mode 100644 lib/cherrypy/test/checkerdemo.py
 delete mode 100644 lib/cherrypy/test/fastcgi.conf
 delete mode 100644 lib/cherrypy/test/fcgi.conf
 delete mode 100644 lib/cherrypy/test/helper.py
 delete mode 100644 lib/cherrypy/test/logtest.py
 delete mode 100644 lib/cherrypy/test/modfastcgi.py
 delete mode 100644 lib/cherrypy/test/modfcgid.py
 delete mode 100644 lib/cherrypy/test/modpy.py
 delete mode 100644 lib/cherrypy/test/modwsgi.py
 delete mode 100644 lib/cherrypy/test/sessiondemo.py
 delete mode 100644 lib/cherrypy/test/static/404.html
 delete mode 100644 lib/cherrypy/test/static/dirback.jpg
 delete mode 100644 lib/cherrypy/test/static/index.html
 delete mode 100644 lib/cherrypy/test/style.css
 delete mode 100644 lib/cherrypy/test/test.pem
 delete mode 100644 lib/cherrypy/test/test_auth_basic.py
 delete mode 100644 lib/cherrypy/test/test_auth_digest.py
 delete mode 100644 lib/cherrypy/test/test_bus.py
 delete mode 100644 lib/cherrypy/test/test_caching.py
 delete mode 100644 lib/cherrypy/test/test_compat.py
 delete mode 100644 lib/cherrypy/test/test_config.py
 delete mode 100644 lib/cherrypy/test/test_config_server.py
 delete mode 100644 lib/cherrypy/test/test_conn.py
 delete mode 100644 lib/cherrypy/test/test_core.py
 delete mode 100644 lib/cherrypy/test/test_dynamicobjectmapping.py
 delete mode 100644 lib/cherrypy/test/test_encoding.py
 delete mode 100644 lib/cherrypy/test/test_etags.py
 delete mode 100644 lib/cherrypy/test/test_http.py
 delete mode 100644 lib/cherrypy/test/test_httputil.py
 delete mode 100644 lib/cherrypy/test/test_iterator.py
 delete mode 100644 lib/cherrypy/test/test_json.py
 delete mode 100644 lib/cherrypy/test/test_logging.py
 delete mode 100644 lib/cherrypy/test/test_mime.py
 delete mode 100644 lib/cherrypy/test/test_misc_tools.py
 delete mode 100644 lib/cherrypy/test/test_native.py
 delete mode 100644 lib/cherrypy/test/test_objectmapping.py
 delete mode 100644 lib/cherrypy/test/test_params.py
 delete mode 100644 lib/cherrypy/test/test_plugins.py
 delete mode 100644 lib/cherrypy/test/test_proxy.py
 delete mode 100644 lib/cherrypy/test/test_refleaks.py
 delete mode 100644 lib/cherrypy/test/test_request_obj.py
 delete mode 100644 lib/cherrypy/test/test_routes.py
 delete mode 100644 lib/cherrypy/test/test_session.py
 delete mode 100644 lib/cherrypy/test/test_sessionauthenticate.py
 delete mode 100644 lib/cherrypy/test/test_states.py
 delete mode 100644 lib/cherrypy/test/test_static.py
 delete mode 100644 lib/cherrypy/test/test_tools.py
 delete mode 100644 lib/cherrypy/test/test_tutorials.py
 delete mode 100644 lib/cherrypy/test/test_virtualhost.py
 delete mode 100644 lib/cherrypy/test/test_wsgi_ns.py
 delete mode 100644 lib/cherrypy/test/test_wsgi_unix_socket.py
 delete mode 100644 lib/cherrypy/test/test_wsgi_vhost.py
 delete mode 100644 lib/cherrypy/test/test_wsgiapps.py
 delete mode 100644 lib/cherrypy/test/test_xmlrpc.py
 delete mode 100644 lib/cherrypy/test/webtest.py
 delete mode 100644 lib/cherrypy/tutorial/README.rst
 delete mode 100644 lib/cherrypy/tutorial/__init__.py
 delete mode 100644 lib/cherrypy/tutorial/custom_error.html
 delete mode 100644 lib/cherrypy/tutorial/pdf_file.pdf
 delete mode 100644 lib/cherrypy/tutorial/tut01_helloworld.py
 delete mode 100644 lib/cherrypy/tutorial/tut02_expose_methods.py
 delete mode 100644 lib/cherrypy/tutorial/tut03_get_and_post.py
 delete mode 100644 lib/cherrypy/tutorial/tut04_complex_site.py
 delete mode 100644 lib/cherrypy/tutorial/tut05_derived_objects.py
 delete mode 100644 lib/cherrypy/tutorial/tut06_default_method.py
 delete mode 100644 lib/cherrypy/tutorial/tut07_sessions.py
 delete mode 100644 lib/cherrypy/tutorial/tut08_generators_and_yield.py
 delete mode 100644 lib/cherrypy/tutorial/tut09_files.py
 delete mode 100644 lib/cherrypy/tutorial/tut10_http_errors.py
 delete mode 100644 lib/cherrypy/tutorial/tutorial.conf
 delete mode 100644 lib/cloudinary/__init__.py
 delete mode 100644 lib/cloudinary/api.py
 delete mode 100644 lib/cloudinary/auth_token.py
 delete mode 100644 lib/cloudinary/cache/__init__.py
 delete mode 100644 lib/cloudinary/cache/adapter/__init__.py
 delete mode 100644 lib/cloudinary/cache/adapter/cache_adapter.py
 delete mode 100644 lib/cloudinary/cache/adapter/key_value_cache_adapter.py
 delete mode 100644 lib/cloudinary/cache/responsive_breakpoints_cache.py
 delete mode 100644 lib/cloudinary/cache/storage/__init__.py
 delete mode 100644 lib/cloudinary/cache/storage/file_system_key_value_storage.py
 delete mode 100644 lib/cloudinary/cache/storage/key_value_storage.py
 delete mode 100644 lib/cloudinary/compat.py
 delete mode 100644 lib/cloudinary/exceptions.py
 delete mode 100644 lib/cloudinary/forms.py
 delete mode 100644 lib/cloudinary/http_client.py
 delete mode 100644 lib/cloudinary/models.py
 delete mode 100644 lib/cloudinary/poster/__init__.py
 delete mode 100644 lib/cloudinary/poster/encode.py
 delete mode 100644 lib/cloudinary/poster/streaminghttp.py
 delete mode 100644 lib/cloudinary/search.py
 delete mode 100644 lib/cloudinary/static/html/cloudinary_cors.html
 delete mode 100644 lib/cloudinary/static/js/canvas-to-blob.min.js
 delete mode 100644 lib/cloudinary/static/js/jquery.cloudinary.js
 delete mode 100644 lib/cloudinary/static/js/jquery.fileupload-image.js
 delete mode 100644 lib/cloudinary/static/js/jquery.fileupload-process.js
 delete mode 100644 lib/cloudinary/static/js/jquery.fileupload-validate.js
 delete mode 100644 lib/cloudinary/static/js/jquery.fileupload.js
 delete mode 100644 lib/cloudinary/static/js/jquery.iframe-transport.js
 delete mode 100644 lib/cloudinary/static/js/jquery.ui.widget.js
 delete mode 100644 lib/cloudinary/static/js/load-image.all.min.js
 delete mode 100644 lib/cloudinary/templates/cloudinary_direct_upload.html
 delete mode 100644 lib/cloudinary/templates/cloudinary_includes.html
 delete mode 100644 lib/cloudinary/templates/cloudinary_js_config.html
 delete mode 100644 lib/cloudinary/templatetags/__init__.py
 delete mode 100644 lib/cloudinary/templatetags/cloudinary.py
 delete mode 100644 lib/cloudinary/uploader.py
 delete mode 100644 lib/cloudinary/utils.py
 delete mode 100644 lib/concurrent/LICENSE
 delete mode 100644 lib/concurrent/__init__.py
 delete mode 100644 lib/concurrent/futures/__init__.py
 delete mode 100644 lib/concurrent/futures/_base.py
 delete mode 100644 lib/concurrent/futures/_compat.py
 delete mode 100644 lib/concurrent/futures/process.py
 delete mode 100644 lib/concurrent/futures/thread.py
 delete mode 100644 lib/configobj/__init__.py
 delete mode 100644 lib/configobj/_version.py
 delete mode 100644 lib/configobj/validate.py
 delete mode 100644 lib/contextlib2.py
 delete mode 100644 lib/dateutil/__init__.py
 delete mode 100644 lib/dateutil/easter.py
 delete mode 100644 lib/dateutil/parser.py
 delete mode 100644 lib/dateutil/relativedelta.py
 delete mode 100644 lib/dateutil/rrule.py
 delete mode 100644 lib/dateutil/tz.py
 delete mode 100644 lib/dateutil/tzwin.py
 delete mode 100644 lib/distro.py
 delete mode 100644 lib/dns/__init__.py
 delete mode 100644 lib/dns/_compat.py
 delete mode 100644 lib/dns/dnssec.py
 delete mode 100644 lib/dns/e164.py
 delete mode 100644 lib/dns/edns.py
 delete mode 100644 lib/dns/entropy.py
 delete mode 100644 lib/dns/exception.py
 delete mode 100644 lib/dns/flags.py
 delete mode 100644 lib/dns/grange.py
 delete mode 100644 lib/dns/hash.py
 delete mode 100644 lib/dns/inet.py
 delete mode 100644 lib/dns/ipv4.py
 delete mode 100644 lib/dns/ipv6.py
 delete mode 100644 lib/dns/message.py
 delete mode 100644 lib/dns/name.py
 delete mode 100644 lib/dns/namedict.py
 delete mode 100644 lib/dns/node.py
 delete mode 100644 lib/dns/opcode.py
 delete mode 100644 lib/dns/query.py
 delete mode 100644 lib/dns/rcode.py
 delete mode 100644 lib/dns/rdata.py
 delete mode 100644 lib/dns/rdataclass.py
 delete mode 100644 lib/dns/rdataset.py
 delete mode 100644 lib/dns/rdatatype.py
 delete mode 100644 lib/dns/rdtypes/ANY/AFSDB.py
 delete mode 100644 lib/dns/rdtypes/ANY/CAA.py
 delete mode 100644 lib/dns/rdtypes/ANY/CDNSKEY.py
 delete mode 100644 lib/dns/rdtypes/ANY/CDS.py
 delete mode 100644 lib/dns/rdtypes/ANY/CERT.py
 delete mode 100644 lib/dns/rdtypes/ANY/CNAME.py
 delete mode 100644 lib/dns/rdtypes/ANY/CSYNC.py
 delete mode 100644 lib/dns/rdtypes/ANY/DLV.py
 delete mode 100644 lib/dns/rdtypes/ANY/DNAME.py
 delete mode 100644 lib/dns/rdtypes/ANY/DNSKEY.py
 delete mode 100644 lib/dns/rdtypes/ANY/DS.py
 delete mode 100644 lib/dns/rdtypes/ANY/EUI48.py
 delete mode 100644 lib/dns/rdtypes/ANY/EUI64.py
 delete mode 100644 lib/dns/rdtypes/ANY/GPOS.py
 delete mode 100644 lib/dns/rdtypes/ANY/HINFO.py
 delete mode 100644 lib/dns/rdtypes/ANY/HIP.py
 delete mode 100644 lib/dns/rdtypes/ANY/ISDN.py
 delete mode 100644 lib/dns/rdtypes/ANY/LOC.py
 delete mode 100644 lib/dns/rdtypes/ANY/MX.py
 delete mode 100644 lib/dns/rdtypes/ANY/NS.py
 delete mode 100644 lib/dns/rdtypes/ANY/NSEC.py
 delete mode 100644 lib/dns/rdtypes/ANY/NSEC3.py
 delete mode 100644 lib/dns/rdtypes/ANY/NSEC3PARAM.py
 delete mode 100644 lib/dns/rdtypes/ANY/PTR.py
 delete mode 100644 lib/dns/rdtypes/ANY/RP.py
 delete mode 100644 lib/dns/rdtypes/ANY/RRSIG.py
 delete mode 100644 lib/dns/rdtypes/ANY/RT.py
 delete mode 100644 lib/dns/rdtypes/ANY/SOA.py
 delete mode 100644 lib/dns/rdtypes/ANY/SPF.py
 delete mode 100644 lib/dns/rdtypes/ANY/SSHFP.py
 delete mode 100644 lib/dns/rdtypes/ANY/TLSA.py
 delete mode 100644 lib/dns/rdtypes/ANY/TXT.py
 delete mode 100644 lib/dns/rdtypes/ANY/URI.py
 delete mode 100644 lib/dns/rdtypes/ANY/X25.py
 delete mode 100644 lib/dns/rdtypes/ANY/__init__.py
 delete mode 100644 lib/dns/rdtypes/IN/A.py
 delete mode 100644 lib/dns/rdtypes/IN/AAAA.py
 delete mode 100644 lib/dns/rdtypes/IN/APL.py
 delete mode 100644 lib/dns/rdtypes/IN/DHCID.py
 delete mode 100644 lib/dns/rdtypes/IN/IPSECKEY.py
 delete mode 100644 lib/dns/rdtypes/IN/KX.py
 delete mode 100644 lib/dns/rdtypes/IN/NAPTR.py
 delete mode 100644 lib/dns/rdtypes/IN/NSAP.py
 delete mode 100644 lib/dns/rdtypes/IN/NSAP_PTR.py
 delete mode 100644 lib/dns/rdtypes/IN/PX.py
 delete mode 100644 lib/dns/rdtypes/IN/SRV.py
 delete mode 100644 lib/dns/rdtypes/IN/WKS.py
 delete mode 100644 lib/dns/rdtypes/IN/__init__.py
 delete mode 100644 lib/dns/rdtypes/__init__.py
 delete mode 100644 lib/dns/rdtypes/dnskeybase.py
 delete mode 100644 lib/dns/rdtypes/dsbase.py
 delete mode 100644 lib/dns/rdtypes/euibase.py
 delete mode 100644 lib/dns/rdtypes/mxbase.py
 delete mode 100644 lib/dns/rdtypes/nsbase.py
 delete mode 100644 lib/dns/rdtypes/txtbase.py
 delete mode 100644 lib/dns/renderer.py
 delete mode 100644 lib/dns/resolver.py
 delete mode 100644 lib/dns/reversename.py
 delete mode 100644 lib/dns/rrset.py
 delete mode 100644 lib/dns/set.py
 delete mode 100644 lib/dns/tokenizer.py
 delete mode 100644 lib/dns/tsig.py
 delete mode 100644 lib/dns/tsigkeyring.py
 delete mode 100644 lib/dns/ttl.py
 delete mode 100644 lib/dns/update.py
 delete mode 100644 lib/dns/version.py
 delete mode 100644 lib/dns/wiredata.py
 delete mode 100644 lib/dns/zone.py
 delete mode 100644 lib/facebook/__init__.py
 delete mode 100644 lib/facebook/version.py
 delete mode 100644 lib/funcsigs/__init__.py
 delete mode 100644 lib/funcsigs/version.py
 delete mode 100644 lib/future/__init__.py
 delete mode 100644 lib/future/backports/__init__.py
 delete mode 100644 lib/future/backports/_markupbase.py
 delete mode 100644 lib/future/backports/datetime.py
 delete mode 100644 lib/future/backports/email/__init__.py
 delete mode 100644 lib/future/backports/email/_encoded_words.py
 delete mode 100644 lib/future/backports/email/_header_value_parser.py
 delete mode 100644 lib/future/backports/email/_parseaddr.py
 delete mode 100644 lib/future/backports/email/_policybase.py
 delete mode 100644 lib/future/backports/email/base64mime.py
 delete mode 100644 lib/future/backports/email/charset.py
 delete mode 100644 lib/future/backports/email/encoders.py
 delete mode 100644 lib/future/backports/email/errors.py
 delete mode 100644 lib/future/backports/email/feedparser.py
 delete mode 100644 lib/future/backports/email/generator.py
 delete mode 100644 lib/future/backports/email/header.py
 delete mode 100644 lib/future/backports/email/headerregistry.py
 delete mode 100644 lib/future/backports/email/iterators.py
 delete mode 100644 lib/future/backports/email/message.py
 delete mode 100644 lib/future/backports/email/mime/__init__.py
 delete mode 100644 lib/future/backports/email/mime/application.py
 delete mode 100644 lib/future/backports/email/mime/audio.py
 delete mode 100644 lib/future/backports/email/mime/base.py
 delete mode 100644 lib/future/backports/email/mime/image.py
 delete mode 100644 lib/future/backports/email/mime/message.py
 delete mode 100644 lib/future/backports/email/mime/multipart.py
 delete mode 100644 lib/future/backports/email/mime/nonmultipart.py
 delete mode 100644 lib/future/backports/email/mime/text.py
 delete mode 100644 lib/future/backports/email/parser.py
 delete mode 100644 lib/future/backports/email/policy.py
 delete mode 100644 lib/future/backports/email/quoprimime.py
 delete mode 100644 lib/future/backports/email/utils.py
 delete mode 100644 lib/future/backports/html/__init__.py
 delete mode 100644 lib/future/backports/html/entities.py
 delete mode 100644 lib/future/backports/html/parser.py
 delete mode 100644 lib/future/backports/http/__init__.py
 delete mode 100644 lib/future/backports/http/client.py
 delete mode 100644 lib/future/backports/http/cookiejar.py
 delete mode 100644 lib/future/backports/http/cookies.py
 delete mode 100644 lib/future/backports/http/server.py
 delete mode 100644 lib/future/backports/misc.py
 delete mode 100644 lib/future/backports/socket.py
 delete mode 100644 lib/future/backports/socketserver.py
 delete mode 100644 lib/future/backports/test/__init__.py
 delete mode 100644 lib/future/backports/test/badcert.pem
 delete mode 100644 lib/future/backports/test/badkey.pem
 delete mode 100644 lib/future/backports/test/dh512.pem
 delete mode 100644 lib/future/backports/test/https_svn_python_org_root.pem
 delete mode 100644 lib/future/backports/test/keycert.passwd.pem
 delete mode 100644 lib/future/backports/test/keycert.pem
 delete mode 100644 lib/future/backports/test/keycert2.pem
 delete mode 100644 lib/future/backports/test/nokia.pem
 delete mode 100644 lib/future/backports/test/nullbytecert.pem
 delete mode 100644 lib/future/backports/test/nullcert.pem
 delete mode 100644 lib/future/backports/test/pystone.py
 delete mode 100644 lib/future/backports/test/sha256.pem
 delete mode 100644 lib/future/backports/test/ssl_cert.pem
 delete mode 100644 lib/future/backports/test/ssl_key.passwd.pem
 delete mode 100644 lib/future/backports/test/ssl_key.pem
 delete mode 100644 lib/future/backports/test/ssl_servers.py
 delete mode 100644 lib/future/backports/test/support.py
 delete mode 100644 lib/future/backports/total_ordering.py
 delete mode 100644 lib/future/backports/urllib/__init__.py
 delete mode 100644 lib/future/backports/urllib/error.py
 delete mode 100644 lib/future/backports/urllib/parse.py
 delete mode 100644 lib/future/backports/urllib/request.py
 delete mode 100644 lib/future/backports/urllib/response.py
 delete mode 100644 lib/future/backports/urllib/robotparser.py
 delete mode 100644 lib/future/backports/xmlrpc/__init__.py
 delete mode 100644 lib/future/backports/xmlrpc/client.py
 delete mode 100644 lib/future/backports/xmlrpc/server.py
 delete mode 100644 lib/future/builtins/__init__.py
 delete mode 100644 lib/future/builtins/disabled.py
 delete mode 100644 lib/future/builtins/iterators.py
 delete mode 100644 lib/future/builtins/misc.py
 delete mode 100644 lib/future/builtins/new_min_max.py
 delete mode 100644 lib/future/builtins/newnext.py
 delete mode 100644 lib/future/builtins/newround.py
 delete mode 100644 lib/future/builtins/newsuper.py
 delete mode 100644 lib/future/moves/__init__.py
 delete mode 100644 lib/future/moves/_dummy_thread.py
 delete mode 100644 lib/future/moves/_markupbase.py
 delete mode 100644 lib/future/moves/_thread.py
 delete mode 100644 lib/future/moves/builtins.py
 delete mode 100644 lib/future/moves/collections.py
 delete mode 100644 lib/future/moves/configparser.py
 delete mode 100644 lib/future/moves/copyreg.py
 delete mode 100644 lib/future/moves/dbm/__init__.py
 delete mode 100644 lib/future/moves/dbm/dumb.py
 delete mode 100644 lib/future/moves/dbm/gnu.py
 delete mode 100644 lib/future/moves/dbm/ndbm.py
 delete mode 100644 lib/future/moves/html/__init__.py
 delete mode 100644 lib/future/moves/html/entities.py
 delete mode 100644 lib/future/moves/html/parser.py
 delete mode 100644 lib/future/moves/http/__init__.py
 delete mode 100644 lib/future/moves/http/client.py
 delete mode 100644 lib/future/moves/http/cookiejar.py
 delete mode 100644 lib/future/moves/http/cookies.py
 delete mode 100644 lib/future/moves/http/server.py
 delete mode 100644 lib/future/moves/itertools.py
 delete mode 100644 lib/future/moves/pickle.py
 delete mode 100644 lib/future/moves/queue.py
 delete mode 100644 lib/future/moves/reprlib.py
 delete mode 100644 lib/future/moves/socketserver.py
 delete mode 100644 lib/future/moves/subprocess.py
 delete mode 100644 lib/future/moves/sys.py
 delete mode 100644 lib/future/moves/test/__init__.py
 delete mode 100644 lib/future/moves/test/support.py
 delete mode 100644 lib/future/moves/tkinter/__init__.py
 delete mode 100644 lib/future/moves/tkinter/colorchooser.py
 delete mode 100644 lib/future/moves/tkinter/commondialog.py
 delete mode 100644 lib/future/moves/tkinter/constants.py
 delete mode 100644 lib/future/moves/tkinter/dialog.py
 delete mode 100644 lib/future/moves/tkinter/dnd.py
 delete mode 100644 lib/future/moves/tkinter/filedialog.py
 delete mode 100644 lib/future/moves/tkinter/font.py
 delete mode 100644 lib/future/moves/tkinter/messagebox.py
 delete mode 100644 lib/future/moves/tkinter/scrolledtext.py
 delete mode 100644 lib/future/moves/tkinter/simpledialog.py
 delete mode 100644 lib/future/moves/tkinter/tix.py
 delete mode 100644 lib/future/moves/tkinter/ttk.py
 delete mode 100644 lib/future/moves/urllib/__init__.py
 delete mode 100644 lib/future/moves/urllib/error.py
 delete mode 100644 lib/future/moves/urllib/parse.py
 delete mode 100644 lib/future/moves/urllib/request.py
 delete mode 100644 lib/future/moves/urllib/response.py
 delete mode 100644 lib/future/moves/urllib/robotparser.py
 delete mode 100644 lib/future/moves/winreg.py
 delete mode 100644 lib/future/moves/xmlrpc/__init__.py
 delete mode 100644 lib/future/moves/xmlrpc/client.py
 delete mode 100644 lib/future/moves/xmlrpc/server.py
 delete mode 100644 lib/future/standard_library/__init__.py
 delete mode 100644 lib/future/tests/__init__.py
 delete mode 100644 lib/future/tests/base.py
 delete mode 100644 lib/future/types/__init__.py
 delete mode 100644 lib/future/types/newbytes.py
 delete mode 100644 lib/future/types/newdict.py
 delete mode 100644 lib/future/types/newint.py
 delete mode 100644 lib/future/types/newlist.py
 delete mode 100644 lib/future/types/newmemoryview.py
 delete mode 100644 lib/future/types/newobject.py
 delete mode 100644 lib/future/types/newopen.py
 delete mode 100644 lib/future/types/newrange.py
 delete mode 100644 lib/future/types/newstr.py
 delete mode 100644 lib/future/utils/__init__.py
 delete mode 100644 lib/future/utils/surrogateescape.py
 delete mode 100644 lib/future_fstrings.py
 delete mode 100644 lib/gntp/LICENSE
 delete mode 100644 lib/gntp/__init__.py
 delete mode 100644 lib/gntp/cli.py
 delete mode 100644 lib/gntp/config.py
 delete mode 100644 lib/gntp/core.py
 delete mode 100644 lib/gntp/errors.py
 delete mode 100644 lib/gntp/notifier.py
 delete mode 100644 lib/gntp/shim.py
 delete mode 100644 lib/gntp/version.py
 delete mode 100644 lib/hashing_passwords.py
 delete mode 100644 lib/html5lib/__init__.py
 delete mode 100644 lib/html5lib/constants.py
 delete mode 100644 lib/html5lib/filters/__init__.py
 delete mode 100644 lib/html5lib/filters/_base.py
 delete mode 100644 lib/html5lib/filters/alphabeticalattributes.py
 delete mode 100644 lib/html5lib/filters/inject_meta_charset.py
 delete mode 100644 lib/html5lib/filters/lint.py
 delete mode 100644 lib/html5lib/filters/optionaltags.py
 delete mode 100644 lib/html5lib/filters/sanitizer.py
 delete mode 100644 lib/html5lib/filters/whitespace.py
 delete mode 100644 lib/html5lib/html5parser.py
 delete mode 100644 lib/html5lib/ihatexml.py
 delete mode 100644 lib/html5lib/inputstream.py
 delete mode 100644 lib/html5lib/sanitizer.py
 delete mode 100644 lib/html5lib/serializer/__init__.py
 delete mode 100644 lib/html5lib/serializer/htmlserializer.py
 delete mode 100644 lib/html5lib/tokenizer.py
 delete mode 100644 lib/html5lib/treeadapters/__init__.py
 delete mode 100644 lib/html5lib/treeadapters/sax.py
 delete mode 100644 lib/html5lib/treebuilders/__init__.py
 delete mode 100644 lib/html5lib/treebuilders/_base.py
 delete mode 100644 lib/html5lib/treebuilders/dom.py
 delete mode 100644 lib/html5lib/treebuilders/etree.py
 delete mode 100644 lib/html5lib/treebuilders/etree_lxml.py
 delete mode 100644 lib/html5lib/treewalkers/__init__.py
 delete mode 100644 lib/html5lib/treewalkers/_base.py
 delete mode 100644 lib/html5lib/treewalkers/dom.py
 delete mode 100644 lib/html5lib/treewalkers/etree.py
 delete mode 100644 lib/html5lib/treewalkers/genshistream.py
 delete mode 100644 lib/html5lib/treewalkers/lxmletree.py
 delete mode 100644 lib/html5lib/treewalkers/pulldom.py
 delete mode 100644 lib/html5lib/trie/__init__.py
 delete mode 100644 lib/html5lib/trie/_base.py
 delete mode 100644 lib/html5lib/trie/datrie.py
 delete mode 100644 lib/html5lib/trie/py.py
 delete mode 100644 lib/html5lib/utils.py
 delete mode 100644 lib/httpagentparser/__init__.py
 delete mode 100644 lib/httpagentparser/more.py
 delete mode 100644 lib/idna/__init__.py
 delete mode 100644 lib/idna/codec.py
 delete mode 100644 lib/idna/compat.py
 delete mode 100644 lib/idna/core.py
 delete mode 100644 lib/idna/idnadata.py
 delete mode 100644 lib/idna/intranges.py
 delete mode 100644 lib/idna/package_data.py
 delete mode 100644 lib/idna/uts46data.py
 delete mode 100644 lib/ipaddr.py
 delete mode 100644 lib/ipaddress.py
 delete mode 100644 lib/ipwhois/__init__.py
 delete mode 100644 lib/ipwhois/asn.py
 delete mode 100644 lib/ipwhois/data/iso_3166-1.csv
 delete mode 100644 lib/ipwhois/data/iso_3166-1_list_en.xml
 delete mode 100644 lib/ipwhois/exceptions.py
 delete mode 100644 lib/ipwhois/experimental.py
 delete mode 100644 lib/ipwhois/hr.py
 delete mode 100644 lib/ipwhois/ipwhois.py
 delete mode 100644 lib/ipwhois/net.py
 delete mode 100644 lib/ipwhois/nir.py
 delete mode 100644 lib/ipwhois/rdap.py
 delete mode 100644 lib/ipwhois/scripts/ipwhois_cli.py
 delete mode 100644 lib/ipwhois/scripts/ipwhois_utils_cli.py
 delete mode 100644 lib/ipwhois/utils.py
 delete mode 100644 lib/ipwhois/whois.py
 delete mode 100644 lib/jaraco/__init__.py
 delete mode 100644 lib/jaraco/functools.py
 delete mode 100644 lib/jwt/__init__.py
 delete mode 100644 lib/jwt/__main__.py
 delete mode 100644 lib/jwt/algorithms.py
 delete mode 100644 lib/jwt/api_jws.py
 delete mode 100644 lib/jwt/api_jwt.py
 delete mode 100644 lib/jwt/compat.py
 delete mode 100644 lib/jwt/contrib/__init__.py
 delete mode 100644 lib/jwt/contrib/algorithms/__init__.py
 delete mode 100644 lib/jwt/contrib/algorithms/py_ecdsa.py
 delete mode 100644 lib/jwt/contrib/algorithms/pycrypto.py
 delete mode 100644 lib/jwt/exceptions.py
 delete mode 100644 lib/jwt/utils.py
 delete mode 100644 lib/libfuturize/__init__.py
 delete mode 100644 lib/libfuturize/fixer_util.py
 delete mode 100644 lib/libfuturize/fixes/__init__.py
 delete mode 100644 lib/libfuturize/fixes/fix_UserDict.py
 delete mode 100644 lib/libfuturize/fixes/fix_absolute_import.py
 delete mode 100644 lib/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py
 delete mode 100644 lib/libfuturize/fixes/fix_basestring.py
 delete mode 100644 lib/libfuturize/fixes/fix_bytes.py
 delete mode 100644 lib/libfuturize/fixes/fix_cmp.py
 delete mode 100644 lib/libfuturize/fixes/fix_division.py
 delete mode 100644 lib/libfuturize/fixes/fix_division_safe.py
 delete mode 100644 lib/libfuturize/fixes/fix_execfile.py
 delete mode 100644 lib/libfuturize/fixes/fix_future_builtins.py
 delete mode 100644 lib/libfuturize/fixes/fix_future_standard_library.py
 delete mode 100644 lib/libfuturize/fixes/fix_future_standard_library_urllib.py
 delete mode 100644 lib/libfuturize/fixes/fix_input.py
 delete mode 100644 lib/libfuturize/fixes/fix_metaclass.py
 delete mode 100644 lib/libfuturize/fixes/fix_next_call.py
 delete mode 100644 lib/libfuturize/fixes/fix_object.py
 delete mode 100644 lib/libfuturize/fixes/fix_oldstr_wrap.py
 delete mode 100644 lib/libfuturize/fixes/fix_order___future__imports.py
 delete mode 100644 lib/libfuturize/fixes/fix_print.py
 delete mode 100644 lib/libfuturize/fixes/fix_print_with_import.py
 delete mode 100644 lib/libfuturize/fixes/fix_raise.py
 delete mode 100644 lib/libfuturize/fixes/fix_remove_old__future__imports.py
 delete mode 100644 lib/libfuturize/fixes/fix_unicode_keep_u.py
 delete mode 100644 lib/libfuturize/fixes/fix_unicode_literals_import.py
 delete mode 100644 lib/libfuturize/fixes/fix_xrange_with_import.py
 delete mode 100644 lib/libfuturize/main.py
 delete mode 100644 lib/libpasteurize/__init__.py
 delete mode 100644 lib/libpasteurize/fixes/__init__.py
 delete mode 100644 lib/libpasteurize/fixes/feature_base.py
 delete mode 100644 lib/libpasteurize/fixes/fix_add_all__future__imports.py
 delete mode 100644 lib/libpasteurize/fixes/fix_add_all_future_builtins.py
 delete mode 100644 lib/libpasteurize/fixes/fix_add_future_standard_library_import.py
 delete mode 100644 lib/libpasteurize/fixes/fix_annotations.py
 delete mode 100644 lib/libpasteurize/fixes/fix_division.py
 delete mode 100644 lib/libpasteurize/fixes/fix_features.py
 delete mode 100644 lib/libpasteurize/fixes/fix_fullargspec.py
 delete mode 100644 lib/libpasteurize/fixes/fix_future_builtins.py
 delete mode 100644 lib/libpasteurize/fixes/fix_getcwd.py
 delete mode 100644 lib/libpasteurize/fixes/fix_imports.py
 delete mode 100644 lib/libpasteurize/fixes/fix_imports2.py
 delete mode 100644 lib/libpasteurize/fixes/fix_kwargs.py
 delete mode 100644 lib/libpasteurize/fixes/fix_memoryview.py
 delete mode 100644 lib/libpasteurize/fixes/fix_metaclass.py
 delete mode 100644 lib/libpasteurize/fixes/fix_newstyle.py
 delete mode 100644 lib/libpasteurize/fixes/fix_next.py
 delete mode 100644 lib/libpasteurize/fixes/fix_printfunction.py
 delete mode 100644 lib/libpasteurize/fixes/fix_raise.py
 delete mode 100644 lib/libpasteurize/fixes/fix_raise_.py
 delete mode 100644 lib/libpasteurize/fixes/fix_throw.py
 delete mode 100644 lib/libpasteurize/fixes/fix_unpacking.py
 delete mode 100644 lib/libpasteurize/main.py
 delete mode 100644 lib/logutils/__init__.py
 delete mode 100644 lib/logutils/adapter.py
 delete mode 100644 lib/logutils/colorize.py
 delete mode 100644 lib/logutils/dictconfig.py
 delete mode 100644 lib/logutils/http.py
 delete mode 100644 lib/logutils/queue.py
 delete mode 100644 lib/logutils/redis.py
 delete mode 100644 lib/logutils/testing.py
 delete mode 100644 lib/mako/__init__.py
 delete mode 100644 lib/mako/_ast_util.py
 delete mode 100644 lib/mako/ast.py
 delete mode 100644 lib/mako/cache.py
 delete mode 100755 lib/mako/cmd.py
 delete mode 100644 lib/mako/codegen.py
 delete mode 100644 lib/mako/compat.py
 delete mode 100644 lib/mako/exceptions.py
 delete mode 100644 lib/mako/ext/__init__.py
 delete mode 100644 lib/mako/ext/autohandler.py
 delete mode 100644 lib/mako/ext/babelplugin.py
 delete mode 100644 lib/mako/ext/beaker_cache.py
 delete mode 100644 lib/mako/ext/extract.py
 delete mode 100644 lib/mako/ext/linguaplugin.py
 delete mode 100644 lib/mako/ext/preprocessors.py
 delete mode 100644 lib/mako/ext/pygmentplugin.py
 delete mode 100644 lib/mako/ext/turbogears.py
 delete mode 100644 lib/mako/filters.py
 delete mode 100644 lib/mako/lexer.py
 delete mode 100644 lib/mako/lookup.py
 delete mode 100644 lib/mako/parsetree.py
 delete mode 100644 lib/mako/pygen.py
 delete mode 100644 lib/mako/pyparser.py
 delete mode 100644 lib/mako/runtime.py
 delete mode 100644 lib/mako/template.py
 delete mode 100644 lib/mako/util.py
 delete mode 100644 lib/more_itertools/__init__.py
 delete mode 100644 lib/more_itertools/more.py
 delete mode 100644 lib/more_itertools/recipes.py
 delete mode 100644 lib/more_itertools/tests/__init__.py
 delete mode 100644 lib/more_itertools/tests/test_more.py
 delete mode 100644 lib/more_itertools/tests/test_recipes.py
 delete mode 100644 lib/musicbrainzngs/__init__.py
 delete mode 100644 lib/musicbrainzngs/caa.py
 delete mode 100644 lib/musicbrainzngs/compat.py
 delete mode 100644 lib/musicbrainzngs/mbxml.py
 delete mode 100644 lib/musicbrainzngs/musicbrainz.py
 delete mode 100644 lib/musicbrainzngs/util.py
 delete mode 100644 lib/oauthlib/__init__.py
 delete mode 100644 lib/oauthlib/common.py
 delete mode 100644 lib/oauthlib/oauth1/__init__.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/__init__.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/endpoints/__init__.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/endpoints/access_token.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/endpoints/authorization.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/endpoints/base.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/endpoints/pre_configured.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/endpoints/request_token.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/endpoints/resource.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/endpoints/signature_only.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/errors.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/parameters.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/request_validator.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/signature.py
 delete mode 100644 lib/oauthlib/oauth1/rfc5849/utils.py
 delete mode 100644 lib/oauthlib/oauth2/__init__.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/__init__.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/clients/__init__.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/clients/backend_application.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/clients/base.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/clients/legacy_application.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/clients/mobile_application.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/clients/service_application.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/clients/web_application.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/endpoints/__init__.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/endpoints/authorization.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/endpoints/base.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/endpoints/pre_configured.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/endpoints/resource.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/endpoints/revocation.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/endpoints/token.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/errors.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/grant_types/__init__.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/grant_types/authorization_code.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/grant_types/base.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/grant_types/client_credentials.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/grant_types/implicit.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/grant_types/refresh_token.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/parameters.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/request_validator.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/tokens.py
 delete mode 100644 lib/oauthlib/oauth2/rfc6749/utils.py
 delete mode 100644 lib/oauthlib/signals.py
 delete mode 100644 lib/oauthlib/uri_validate.py
 delete mode 100755 lib/osxnotify/__init__.py
 delete mode 100644 lib/osxnotify/appIcon.icns
 delete mode 100644 lib/osxnotify/registerapp.py
 delete mode 100644 lib/paho/__init__.py
 delete mode 100644 lib/paho/mqtt/__init__.py
 delete mode 100644 lib/paho/mqtt/client.py
 delete mode 100644 lib/paho/mqtt/publish.py
 delete mode 100644 lib/past/__init__.py
 delete mode 100644 lib/past/builtins/__init__.py
 delete mode 100644 lib/past/builtins/misc.py
 delete mode 100644 lib/past/builtins/noniterators.py
 delete mode 100644 lib/past/translation/__init__.py
 delete mode 100644 lib/past/types/__init__.py
 delete mode 100644 lib/past/types/basestring.py
 delete mode 100644 lib/past/types/olddict.py
 delete mode 100644 lib/past/types/oldstr.py
 delete mode 100644 lib/past/utils/__init__.py
 delete mode 100644 lib/plexapi/__init__.py
 delete mode 100644 lib/plexapi/alert.py
 delete mode 100644 lib/plexapi/audio.py
 delete mode 100644 lib/plexapi/base.py
 delete mode 100644 lib/plexapi/client.py
 delete mode 100644 lib/plexapi/compat.py
 delete mode 100644 lib/plexapi/config.py
 delete mode 100644 lib/plexapi/exceptions.py
 delete mode 100644 lib/plexapi/gdm.py
 delete mode 100644 lib/plexapi/library.py
 delete mode 100644 lib/plexapi/media.py
 delete mode 100644 lib/plexapi/myplex.py
 delete mode 100644 lib/plexapi/photo.py
 delete mode 100644 lib/plexapi/playlist.py
 delete mode 100644 lib/plexapi/playqueue.py
 delete mode 100644 lib/plexapi/server.py
 delete mode 100644 lib/plexapi/settings.py
 delete mode 100644 lib/plexapi/sonos.py
 delete mode 100644 lib/plexapi/sync.py
 delete mode 100644 lib/plexapi/utils.py
 delete mode 100644 lib/plexapi/video.py
 delete mode 100644 lib/portend.py
 delete mode 100644 lib/profilehooks.py
 delete mode 100644 lib/pytz/__init__.py
 delete mode 100644 lib/pytz/exceptions.py
 delete mode 100644 lib/pytz/lazy.py
 delete mode 100644 lib/pytz/reference.py
 delete mode 100644 lib/pytz/tests/test_docs.py
 delete mode 100644 lib/pytz/tests/test_lazy.py
 delete mode 100644 lib/pytz/tests/test_tzinfo.py
 delete mode 100644 lib/pytz/tzfile.py
 delete mode 100644 lib/pytz/tzinfo.py
 delete mode 100644 lib/pytz/zoneinfo/Africa/Abidjan
 delete mode 100644 lib/pytz/zoneinfo/Africa/Accra
 delete mode 100644 lib/pytz/zoneinfo/Africa/Addis_Ababa
 delete mode 100644 lib/pytz/zoneinfo/Africa/Algiers
 delete mode 100644 lib/pytz/zoneinfo/Africa/Asmara
 delete mode 100644 lib/pytz/zoneinfo/Africa/Asmera
 delete mode 100644 lib/pytz/zoneinfo/Africa/Bamako
 delete mode 100644 lib/pytz/zoneinfo/Africa/Bangui
 delete mode 100644 lib/pytz/zoneinfo/Africa/Banjul
 delete mode 100644 lib/pytz/zoneinfo/Africa/Bissau
 delete mode 100644 lib/pytz/zoneinfo/Africa/Blantyre
 delete mode 100644 lib/pytz/zoneinfo/Africa/Brazzaville
 delete mode 100644 lib/pytz/zoneinfo/Africa/Bujumbura
 delete mode 100644 lib/pytz/zoneinfo/Africa/Cairo
 delete mode 100644 lib/pytz/zoneinfo/Africa/Casablanca
 delete mode 100644 lib/pytz/zoneinfo/Africa/Ceuta
 delete mode 100644 lib/pytz/zoneinfo/Africa/Conakry
 delete mode 100644 lib/pytz/zoneinfo/Africa/Dakar
 delete mode 100644 lib/pytz/zoneinfo/Africa/Dar_es_Salaam
 delete mode 100644 lib/pytz/zoneinfo/Africa/Djibouti
 delete mode 100644 lib/pytz/zoneinfo/Africa/Douala
 delete mode 100644 lib/pytz/zoneinfo/Africa/El_Aaiun
 delete mode 100644 lib/pytz/zoneinfo/Africa/Freetown
 delete mode 100644 lib/pytz/zoneinfo/Africa/Gaborone
 delete mode 100644 lib/pytz/zoneinfo/Africa/Harare
 delete mode 100644 lib/pytz/zoneinfo/Africa/Johannesburg
 delete mode 100644 lib/pytz/zoneinfo/Africa/Juba
 delete mode 100644 lib/pytz/zoneinfo/Africa/Kampala
 delete mode 100644 lib/pytz/zoneinfo/Africa/Khartoum
 delete mode 100644 lib/pytz/zoneinfo/Africa/Kigali
 delete mode 100644 lib/pytz/zoneinfo/Africa/Kinshasa
 delete mode 100644 lib/pytz/zoneinfo/Africa/Lagos
 delete mode 100644 lib/pytz/zoneinfo/Africa/Libreville
 delete mode 100644 lib/pytz/zoneinfo/Africa/Lome
 delete mode 100644 lib/pytz/zoneinfo/Africa/Luanda
 delete mode 100644 lib/pytz/zoneinfo/Africa/Lubumbashi
 delete mode 100644 lib/pytz/zoneinfo/Africa/Lusaka
 delete mode 100644 lib/pytz/zoneinfo/Africa/Malabo
 delete mode 100644 lib/pytz/zoneinfo/Africa/Maputo
 delete mode 100644 lib/pytz/zoneinfo/Africa/Maseru
 delete mode 100644 lib/pytz/zoneinfo/Africa/Mbabane
 delete mode 100644 lib/pytz/zoneinfo/Africa/Mogadishu
 delete mode 100644 lib/pytz/zoneinfo/Africa/Monrovia
 delete mode 100644 lib/pytz/zoneinfo/Africa/Nairobi
 delete mode 100644 lib/pytz/zoneinfo/Africa/Ndjamena
 delete mode 100644 lib/pytz/zoneinfo/Africa/Niamey
 delete mode 100644 lib/pytz/zoneinfo/Africa/Nouakchott
 delete mode 100644 lib/pytz/zoneinfo/Africa/Ouagadougou
 delete mode 100644 lib/pytz/zoneinfo/Africa/Porto-Novo
 delete mode 100644 lib/pytz/zoneinfo/Africa/Sao_Tome
 delete mode 100644 lib/pytz/zoneinfo/Africa/Timbuktu
 delete mode 100644 lib/pytz/zoneinfo/Africa/Tripoli
 delete mode 100644 lib/pytz/zoneinfo/Africa/Tunis
 delete mode 100644 lib/pytz/zoneinfo/Africa/Windhoek
 delete mode 100644 lib/pytz/zoneinfo/America/Adak
 delete mode 100644 lib/pytz/zoneinfo/America/Anchorage
 delete mode 100644 lib/pytz/zoneinfo/America/Anguilla
 delete mode 100644 lib/pytz/zoneinfo/America/Antigua
 delete mode 100644 lib/pytz/zoneinfo/America/Araguaina
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Buenos_Aires
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Catamarca
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/ComodRivadavia
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Cordoba
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Jujuy
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/La_Rioja
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Mendoza
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Rio_Gallegos
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Salta
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/San_Juan
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/San_Luis
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Tucuman
 delete mode 100644 lib/pytz/zoneinfo/America/Argentina/Ushuaia
 delete mode 100644 lib/pytz/zoneinfo/America/Aruba
 delete mode 100644 lib/pytz/zoneinfo/America/Asuncion
 delete mode 100644 lib/pytz/zoneinfo/America/Atikokan
 delete mode 100644 lib/pytz/zoneinfo/America/Atka
 delete mode 100644 lib/pytz/zoneinfo/America/Bahia
 delete mode 100644 lib/pytz/zoneinfo/America/Bahia_Banderas
 delete mode 100644 lib/pytz/zoneinfo/America/Barbados
 delete mode 100644 lib/pytz/zoneinfo/America/Belem
 delete mode 100644 lib/pytz/zoneinfo/America/Belize
 delete mode 100644 lib/pytz/zoneinfo/America/Blanc-Sablon
 delete mode 100644 lib/pytz/zoneinfo/America/Boa_Vista
 delete mode 100644 lib/pytz/zoneinfo/America/Bogota
 delete mode 100644 lib/pytz/zoneinfo/America/Boise
 delete mode 100644 lib/pytz/zoneinfo/America/Buenos_Aires
 delete mode 100644 lib/pytz/zoneinfo/America/Cambridge_Bay
 delete mode 100644 lib/pytz/zoneinfo/America/Campo_Grande
 delete mode 100644 lib/pytz/zoneinfo/America/Cancun
 delete mode 100644 lib/pytz/zoneinfo/America/Caracas
 delete mode 100644 lib/pytz/zoneinfo/America/Catamarca
 delete mode 100644 lib/pytz/zoneinfo/America/Cayenne
 delete mode 100644 lib/pytz/zoneinfo/America/Cayman
 delete mode 100644 lib/pytz/zoneinfo/America/Chicago
 delete mode 100644 lib/pytz/zoneinfo/America/Chihuahua
 delete mode 100644 lib/pytz/zoneinfo/America/Coral_Harbour
 delete mode 100644 lib/pytz/zoneinfo/America/Cordoba
 delete mode 100644 lib/pytz/zoneinfo/America/Costa_Rica
 delete mode 100644 lib/pytz/zoneinfo/America/Creston
 delete mode 100644 lib/pytz/zoneinfo/America/Cuiaba
 delete mode 100644 lib/pytz/zoneinfo/America/Curacao
 delete mode 100644 lib/pytz/zoneinfo/America/Danmarkshavn
 delete mode 100644 lib/pytz/zoneinfo/America/Dawson
 delete mode 100644 lib/pytz/zoneinfo/America/Dawson_Creek
 delete mode 100644 lib/pytz/zoneinfo/America/Denver
 delete mode 100644 lib/pytz/zoneinfo/America/Detroit
 delete mode 100644 lib/pytz/zoneinfo/America/Dominica
 delete mode 100644 lib/pytz/zoneinfo/America/Edmonton
 delete mode 100644 lib/pytz/zoneinfo/America/Eirunepe
 delete mode 100644 lib/pytz/zoneinfo/America/El_Salvador
 delete mode 100644 lib/pytz/zoneinfo/America/Ensenada
 delete mode 100644 lib/pytz/zoneinfo/America/Fort_Nelson
 delete mode 100644 lib/pytz/zoneinfo/America/Fort_Wayne
 delete mode 100644 lib/pytz/zoneinfo/America/Fortaleza
 delete mode 100644 lib/pytz/zoneinfo/America/Glace_Bay
 delete mode 100644 lib/pytz/zoneinfo/America/Godthab
 delete mode 100644 lib/pytz/zoneinfo/America/Goose_Bay
 delete mode 100644 lib/pytz/zoneinfo/America/Grand_Turk
 delete mode 100644 lib/pytz/zoneinfo/America/Grenada
 delete mode 100644 lib/pytz/zoneinfo/America/Guadeloupe
 delete mode 100644 lib/pytz/zoneinfo/America/Guatemala
 delete mode 100644 lib/pytz/zoneinfo/America/Guayaquil
 delete mode 100644 lib/pytz/zoneinfo/America/Guyana
 delete mode 100644 lib/pytz/zoneinfo/America/Halifax
 delete mode 100644 lib/pytz/zoneinfo/America/Havana
 delete mode 100644 lib/pytz/zoneinfo/America/Hermosillo
 delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Indianapolis
 delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Knox
 delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Marengo
 delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Petersburg
 delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Tell_City
 delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Vevay
 delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Vincennes
 delete mode 100644 lib/pytz/zoneinfo/America/Indiana/Winamac
 delete mode 100644 lib/pytz/zoneinfo/America/Indianapolis
 delete mode 100644 lib/pytz/zoneinfo/America/Inuvik
 delete mode 100644 lib/pytz/zoneinfo/America/Iqaluit
 delete mode 100644 lib/pytz/zoneinfo/America/Jamaica
 delete mode 100644 lib/pytz/zoneinfo/America/Jujuy
 delete mode 100644
lib/pytz/zoneinfo/America/Juneau delete mode 100644 lib/pytz/zoneinfo/America/Kentucky/Louisville delete mode 100644 lib/pytz/zoneinfo/America/Kentucky/Monticello delete mode 100644 lib/pytz/zoneinfo/America/Knox_IN delete mode 100644 lib/pytz/zoneinfo/America/Kralendijk delete mode 100644 lib/pytz/zoneinfo/America/La_Paz delete mode 100644 lib/pytz/zoneinfo/America/Lima delete mode 100644 lib/pytz/zoneinfo/America/Los_Angeles delete mode 100644 lib/pytz/zoneinfo/America/Louisville delete mode 100644 lib/pytz/zoneinfo/America/Lower_Princes delete mode 100644 lib/pytz/zoneinfo/America/Maceio delete mode 100644 lib/pytz/zoneinfo/America/Managua delete mode 100644 lib/pytz/zoneinfo/America/Manaus delete mode 100644 lib/pytz/zoneinfo/America/Marigot delete mode 100644 lib/pytz/zoneinfo/America/Martinique delete mode 100644 lib/pytz/zoneinfo/America/Matamoros delete mode 100644 lib/pytz/zoneinfo/America/Mazatlan delete mode 100644 lib/pytz/zoneinfo/America/Mendoza delete mode 100644 lib/pytz/zoneinfo/America/Menominee delete mode 100644 lib/pytz/zoneinfo/America/Merida delete mode 100644 lib/pytz/zoneinfo/America/Metlakatla delete mode 100644 lib/pytz/zoneinfo/America/Mexico_City delete mode 100644 lib/pytz/zoneinfo/America/Miquelon delete mode 100644 lib/pytz/zoneinfo/America/Moncton delete mode 100644 lib/pytz/zoneinfo/America/Monterrey delete mode 100644 lib/pytz/zoneinfo/America/Montevideo delete mode 100644 lib/pytz/zoneinfo/America/Montreal delete mode 100644 lib/pytz/zoneinfo/America/Montserrat delete mode 100644 lib/pytz/zoneinfo/America/Nassau delete mode 100644 lib/pytz/zoneinfo/America/New_York delete mode 100644 lib/pytz/zoneinfo/America/Nipigon delete mode 100644 lib/pytz/zoneinfo/America/Nome delete mode 100644 lib/pytz/zoneinfo/America/Noronha delete mode 100644 lib/pytz/zoneinfo/America/North_Dakota/Beulah delete mode 100644 lib/pytz/zoneinfo/America/North_Dakota/Center delete mode 100644 lib/pytz/zoneinfo/America/North_Dakota/New_Salem delete mode 100644 lib/pytz/zoneinfo/America/Ojinaga delete mode 100644 lib/pytz/zoneinfo/America/Panama delete mode 100644 lib/pytz/zoneinfo/America/Pangnirtung delete mode 100644 lib/pytz/zoneinfo/America/Paramaribo delete mode 100644 lib/pytz/zoneinfo/America/Phoenix delete mode 100644 lib/pytz/zoneinfo/America/Port-au-Prince delete mode 100644 lib/pytz/zoneinfo/America/Port_of_Spain delete mode 100644 lib/pytz/zoneinfo/America/Porto_Acre delete mode 100644 lib/pytz/zoneinfo/America/Porto_Velho delete mode 100644 lib/pytz/zoneinfo/America/Puerto_Rico delete mode 100644 lib/pytz/zoneinfo/America/Punta_Arenas delete mode 100644 lib/pytz/zoneinfo/America/Rainy_River delete mode 100644 lib/pytz/zoneinfo/America/Rankin_Inlet delete mode 100644 lib/pytz/zoneinfo/America/Recife delete mode 100644 lib/pytz/zoneinfo/America/Regina delete mode 100644 lib/pytz/zoneinfo/America/Resolute delete mode 100644 lib/pytz/zoneinfo/America/Rio_Branco delete mode 100644 lib/pytz/zoneinfo/America/Rosario delete mode 100644 lib/pytz/zoneinfo/America/Santa_Isabel delete mode 100644 lib/pytz/zoneinfo/America/Santarem delete mode 100644 lib/pytz/zoneinfo/America/Santiago delete mode 100644 lib/pytz/zoneinfo/America/Santo_Domingo delete mode 100644 lib/pytz/zoneinfo/America/Sao_Paulo delete mode 100644 lib/pytz/zoneinfo/America/Scoresbysund delete mode 100644 lib/pytz/zoneinfo/America/Shiprock delete mode 100644 lib/pytz/zoneinfo/America/Sitka delete mode 100644 lib/pytz/zoneinfo/America/St_Barthelemy delete mode 100644 lib/pytz/zoneinfo/America/St_Johns delete mode 
100644 lib/pytz/zoneinfo/America/St_Kitts delete mode 100644 lib/pytz/zoneinfo/America/St_Lucia delete mode 100644 lib/pytz/zoneinfo/America/St_Thomas delete mode 100644 lib/pytz/zoneinfo/America/St_Vincent delete mode 100644 lib/pytz/zoneinfo/America/Swift_Current delete mode 100644 lib/pytz/zoneinfo/America/Tegucigalpa delete mode 100644 lib/pytz/zoneinfo/America/Thule delete mode 100644 lib/pytz/zoneinfo/America/Thunder_Bay delete mode 100644 lib/pytz/zoneinfo/America/Tijuana delete mode 100644 lib/pytz/zoneinfo/America/Toronto delete mode 100644 lib/pytz/zoneinfo/America/Tortola delete mode 100644 lib/pytz/zoneinfo/America/Vancouver delete mode 100644 lib/pytz/zoneinfo/America/Virgin delete mode 100644 lib/pytz/zoneinfo/America/Whitehorse delete mode 100644 lib/pytz/zoneinfo/America/Winnipeg delete mode 100644 lib/pytz/zoneinfo/America/Yakutat delete mode 100644 lib/pytz/zoneinfo/America/Yellowknife delete mode 100644 lib/pytz/zoneinfo/Antarctica/Casey delete mode 100644 lib/pytz/zoneinfo/Antarctica/Davis delete mode 100644 lib/pytz/zoneinfo/Antarctica/DumontDUrville delete mode 100644 lib/pytz/zoneinfo/Antarctica/Macquarie delete mode 100644 lib/pytz/zoneinfo/Antarctica/Mawson delete mode 100644 lib/pytz/zoneinfo/Antarctica/McMurdo delete mode 100644 lib/pytz/zoneinfo/Antarctica/Palmer delete mode 100644 lib/pytz/zoneinfo/Antarctica/Rothera delete mode 100644 lib/pytz/zoneinfo/Antarctica/South_Pole delete mode 100644 lib/pytz/zoneinfo/Antarctica/Syowa delete mode 100644 lib/pytz/zoneinfo/Antarctica/Troll delete mode 100644 lib/pytz/zoneinfo/Antarctica/Vostok delete mode 100644 lib/pytz/zoneinfo/Arctic/Longyearbyen delete mode 100644 lib/pytz/zoneinfo/Asia/Aden delete mode 100644 lib/pytz/zoneinfo/Asia/Almaty delete mode 100644 lib/pytz/zoneinfo/Asia/Amman delete mode 100644 lib/pytz/zoneinfo/Asia/Anadyr delete mode 100644 lib/pytz/zoneinfo/Asia/Aqtau delete mode 100644 lib/pytz/zoneinfo/Asia/Aqtobe delete mode 100644 lib/pytz/zoneinfo/Asia/Ashgabat delete mode 100644 lib/pytz/zoneinfo/Asia/Ashkhabad delete mode 100644 lib/pytz/zoneinfo/Asia/Atyrau delete mode 100644 lib/pytz/zoneinfo/Asia/Baghdad delete mode 100644 lib/pytz/zoneinfo/Asia/Bahrain delete mode 100644 lib/pytz/zoneinfo/Asia/Baku delete mode 100644 lib/pytz/zoneinfo/Asia/Bangkok delete mode 100644 lib/pytz/zoneinfo/Asia/Barnaul delete mode 100644 lib/pytz/zoneinfo/Asia/Beirut delete mode 100644 lib/pytz/zoneinfo/Asia/Bishkek delete mode 100644 lib/pytz/zoneinfo/Asia/Brunei delete mode 100644 lib/pytz/zoneinfo/Asia/Calcutta delete mode 100644 lib/pytz/zoneinfo/Asia/Chita delete mode 100644 lib/pytz/zoneinfo/Asia/Choibalsan delete mode 100644 lib/pytz/zoneinfo/Asia/Chongqing delete mode 100644 lib/pytz/zoneinfo/Asia/Chungking delete mode 100644 lib/pytz/zoneinfo/Asia/Colombo delete mode 100644 lib/pytz/zoneinfo/Asia/Dacca delete mode 100644 lib/pytz/zoneinfo/Asia/Damascus delete mode 100644 lib/pytz/zoneinfo/Asia/Dhaka delete mode 100644 lib/pytz/zoneinfo/Asia/Dili delete mode 100644 lib/pytz/zoneinfo/Asia/Dubai delete mode 100644 lib/pytz/zoneinfo/Asia/Dushanbe delete mode 100644 lib/pytz/zoneinfo/Asia/Famagusta delete mode 100644 lib/pytz/zoneinfo/Asia/Gaza delete mode 100644 lib/pytz/zoneinfo/Asia/Harbin delete mode 100644 lib/pytz/zoneinfo/Asia/Hebron delete mode 100644 lib/pytz/zoneinfo/Asia/Ho_Chi_Minh delete mode 100644 lib/pytz/zoneinfo/Asia/Hong_Kong delete mode 100644 lib/pytz/zoneinfo/Asia/Hovd delete mode 100644 lib/pytz/zoneinfo/Asia/Irkutsk delete mode 100644 lib/pytz/zoneinfo/Asia/Istanbul delete mode 100644 
lib/pytz/zoneinfo/Asia/Jakarta delete mode 100644 lib/pytz/zoneinfo/Asia/Jayapura delete mode 100644 lib/pytz/zoneinfo/Asia/Jerusalem delete mode 100644 lib/pytz/zoneinfo/Asia/Kabul delete mode 100644 lib/pytz/zoneinfo/Asia/Kamchatka delete mode 100644 lib/pytz/zoneinfo/Asia/Karachi delete mode 100644 lib/pytz/zoneinfo/Asia/Kashgar delete mode 100644 lib/pytz/zoneinfo/Asia/Kathmandu delete mode 100644 lib/pytz/zoneinfo/Asia/Katmandu delete mode 100644 lib/pytz/zoneinfo/Asia/Khandyga delete mode 100644 lib/pytz/zoneinfo/Asia/Kolkata delete mode 100644 lib/pytz/zoneinfo/Asia/Krasnoyarsk delete mode 100644 lib/pytz/zoneinfo/Asia/Kuala_Lumpur delete mode 100644 lib/pytz/zoneinfo/Asia/Kuching delete mode 100644 lib/pytz/zoneinfo/Asia/Kuwait delete mode 100644 lib/pytz/zoneinfo/Asia/Macao delete mode 100644 lib/pytz/zoneinfo/Asia/Macau delete mode 100644 lib/pytz/zoneinfo/Asia/Magadan delete mode 100644 lib/pytz/zoneinfo/Asia/Makassar delete mode 100644 lib/pytz/zoneinfo/Asia/Manila delete mode 100644 lib/pytz/zoneinfo/Asia/Muscat delete mode 100644 lib/pytz/zoneinfo/Asia/Nicosia delete mode 100644 lib/pytz/zoneinfo/Asia/Novokuznetsk delete mode 100644 lib/pytz/zoneinfo/Asia/Novosibirsk delete mode 100644 lib/pytz/zoneinfo/Asia/Omsk delete mode 100644 lib/pytz/zoneinfo/Asia/Oral delete mode 100644 lib/pytz/zoneinfo/Asia/Phnom_Penh delete mode 100644 lib/pytz/zoneinfo/Asia/Pontianak delete mode 100644 lib/pytz/zoneinfo/Asia/Pyongyang delete mode 100644 lib/pytz/zoneinfo/Asia/Qatar delete mode 100644 lib/pytz/zoneinfo/Asia/Qostanay delete mode 100644 lib/pytz/zoneinfo/Asia/Qyzylorda delete mode 100644 lib/pytz/zoneinfo/Asia/Rangoon delete mode 100644 lib/pytz/zoneinfo/Asia/Riyadh delete mode 100644 lib/pytz/zoneinfo/Asia/Saigon delete mode 100644 lib/pytz/zoneinfo/Asia/Sakhalin delete mode 100644 lib/pytz/zoneinfo/Asia/Samarkand delete mode 100644 lib/pytz/zoneinfo/Asia/Seoul delete mode 100644 lib/pytz/zoneinfo/Asia/Shanghai delete mode 100644 lib/pytz/zoneinfo/Asia/Singapore delete mode 100644 lib/pytz/zoneinfo/Asia/Srednekolymsk delete mode 100644 lib/pytz/zoneinfo/Asia/Taipei delete mode 100644 lib/pytz/zoneinfo/Asia/Tashkent delete mode 100644 lib/pytz/zoneinfo/Asia/Tbilisi delete mode 100644 lib/pytz/zoneinfo/Asia/Tehran delete mode 100644 lib/pytz/zoneinfo/Asia/Tel_Aviv delete mode 100644 lib/pytz/zoneinfo/Asia/Thimbu delete mode 100644 lib/pytz/zoneinfo/Asia/Thimphu delete mode 100644 lib/pytz/zoneinfo/Asia/Tokyo delete mode 100644 lib/pytz/zoneinfo/Asia/Tomsk delete mode 100644 lib/pytz/zoneinfo/Asia/Ujung_Pandang delete mode 100644 lib/pytz/zoneinfo/Asia/Ulaanbaatar delete mode 100644 lib/pytz/zoneinfo/Asia/Ulan_Bator delete mode 100644 lib/pytz/zoneinfo/Asia/Urumqi delete mode 100644 lib/pytz/zoneinfo/Asia/Ust-Nera delete mode 100644 lib/pytz/zoneinfo/Asia/Vientiane delete mode 100644 lib/pytz/zoneinfo/Asia/Vladivostok delete mode 100644 lib/pytz/zoneinfo/Asia/Yakutsk delete mode 100644 lib/pytz/zoneinfo/Asia/Yangon delete mode 100644 lib/pytz/zoneinfo/Asia/Yekaterinburg delete mode 100644 lib/pytz/zoneinfo/Asia/Yerevan delete mode 100644 lib/pytz/zoneinfo/Atlantic/Azores delete mode 100644 lib/pytz/zoneinfo/Atlantic/Bermuda delete mode 100644 lib/pytz/zoneinfo/Atlantic/Canary delete mode 100644 lib/pytz/zoneinfo/Atlantic/Cape_Verde delete mode 100644 lib/pytz/zoneinfo/Atlantic/Faeroe delete mode 100644 lib/pytz/zoneinfo/Atlantic/Faroe delete mode 100644 lib/pytz/zoneinfo/Atlantic/Jan_Mayen delete mode 100644 lib/pytz/zoneinfo/Atlantic/Madeira delete mode 100644 
lib/pytz/zoneinfo/Atlantic/Reykjavik delete mode 100644 lib/pytz/zoneinfo/Atlantic/South_Georgia delete mode 100644 lib/pytz/zoneinfo/Atlantic/St_Helena delete mode 100644 lib/pytz/zoneinfo/Atlantic/Stanley delete mode 100644 lib/pytz/zoneinfo/Australia/ACT delete mode 100644 lib/pytz/zoneinfo/Australia/Adelaide delete mode 100644 lib/pytz/zoneinfo/Australia/Brisbane delete mode 100644 lib/pytz/zoneinfo/Australia/Broken_Hill delete mode 100644 lib/pytz/zoneinfo/Australia/Canberra delete mode 100644 lib/pytz/zoneinfo/Australia/Currie delete mode 100644 lib/pytz/zoneinfo/Australia/Darwin delete mode 100644 lib/pytz/zoneinfo/Australia/Eucla delete mode 100644 lib/pytz/zoneinfo/Australia/Hobart delete mode 100644 lib/pytz/zoneinfo/Australia/LHI delete mode 100644 lib/pytz/zoneinfo/Australia/Lindeman delete mode 100644 lib/pytz/zoneinfo/Australia/Lord_Howe delete mode 100644 lib/pytz/zoneinfo/Australia/Melbourne delete mode 100644 lib/pytz/zoneinfo/Australia/NSW delete mode 100644 lib/pytz/zoneinfo/Australia/North delete mode 100644 lib/pytz/zoneinfo/Australia/Perth delete mode 100644 lib/pytz/zoneinfo/Australia/Queensland delete mode 100644 lib/pytz/zoneinfo/Australia/South delete mode 100644 lib/pytz/zoneinfo/Australia/Sydney delete mode 100644 lib/pytz/zoneinfo/Australia/Tasmania delete mode 100644 lib/pytz/zoneinfo/Australia/Victoria delete mode 100644 lib/pytz/zoneinfo/Australia/West delete mode 100644 lib/pytz/zoneinfo/Australia/Yancowinna delete mode 100644 lib/pytz/zoneinfo/Brazil/Acre delete mode 100644 lib/pytz/zoneinfo/Brazil/DeNoronha delete mode 100644 lib/pytz/zoneinfo/Brazil/East delete mode 100644 lib/pytz/zoneinfo/Brazil/West delete mode 100644 lib/pytz/zoneinfo/CET delete mode 100644 lib/pytz/zoneinfo/CST6CDT delete mode 100644 lib/pytz/zoneinfo/Canada/Atlantic delete mode 100644 lib/pytz/zoneinfo/Canada/Central delete mode 100644 lib/pytz/zoneinfo/Canada/Eastern delete mode 100644 lib/pytz/zoneinfo/Canada/Mountain delete mode 100644 lib/pytz/zoneinfo/Canada/Newfoundland delete mode 100644 lib/pytz/zoneinfo/Canada/Pacific delete mode 100644 lib/pytz/zoneinfo/Canada/Saskatchewan delete mode 100644 lib/pytz/zoneinfo/Canada/Yukon delete mode 100644 lib/pytz/zoneinfo/Chile/Continental delete mode 100644 lib/pytz/zoneinfo/Chile/EasterIsland delete mode 100644 lib/pytz/zoneinfo/Cuba delete mode 100644 lib/pytz/zoneinfo/EET delete mode 100644 lib/pytz/zoneinfo/EST delete mode 100644 lib/pytz/zoneinfo/EST5EDT delete mode 100644 lib/pytz/zoneinfo/Egypt delete mode 100644 lib/pytz/zoneinfo/Eire delete mode 100644 lib/pytz/zoneinfo/Etc/GMT delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+0 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+1 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+10 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+11 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+12 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+2 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+3 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+4 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+5 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+6 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+7 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+8 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT+9 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-0 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-1 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-10 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-11 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-12 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-13 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-14 delete mode 100644 
lib/pytz/zoneinfo/Etc/GMT-2 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-3 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-4 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-5 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-6 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-7 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-8 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT-9 delete mode 100644 lib/pytz/zoneinfo/Etc/GMT0 delete mode 100644 lib/pytz/zoneinfo/Etc/Greenwich delete mode 100644 lib/pytz/zoneinfo/Etc/UCT delete mode 100644 lib/pytz/zoneinfo/Etc/UTC delete mode 100644 lib/pytz/zoneinfo/Etc/Universal delete mode 100644 lib/pytz/zoneinfo/Etc/Zulu delete mode 100644 lib/pytz/zoneinfo/Europe/Amsterdam delete mode 100644 lib/pytz/zoneinfo/Europe/Andorra delete mode 100644 lib/pytz/zoneinfo/Europe/Astrakhan delete mode 100644 lib/pytz/zoneinfo/Europe/Athens delete mode 100644 lib/pytz/zoneinfo/Europe/Belfast delete mode 100644 lib/pytz/zoneinfo/Europe/Belgrade delete mode 100644 lib/pytz/zoneinfo/Europe/Berlin delete mode 100644 lib/pytz/zoneinfo/Europe/Bratislava delete mode 100644 lib/pytz/zoneinfo/Europe/Brussels delete mode 100644 lib/pytz/zoneinfo/Europe/Bucharest delete mode 100644 lib/pytz/zoneinfo/Europe/Budapest delete mode 100644 lib/pytz/zoneinfo/Europe/Busingen delete mode 100644 lib/pytz/zoneinfo/Europe/Chisinau delete mode 100644 lib/pytz/zoneinfo/Europe/Copenhagen delete mode 100644 lib/pytz/zoneinfo/Europe/Dublin delete mode 100644 lib/pytz/zoneinfo/Europe/Gibraltar delete mode 100644 lib/pytz/zoneinfo/Europe/Guernsey delete mode 100644 lib/pytz/zoneinfo/Europe/Helsinki delete mode 100644 lib/pytz/zoneinfo/Europe/Isle_of_Man delete mode 100644 lib/pytz/zoneinfo/Europe/Istanbul delete mode 100644 lib/pytz/zoneinfo/Europe/Jersey delete mode 100644 lib/pytz/zoneinfo/Europe/Kaliningrad delete mode 100644 lib/pytz/zoneinfo/Europe/Kiev delete mode 100644 lib/pytz/zoneinfo/Europe/Kirov delete mode 100644 lib/pytz/zoneinfo/Europe/Lisbon delete mode 100644 lib/pytz/zoneinfo/Europe/Ljubljana delete mode 100644 lib/pytz/zoneinfo/Europe/London delete mode 100644 lib/pytz/zoneinfo/Europe/Luxembourg delete mode 100644 lib/pytz/zoneinfo/Europe/Madrid delete mode 100644 lib/pytz/zoneinfo/Europe/Malta delete mode 100644 lib/pytz/zoneinfo/Europe/Mariehamn delete mode 100644 lib/pytz/zoneinfo/Europe/Minsk delete mode 100644 lib/pytz/zoneinfo/Europe/Monaco delete mode 100644 lib/pytz/zoneinfo/Europe/Moscow delete mode 100644 lib/pytz/zoneinfo/Europe/Nicosia delete mode 100644 lib/pytz/zoneinfo/Europe/Oslo delete mode 100644 lib/pytz/zoneinfo/Europe/Paris delete mode 100644 lib/pytz/zoneinfo/Europe/Podgorica delete mode 100644 lib/pytz/zoneinfo/Europe/Prague delete mode 100644 lib/pytz/zoneinfo/Europe/Riga delete mode 100644 lib/pytz/zoneinfo/Europe/Rome delete mode 100644 lib/pytz/zoneinfo/Europe/Samara delete mode 100644 lib/pytz/zoneinfo/Europe/San_Marino delete mode 100644 lib/pytz/zoneinfo/Europe/Sarajevo delete mode 100644 lib/pytz/zoneinfo/Europe/Saratov delete mode 100644 lib/pytz/zoneinfo/Europe/Simferopol delete mode 100644 lib/pytz/zoneinfo/Europe/Skopje delete mode 100644 lib/pytz/zoneinfo/Europe/Sofia delete mode 100644 lib/pytz/zoneinfo/Europe/Stockholm delete mode 100644 lib/pytz/zoneinfo/Europe/Tallinn delete mode 100644 lib/pytz/zoneinfo/Europe/Tirane delete mode 100644 lib/pytz/zoneinfo/Europe/Tiraspol delete mode 100644 lib/pytz/zoneinfo/Europe/Ulyanovsk delete mode 100644 lib/pytz/zoneinfo/Europe/Uzhgorod delete mode 100644 lib/pytz/zoneinfo/Europe/Vaduz delete mode 100644 
lib/pytz/zoneinfo/Europe/Vatican delete mode 100644 lib/pytz/zoneinfo/Europe/Vienna delete mode 100644 lib/pytz/zoneinfo/Europe/Vilnius delete mode 100644 lib/pytz/zoneinfo/Europe/Volgograd delete mode 100644 lib/pytz/zoneinfo/Europe/Warsaw delete mode 100644 lib/pytz/zoneinfo/Europe/Zagreb delete mode 100644 lib/pytz/zoneinfo/Europe/Zaporozhye delete mode 100644 lib/pytz/zoneinfo/Europe/Zurich delete mode 100644 lib/pytz/zoneinfo/Factory delete mode 100644 lib/pytz/zoneinfo/GB delete mode 100644 lib/pytz/zoneinfo/GB-Eire delete mode 100644 lib/pytz/zoneinfo/GMT delete mode 100644 lib/pytz/zoneinfo/GMT+0 delete mode 100644 lib/pytz/zoneinfo/GMT-0 delete mode 100644 lib/pytz/zoneinfo/GMT0 delete mode 100644 lib/pytz/zoneinfo/Greenwich delete mode 100644 lib/pytz/zoneinfo/HST delete mode 100644 lib/pytz/zoneinfo/Hongkong delete mode 100644 lib/pytz/zoneinfo/Iceland delete mode 100644 lib/pytz/zoneinfo/Indian/Antananarivo delete mode 100644 lib/pytz/zoneinfo/Indian/Chagos delete mode 100644 lib/pytz/zoneinfo/Indian/Christmas delete mode 100644 lib/pytz/zoneinfo/Indian/Cocos delete mode 100644 lib/pytz/zoneinfo/Indian/Comoro delete mode 100644 lib/pytz/zoneinfo/Indian/Kerguelen delete mode 100644 lib/pytz/zoneinfo/Indian/Mahe delete mode 100644 lib/pytz/zoneinfo/Indian/Maldives delete mode 100644 lib/pytz/zoneinfo/Indian/Mauritius delete mode 100644 lib/pytz/zoneinfo/Indian/Mayotte delete mode 100644 lib/pytz/zoneinfo/Indian/Reunion delete mode 100644 lib/pytz/zoneinfo/Iran delete mode 100644 lib/pytz/zoneinfo/Israel delete mode 100644 lib/pytz/zoneinfo/Jamaica delete mode 100644 lib/pytz/zoneinfo/Japan delete mode 100644 lib/pytz/zoneinfo/Kwajalein delete mode 100644 lib/pytz/zoneinfo/Libya delete mode 100644 lib/pytz/zoneinfo/MET delete mode 100644 lib/pytz/zoneinfo/MST delete mode 100644 lib/pytz/zoneinfo/MST7MDT delete mode 100644 lib/pytz/zoneinfo/Mexico/BajaNorte delete mode 100644 lib/pytz/zoneinfo/Mexico/BajaSur delete mode 100644 lib/pytz/zoneinfo/Mexico/General delete mode 100644 lib/pytz/zoneinfo/NZ delete mode 100644 lib/pytz/zoneinfo/NZ-CHAT delete mode 100644 lib/pytz/zoneinfo/Navajo delete mode 100644 lib/pytz/zoneinfo/PRC delete mode 100644 lib/pytz/zoneinfo/PST8PDT delete mode 100644 lib/pytz/zoneinfo/Pacific/Apia delete mode 100644 lib/pytz/zoneinfo/Pacific/Auckland delete mode 100644 lib/pytz/zoneinfo/Pacific/Bougainville delete mode 100644 lib/pytz/zoneinfo/Pacific/Chatham delete mode 100644 lib/pytz/zoneinfo/Pacific/Chuuk delete mode 100644 lib/pytz/zoneinfo/Pacific/Easter delete mode 100644 lib/pytz/zoneinfo/Pacific/Efate delete mode 100644 lib/pytz/zoneinfo/Pacific/Enderbury delete mode 100644 lib/pytz/zoneinfo/Pacific/Fakaofo delete mode 100644 lib/pytz/zoneinfo/Pacific/Fiji delete mode 100644 lib/pytz/zoneinfo/Pacific/Funafuti delete mode 100644 lib/pytz/zoneinfo/Pacific/Galapagos delete mode 100644 lib/pytz/zoneinfo/Pacific/Gambier delete mode 100644 lib/pytz/zoneinfo/Pacific/Guadalcanal delete mode 100644 lib/pytz/zoneinfo/Pacific/Guam delete mode 100644 lib/pytz/zoneinfo/Pacific/Honolulu delete mode 100644 lib/pytz/zoneinfo/Pacific/Johnston delete mode 100644 lib/pytz/zoneinfo/Pacific/Kiritimati delete mode 100644 lib/pytz/zoneinfo/Pacific/Kosrae delete mode 100644 lib/pytz/zoneinfo/Pacific/Kwajalein delete mode 100644 lib/pytz/zoneinfo/Pacific/Majuro delete mode 100644 lib/pytz/zoneinfo/Pacific/Marquesas delete mode 100644 lib/pytz/zoneinfo/Pacific/Midway delete mode 100644 lib/pytz/zoneinfo/Pacific/Nauru delete mode 100644 lib/pytz/zoneinfo/Pacific/Niue delete 
mode 100644 lib/pytz/zoneinfo/Pacific/Norfolk delete mode 100644 lib/pytz/zoneinfo/Pacific/Noumea delete mode 100644 lib/pytz/zoneinfo/Pacific/Pago_Pago delete mode 100644 lib/pytz/zoneinfo/Pacific/Palau delete mode 100644 lib/pytz/zoneinfo/Pacific/Pitcairn delete mode 100644 lib/pytz/zoneinfo/Pacific/Pohnpei delete mode 100644 lib/pytz/zoneinfo/Pacific/Ponape delete mode 100644 lib/pytz/zoneinfo/Pacific/Port_Moresby delete mode 100644 lib/pytz/zoneinfo/Pacific/Rarotonga delete mode 100644 lib/pytz/zoneinfo/Pacific/Saipan delete mode 100644 lib/pytz/zoneinfo/Pacific/Samoa delete mode 100644 lib/pytz/zoneinfo/Pacific/Tahiti delete mode 100644 lib/pytz/zoneinfo/Pacific/Tarawa delete mode 100644 lib/pytz/zoneinfo/Pacific/Tongatapu delete mode 100644 lib/pytz/zoneinfo/Pacific/Truk delete mode 100644 lib/pytz/zoneinfo/Pacific/Wake delete mode 100644 lib/pytz/zoneinfo/Pacific/Wallis delete mode 100644 lib/pytz/zoneinfo/Pacific/Yap delete mode 100644 lib/pytz/zoneinfo/Poland delete mode 100644 lib/pytz/zoneinfo/Portugal delete mode 100644 lib/pytz/zoneinfo/ROC delete mode 100644 lib/pytz/zoneinfo/ROK delete mode 100644 lib/pytz/zoneinfo/Singapore delete mode 100644 lib/pytz/zoneinfo/Turkey delete mode 100644 lib/pytz/zoneinfo/UCT delete mode 100644 lib/pytz/zoneinfo/US/Alaska delete mode 100644 lib/pytz/zoneinfo/US/Aleutian delete mode 100644 lib/pytz/zoneinfo/US/Arizona delete mode 100644 lib/pytz/zoneinfo/US/Central delete mode 100644 lib/pytz/zoneinfo/US/East-Indiana delete mode 100644 lib/pytz/zoneinfo/US/Eastern delete mode 100644 lib/pytz/zoneinfo/US/Hawaii delete mode 100644 lib/pytz/zoneinfo/US/Indiana-Starke delete mode 100644 lib/pytz/zoneinfo/US/Michigan delete mode 100644 lib/pytz/zoneinfo/US/Mountain delete mode 100644 lib/pytz/zoneinfo/US/Pacific delete mode 100644 lib/pytz/zoneinfo/US/Samoa delete mode 100644 lib/pytz/zoneinfo/UTC delete mode 100644 lib/pytz/zoneinfo/Universal delete mode 100644 lib/pytz/zoneinfo/W-SU delete mode 100644 lib/pytz/zoneinfo/WET delete mode 100644 lib/pytz/zoneinfo/Zulu delete mode 100644 lib/pytz/zoneinfo/iso3166.tab delete mode 100644 lib/pytz/zoneinfo/leapseconds delete mode 100644 lib/pytz/zoneinfo/posixrules delete mode 100644 lib/pytz/zoneinfo/tzdata.zi delete mode 100644 lib/pytz/zoneinfo/zone.tab delete mode 100644 lib/pytz/zoneinfo/zone1970.tab delete mode 100644 lib/requests/__init__.py delete mode 100644 lib/requests/__version__.py delete mode 100644 lib/requests/_internal_utils.py delete mode 100644 lib/requests/adapters.py delete mode 100644 lib/requests/api.py delete mode 100644 lib/requests/auth.py delete mode 100644 lib/requests/certs.py delete mode 100644 lib/requests/compat.py delete mode 100644 lib/requests/cookies.py delete mode 100644 lib/requests/exceptions.py delete mode 100644 lib/requests/help.py delete mode 100644 lib/requests/hooks.py delete mode 100644 lib/requests/models.py delete mode 100644 lib/requests/packages.py delete mode 100644 lib/requests/sessions.py delete mode 100644 lib/requests/status_codes.py delete mode 100644 lib/requests/structures.py delete mode 100644 lib/requests/utils.py delete mode 100644 lib/requests_oauthlib/__init__.py delete mode 100644 lib/requests_oauthlib/compliance_fixes/__init__.py delete mode 100644 lib/requests_oauthlib/compliance_fixes/douban.py delete mode 100644 lib/requests_oauthlib/compliance_fixes/facebook.py delete mode 100644 lib/requests_oauthlib/compliance_fixes/linkedin.py delete mode 100644 lib/requests_oauthlib/compliance_fixes/mailchimp.py delete mode 100644 
lib/requests_oauthlib/compliance_fixes/slack.py delete mode 100644 lib/requests_oauthlib/compliance_fixes/weibo.py delete mode 100644 lib/requests_oauthlib/oauth1_auth.py delete mode 100644 lib/requests_oauthlib/oauth1_session.py delete mode 100644 lib/requests_oauthlib/oauth2_auth.py delete mode 100644 lib/requests_oauthlib/oauth2_session.py delete mode 100644 lib/rumps/__init__.py delete mode 100644 lib/rumps/compat.py delete mode 100644 lib/rumps/notifications.py delete mode 100644 lib/rumps/packages/__init__.py delete mode 100644 lib/rumps/packages/ordereddict.py delete mode 100644 lib/rumps/rumps.py delete mode 100644 lib/rumps/utils.py delete mode 100644 lib/sgmllib3.py delete mode 100644 lib/simplejson/__init__.py delete mode 100644 lib/simplejson/_speedups.c delete mode 100644 lib/simplejson/compat.py delete mode 100644 lib/simplejson/decoder.py delete mode 100644 lib/simplejson/encoder.py delete mode 100644 lib/simplejson/errors.py delete mode 100644 lib/simplejson/ordered_dict.py delete mode 100644 lib/simplejson/raw_json.py delete mode 100644 lib/simplejson/scanner.py delete mode 100644 lib/simplejson/tests/__init__.py delete mode 100644 lib/simplejson/tests/test_bigint_as_string.py delete mode 100644 lib/simplejson/tests/test_bitsize_int_as_string.py delete mode 100644 lib/simplejson/tests/test_check_circular.py delete mode 100644 lib/simplejson/tests/test_decimal.py delete mode 100644 lib/simplejson/tests/test_decode.py delete mode 100644 lib/simplejson/tests/test_default.py delete mode 100644 lib/simplejson/tests/test_dump.py delete mode 100644 lib/simplejson/tests/test_encode_basestring_ascii.py delete mode 100644 lib/simplejson/tests/test_encode_for_html.py delete mode 100644 lib/simplejson/tests/test_errors.py delete mode 100644 lib/simplejson/tests/test_fail.py delete mode 100644 lib/simplejson/tests/test_float.py delete mode 100644 lib/simplejson/tests/test_for_json.py delete mode 100644 lib/simplejson/tests/test_indent.py delete mode 100644 lib/simplejson/tests/test_item_sort_key.py delete mode 100644 lib/simplejson/tests/test_iterable.py delete mode 100644 lib/simplejson/tests/test_namedtuple.py delete mode 100644 lib/simplejson/tests/test_pass1.py delete mode 100644 lib/simplejson/tests/test_pass2.py delete mode 100644 lib/simplejson/tests/test_pass3.py delete mode 100644 lib/simplejson/tests/test_raw_json.py delete mode 100644 lib/simplejson/tests/test_recursion.py delete mode 100644 lib/simplejson/tests/test_scanstring.py delete mode 100644 lib/simplejson/tests/test_separators.py delete mode 100644 lib/simplejson/tests/test_speedups.py delete mode 100644 lib/simplejson/tests/test_str_subclass.py delete mode 100644 lib/simplejson/tests/test_subclass.py delete mode 100644 lib/simplejson/tests/test_tool.py delete mode 100644 lib/simplejson/tests/test_tuple.py delete mode 100644 lib/simplejson/tests/test_unicode.py delete mode 100644 lib/simplejson/tool.py delete mode 100644 lib/six.py delete mode 100644 lib/soupsieve/__init__.py delete mode 100644 lib/soupsieve/__meta__.py delete mode 100644 lib/soupsieve/css_match.py delete mode 100644 lib/soupsieve/css_parser.py delete mode 100644 lib/soupsieve/css_types.py delete mode 100644 lib/soupsieve/util.py delete mode 100644 lib/systray/__init__.py delete mode 100644 lib/systray/traybar.py delete mode 100644 lib/systray/win32_adapter.py delete mode 100644 lib/tempora/__init__.py delete mode 100644 lib/tempora/schedule.py delete mode 100644 lib/tempora/tests/test_schedule.py delete mode 100644 lib/tempora/timing.py delete mode 
100644 lib/tempora/utc.py delete mode 100644 lib/tokenize_rt.py delete mode 100644 lib/twitter/__init__.py delete mode 100644 lib/twitter/_file_cache.py delete mode 100644 lib/twitter/api.py delete mode 100644 lib/twitter/error.py delete mode 100644 lib/twitter/models.py delete mode 100644 lib/twitter/parse_tweet.py delete mode 100644 lib/twitter/ratelimit.py delete mode 100644 lib/twitter/twitter_utils.py delete mode 100644 lib/tzlocal/CHANGES.txt delete mode 100644 lib/tzlocal/LICENSE.txt delete mode 100644 lib/tzlocal/__init__.py delete mode 100644 lib/tzlocal/unix.py delete mode 100644 lib/tzlocal/utils.py delete mode 100644 lib/tzlocal/win32.py delete mode 100644 lib/tzlocal/windows_tz.py delete mode 100644 lib/urllib3/__init__.py delete mode 100644 lib/urllib3/_collections.py delete mode 100644 lib/urllib3/connection.py delete mode 100644 lib/urllib3/connectionpool.py delete mode 100644 lib/urllib3/contrib/__init__.py delete mode 100644 lib/urllib3/contrib/_securetransport/__init__.py delete mode 100644 lib/urllib3/contrib/_securetransport/bindings.py delete mode 100644 lib/urllib3/contrib/_securetransport/low_level.py delete mode 100644 lib/urllib3/contrib/appengine.py delete mode 100644 lib/urllib3/contrib/ntlmpool.py delete mode 100644 lib/urllib3/contrib/pyopenssl.py delete mode 100644 lib/urllib3/contrib/securetransport.py delete mode 100644 lib/urllib3/contrib/socks.py delete mode 100644 lib/urllib3/exceptions.py delete mode 100644 lib/urllib3/fields.py delete mode 100644 lib/urllib3/filepost.py delete mode 100644 lib/urllib3/packages/__init__.py delete mode 100644 lib/urllib3/packages/backports/__init__.py delete mode 100644 lib/urllib3/packages/backports/makefile.py delete mode 100644 lib/urllib3/packages/ordered_dict.py delete mode 100644 lib/urllib3/packages/six.py delete mode 100644 lib/urllib3/packages/ssl_match_hostname/__init__.py delete mode 100644 lib/urllib3/packages/ssl_match_hostname/_implementation.py delete mode 100644 lib/urllib3/poolmanager.py delete mode 100644 lib/urllib3/request.py delete mode 100644 lib/urllib3/response.py delete mode 100644 lib/urllib3/util/__init__.py delete mode 100644 lib/urllib3/util/connection.py delete mode 100644 lib/urllib3/util/request.py delete mode 100644 lib/urllib3/util/response.py delete mode 100644 lib/urllib3/util/retry.py delete mode 100644 lib/urllib3/util/selectors.py delete mode 100644 lib/urllib3/util/ssl_.py delete mode 100644 lib/urllib3/util/timeout.py delete mode 100644 lib/urllib3/util/url.py delete mode 100644 lib/urllib3/util/wait.py delete mode 100644 lib/websocket/__init__.py delete mode 100644 lib/websocket/_abnf.py delete mode 100644 lib/websocket/_app.py delete mode 100644 lib/websocket/_cookiejar.py delete mode 100644 lib/websocket/_core.py delete mode 100644 lib/websocket/_exceptions.py delete mode 100644 lib/websocket/_handshake.py delete mode 100644 lib/websocket/_http.py delete mode 100644 lib/websocket/_logging.py delete mode 100644 lib/websocket/_socket.py delete mode 100644 lib/websocket/_ssl_compat.py delete mode 100644 lib/websocket/_url.py delete mode 100644 lib/websocket/_utils.py delete mode 100644 lib/websocket/tests/__init__.py delete mode 100644 lib/websocket/tests/data/header01.txt delete mode 100644 lib/websocket/tests/data/header02.txt delete mode 100644 lib/websocket/tests/test_cookiejar.py delete mode 100644 lib/websocket/tests/test_websocket.py delete mode 100644 lib/xmltodict.py delete mode 100644 lib/zc/__init__.py delete mode 100644 lib/zc/lockfile/README.txt delete mode 100644 
lib/zc/lockfile/__init__.py delete mode 100644 lib/zc/lockfile/tests.py delete mode 100644 pylintrc delete mode 100644 snap/snapcraft.yaml delete mode 100755 start.bat delete mode 100755 start.sh diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 8f266283..00000000 --- a/.dockerignore +++ /dev/null @@ -1,11 +0,0 @@ -.git -.github -.gitignore -contrib -init-scripts -package -pylintrc -snap -*.md -!CHANGELOG*.md -start.bat diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml deleted file mode 100644 index 79319ae1..00000000 --- a/.github/FUNDING.yml +++ /dev/null @@ -1,3 +0,0 @@ -github: JonnyWong16 -patreon: Tautulli -custom: ["https://bit.ly/2InPp15", "https://bit.ly/2WTq83m"] \ No newline at end of file diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md deleted file mode 100644 index 603ba49a..00000000 --- a/.github/pull_request_template.md +++ /dev/null @@ -1,20 +0,0 @@ -## Description - -Please include a summary of the change and which issue is fixed. - -Fixes Tautulli/Tautulli-Issues#(issue) - -## Type of change - -Please delete options that are not relevant. - -- [ ] Bug fix (non-breaking change which fixes an issue) -- [ ] New feature (non-breaking change which adds functionality) -- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) - -## Checklist: - -- [ ] My code follows the style guidelines of this project -- [ ] I have performed a self-review of my own code -- [ ] I have commented my code, particularly in hard-to-understand areas -- [ ] I have added or updated the docstring for new or existing methods diff --git a/.github/workflows/publish-docker.yml b/.github/workflows/publish-docker.yml deleted file mode 100644 index 5ef71863..00000000 --- a/.github/workflows/publish-docker.yml +++ /dev/null @@ -1,115 +0,0 @@ -name: Publish Docker - -on: - push: - branches: [master, beta, nightly] - tags: [v*] - pull_request: ~ - -jobs: - build-docker: - name: Build Docker Image - runs-on: ubuntu-latest - steps: - - name: Checkout Code - uses: actions/checkout@v2 - - - name: Prepare - id: prepare - run: | - if [[ $GITHUB_REF == refs/tags/* ]]; then - echo ::set-output name=tag::${GITHUB_REF#refs/tags/} - elif [[ $GITHUB_REF == refs/heads/master ]]; then - echo ::set-output name=tag::latest - else - echo ::set-output name=tag::${GITHUB_REF#refs/heads/} - fi - if [[ $GITHUB_REF == refs/tags/*-beta ]]; then - echo ::set-output name=branch::beta - elif [[ $GITHUB_REF == refs/tags/* ]]; then - echo ::set-output name=branch::master - else - echo ::set-output name=branch::${GITHUB_REF#refs/heads/} - fi - echo ::set-output name=commit::${GITHUB_SHA} - echo ::set-output name=build_date::$(date -u +'%Y-%m-%dT%H:%M:%SZ') - echo ::set-output name=docker_platforms::linux/amd64,linux/arm64/v8,linux/arm/v7,linux/arm/v6 - echo ::set-output name=docker_image::${{ secrets.DOCKER_REPO }}/tautulli - - - name: Set Up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - id: buildx - with: - version: latest - - - name: Cache Docker Layers - uses: actions/cache@v2 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - - name: Login to DockerHub - uses: docker/login-action@v1 - if: success() && github.event_name != 'pull_request' - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Login to GitHub Container Registry - uses: 
docker/login-action@v1 - if: success() && github.event_name != 'pull_request' - with: - registry: ghcr.io - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.GHCR_TOKEN }} - - - name: Docker Build and Push - uses: docker/build-push-action@v2 - if: success() - with: - context: . - file: ./Dockerfile - push: ${{ github.event_name != 'pull_request' }} - platforms: ${{ steps.prepare.outputs.docker_platforms }} - build-args: | - TAG=${{ steps.prepare.outputs.tag }} - BRANCH=${{ steps.prepare.outputs.branch }} - COMMIT=${{ steps.prepare.outputs.commit }} - BUILD_DATE=${{ steps.prepare.outputs.build_date }} - tags: | - ${{ steps.prepare.outputs.docker_image }}:${{ steps.prepare.outputs.tag }} - ghcr.io/${{ steps.prepare.outputs.docker_image }}:${{ steps.prepare.outputs.tag }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - discord: - name: Discord Notification - needs: build-docker - if: always() && github.event_name != 'pull_request' - runs-on: ubuntu-latest - steps: - - name: Get Build Job Status - uses: technote-space/workflow-conclusion-action@v1 - - - name: Combine Job Status - id: status - run: | - failures=(neutral, skipped, timed_out, action_required) - if [[ ${array[@]} =~ $WORKFLOW_CONCLUSION ]]; then - echo ::set-output name=status::failure - else - echo ::set-output name=status::$WORKFLOW_CONCLUSION - fi - - - name: Post Status to Discord - uses: sarisia/actions-status-discord@v1 - with: - webhook: ${{ secrets.DISCORD_WEBHOOK }} - status: ${{ steps.status.outputs.status }} - title: ${{ github.workflow }} - nofail: true diff --git a/.github/workflows/publish-installers.yml b/.github/workflows/publish-installers.yml deleted file mode 100644 index d7140f18..00000000 --- a/.github/workflows/publish-installers.yml +++ /dev/null @@ -1,194 +0,0 @@ -name: Publish Installers - -on: - push: - branches: [master, beta, nightly] - tags: [v*] - pull_request: ~ - -jobs: - build-installer: - name: Build ${{ matrix.os_upper }} Installer - runs-on: ${{ matrix.os }}-latest - strategy: - fail-fast: false - matrix: - include: - - os: 'windows' - os_upper: 'Windows' - ext: 'exe' - - os: 'macos' - os_upper: 'MacOS' - ext: 'pkg' - - steps: - - name: Checkout Code - uses: actions/checkout@v2 - - - name: Set Release Version - id: get_version - shell: bash - run: | - if [[ $GITHUB_REF == refs/tags/* ]]; then - echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV - VERSION_NSIS=${GITHUB_REF#refs/tags/v}.1 - echo ::set-output name=VERSION_NSIS::${VERSION_NSIS/%-beta.1/.0} - echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/v} - echo ::set-output name=RELEASE_VERSION::${GITHUB_REF#refs/tags/} - else - echo "VERSION=0.0.0" >> $GITHUB_ENV - echo ::set-output name=VERSION_NSIS::0.0.0.0 - echo ::set-output name=VERSION::0.0.0 - echo ::set-output name=RELEASE_VERSION::${GITHUB_SHA::7} - fi - if [[ $GITHUB_REF == refs/tags/*-beta ]]; then - echo "beta" > branch.txt - elif [[ $GITHUB_REF == refs/tags/* ]]; then - echo "master" > branch.txt - else - echo ${GITHUB_REF#refs/heads/} > branch.txt - fi - echo $GITHUB_SHA > version.txt - - - name: Set Up Python - uses: actions/setup-python@v2 - with: - python-version: 3.8 - - - name: Cache Dependencies - uses: actions/cache@v2 - with: - path: ~\AppData\Local\pip\Cache - key: ${{ runner.os }}-pip-${{ hashFiles(format('package/requirements-{0}.txt', matrix.os)) }} - restore-keys: ${{ runner.os }}-pip- - - - name: Install Dependencies - run: | - python -m pip install --upgrade pip - pip install -r 
package/requirements-${{ matrix.os }}.txt - - - name: Build Package - run: | - pyinstaller -y ./package/Tautulli-${{ matrix.os }}.spec - - - name: Move Windows Updater Files - if: matrix.os == 'windows' - run: | - Move-Item dist\updater\* dist\Tautulli\ -Force - - - name: Create Windows Installer - uses: joncloud/makensis-action@v3.4 - if: matrix.os == 'windows' - with: - script-file: ./package/Tautulli.nsi - arguments: > - /DVERSION=${{ steps.get_version.outputs.VERSION_NSIS }} - /DINSTALLER_NAME=..\Tautulli-windows-${{ steps.get_version.outputs.RELEASE_VERSION }}-x64.exe - additional-plugin-paths: package/nsis-plugins - - - name: Create MacOS Installer - if: matrix.os == 'macos' - run: | - sudo pkgbuild \ - --install-location /Applications \ - --version ${{ steps.get_version.outputs.VERSION }} \ - --component ./dist/Tautulli.app \ - --scripts ./package/macos-scripts \ - Tautulli-macos-${{ steps.get_version.outputs.RELEASE_VERSION }}-x64.pkg - - - name: Upload Installer - uses: actions/upload-artifact@v2 - with: - name: Tautulli-${{ matrix.os }}-installer - path: Tautulli-${{ matrix.os }}-${{ steps.get_version.outputs.RELEASE_VERSION }}-x64.${{ matrix.ext }} - - release: - name: Release Installers - needs: build-installer - if: always() && startsWith(github.ref, 'refs/tags/') && github.event_name != 'pull_request' - runs-on: ubuntu-latest - steps: - - name: Get Build Job Status - uses: technote-space/workflow-conclusion-action@v1 - - - name: Checkout Code - uses: actions/checkout@v2 - - - name: Set Release Version - id: get_version - run: | - echo ::set-output name=RELEASE_VERSION::${GITHUB_REF#refs/tags/} - - - name: Download Installers - if: env.WORKFLOW_CONCLUSION == 'success' - uses: actions/download-artifact@v2 - - - name: Get Changelog - id: get_changelog - run: | - echo ::set-output name=CHANGELOG::"$( sed -n '/^## /{p; :loop n; p; /^## /q; b loop}' CHANGELOG.md \ - | sed '$d' | sed '$d' | sed '$d' | sed ':a;N;$!ba;s/\n/%0A/g' )" - - - name: Create Release - uses: actions/create-release@v1 - id: create_release - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ steps.get_version.outputs.RELEASE_VERSION }} - release_name: Tautulli ${{ steps.get_version.outputs.RELEASE_VERSION }} - body: | - ## Changelog - - ##${{ steps.get_changelog.outputs.CHANGELOG }} - draft: false - prerelease: ${{ endsWith(steps.get_version.outputs.RELEASE_VERSION, '-beta') }} - - - name: Upload Windows Installer - uses: actions/upload-release-asset@v1 - if: env.WORKFLOW_CONCLUSION == 'success' - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: Tautulli-windows-installer/Tautulli-windows-${{ steps.get_version.outputs.RELEASE_VERSION }}-x64.exe - asset_name: Tautulli-windows-${{ steps.get_version.outputs.RELEASE_VERSION }}-x64.exe - asset_content_type: application/vnd.microsoft.portable-executable - - - name: Upload MacOS Installer - uses: actions/upload-release-asset@v1 - if: env.WORKFLOW_CONCLUSION == 'success' - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: Tautulli-macos-installer/Tautulli-macos-${{ steps.get_version.outputs.RELEASE_VERSION }}-x64.pkg - asset_name: Tautulli-macos-${{ steps.get_version.outputs.RELEASE_VERSION }}-x64.pkg - asset_content_type: application/vnd.apple.installer+xml - - discord: - name: Discord Notification - needs: [build-installer, release] - if: always() && github.event_name != 
'pull_request' - runs-on: ubuntu-latest - steps: - - name: Get Build Job Status - uses: technote-space/workflow-conclusion-action@v1 - - - name: Combine Job Status - id: status - run: | - failures=(neutral, skipped, timed_out, action_required) - if [[ ${array[@]} =~ $WORKFLOW_CONCLUSION ]]; then - echo ::set-output name=status::failure - else - echo ::set-output name=status::$WORKFLOW_CONCLUSION - fi - - - name: Post Status to Discord - uses: sarisia/actions-status-discord@v1 - with: - webhook: ${{ secrets.DISCORD_WEBHOOK }} - status: ${{ steps.status.outputs.status }} - title: ${{ github.workflow }} - nofail: true diff --git a/.github/workflows/publish-snap.yml b/.github/workflows/publish-snap.yml deleted file mode 100644 index 482e8e39..00000000 --- a/.github/workflows/publish-snap.yml +++ /dev/null @@ -1,94 +0,0 @@ -name: Publish Snap - -on: - push: - branches: [master, beta, nightly] - tags: [v*] - pull_request: ~ - -jobs: - build-snap: - name: Build Snap Package (${{ matrix.architecture }}) - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - architecture: - - i386 - - amd64 - - arm64 - - armhf - - ppc64el - #- s390x # broken at the moment - steps: - - name: Checkout Code - uses: actions/checkout@v2 - - - name: Prepare - id: prepare - run: | - git fetch --prune --unshallow --tags - if [[ $GITHUB_REF == refs/tags/*-beta || $GITHUB_REF == refs/heads/beta ]]; then - echo ::set-output name=RELEASE::beta - elif [[ $GITHUB_REF == refs/tags/* || $GITHUB_REF == refs/heads/master ]]; then - echo ::set-output name=RELEASE::stable - else - echo ::set-output name=RELEASE::edge - fi - - - name: Set Up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Build Snap Package - uses: diddlesnaps/snapcraft-multiarch-action@v1 - id: build - with: - architecture: ${{ matrix.architecture }} - - - name: Upload Snap Package - uses: actions/upload-artifact@v2 - with: - name: Tautulli-snap-package-${{ matrix.architecture }} - path: ${{ steps.build.outputs.snap }} - - - name: Review Snap Package - uses: diddlesnaps/snapcraft-review-tools-action@v1 - with: - snap: ${{ steps.build.outputs.snap }} - - - name: Publish Snap Package - uses: snapcore/action-publish@v1 - if: > - github.event_name != 'pull_request' && - (startsWith(github.ref, 'refs/tags/') || github.ref == 'refs/heads/nightly') - with: - store_login: ${{ secrets.SNAP_LOGIN }} - snap: ${{ steps.build.outputs.snap }} - release: ${{ steps.prepare.outputs.RELEASE }} - - discord: - name: Discord Notification - needs: build-snap - if: always() && github.event_name != 'pull_request' - runs-on: ubuntu-latest - steps: - - name: Get Build Job Status - uses: technote-space/workflow-conclusion-action@v1 - - - name: Combine Job Status - id: status - run: | - failures=(neutral, skipped, timed_out, action_required) - if [[ ${array[@]} =~ $WORKFLOW_CONCLUSION ]]; then - echo ::set-output name=status::failure - else - echo ::set-output name=status::$WORKFLOW_CONCLUSION - fi - - - name: Post Status to Discord - uses: sarisia/actions-status-discord@v1 - with: - webhook: ${{ secrets.DISCORD_WEBHOOK }} - status: ${{ steps.status.outputs.status }} - title: ${{ github.workflow }} - nofail: true diff --git a/.github/workflows/pull-requests.yml b/.github/workflows/pull-requests.yml deleted file mode 100644 index b4a13f3e..00000000 --- a/.github/workflows/pull-requests.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Pull Requests - -on: - pull_request_target: - types: [opened, synchronize, edited, reopened] - -jobs: - check-branch: - name: Check Pull Request - 
runs-on: ubuntu-latest - steps: - - name: Checkout Code - uses: actions/checkout@v2 - - - name: Comment on Pull Request - uses: mshick/add-pr-comment@v1 - if: github.base_ref != 'nightly' - with: - message: Pull requests must be made to the `nightly` branch. Thanks. - repo-token: ${{ secrets.GITHUB_TOKEN }} - repo-token-user-login: 'github-actions[bot]' - - - name: Fail Workflow - if: github.base_ref != 'nightly' - run: | - echo Base: ${{ github.base_ref }} - echo Head: ${{ github.head_ref }} - exit 1 diff --git a/.gitignore b/.gitignore index 25777166..3c45bb08 100644 --- a/.gitignore +++ b/.gitignore @@ -1,92 +1,275 @@ -# Compiled source # -################### -__pycache__ -*.pyc -*.py~ -*.pyproj -*.sln +# Created by https://www.toptal.com/developers/gitignore/api/pycharm+all,python,linux,windows +# Edit at https://www.toptal.com/developers/gitignore?templates=pycharm+all,python,linux,windows -# PlexPy files # -###################### +### Linux ### +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### PyCharm+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm+all Patch ### +# Ignores the whole .idea folder and all .iml files +# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 + +.idea/ + +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + +# Sonarlint plugin +.idea/sonarlint + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +pytestdebug.log + +# Translations +*.mo +*.pot + +# Django stuff: *.log -*.db* -*.db-journal -*.ini -release.lock -version.lock -logs/* -backups/* -cache/* -exports/* -newsletters/* -*.mmdb -version.txt -branch.txt +local_settings.py +db.sqlite3 +db.sqlite3-journal -# HTTPS Cert/Key # -################## -/*.crt -/*.key -/*.csr -/*.pem +# Flask stuff: +instance/ +.webassets-cache -# Mergetool -*.orgin +# Scrapy stuff: +.scrapy -# OS generated files # -###################### -.DS_Store? -.DS_Store -ehthumbs.db -Icon? +# Sphinx documentation +docs/_build/ +doc/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +pythonenv* + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# profiling data +.prof + +### Windows ### +# Windows thumbnail cache files Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db -#Ignore files generated by PyCharm -*.idea/* +# Dump file +*.stackdump -#Ignore files generated by vi -*.swp +# Folder config file +[Dd]esktop.ini -#Ignore files build by Visual Studio -*.obj -*.exe -*.pdb -*.user -*.aps -*.pch -*.vspscc -*_i.c -*_p.c -*.ncb -*.suo -*.tlb -*.tlh -*.bak -*.cache -*.ilk -[Bb]in -[Dd]ebug*/ -*.lib -*.sbr -obj/ -[Rr]elease*/ -_ReSharper*/ -[Tt]est[Rr]esult* -/cache -/logs -.project -.pydevproject +# Recycle Bin used on file shares +$RECYCLE.BIN/ -#Ignore files generated by pyinstaller -/build -/dist +# Windows Installer files +*.cab +*.msi +*.msix +*.msm +*.msp -#snapcraft specifics -/parts/ -/stage/ -/prime/ -*.snap -.snapcraft -*_source.tar.bz2 -snap/.snapcraft \ No newline at end of file +# Windows shortcuts +*.lnk + +# End of https://www.toptal.com/developers/gitignore/api/pycharm+all,python,linux,windows diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index f82e7551..00000000 --- a/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM tautulli/tautulli-baseimage:python3 - -LABEL maintainer="Tautulli" - -ARG BRANCH -ARG COMMIT - -ENV TAUTULLI_DOCKER=True -ENV TZ=UTC - -WORKDIR /app - -RUN \ - groupadd -g 1000 tautulli && \ - useradd -u 1000 -g 1000 tautulli && \ - echo ${BRANCH} > /app/branch.txt && \ - echo ${COMMIT} > /app/version.txt - -COPY . 
/app - -CMD [ "python", "Tautulli.py", "--datadir", "/config" ] -ENTRYPOINT [ "./start.sh" ] - -VOLUME /config -EXPOSE 8181 -HEALTHCHECK --start-period=90s CMD curl -ILfSs http://localhost:8181/status > /dev/null || curl -ILfkSs https://localhost:8181/status > /dev/null || exit 1 diff --git a/Tautulli.py b/JellyPy.py similarity index 59% rename from Tautulli.py rename to JellyPy.py index 069f771b..d6687532 100755 --- a/Tautulli.py +++ b/JellyPy.py @@ -20,11 +20,6 @@ import os import sys -# Ensure lib added to path, before any other imports -sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib')) - -from future.builtins import str - import appdirs import argparse import datetime @@ -36,16 +31,16 @@ import time import threading import tzlocal -import plexpy -from plexpy import common, config, database, helpers, logger, webstart +import jellypy +from jellypy import common, config, database, helpers, logger, webstart if common.PLATFORM == 'Windows': - from plexpy import windows + from jellypy import windows elif common.PLATFORM == 'Darwin': - from plexpy import macos + from jellypy import macos # Register signals, such as CTRL + C -signal.signal(signal.SIGINT, plexpy.sig_handler) -signal.signal(signal.SIGTERM, plexpy.sig_handler) +signal.signal(signal.SIGINT, jellypy.sig_handler) +signal.signal(signal.SIGTERM, jellypy.sig_handler) def main(): @@ -56,28 +51,28 @@ def main(): # Fixed paths to Tautulli if hasattr(sys, 'frozen') and hasattr(sys, '_MEIPASS'): - plexpy.FROZEN = True - plexpy.FULL_PATH = os.path.abspath(sys.executable) - plexpy.PROG_DIR = sys._MEIPASS + jellypy.FROZEN = True + jellypy.FULL_PATH = os.path.abspath(sys.executable) + jellypy.PROG_DIR = sys._MEIPASS else: - plexpy.FULL_PATH = os.path.abspath(__file__) - plexpy.PROG_DIR = os.path.dirname(plexpy.FULL_PATH) + jellypy.FULL_PATH = os.path.abspath(__file__) + jellypy.PROG_DIR = os.path.dirname(jellypy.FULL_PATH) - plexpy.ARGS = sys.argv[1:] + jellypy.ARGS = sys.argv[1:] # From sickbeard - plexpy.SYS_PLATFORM = sys.platform - plexpy.SYS_ENCODING = None + jellypy.SYS_PLATFORM = sys.platform + jellypy.SYS_ENCODING = None try: locale.setlocale(locale.LC_ALL, "") - plexpy.SYS_LANGUAGE, plexpy.SYS_ENCODING = locale.getdefaultlocale() + jellypy.SYS_LANGUAGE, jellypy.SYS_ENCODING = locale.getdefaultlocale() except (locale.Error, IOError): pass # for OSes that are poorly configured I'll just force UTF-8 - if not plexpy.SYS_ENCODING or plexpy.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'): - plexpy.SYS_ENCODING = 'UTF-8' + if not jellypy.SYS_ENCODING or jellypy.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'): + jellypy.SYS_ENCODING = 'UTF-8' # Set up and gather command line arguments parser = argparse.ArgumentParser( @@ -107,50 +102,50 @@ def main(): args = parser.parse_args() if args.verbose: - plexpy.VERBOSE = True + jellypy.VERBOSE = True if args.quiet: - plexpy.QUIET = True + jellypy.QUIET = True # Do an initial setup of the logger.
# Require verbose for pre-initialization to see critical errors - logger.initLogger(console=not plexpy.QUIET, log_dir=False, verbose=True) + logger.initLogger(console=not jellypy.QUIET, log_dir=False, verbose=True) try: - plexpy.SYS_TIMEZONE = tzlocal.get_localzone() + jellypy.SYS_TIMEZONE = tzlocal.get_localzone() except (pytz.UnknownTimeZoneError, LookupError, ValueError) as e: logger.error("Could not determine system timezone: %s" % e) - plexpy.SYS_TIMEZONE = pytz.UTC + jellypy.SYS_TIMEZONE = pytz.UTC - plexpy.SYS_UTC_OFFSET = datetime.datetime.now(plexpy.SYS_TIMEZONE).strftime('%z') + jellypy.SYS_UTC_OFFSET = datetime.datetime.now(jellypy.SYS_TIMEZONE).strftime('%z') if helpers.bool_true(os.getenv('TAUTULLI_DOCKER', False)): - plexpy.DOCKER = True + jellypy.DOCKER = True if helpers.bool_true(os.getenv('TAUTULLI_SNAP', False)): - plexpy.SNAP = True + jellypy.SNAP = True if args.dev: - plexpy.DEV = True + jellypy.DEV = True logger.debug("Tautulli is running in the dev environment.") if args.daemon: if sys.platform == 'win32': logger.warn("Daemonizing not supported under Windows, starting normally") else: - plexpy.DAEMON = True - plexpy.QUIET = True + jellypy.DAEMON = True + jellypy.QUIET = True if args.nofork: - plexpy.NOFORK = True + jellypy.NOFORK = True logger.info("Tautulli is running as a service, it will not fork when restarted.") if args.pidfile: - plexpy.PIDFILE = str(args.pidfile) + jellypy.PIDFILE = str(args.pidfile) - # If the pidfile already exists, plexpy may still be running, so + # If the pidfile already exists, jellypy may still be running, so # exit - if os.path.exists(plexpy.PIDFILE): + if os.path.exists(jellypy.PIDFILE): try: - with open(plexpy.PIDFILE, 'r') as fp: + with open(jellypy.PIDFILE, 'r') as fp: pid = int(fp.read()) except IOError as e: raise SystemExit("Unable to read PID file: %s", e) @@ -160,20 +155,20 @@ def main(): except OSError: logger.warn("PID file '%s' already exists, but PID %d is " "not running. Ignoring PID file." % - (plexpy.PIDFILE, pid)) + (jellypy.PIDFILE, pid)) else: - # The pidfile exists and points to a live PID. plexpy may + # The pidfile exists and points to a live PID. jellypy may # still be running, so exit. raise SystemExit("PID file '%s' already exists. Exiting."
% - plexpy.PIDFILE) + jellypy.PIDFILE) # The pidfile is only useful in daemon mode, make sure we can write the # file properly - if plexpy.DAEMON: - plexpy.CREATEPID = True + if jellypy.DAEMON: + jellypy.CREATEPID = True try: - with open(plexpy.PIDFILE, 'w') as fp: + with open(jellypy.PIDFILE, 'w') as fp: fp.write("pid\n") except IOError as e: raise SystemExit("Unable to write PID file: %s", e) @@ -183,107 +178,107 @@ def main(): # Determine which data directory and config file to use if args.datadir: - plexpy.DATA_DIR = args.datadir - elif plexpy.FROZEN: - plexpy.DATA_DIR = appdirs.user_data_dir("Tautulli", False) + jellypy.DATA_DIR = args.datadir + elif jellypy.FROZEN: + jellypy.DATA_DIR = appdirs.user_data_dir("Tautulli", False) else: - plexpy.DATA_DIR = plexpy.PROG_DIR + jellypy.DATA_DIR = jellypy.PROG_DIR # Migrate Snap data dir - if plexpy.SNAP: + if jellypy.SNAP: snap_common = os.environ['SNAP_COMMON'] old_data_dir = os.path.join(snap_common, 'Tautulli') if os.path.exists(old_data_dir) and os.listdir(old_data_dir): - plexpy.SNAP_MIGRATE = True + jellypy.SNAP_MIGRATE = True logger.info("Migrating Snap user data.") - shutil.move(old_data_dir, plexpy.DATA_DIR) + shutil.move(old_data_dir, jellypy.DATA_DIR) if args.config: config_file = args.config else: - config_file = os.path.join(plexpy.DATA_DIR, config.FILENAME) + config_file = os.path.join(jellypy.DATA_DIR, config.FILENAME) # Try to create the DATA_DIR if it doesn't exist - if not os.path.exists(plexpy.DATA_DIR): + if not os.path.exists(jellypy.DATA_DIR): try: - os.makedirs(plexpy.DATA_DIR) + os.makedirs(jellypy.DATA_DIR) except OSError: raise SystemExit( - 'Could not create data directory: ' + plexpy.DATA_DIR + '. Exiting....') + 'Could not create data directory: ' + jellypy.DATA_DIR + '. Exiting....') # Make sure the DATA_DIR is writeable - if not os.access(plexpy.DATA_DIR, os.W_OK): + if not os.access(jellypy.DATA_DIR, os.W_OK): raise SystemExit( - 'Cannot write to the data directory: ' + plexpy.DATA_DIR + '. Exiting...') + 'Cannot write to the data directory: ' + jellypy.DATA_DIR + '. 
Exiting...') # Put the database in the DATA_DIR - plexpy.DB_FILE = os.path.join(plexpy.DATA_DIR, database.FILENAME) + jellypy.DB_FILE = os.path.join(jellypy.DATA_DIR, database.FILENAME) - # Move 'jellypy.db' to 'tautulli.db' - if os.path.isfile(os.path.join(jellypy.DATA_DIR, 'jellypy.db')) and \ - not os.path.isfile(os.path.join(jellypy.DATA_DIR, jellypy.DB_FILE)): + # Move 'jellypy.db' to 'tautulli.db' + if os.path.isfile(os.path.join(jellypy.DATA_DIR, 'jellypy.db')) and \ + not os.path.isfile(os.path.join(jellypy.DATA_DIR, jellypy.DB_FILE)): try: - os.rename(os.path.join(jellypy.DATA_DIR, 'jellypy.db'), jellypy.DB_FILE) + os.rename(os.path.join(jellypy.DATA_DIR, 'jellypy.db'), jellypy.DB_FILE) except OSError as e: - raise SystemExit("Unable to rename plexpy.db to tautulli.db: %s", e) + raise SystemExit("Unable to rename jellypy.db to tautulli.db: %s", e) - if plexpy.DAEMON: - plexpy.daemonize() + if jellypy.DAEMON: + jellypy.daemonize() # Read config and start logging - plexpy.initialize(config_file) + jellypy.initialize(config_file) # Start the background threads - plexpy.start() + jellypy.start() # Force the http port if necessary if args.port: - plexpy.HTTP_PORT = args.port - logger.info('Using forced web server port: %i', plexpy.HTTP_PORT) + jellypy.HTTP_PORT = args.port + logger.info('Using forced web server port: %i', jellypy.HTTP_PORT) else: - plexpy.HTTP_PORT = int(plexpy.CONFIG.HTTP_PORT) + jellypy.HTTP_PORT = int(jellypy.CONFIG.HTTP_PORT) # Check if pyOpenSSL is installed. It is required for certificate generation # and for CherryPy. - if plexpy.CONFIG.ENABLE_HTTPS: + if jellypy.CONFIG.ENABLE_HTTPS: try: import OpenSSL except ImportError: logger.warn("The pyOpenSSL module is missing. Install this " "module to enable HTTPS. HTTPS will be disabled.") - plexpy.CONFIG.ENABLE_HTTPS = False + jellypy.CONFIG.ENABLE_HTTPS = False # Try to start the server. Will exit here if the address is already in use. webstart.start() if common.PLATFORM == 'Windows': - if plexpy.CONFIG.SYS_TRAY_ICON: - plexpy.WIN_SYS_TRAY_ICON = windows.WindowsSystemTray() - plexpy.WIN_SYS_TRAY_ICON.start() + if jellypy.CONFIG.SYS_TRAY_ICON: + jellypy.WIN_SYS_TRAY_ICON = windows.WindowsSystemTray() + jellypy.WIN_SYS_TRAY_ICON.start() windows.set_startup() elif common.PLATFORM == 'Darwin': macos.set_startup() # Open webbrowser - if plexpy.CONFIG.LAUNCH_BROWSER and not args.nolaunch and not plexpy.DEV: - plexpy.launch_browser(plexpy.CONFIG.HTTP_HOST, plexpy.HTTP_PORT, - plexpy.HTTP_ROOT) + if jellypy.CONFIG.LAUNCH_BROWSER and not args.nolaunch and not jellypy.DEV: + jellypy.launch_browser(jellypy.CONFIG.HTTP_HOST, jellypy.HTTP_PORT, + jellypy.HTTP_ROOT) - if common.PLATFORM == 'Darwin' and plexpy.CONFIG.SYS_TRAY_ICON: + if common.PLATFORM == 'Darwin' and jellypy.CONFIG.SYS_TRAY_ICON: if not macos.HAS_PYOBJC: logger.warn("The pyobjc module is missing.
Install this " "module to enable the MacOS menu bar icon.") - plexpy.CONFIG.SYS_TRAY_ICON = False + jellypy.CONFIG.SYS_TRAY_ICON = False - if plexpy.CONFIG.SYS_TRAY_ICON: + if jellypy.CONFIG.SYS_TRAY_ICON: # MacOS menu bar icon must be run on the main thread and is blocking # Start the rest of Tautulli on a new thread thread = threading.Thread(target=wait) thread.daemon = True thread.start() - plexpy.MAC_SYS_TRAY_ICON = macos.MacOSSystemTray() - plexpy.MAC_SYS_TRAY_ICON.start() + jellypy.MAC_SYS_TRAY_ICON = macos.MacOSSystemTray() + jellypy.MAC_SYS_TRAY_ICON.start() else: wait() else: @@ -295,29 +290,29 @@ def wait(): # Wait endlessly for a signal to happen while True: - if not plexpy.SIGNAL: + if not jellypy.SIGNAL: try: time.sleep(1) except KeyboardInterrupt: - plexpy.SIGNAL = 'shutdown' + jellypy.SIGNAL = 'shutdown' else: - logger.info('Received signal: %s', plexpy.SIGNAL) + logger.info('Received signal: %s', jellypy.SIGNAL) - if plexpy.SIGNAL == 'shutdown': - plexpy.shutdown() - elif plexpy.SIGNAL == 'restart': - plexpy.shutdown(restart=True) - elif plexpy.SIGNAL == 'checkout': - plexpy.shutdown(restart=True, checkout=True) - elif plexpy.SIGNAL == 'reset': - plexpy.shutdown(restart=True, reset=True) - elif plexpy.SIGNAL == 'update': - plexpy.shutdown(restart=True, update=True) + if jellypy.SIGNAL == 'shutdown': + jellypy.shutdown() + elif jellypy.SIGNAL == 'restart': + jellypy.shutdown(restart=True) + elif jellypy.SIGNAL == 'checkout': + jellypy.shutdown(restart=True, checkout=True) + elif jellypy.SIGNAL == 'reset': + jellypy.shutdown(restart=True, reset=True) + elif jellypy.SIGNAL == 'update': + jellypy.shutdown(restart=True, update=True) else: logger.error('Unknown signal. Shutting down...') - plexpy.shutdown() + jellypy.shutdown() - plexpy.SIGNAL = None + jellypy.SIGNAL = None if __name__ == "__main__": diff --git a/PlexPy.py b/PlexPy.py deleted file mode 100755 index eb9ceea4..00000000 --- a/PlexPy.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python - -# -*- coding: utf-8 -*- - -# This file is part of Tautulli. - # - # Tautulli is free software: you can redistribute it and/or modify - # it under the terms of the GNU General Public License as published by - # the Free Software Foundation, either version 3 of the License, or - # (at your option) any later version. - # - # Tautulli is distributed in the hope that it will be useful, - # but WITHOUT ANY WARRANTY; without even the implied warranty of - # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - # GNU General Public License for more details. - # - # You should have received a copy of the GNU General Public License - # along with Tautulli. If not, see <http://www.gnu.org/licenses/>. - -from Tautulli import main - -# Call main() from Tautulli.py -if __name__ == "__main__": - main() diff --git a/README.md b/README.md index 93175ddf..39b2643d 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ -# Tautulli +# JellyPy -A python based web application for monitoring, analytics and notifications for [Plex Media Server](https://plex.tv). +A Python-based web application for monitoring, analytics and notifications for [Jellyfin](https://jellyfin.org/). -This project is based on code from [Headphones](https://github.com/rembo10/headphones) and [PlexWatchWeb](https://github.com/ecleese/plexWatchWeb). +This project is based on [Tautulli](https://github.com/Tautulli/Tautulli). ## Features @@ -63,4 +63,6 @@ This project is based on code from [Headphones](https://github.com/rembo10/headp This is free software under the GPL v3 open source license.
Feel free to do with it what you wish, but any modification must be open sourced. A copy of the license is included. -This software includes Highsoft software libraries which you may freely distribute for non-commercial use. Commerical users must licence this software, for more information visit https://shop.highsoft.com/faq/non-commercial#non-commercial-redistribution. \ No newline at end of file +This software includes Highsoft software libraries which you may freely distribute for non-commercial use. Commercial users must license this software; for more information visit https://shop.highsoft.com/faq/non-commercial#non-commercial-redistribution. + +[Tautulli]: https://github.com/Tautulli/Tautulli \ No newline at end of file diff --git a/data/interfaces/default/settings.html b/data/interfaces/default/settings.html index 2fb6cbb6..bb1339fe 100644 --- a/data/interfaces/default/settings.html +++ b/data/interfaces/default/settings.html @@ -2024,7 +2024,7 @@ Rating: {rating}/10 --> Rating: /10 cache: false, async: true, complete: function(xhr, status) { - $("#plexpy-configuration-table").html(xhr.responseText); + $("#jellypy-configuration-table").html(xhr.responseText); } }); } @@ -2035,7 +2035,7 @@ Rating: {rating}/10 --> Rating: /10 cache: false, async: true, complete: function(xhr, status) { - $("#plexpy-scheduler-table").html(xhr.responseText); + $("#jellypy-scheduler-table").html(xhr.responseText); } }); } @@ -2046,7 +2046,7 @@ Rating: {rating}/10 --> Rating: /10 cache: false, async: true, complete: function(xhr, status) { - $("#plexpy-notifiers-table").html(xhr.responseText); + $("#jellypy-notifiers-table").html(xhr.responseText); } }); } @@ -2071,7 +2071,7 @@ Rating: {rating}/10 --> Rating: /10 cache: false, async: true, complete: function(xhr, status) { - $("#plexpy-newsletters-table").html(xhr.responseText); + $("#jellypy-newsletters-table").html(xhr.responseText); } }); } @@ -2096,7 +2096,7 @@ Rating: {rating}/10 --> Rating: /10 cache: false, async: true, complete: function(xhr, status) { - $("#plexpy-mobile-devices-table").html(xhr.responseText); + $("#jellypy-mobile-devices-table").html(xhr.responseText); } }); } diff --git a/plexpy/__init__.py b/jellypy/__init__.py similarity index 97% rename from plexpy/__init__.py rename to jellypy/__init__.py index 139d656a..dd912be8 100644 --- a/plexpy/__init__.py +++ b/jellypy/__init__.py @@ -13,73 +13,48 @@ # You should have received a copy of the GNU General Public License # along with Tautulli. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import unicode_literals -from future.builtins import range - import datetime import os -import future.moves.queue as queue import sqlite3 -import sys import subprocess +import sys import threading import uuid +import future.moves.queue as queue +from future.builtins import range + # Some cut down versions of Python may not include this module and it's not critical for us try: import webbrowser + no_browser = False except ImportError: no_browser = True from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.interval import IntervalTrigger -from UniversalAnalytics import Tracker import pytz -PYTHON2 = sys.version_info[0] == 2 - -if PYTHON2: - import activity_handler - import activity_pinger - import common - import database - import datafactory - import exporter - import libraries - import logger - import mobile_app - import newsletters - import newsletter_handler - import notification_handler - import notifiers - import plextv - import users - import versioncheck - import web_socket - import webstart - import config -else: - from plexpy import activity_handler - from plexpy import activity_pinger - from plexpy import common - from plexpy import database - from plexpy import datafactory - from plexpy import exporter - from plexpy import libraries - from plexpy import logger - from plexpy import mobile_app - from plexpy import newsletters - from plexpy import newsletter_handler - from plexpy import notification_handler - from plexpy import notifiers - from plexpy import plextv - from plexpy import users - from plexpy import versioncheck - from plexpy import web_socket - from plexpy import webstart - from plexpy import config - +from jellypy import activity_handler +from jellypy import activity_pinger +from jellypy import common +from jellypy import database +from jellypy import datafactory +from jellypy import exporter +from jellypy import libraries +from jellypy import logger +from jellypy import mobile_app +from jellypy import newsletters +from jellypy import newsletter_handler +from jellypy import notification_handler +from jellypy import notifiers +from jellypy import plextv +from jellypy import users +from jellypy import versioncheck +from jellypy import web_socket +from jellypy import webstart +from jellypy import config PROG_DIR = None FULL_PATH = None @@ -502,7 +477,7 @@ def initialize_scheduler(): def schedule_job(func, name, hours=0, minutes=0, seconds=0, args=None): """ - Start scheduled job if starting or restarting plexpy. + Start scheduled job if starting or restarting jellypy. Reschedule job if Interval Settings have changed. 
Remove job if Interval Settings changed to 0 @@ -2388,47 +2363,6 @@ def generate_uuid(): return uuid.uuid4().hex -def initialize_tracker(): - data = { - 'dataSource': 'server', - 'appName': common.PRODUCT, - 'appVersion': common.RELEASE, - 'appId': INSTALL_TYPE, - 'appInstallerId': CONFIG.GIT_BRANCH, - 'dimension1': '{} {}'.format(common.PLATFORM, common.PLATFORM_RELEASE), # App Platform - 'dimension2': common.PLATFORM_LINUX_DISTRO, # Linux Distro - 'dimension3': common.PYTHON_VERSION, - 'userLanguage': SYS_LANGUAGE, - 'documentEncoding': SYS_ENCODING, - 'noninteractive': True - } - - tracker = Tracker.create('UA-111522699-2', client_id=CONFIG.PMS_UUID, hash_client_id=True, - user_agent=common.USER_AGENT) - tracker.set(data) - - return tracker - - -def analytics_event(category, action, label=None, value=None, **kwargs): - data = {'category': category, 'action': action} - - if label is not None: - data['label'] = label - - if value is not None: - data['value'] = value - - if kwargs: - data.update(kwargs) - - if TRACKER: - try: - TRACKER.send('event', data) - except Exception as e: - logger.warn("Failed to send analytics event for category '%s', action '%s': %s" % (category, action, e)) - - def check_folder_writable(folder, fallback, name): if not folder: folder = fallback @@ -2461,7 +2395,7 @@ def get_tautulli_info(): 'tautulli_version': common.RELEASE, 'tautulli_branch': CONFIG.GIT_BRANCH, 'tautulli_commit': CURRENT_VERSION, - 'tautulli_platform':common.PLATFORM, + 'tautulli_platform': common.PLATFORM, 'tautulli_platform_release': common.PLATFORM_RELEASE, 'tautulli_platform_version': common.PLATFORM_VERSION, 'tautulli_platform_linux_distro': common.PLATFORM_LINUX_DISTRO, diff --git a/plexpy/activity_handler.py b/jellypy/activity_handler.py similarity index 89% rename from plexpy/activity_handler.py rename to jellypy/activity_handler.py index 12861f57..19306958 100644 --- a/plexpy/activity_handler.py +++ b/jellypy/activity_handler.py @@ -24,8 +24,8 @@ import time from apscheduler.triggers.date import DateTrigger import pytz -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import activity_processor import datafactory import helpers @@ -33,12 +33,12 @@ if plexpy.PYTHON2: import notification_handler import pmsconnect else: - from plexpy import activity_processor - from plexpy import datafactory - from plexpy import helpers - from plexpy import logger - from plexpy import notification_handler - from plexpy import pmsconnect + from jellypy import activity_processor + from jellypy import datafactory + from jellypy import helpers + from jellypy import logger + from jellypy import notification_handler + from jellypy import pmsconnect ACTIVITY_SCHED = None @@ -134,7 +134,7 @@ class ActivityHandler(object): str(session['rating_key']), session['full_title'], '[Live TV]' if session['live'] else '')) # Send notification after updating db - #plexpy.NOTIFY_QUEUE.put({'stream_data': session.copy(), 'notify_action': 'on_play'}) + #jellypy.NOTIFY_QUEUE.put({'stream_data': session.copy(), 'notify_action': 'on_play'}) # Write the new session to our temp session table self.update_db_session(session=session, notify=True) @@ -162,7 +162,7 @@ class ActivityHandler(object): # Retrieve the session data from our temp table db_session = ap.get_session_by_key(session_key=self.get_session_key()) - plexpy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notify_action': 'on_stop'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notify_action': 'on_stop'}) # Write it to the
history table monitor_proc = activity_processor.ActivityProcessor() @@ -198,7 +198,7 @@ class ActivityHandler(object): db_session = ap.get_session_by_key(session_key=self.get_session_key()) if not still_paused: - plexpy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notify_action': 'on_pause'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notify_action': 'on_pause'}) def on_resume(self): if self.is_valid_session(): @@ -214,7 +214,7 @@ class ActivityHandler(object): # Retrieve the session data from our temp table db_session = ap.get_session_by_key(session_key=self.get_session_key()) - plexpy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notify_action': 'on_resume'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notify_action': 'on_resume'}) def on_change(self): if self.is_valid_session(): @@ -227,7 +227,7 @@ class ActivityHandler(object): ap = activity_processor.ActivityProcessor() db_session = ap.get_session_by_key(session_key=self.get_session_key()) - plexpy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notify_action': 'on_change'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notify_action': 'on_change'}) def on_buffer(self): if self.is_valid_session(): @@ -255,14 +255,14 @@ class ActivityHandler(object): (self.get_session_key(), buffer_last_triggered)) time_since_last_trigger = helpers.timestamp() - int(buffer_last_triggered) - if current_buffer_count >= plexpy.CONFIG.BUFFER_THRESHOLD and time_since_last_trigger == 0 or \ - time_since_last_trigger >= plexpy.CONFIG.BUFFER_WAIT: + if current_buffer_count >= jellypy.CONFIG.BUFFER_THRESHOLD and time_since_last_trigger == 0 or \ + time_since_last_trigger >= jellypy.CONFIG.BUFFER_WAIT: ap.set_session_buffer_trigger_time(session_key=self.get_session_key()) # Retrieve the session data from our temp table db_session = ap.get_session_by_key(session_key=self.get_session_key()) - plexpy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notify_action': 'on_buffer'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notify_action': 'on_buffer'}) def on_error(self): if self.is_valid_session(): @@ -275,7 +275,7 @@ class ActivityHandler(object): ap = activity_processor.ActivityProcessor() db_session = ap.get_session_by_key(session_key=self.get_session_key()) - plexpy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notify_action': 'on_error'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notify_action': 'on_error'}) # This function receives events from our websocket connection def process(self): @@ -358,10 +358,10 @@ class ActivityHandler(object): # The only purpose of this is for notifications if not db_session['watched'] and this_state != 'buffering': progress_percent = helpers.get_percent(self.timeline['viewOffset'], db_session['duration']) - watched_percent = {'movie': plexpy.CONFIG.MOVIE_WATCHED_PERCENT, - 'episode': plexpy.CONFIG.TV_WATCHED_PERCENT, - 'track': plexpy.CONFIG.MUSIC_WATCHED_PERCENT, - 'clip': plexpy.CONFIG.TV_WATCHED_PERCENT + watched_percent = {'movie': jellypy.CONFIG.MOVIE_WATCHED_PERCENT, + 'episode': jellypy.CONFIG.TV_WATCHED_PERCENT, + 'track': jellypy.CONFIG.MUSIC_WATCHED_PERCENT, + 'clip': jellypy.CONFIG.TV_WATCHED_PERCENT } if progress_percent >= watched_percent.get(db_session['media_type'], 101): @@ -373,7 +373,7 @@ class ActivityHandler(object): session=db_session, notify_action='on_watched', notified=False) for d in watched_notifiers: - plexpy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), + 
jellypy.NOTIFY_QUEUE.put({'stream_data': db_session.copy(), 'notifier_id': d['notifier_id'], 'notify_action': 'on_watched'}) @@ -440,7 +440,7 @@ class TimelineHandler(object): # Add a new media item to the recently added queue if media_type and section_id > 0 and \ ((state_type == 0 and metadata_state == 'created')): # or \ - #(plexpy.CONFIG.NOTIFY_RECENTLY_ADDED_UPGRADE and state_type in (1, 5) and \ + #(jellypy.CONFIG.NOTIFY_RECENTLY_ADDED_UPGRADE and state_type in (1, 5) and \ #media_state == 'analyzing' and queue_size is None)): if media_type in ('episode', 'track'): @@ -467,7 +467,7 @@ class TimelineHandler(object): schedule_callback('rating_key-{}'.format(grandparent_rating_key), func=clear_recently_added_queue, args=[grandparent_rating_key, grandparent_title], - seconds=plexpy.CONFIG.NOTIFY_RECENTLY_ADDED_DELAY) + seconds=jellypy.CONFIG.NOTIFY_RECENTLY_ADDED_DELAY) elif media_type in ('season', 'album'): metadata = self.get_metadata() @@ -486,7 +486,7 @@ class TimelineHandler(object): schedule_callback('rating_key-{}'.format(parent_rating_key), func=clear_recently_added_queue, args=[parent_rating_key, parent_title], - seconds=plexpy.CONFIG.NOTIFY_RECENTLY_ADDED_DELAY) + seconds=jellypy.CONFIG.NOTIFY_RECENTLY_ADDED_DELAY) else: queue_set = RECENTLY_ADDED_QUEUE.get(rating_key, set()) @@ -499,7 +499,7 @@ class TimelineHandler(object): schedule_callback('rating_key-{}'.format(rating_key), func=clear_recently_added_queue, args=[rating_key, title], - seconds=plexpy.CONFIG.NOTIFY_RECENTLY_ADDED_DELAY) + seconds=jellypy.CONFIG.NOTIFY_RECENTLY_ADDED_DELAY) # A movie, show, or artist is done processing elif media_type in ('movie', 'show', 'artist') and section_id > 0 and \ @@ -536,10 +536,10 @@ class ReachabilityHandler(object): return helpers.bool_true(pref) def on_down(self, server_response): - plexpy.NOTIFY_QUEUE.put({'notify_action': 'on_extdown', 'remote_access_info': server_response}) + jellypy.NOTIFY_QUEUE.put({'notify_action': 'on_extdown', 'remote_access_info': server_response}) def on_up(self, server_response): - plexpy.NOTIFY_QUEUE.put({'notify_action': 'on_extup', 'remote_access_info': server_response}) + jellypy.NOTIFY_QUEUE.put({'notify_action': 'on_extup', 'remote_access_info': server_response}) def process(self): # Check if remote access is enabled @@ -547,7 +547,7 @@ class ReachabilityHandler(object): return # Do nothing if remote access is still up and hasn't changed - if self.is_reachable() and plexpy.PLEX_REMOTE_ACCESS_UP: + if self.is_reachable() and jellypy.PLEX_REMOTE_ACCESS_UP: return pms_connect = pmsconnect.PmsConnect() @@ -558,22 +558,22 @@ class ReachabilityHandler(object): if server_response['mapping_state'] == 'waiting': logger.warn("Tautulli ReachabilityHandler :: Remote access waiting for port mapping.") - elif plexpy.PLEX_REMOTE_ACCESS_UP is not False and server_response['reason']: + elif jellypy.PLEX_REMOTE_ACCESS_UP is not False and server_response['reason']: logger.warn("Tautulli ReachabilityHandler :: Remote access failed: %s" % server_response['reason']) logger.info("Tautulli ReachabilityHandler :: Plex remote access is down.") - plexpy.PLEX_REMOTE_ACCESS_UP = False + jellypy.PLEX_REMOTE_ACCESS_UP = False if not ACTIVITY_SCHED.get_job('on_extdown'): logger.debug("Tautulli ReachabilityHandler :: Schedule remote access down callback in %d seconds.", - plexpy.CONFIG.NOTIFY_REMOTE_ACCESS_THRESHOLD) + jellypy.CONFIG.NOTIFY_REMOTE_ACCESS_THRESHOLD) schedule_callback('on_extdown', func=self.on_down, args=[server_response], - 
seconds=plexpy.CONFIG.NOTIFY_REMOTE_ACCESS_THRESHOLD) + seconds=jellypy.CONFIG.NOTIFY_REMOTE_ACCESS_THRESHOLD) - elif plexpy.PLEX_REMOTE_ACCESS_UP is False and not server_response['reason']: + elif jellypy.PLEX_REMOTE_ACCESS_UP is False and not server_response['reason']: logger.info("Tautulli ReachabilityHandler :: Plex remote access is back up.") - plexpy.PLEX_REMOTE_ACCESS_UP = True + jellypy.PLEX_REMOTE_ACCESS_UP = True if ACTIVITY_SCHED.get_job('on_extdown'): logger.debug("Tautulli ReachabilityHandler :: Cancelling scheduled remote access down callback.") @@ -581,8 +581,8 @@ class ReachabilityHandler(object): else: self.on_up(server_response) - elif plexpy.PLEX_REMOTE_ACCESS_UP is None: - plexpy.PLEX_REMOTE_ACCESS_UP = self.is_reachable() + elif jellypy.PLEX_REMOTE_ACCESS_UP is None: + jellypy.PLEX_REMOTE_ACCESS_UP = self.is_reachable() def del_keys(key): @@ -626,7 +626,7 @@ def force_stop_stream(session_key, title, user): else: session['write_attempts'] += 1 - if session['write_attempts'] < plexpy.CONFIG.SESSION_DB_WRITE_ATTEMPTS: + if session['write_attempts'] < jellypy.CONFIG.SESSION_DB_WRITE_ATTEMPTS: logger.warn("Tautulli ActivityHandler :: Failed to write stream with sessionKey %s ratingKey %s to the database. " \ "Will try again in 30 seconds. Write attempt %s." % (session['session_key'], session['rating_key'], str(session['write_attempts']))) @@ -649,14 +649,14 @@ def force_stop_stream(session_key, title, user): def clear_recently_added_queue(rating_key, title): child_keys = RECENTLY_ADDED_QUEUE[rating_key] - if plexpy.CONFIG.NOTIFY_GROUP_RECENTLY_ADDED_GRANDPARENT and len(child_keys) > 1: + if jellypy.CONFIG.NOTIFY_GROUP_RECENTLY_ADDED_GRANDPARENT and len(child_keys) > 1: on_created(rating_key, child_keys=child_keys) elif child_keys: for child_key in child_keys: grandchild_keys = RECENTLY_ADDED_QUEUE.get(child_key, []) - if plexpy.CONFIG.NOTIFY_GROUP_RECENTLY_ADDED_PARENT and len(grandchild_keys) > 1: + if jellypy.CONFIG.NOTIFY_GROUP_RECENTLY_ADDED_PARENT and len(grandchild_keys) > 1: on_created(child_key, child_keys=grandchild_keys) elif grandchild_keys: @@ -697,7 +697,7 @@ def on_created(rating_key, **kwargs): if notify: data = {'timeline_data': metadata, 'notify_action': 'on_created'} data.update(kwargs) - plexpy.NOTIFY_QUEUE.put(data) + jellypy.NOTIFY_QUEUE.put(data) all_keys = [rating_key] if 'child_keys' in kwargs: @@ -714,7 +714,7 @@ def on_created(rating_key, **kwargs): def delete_metadata_cache(session_key): try: - os.remove(os.path.join(plexpy.CONFIG.CACHE_DIR, 'session_metadata/metadata-sessionKey-%s.json' % session_key)) + os.remove(os.path.join(jellypy.CONFIG.CACHE_DIR, 'session_metadata/metadata-sessionKey-%s.json' % session_key)) except OSError as e: logger.error("Tautulli ActivityHandler :: Failed to remove metadata cache file (sessionKey %s): %s" % (session_key, e)) diff --git a/plexpy/activity_pinger.py b/jellypy/activity_pinger.py similarity index 85% rename from plexpy/activity_pinger.py rename to jellypy/activity_pinger.py index e667e943..d8b01898 100644 --- a/plexpy/activity_pinger.py +++ b/jellypy/activity_pinger.py @@ -18,8 +18,8 @@ from future.builtins import str import threading -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import activity_handler import activity_processor import database @@ -31,16 +31,16 @@ if plexpy.PYTHON2: import pmsconnect import web_socket else: - from plexpy import activity_handler - from plexpy import activity_processor - from plexpy import database - from plexpy import helpers - from plexpy import 
libraries - from plexpy import logger - from plexpy import notification_handler - from plexpy import plextv - from plexpy import pmsconnect - from plexpy import web_socket + from jellypy import activity_handler + from jellypy import activity_processor + from jellypy import database + from jellypy import helpers + from jellypy import libraries + from jellypy import logger + from jellypy import notification_handler + from jellypy import plextv + from jellypy import pmsconnect + from jellypy import web_socket monitor_lock = threading.Lock() @@ -82,28 +82,28 @@ def check_active_sessions(ws_request=False): if session['state'] == 'paused': logger.debug("Tautulli Monitor :: Session %s paused." % stream['session_key']) - plexpy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_pause'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_pause'}) if session['state'] == 'playing' and stream['state'] == 'paused': logger.debug("Tautulli Monitor :: Session %s resumed." % stream['session_key']) - plexpy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_resume'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_resume'}) if session['state'] == 'error': logger.debug("Tautulli Monitor :: Session %s encountered an error." % stream['session_key']) - plexpy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_error'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_error'}) if stream['state'] == 'paused' and not ws_request: # The stream is still paused so we need to increment the paused_counter # Using the set config parameter as the interval, probably not the most accurate but # it will have to do for now. If it's a websocket request don't use this method. - paused_counter = int(stream['paused_counter']) + plexpy.CONFIG.MONITORING_INTERVAL + paused_counter = int(stream['paused_counter']) + jellypy.CONFIG.MONITORING_INTERVAL monitor_db.action('UPDATE sessions SET paused_counter = ? ' 'WHERE session_key = ? AND rating_key = ?', [paused_counter, stream['session_key'], stream['rating_key']]) - if session['state'] == 'buffering' and plexpy.CONFIG.BUFFER_THRESHOLD > 0: + if session['state'] == 'buffering' and jellypy.CONFIG.BUFFER_THRESHOLD > 0: # The stream is buffering so we need to increment the buffer_count # We're just going to increment on every monitor ping, # would be difficult to keep track otherwise @@ -117,11 +117,11 @@ def check_active_sessions(ws_request=False): 'WHERE session_key = ? AND rating_key = ?', [stream['session_key'], stream['rating_key']]) - if buffer_values[0]['buffer_count'] >= plexpy.CONFIG.BUFFER_THRESHOLD: + if buffer_values[0]['buffer_count'] >= jellypy.CONFIG.BUFFER_THRESHOLD: # Push any notifications - # Push it on its own thread so we don't hold up our db actions # Our first buffer notification - if buffer_values[0]['buffer_count'] == plexpy.CONFIG.BUFFER_THRESHOLD: + if buffer_values[0]['buffer_count'] == jellypy.CONFIG.BUFFER_THRESHOLD: logger.info("Tautulli Monitor :: User '%s' has triggered a buffer warning." % stream['user']) # Set the buffer trigger time @@ -130,12 +130,12 @@ def check_active_sessions(ws_request=False): 'WHERE session_key = ?
AND rating_key = ?', [stream['session_key'], stream['rating_key']]) - plexpy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_buffer'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_buffer'}) else: # Subsequent buffer notifications after wait time if helpers.timestamp() > buffer_values[0]['buffer_last_triggered'] + \ - plexpy.CONFIG.BUFFER_WAIT: + jellypy.CONFIG.BUFFER_WAIT: logger.info("Tautulli Monitor :: User '%s' has triggered multiple buffer warnings." % stream['user']) # Set the buffer trigger time @@ -144,7 +144,7 @@ def check_active_sessions(ws_request=False): 'WHERE session_key = ? AND rating_key = ?', [stream['session_key'], stream['rating_key']]) - plexpy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_buffer'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_buffer'}) logger.debug("Tautulli Monitor :: Session %s is buffering. Count is now %s. Last triggered %s." % (stream['session_key'], @@ -157,11 +157,11 @@ def check_active_sessions(ws_request=False): if session['state'] != 'buffering': progress_percent = helpers.get_percent(session['view_offset'], session['duration']) notify_states = notification_handler.get_notify_state(session=session) - if (session['media_type'] == 'movie' and progress_percent >= plexpy.CONFIG.MOVIE_WATCHED_PERCENT or - session['media_type'] == 'episode' and progress_percent >= plexpy.CONFIG.TV_WATCHED_PERCENT or - session['media_type'] == 'track' and progress_percent >= plexpy.CONFIG.MUSIC_WATCHED_PERCENT) \ + if (session['media_type'] == 'movie' and progress_percent >= jellypy.CONFIG.MOVIE_WATCHED_PERCENT or + session['media_type'] == 'episode' and progress_percent >= jellypy.CONFIG.TV_WATCHED_PERCENT or + session['media_type'] == 'track' and progress_percent >= jellypy.CONFIG.MUSIC_WATCHED_PERCENT) \ and not any(d['notify_action'] == 'on_watched' for d in notify_states): - plexpy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_watched'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_watched'}) else: # The user has stopped playing a stream @@ -177,13 +177,13 @@ def check_active_sessions(ws_request=False): progress_percent = helpers.get_percent(stream['view_offset'], stream['duration']) notify_states = notification_handler.get_notify_state(session=stream) - if (stream['media_type'] == 'movie' and progress_percent >= plexpy.CONFIG.MOVIE_WATCHED_PERCENT or - stream['media_type'] == 'episode' and progress_percent >= plexpy.CONFIG.TV_WATCHED_PERCENT or - stream['media_type'] == 'track' and progress_percent >= plexpy.CONFIG.MUSIC_WATCHED_PERCENT) \ + if (stream['media_type'] == 'movie' and progress_percent >= jellypy.CONFIG.MOVIE_WATCHED_PERCENT or + stream['media_type'] == 'episode' and progress_percent >= jellypy.CONFIG.TV_WATCHED_PERCENT or + stream['media_type'] == 'track' and progress_percent >= jellypy.CONFIG.MUSIC_WATCHED_PERCENT) \ and not any(d['notify_action'] == 'on_watched' for d in notify_states): - plexpy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_watched'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_watched'}) - plexpy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_stop'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': stream.copy(), 'notify_action': 'on_stop'}) # Write the item history on playback stop row_id = monitor_process.write_session_history(session=stream) @@ -196,7 +196,7 @@ def 
check_active_sessions(ws_request=False): else: stream['write_attempts'] += 1 - if stream['write_attempts'] < plexpy.CONFIG.SESSION_DB_WRITE_ATTEMPTS: + if stream['write_attempts'] < jellypy.CONFIG.SESSION_DB_WRITE_ATTEMPTS: logger.warn("Tautulli Monitor :: Failed to write sessionKey %s ratingKey %s to the database. " \ "Will try again on the next pass. Write attempt %s." % (stream['session_key'], stream['rating_key'], str(stream['write_attempts']))) @@ -223,7 +223,7 @@ def check_active_sessions(ws_request=False): def connect_server(log=True, startup=False): - if plexpy.CONFIG.PMS_IS_CLOUD: + if jellypy.CONFIG.PMS_IS_CLOUD: if log: logger.info("Tautulli Monitor :: Checking for Plex Cloud server status...") @@ -264,12 +264,12 @@ def check_server_updates(): download_info = plex_tv.get_plex_downloads() if download_info: - logger.info("Tautulli Monitor :: Current PMS version: %s", plexpy.CONFIG.PMS_VERSION) + logger.info("Tautulli Monitor :: Current PMS version: %s", jellypy.CONFIG.PMS_VERSION) if download_info['update_available']: logger.info("Tautulli Monitor :: PMS update available version: %s", download_info['version']) - plexpy.NOTIFY_QUEUE.put({'notify_action': 'on_pmsupdate', 'pms_download_info': download_info}) + jellypy.NOTIFY_QUEUE.put({'notify_action': 'on_pmsupdate', 'pms_download_info': download_info}) else: logger.info("Tautulli Monitor :: No PMS update available.") diff --git a/plexpy/activity_processor.py b/jellypy/activity_processor.py similarity index 97% rename from plexpy/activity_processor.py rename to jellypy/activity_processor.py index 53cebc0e..3aece292 100644 --- a/plexpy/activity_processor.py +++ b/jellypy/activity_processor.py @@ -20,8 +20,8 @@ from future.builtins import object from collections import defaultdict import json -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import database import helpers import libraries @@ -29,12 +29,12 @@ if plexpy.PYTHON2: import pmsconnect import users else: - from plexpy import database - from plexpy import helpers - from plexpy import libraries - from plexpy import logger - from plexpy import pmsconnect - from plexpy import users + from jellypy import database + from jellypy import helpers + from jellypy import libraries + from jellypy import logger + from jellypy import pmsconnect + from jellypy import users class ActivityProcessor(object): @@ -165,7 +165,7 @@ class ActivityProcessor(object): # Check if any notification agents have notifications enabled if notify: session.update(timestamp) - plexpy.NOTIFY_QUEUE.put({'stream_data': session.copy(), 'notify_action': 'on_play'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': session.copy(), 'notify_action': 'on_play'}) # Add Live TV library if it hasn't been added if values['live']: @@ -231,14 +231,14 @@ class ActivityProcessor(object): real_play_time = stopped - helpers.cast_to_int(session['started']) - helpers.cast_to_int(session['paused_counter']) - if not is_import and plexpy.CONFIG.LOGGING_IGNORE_INTERVAL: + if not is_import and jellypy.CONFIG.LOGGING_IGNORE_INTERVAL: if (session['media_type'] == 'movie' or session['media_type'] == 'episode') and \ - (real_play_time < int(plexpy.CONFIG.LOGGING_IGNORE_INTERVAL)): + (real_play_time < int(jellypy.CONFIG.LOGGING_IGNORE_INTERVAL)): logging_enabled = False logger.debug("Tautulli ActivityProcessor :: Play duration for session %s ratingKey %s is %s secs " "which is less than %s seconds, so we're not logging it." 
% (session['session_key'], session['rating_key'], str(real_play_time), - plexpy.CONFIG.LOGGING_IGNORE_INTERVAL)) + jellypy.CONFIG.LOGGING_IGNORE_INTERVAL)) if not is_import and session['media_type'] == 'track': if real_play_time < 15 and helpers.cast_to_int(session['duration']) >= 30: logging_enabled = False @@ -360,9 +360,9 @@ class ActivityProcessor(object): 'view_offset': result[1]['view_offset'], 'reference_id': result[1]['reference_id']} - watched_percent = {'movie': plexpy.CONFIG.MOVIE_WATCHED_PERCENT, - 'episode': plexpy.CONFIG.TV_WATCHED_PERCENT, - 'track': plexpy.CONFIG.MUSIC_WATCHED_PERCENT + watched_percent = {'movie': jellypy.CONFIG.MOVIE_WATCHED_PERCENT, + 'episode': jellypy.CONFIG.TV_WATCHED_PERCENT, + 'track': jellypy.CONFIG.MUSIC_WATCHED_PERCENT } prev_progress_percent = helpers.get_percent(prev_session['view_offset'], session['duration']) media_watched_percent = watched_percent.get(session['media_type'], 0) @@ -661,4 +661,4 @@ class ActivityProcessor(object): 'WHERE user_id = ? AND machine_id = ? AND media_type = ? ' 'ORDER BY stopped DESC', [user_id, machine_id, media_type]) - return int(started - last_session.get('stopped', 0) >= plexpy.CONFIG.NOTIFY_CONTINUED_SESSION_THRESHOLD) + return int(started - last_session.get('stopped', 0) >= jellypy.CONFIG.NOTIFY_CONTINUED_SESSION_THRESHOLD) diff --git a/plexpy/api2.py b/jellypy/api2.py similarity index 92% rename from plexpy/api2.py rename to jellypy/api2.py index a39b3022..0b14c0cf 100644 --- a/plexpy/api2.py +++ b/jellypy/api2.py @@ -35,8 +35,8 @@ import traceback import cherrypy import xmltodict -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import common import config import database @@ -51,19 +51,19 @@ if plexpy.PYTHON2: import plextv import users else: - from plexpy import common - from plexpy import config - from plexpy import database - from plexpy import helpers - from plexpy import libraries - from plexpy import logger - from plexpy import mobile_app - from plexpy import notification_handler - from plexpy import notifiers - from plexpy import newsletter_handler - from plexpy import newsletters - from plexpy import plextv - from plexpy import users + from jellypy import common + from jellypy import config + from jellypy import database + from jellypy import helpers + from jellypy import libraries + from jellypy import logger + from jellypy import mobile_app + from jellypy import notification_handler + from jellypy import notifiers + from jellypy import newsletter_handler + from jellypy import newsletters + from jellypy import plextv + from jellypy import users class API2(object): @@ -107,15 +107,15 @@ class API2(object): def _api_validate(self, *args, **kwargs): """ Sets class vars and removes unneeded parameters.
""" - if not plexpy.CONFIG.API_ENABLED: + if not jellypy.CONFIG.API_ENABLED: self._api_msg = 'API not enabled' self._api_response_code = 404 - elif not plexpy.CONFIG.API_KEY: + elif not jellypy.CONFIG.API_KEY: self._api_msg = 'API key not generated' self._api_response_code = 401 - elif len(plexpy.CONFIG.API_KEY) != 32: + elif len(jellypy.CONFIG.API_KEY) != 32: self._api_msg = 'API key not generated correctly' self._api_response_code = 401 @@ -142,8 +142,8 @@ class API2(object): if 'app' in kwargs and helpers.bool_true(kwargs.pop('app')): self._api_app = True - if plexpy.CONFIG.API_ENABLED and not self._api_msg or self._api_cmd in ('get_apikey', 'docs', 'docs_md'): - if not self._api_app and self._api_apikey == plexpy.CONFIG.API_KEY: + if jellypy.CONFIG.API_ENABLED and not self._api_msg or self._api_cmd in ('get_apikey', 'docs', 'docs_md'): + if not self._api_app and self._api_apikey == jellypy.CONFIG.API_KEY: self._api_authenticated = True elif self._api_app and self._api_apikey == mobile_app.get_temp_device_token() and \ @@ -203,7 +203,7 @@ class API2(object): ] ``` """ - logfile = os.path.join(plexpy.CONFIG.LOG_DIR, logger.FILENAME) + logfile = os.path.join(jellypy.CONFIG.LOG_DIR, logger.FILENAME) templog = [] start = int(start) end = int(end) @@ -290,11 +290,11 @@ class API2(object): ``` """ - interface_dir = os.path.join(plexpy.PROG_DIR, 'data/interfaces/') + interface_dir = os.path.join(jellypy.PROG_DIR, 'data/interfaces/') interface_list = [name for name in os.listdir(interface_dir) if os.path.isdir(os.path.join(interface_dir, name))] - conf = plexpy.CONFIG._config + conf = jellypy.CONFIG._config config = {} # Truthify the dict @@ -332,7 +332,7 @@ class API2(object): None ``` """ - if not plexpy.CONFIG.API_SQL: + if not jellypy.CONFIG.API_SQL: self._api_msg = 'SQL not enabled for the API.' return @@ -342,12 +342,12 @@ class API2(object): # allow the user to shoot them self # in the foot but not in the head.. 
- if not len(os.listdir(plexpy.CONFIG.BACKUP_DIR)): + if not len(os.listdir(jellypy.CONFIG.BACKUP_DIR)): self.backup_db() else: # If the backup is less than 24 h old, let's make a backup - if not any(os.path.getctime(os.path.join(plexpy.CONFIG.BACKUP_DIR, file_)) > (time.time() - 86400) - and file_.endswith('.db') for file_ in os.listdir(plexpy.CONFIG.BACKUP_DIR)): + if not any(os.path.getctime(os.path.join(jellypy.CONFIG.BACKUP_DIR, file_)) > (time.time() - 86400) + and file_.endswith('.db') for file_ in os.listdir(jellypy.CONFIG.BACKUP_DIR)): self.backup_db() db = database.MonitorDatabase() @@ -363,7 +363,7 @@ class API2(object): return data def backup_db(self): - """ Create a manual backup of the `plexpy.db` file.""" + """ Create a manual backup of the `tautulli.db` file.""" data = database.make_backup() self._api_result_type = 'success' if data else 'error' @@ -373,14 +373,14 @@ class API2(object): def restart(self, **kwargs): """ Restart Tautulli.""" - plexpy.SIGNAL = 'restart' + jellypy.SIGNAL = 'restart' self._api_msg = 'Restarting Tautulli' self._api_result_type = 'success' def update(self, **kwargs): """ Update Tautulli.""" - plexpy.SIGNAL = 'update' + jellypy.SIGNAL = 'update' self._api_msg = 'Updating Tautulli' self._api_result_type = 'success' @@ -472,9 +472,9 @@ class API2(object): mobile_app.set_temp_device_token(True) plex_server = plextv.get_server_resources(return_info=True) - tautulli = plexpy.get_tautulli_info() + tautulli = jellypy.get_tautulli_info() - data = {"server_id": plexpy.CONFIG.PMS_UUID} + data = {"server_id": jellypy.CONFIG.PMS_UUID} data.update(plex_server) data.update(tautulli) @@ -646,32 +646,32 @@ General optional parameters: """ data = None apikey = hashlib.sha224(str(random.getrandbits(256)).encode('utf-8')).hexdigest()[0:32] - if plexpy.CONFIG.HTTP_USERNAME and plexpy.CONFIG.HTTP_PASSWORD: + if jellypy.CONFIG.HTTP_USERNAME and jellypy.CONFIG.HTTP_PASSWORD: authenticated = False - if plexpy.CONFIG.HTTP_HASHED_PASSWORD and \ - username == plexpy.CONFIG.HTTP_USERNAME and check_hash(password, plexpy.CONFIG.HTTP_PASSWORD): + if jellypy.CONFIG.HTTP_HASHED_PASSWORD and \ + username == jellypy.CONFIG.HTTP_USERNAME and check_hash(password, jellypy.CONFIG.HTTP_PASSWORD): authenticated = True - elif not plexpy.CONFIG.HTTP_HASHED_PASSWORD and \ - username == plexpy.CONFIG.HTTP_USERNAME and password == plexpy.CONFIG.HTTP_PASSWORD: + elif not jellypy.CONFIG.HTTP_HASHED_PASSWORD and \ + username == jellypy.CONFIG.HTTP_USERNAME and password == jellypy.CONFIG.HTTP_PASSWORD: authenticated = True if authenticated: - if plexpy.CONFIG.API_KEY: - data = plexpy.CONFIG.API_KEY + if jellypy.CONFIG.API_KEY: + data = jellypy.CONFIG.API_KEY else: data = apikey - plexpy.CONFIG.API_KEY = apikey - plexpy.CONFIG.write() + jellypy.CONFIG.API_KEY = apikey + jellypy.CONFIG.write() else: self._api_msg = 'Authentication is enabled, please add the correct username and password to the parameters' else: - if plexpy.CONFIG.API_KEY: - data = plexpy.CONFIG.API_KEY + if jellypy.CONFIG.API_KEY: + data = jellypy.CONFIG.API_KEY else: # Make an apikey if one doesn't exist data = apikey - plexpy.CONFIG.API_KEY = apikey - plexpy.CONFIG.write() + jellypy.CONFIG.API_KEY = apikey + jellypy.CONFIG.write() return data diff --git a/plexpy/classes.py b/jellypy/classes.py similarity index 96% rename from plexpy/classes.py rename to jellypy/classes.py index 8480b952..cde85ab9 100644 --- a/plexpy/classes.py +++ b/jellypy/classes.py @@ -24,11 +24,11 @@ from __future__ import unicode_literals from
future.moves.urllib.request import FancyURLopener -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: from common import USER_AGENT else: - from plexpy.common import USER_AGENT + from jellypy.common import USER_AGENT class PlexPyURLopener(FancyURLopener): diff --git a/plexpy/common.py b/jellypy/common.py similarity index 99% rename from plexpy/common.py rename to jellypy/common.py index 840beefd..c62c2160 100644 --- a/plexpy/common.py +++ b/jellypy/common.py @@ -21,11 +21,11 @@ import distro import platform from collections import OrderedDict -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import version else: - from plexpy import version + from jellypy import version # Identify Our Application diff --git a/plexpy/config.py b/jellypy/config.py similarity index 97% rename from plexpy/config.py rename to jellypy/config.py index 2b4426dd..99a8ef9c 100644 --- a/plexpy/config.py +++ b/jellypy/config.py @@ -25,13 +25,13 @@ import threading from configobj import ConfigObj, ParseError -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import helpers import logger else: - from plexpy import helpers - from plexpy import logger + from jellypy import helpers + from jellypy import logger def bool_int(value): @@ -257,20 +257,20 @@ def import_tautulli_config(config=None, backup=False): # Remove keys that should not be imported for key in _DO_NOT_IMPORT_KEYS: delattr(imported_config, key) - if plexpy.DOCKER or plexpy.SNAP: + if jellypy.DOCKER or jellypy.SNAP: for key in _DO_NOT_IMPORT_KEYS_DOCKER: delattr(imported_config, key) # Merge the imported config file into the current config file - plexpy.CONFIG._config.merge(imported_config._config) - plexpy.CONFIG.write() + jellypy.CONFIG._config.merge(imported_config._config) + jellypy.CONFIG.write() logger.info("Tautulli Config :: Tautulli config import complete.") set_import_thread(None) set_is_importing(False) # Restart to apply changes - plexpy.SIGNAL = 'restart' + jellypy.SIGNAL = 'restart' def make_backup(cleanup=False, scheduler=False): @@ -280,15 +280,15 @@ def make_backup(cleanup=False, scheduler=False): backup_file = 'config.backup-{}.sched.ini'.format(helpers.now()) else: backup_file = 'config.backup-{}.ini'.format(helpers.now()) - backup_folder = plexpy.CONFIG.BACKUP_DIR + backup_folder = jellypy.CONFIG.BACKUP_DIR backup_file_fp = os.path.join(backup_folder, backup_file) # In case the user has deleted it manually if not os.path.exists(backup_folder): os.makedirs(backup_folder) - plexpy.CONFIG.write() - shutil.copyfile(plexpy.CONFIG_FILE, backup_file_fp) + jellypy.CONFIG.write() + shutil.copyfile(jellypy.CONFIG_FILE, backup_file_fp) if cleanup: now = time.time() @@ -296,17 +296,17 @@ def make_backup(cleanup=False, scheduler=False): for root, dirs, files in os.walk(backup_folder): ini_files = [os.path.join(root, f) for f in files if f.endswith('.sched.ini')] for file_ in ini_files: - if os.stat(file_).st_mtime < now - plexpy.CONFIG.BACKUP_DAYS * 86400: + if os.stat(file_).st_mtime < now - jellypy.CONFIG.BACKUP_DAYS * 86400: try: os.remove(file_) except OSError as e: logger.error("Tautulli Config :: Failed to delete %s from the backup folder: %s" % (file_, e)) if backup_file in os.listdir(backup_folder): - logger.debug("Tautulli Config :: Successfully backed up %s to %s" % (plexpy.CONFIG_FILE, backup_file)) + logger.debug("Tautulli Config :: Successfully backed up %s to %s" % (jellypy.CONFIG_FILE, backup_file)) return True else: - logger.error("Tautulli Config :: Failed to backup %s to 
%s" % (plexpy.CONFIG_FILE, backup_file)) + logger.error("Tautulli Config :: Failed to backup %s to %s" % (jellypy.CONFIG_FILE, backup_file)) return False @@ -530,7 +530,7 @@ class Config(object): self.CONFIG_VERSION = 14 if self.CONFIG_VERSION == 14: - if plexpy.DOCKER: + if jellypy.DOCKER: self.PLEXPY_AUTO_UPDATE = 0 self.CONFIG_VERSION = 15 @@ -542,7 +542,7 @@ class Config(object): self.CONFIG_VERSION = 16 if self.CONFIG_VERSION == 16: - if plexpy.SNAP: + if jellypy.SNAP: self.PLEXPY_AUTO_UPDATE = 0 self.CONFIG_VERSION = 17 diff --git a/plexpy/database.py b/jellypy/database.py similarity index 96% rename from plexpy/database.py rename to jellypy/database.py index cb89e465..4877f490 100644 --- a/plexpy/database.py +++ b/jellypy/database.py @@ -23,13 +23,13 @@ import shutil import threading import time -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import helpers import logger else: - from plexpy import helpers - from plexpy import logger + from jellypy import helpers + from jellypy import logger FILENAME = "tautulli.db" @@ -308,7 +308,7 @@ def optimize_db(): def db_filename(filename=FILENAME): """ Returns the filepath to the db """ - return os.path.join(plexpy.DATA_DIR, filename) + return os.path.join(jellypy.DATA_DIR, filename) def make_backup(cleanup=False, scheduler=False): @@ -320,13 +320,13 @@ def make_backup(cleanup=False, scheduler=False): corrupt = '' if not integrity: corrupt = '.corrupt' - plexpy.NOTIFY_QUEUE.put({'notify_action': 'on_plexpydbcorrupt'}) + jellypy.NOTIFY_QUEUE.put({'notify_action': 'on_plexpydbcorrupt'}) if scheduler: backup_file = 'tautulli.backup-{}{}.sched.db'.format(helpers.now(), corrupt) else: backup_file = 'tautulli.backup-{}{}.db'.format(helpers.now(), corrupt) - backup_folder = plexpy.CONFIG.BACKUP_DIR + backup_folder = jellypy.CONFIG.BACKUP_DIR backup_file_fp = os.path.join(backup_folder, backup_file) # In case the user has deleted it manually @@ -345,7 +345,7 @@ def make_backup(cleanup=False, scheduler=False): for root, dirs, files in os.walk(backup_folder): db_files = [os.path.join(root, f) for f in files if f.endswith('.sched.db')] for file_ in db_files: - if os.stat(file_).st_mtime < now - plexpy.CONFIG.BACKUP_DAYS * 86400: + if os.stat(file_).st_mtime < now - jellypy.CONFIG.BACKUP_DAYS * 86400: try: os.remove(file_) except OSError as e: @@ -361,10 +361,10 @@ def make_backup(cleanup=False, scheduler=False): def get_cache_size(): # This will protect against typecasting problems produced by empty string and None settings - if not plexpy.CONFIG.CACHE_SIZEMB: + if not jellypy.CONFIG.CACHE_SIZEMB: # sqlite will work with this (very slowly) return 0 - return int(plexpy.CONFIG.CACHE_SIZEMB) + return int(jellypy.CONFIG.CACHE_SIZEMB) def dict_factory(cursor, row): @@ -381,9 +381,9 @@ class MonitorDatabase(object): self.filename = filename self.connection = sqlite3.connect(db_filename(filename), timeout=20) # Set database synchronous mode (default NORMAL) - self.connection.execute("PRAGMA synchronous = %s" % plexpy.CONFIG.SYNCHRONOUS_MODE) + self.connection.execute("PRAGMA synchronous = %s" % jellypy.CONFIG.SYNCHRONOUS_MODE) # Set database journal mode (default WAL) - self.connection.execute("PRAGMA journal_mode = %s" % plexpy.CONFIG.JOURNAL_MODE) + self.connection.execute("PRAGMA journal_mode = %s" % jellypy.CONFIG.JOURNAL_MODE) # Set database cache size (default 32MB) self.connection.execute("PRAGMA cache_size = -%s" % (get_cache_size() * 1024)) self.connection.row_factory = dict_factory diff --git a/plexpy/datafactory.py 
b/jellypy/datafactory.py similarity index 98% rename from plexpy/datafactory.py rename to jellypy/datafactory.py index ef918bd2..6ba5a3ba 100644 --- a/plexpy/datafactory.py +++ b/jellypy/datafactory.py @@ -24,8 +24,8 @@ from future.builtins import object import json from itertools import groupby -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import common import database import datatables @@ -34,13 +34,13 @@ if plexpy.PYTHON2: import pmsconnect import session else: - from plexpy import common - from plexpy import database - from plexpy import datatables - from plexpy import helpers - from plexpy import logger - from plexpy import pmsconnect - from plexpy import session + from jellypy import common + from jellypy import database + from jellypy import datatables + from jellypy import helpers + from jellypy import logger + from jellypy import pmsconnect + from jellypy import session class DataFactory(object): @@ -58,10 +58,10 @@ class DataFactory(object): custom_where = [] if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES if include_activity is None: - include_activity = plexpy.CONFIG.HISTORY_TABLE_ACTIVITY + include_activity = jellypy.CONFIG.HISTORY_TABLE_ACTIVITY if session.get_session_user_id(): session_user_id = str(session.get_session_user_id()) @@ -218,11 +218,11 @@ class DataFactory(object): filter_duration = 0 total_duration = self.get_total_duration(custom_where=custom_where) - watched_percent = {'movie': plexpy.CONFIG.MOVIE_WATCHED_PERCENT, - 'episode': plexpy.CONFIG.TV_WATCHED_PERCENT, - 'track': plexpy.CONFIG.MUSIC_WATCHED_PERCENT, + watched_percent = {'movie': jellypy.CONFIG.MOVIE_WATCHED_PERCENT, + 'episode': jellypy.CONFIG.TV_WATCHED_PERCENT, + 'track': jellypy.CONFIG.MUSIC_WATCHED_PERCENT, 'photo': 0, - 'clip': plexpy.CONFIG.TV_WATCHED_PERCENT + 'clip': jellypy.CONFIG.TV_WATCHED_PERCENT } rows = [] @@ -309,13 +309,13 @@ class DataFactory(object): if stat_id: stats_cards = [stat_id] if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES if stats_cards is None: - stats_cards = plexpy.CONFIG.HOME_STATS_CARDS + stats_cards = jellypy.CONFIG.HOME_STATS_CARDS - movie_watched_percent = plexpy.CONFIG.MOVIE_WATCHED_PERCENT - tv_watched_percent = plexpy.CONFIG.TV_WATCHED_PERCENT - music_watched_percent = plexpy.CONFIG.MUSIC_WATCHED_PERCENT + movie_watched_percent = jellypy.CONFIG.MOVIE_WATCHED_PERCENT + tv_watched_percent = jellypy.CONFIG.TV_WATCHED_PERCENT + music_watched_percent = jellypy.CONFIG.MUSIC_WATCHED_PERCENT group_by = 'session_history.reference_id' if grouping else 'session_history.id' sort_type = 'total_duration' if stats_type == 'duration' else 'total_plays' diff --git a/plexpy/datatables.py b/jellypy/datatables.py similarity index 98% rename from plexpy/datatables.py rename to jellypy/datatables.py index 55056187..34305dfb 100644 --- a/plexpy/datatables.py +++ b/jellypy/datatables.py @@ -18,15 +18,15 @@ from future.builtins import object import re -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import database import helpers import logger else: - from plexpy import database - from plexpy import helpers - from plexpy import logger + from jellypy import database + from jellypy import helpers + from jellypy import logger class DataTables(object): diff --git a/plexpy/exceptions.py b/jellypy/exceptions.py similarity index 100% rename from plexpy/exceptions.py rename to jellypy/exceptions.py diff --git 
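DataFactory above (and every Graphs query further down) resolves a grouping argument of None to the GROUP_HISTORY_TABLES setting, then derives the GROUP BY column from it. A sketch of that recurring idiom with the setting passed in explicitly rather than read from the config object:

    def resolve_group_by(grouping, group_history_tables):
        # None means "use the global setting"; an explicit 0/1 wins
        if grouping is None:
            grouping = group_history_tables
        return 'session_history.reference_id' if grouping else 'session_history.id'

    assert resolve_group_by(None, 1) == 'session_history.reference_id'
    assert resolve_group_by(0, 1) == 'session_history.id'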
a/plexpy/exporter.py b/jellypy/exporter.py similarity index 99% rename from plexpy/exporter.py rename to jellypy/exporter.py index f75227ba..a199319b 100644 --- a/plexpy/exporter.py +++ b/jellypy/exporter.py @@ -29,8 +29,8 @@ from functools import partial, reduce from io import open from multiprocessing.dummy import Pool as ThreadPool -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import database import datatables import helpers @@ -38,12 +38,12 @@ if plexpy.PYTHON2: import users from plex import Plex else: - from plexpy import database - from plexpy import datatables - from plexpy import helpers - from plexpy import logger - from plexpy import users - from plexpy.plex import Plex + from jellypy import database + from jellypy import datatables + from jellypy import helpers + from jellypy import logger + from jellypy import users + from jellypy.plex import Plex class Export(object): @@ -1534,9 +1534,9 @@ class Export(object): user_tokens = user_data.get_tokens(user_id=self.user_id) plex_token = user_tokens['server_token'] else: - plex_token = plexpy.CONFIG.PMS_TOKEN + plex_token = jellypy.CONFIG.PMS_TOKEN - plex = Plex(plexpy.CONFIG.PMS_URL, plex_token) + plex = Plex(jellypy.CONFIG.PMS_URL, plex_token) if self.rating_key: logger.debug( @@ -1691,7 +1691,7 @@ class Export(object): self.total_items = len(items) logger.info("Tautulli Exporter :: Exporting %d item(s).", self.total_items) - pool = ThreadPool(processes=plexpy.CONFIG.EXPORT_THREADS) + pool = ThreadPool(processes=jellypy.CONFIG.EXPORT_THREADS) items = [ExportObject(self, item) for item in items] try: @@ -2107,7 +2107,7 @@ def delete_export(export_id): def delete_all_exports(): logger.info("Tautulli Exporter :: Deleting all exports from the export directory.") - export_dir = plexpy.CONFIG.EXPORT_DIR + export_dir = jellypy.CONFIG.EXPORT_DIR try: shutil.rmtree(export_dir, ignore_errors=True) except OSError as e: @@ -2233,7 +2233,7 @@ def format_export_filename(title, file_format): def get_export_dirpath(title, timestamp=None, images_directory=None): if timestamp: title = format_export_directory(title, timestamp) - dirpath = os.path.join(plexpy.CONFIG.EXPORT_DIR, title) + dirpath = os.path.join(jellypy.CONFIG.EXPORT_DIR, title) if images_directory: dirpath = os.path.join(dirpath, '{}.images'.format(images_directory)) return dirpath diff --git a/plexpy/graphs.py b/jellypy/graphs.py similarity index 98% rename from plexpy/graphs.py rename to jellypy/graphs.py index c2115302..33b55102 100644 --- a/plexpy/graphs.py +++ b/jellypy/graphs.py @@ -22,19 +22,19 @@ from future.builtins import object import datetime -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import common import database import logger import libraries import session else: - from plexpy import common - from plexpy import database - from plexpy import logger - from plexpy import libraries - from plexpy import session + from jellypy import common + from jellypy import database + from jellypy import logger + from jellypy import libraries + from jellypy import session class Graphs(object): @@ -55,7 +55,7 @@ class Graphs(object): user_cond = 'AND session_history.user_id = %s ' % user_id if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES group_by = 'session_history.reference_id' if grouping else 'session_history.id' @@ -169,7 +169,7 @@ class Graphs(object): user_cond = 'AND session_history.user_id = %s ' % user_id if grouping is None: - grouping = 
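The exporter hunk above fans the export out over a thread pool sized by EXPORT_THREADS; multiprocessing.dummy provides the Pool API backed by threads, a reasonable fit for I/O-bound metadata fetches. A trimmed sketch (export_one is a hypothetical worker, not a Tautulli function):

    from multiprocessing.dummy import Pool as ThreadPool

    def export_one(item):
        # stand-in for the per-item fetch-and-serialize work
        return len(str(item))

    pool = ThreadPool(processes=8)  # the hunk uses jellypy.CONFIG.EXPORT_THREADS
    try:
        results = pool.map(export_one, range(100))
    finally:
        pool.close()
        pool.join()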
plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES group_by = 'session_history.reference_id' if grouping else 'session_history.id' @@ -227,7 +227,7 @@ class Graphs(object): logger.warn("Tautulli Graphs :: Unable to execute database query for get_total_plays_per_dayofweek: %s." % e) return None - if plexpy.CONFIG.WEEK_START_MONDAY: + if jellypy.CONFIG.WEEK_START_MONDAY: days_list = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] else: @@ -300,7 +300,7 @@ class Graphs(object): user_cond = 'AND session_history.user_id = %s ' % user_id if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES group_by = 'session_history.reference_id' if grouping else 'session_history.id' @@ -413,7 +413,7 @@ class Graphs(object): user_cond = 'AND session_history.user_id = %s ' % user_id if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES group_by = 'session_history.reference_id' if grouping else 'session_history.id' @@ -534,7 +534,7 @@ class Graphs(object): user_cond = 'AND session_history.user_id = %s ' % user_id if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES group_by = 'session_history.reference_id' if grouping else 'session_history.id' @@ -630,7 +630,7 @@ class Graphs(object): user_cond = 'AND session_history.user_id = %s ' % user_id if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES group_by = 'session_history.reference_id' if grouping else 'session_history.id' @@ -739,7 +739,7 @@ class Graphs(object): user_cond = 'AND session_history.user_id = %s ' % user_id if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES group_by = 'session_history.reference_id' if grouping else 'session_history.id' @@ -845,7 +845,7 @@ class Graphs(object): user_cond = 'AND session_history.user_id = %s ' % user_id if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES group_by = 'session_history.reference_id' if grouping else 'session_history.id' @@ -931,7 +931,7 @@ class Graphs(object): user_cond = 'AND session_history.user_id = %s ' % user_id if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES group_by = 'session_history.reference_id' if grouping else 'session_history.id' @@ -1041,7 +1041,7 @@ class Graphs(object): user_cond = 'AND session_history.user_id = %s ' % user_id if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES group_by = 'session_history.reference_id' if grouping else 'session_history.id' @@ -1131,7 +1131,7 @@ class Graphs(object): user_cond = 'AND session_history.user_id = %s ' % user_id if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES group_by = 'session_history.reference_id' if grouping else 'session_history.id' diff --git a/plexpy/helpers.py b/jellypy/helpers.py similarity index 94% rename from plexpy/helpers.py rename to jellypy/helpers.py index 26c7378d..b57a7cac 100644 --- a/plexpy/helpers.py +++ b/jellypy/helpers.py @@ -52,17 +52,17 @@ from future.moves.urllib.parse import urlencode from xml.dom import minidom import xmltodict -import plexpy -if plexpy.PYTHON2: 
+import jellypy +if jellypy.PYTHON2: import common import logger import request from api2 import API2 else: - from plexpy import common - from plexpy import logger - from plexpy import request - from plexpy.api2 import API2 + from jellypy import common + from jellypy import logger + from jellypy import request + from jellypy.api2 import API2 def addtoapi(*dargs, **dkwargs): @@ -367,7 +367,7 @@ def replace_all(text, dic, normalize=False): else: j = unicodedata.normalize('NFC', j) except TypeError: - j = unicodedata.normalize('NFC', j.decode(plexpy.SYS_ENCODING, 'replace')) + j = unicodedata.normalize('NFC', j.decode(jellypy.SYS_ENCODING, 'replace')) text = text.replace(i, j) return text @@ -479,8 +479,8 @@ def create_https_certificates(ssl_cert, ssl_key): serial = timestamp() not_before = 0 not_after = 60 * 60 * 24 * 365 * 10 # ten years - domains = ['DNS:' + d.strip() for d in plexpy.CONFIG.HTTPS_DOMAIN.split(',') if d] - ips = ['IP:' + d.strip() for d in plexpy.CONFIG.HTTPS_IP.split(',') if d] + domains = ['DNS:' + d.strip() for d in jellypy.CONFIG.HTTPS_DOMAIN.split(',') if d] + ips = ['IP:' + d.strip() for d in jellypy.CONFIG.HTTPS_IP.split(',') if d] alt_names = ','.join(domains + ips).encode('utf-8') # Create the self-signed Tautulli certificate @@ -739,15 +739,15 @@ def anon_url(*url): """ Return a URL string consisting of the Anonymous redirect URL and an arbitrary number of values appended. """ - return '' if None in url else '%s%s' % (plexpy.CONFIG.ANON_REDIRECT, ''.join(str(s) for s in url)) + return '' if None in url else '%s%s' % (jellypy.CONFIG.ANON_REDIRECT, ''.join(str(s) for s in url)) def get_img_service(include_self=False): - if plexpy.CONFIG.NOTIFY_UPLOAD_POSTERS == 1: + if jellypy.CONFIG.NOTIFY_UPLOAD_POSTERS == 1: return 'imgur' - elif plexpy.CONFIG.NOTIFY_UPLOAD_POSTERS == 2 and include_self: + elif jellypy.CONFIG.NOTIFY_UPLOAD_POSTERS == 2 and include_self: return 'self-hosted' - elif plexpy.CONFIG.NOTIFY_UPLOAD_POSTERS == 3: + elif jellypy.CONFIG.NOTIFY_UPLOAD_POSTERS == 3: return 'cloudinary' else: return None @@ -757,11 +757,11 @@ def upload_to_imgur(img_data, img_title='', rating_key='', fallback=''): """ Uploads an image to Imgur """ img_url = delete_hash = '' - if not plexpy.CONFIG.IMGUR_CLIENT_ID: + if not jellypy.CONFIG.IMGUR_CLIENT_ID: logger.error("Tautulli Helpers :: Cannot upload image to Imgur. No Imgur client id specified in the settings.") return img_url, delete_hash - headers = {'Authorization': 'Client-ID %s' % plexpy.CONFIG.IMGUR_CLIENT_ID} + headers = {'Authorization': 'Client-ID %s' % jellypy.CONFIG.IMGUR_CLIENT_ID} data = {'image': base64.b64encode(img_data), 'title': img_title.encode('utf-8'), 'name': str(rating_key) + '.png', @@ -789,11 +789,11 @@ def upload_to_imgur(img_data, img_title='', rating_key='', fallback=''): def delete_from_imgur(delete_hash, img_title='', fallback=''): """ Deletes an image from Imgur """ - if not plexpy.CONFIG.IMGUR_CLIENT_ID: + if not jellypy.CONFIG.IMGUR_CLIENT_ID: logger.error("Tautulli Helpers :: Cannot delete image from Imgur. 
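upload_to_imgur above authenticates anonymously with nothing but a Client-ID header and posts the base64-encoded image. A minimal sketch of the same Imgur v3 call using the requests library directly (the project routes this through its own request helper; error handling trimmed):

    import base64
    import requests

    def upload_image(img_data, client_id, title='', name='img.png'):
        headers = {'Authorization': 'Client-ID %s' % client_id}
        data = {'image': base64.b64encode(img_data),
                'title': title,
                'name': name}
        r = requests.post('https://api.imgur.com/3/image', headers=headers, data=data)
        r.raise_for_status()
        return r.json()['data']['link']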
No Imgur client id specified in the settings.") return False - headers = {'Authorization': 'Client-ID %s' % plexpy.CONFIG.IMGUR_CLIENT_ID} + headers = {'Authorization': 'Client-ID %s' % jellypy.CONFIG.IMGUR_CLIENT_ID} response, err_msg, req_msg = request.request_response2('https://api.imgur.com/3/image/%s' % delete_hash, 'DELETE', headers=headers) @@ -813,18 +813,18 @@ def upload_to_cloudinary(img_data, img_title='', rating_key='', fallback=''): """ Uploads an image to Cloudinary """ img_url = '' - if not plexpy.CONFIG.CLOUDINARY_CLOUD_NAME or not plexpy.CONFIG.CLOUDINARY_API_KEY or not plexpy.CONFIG.CLOUDINARY_API_SECRET: + if not jellypy.CONFIG.CLOUDINARY_CLOUD_NAME or not jellypy.CONFIG.CLOUDINARY_API_KEY or not jellypy.CONFIG.CLOUDINARY_API_SECRET: logger.error("Tautulli Helpers :: Cannot upload image to Cloudinary. Cloudinary settings not specified in the settings.") return img_url cloudinary.config( - cloud_name=plexpy.CONFIG.CLOUDINARY_CLOUD_NAME, - api_key=plexpy.CONFIG.CLOUDINARY_API_KEY, - api_secret=plexpy.CONFIG.CLOUDINARY_API_SECRET + cloud_name=jellypy.CONFIG.CLOUDINARY_CLOUD_NAME, + api_key=jellypy.CONFIG.CLOUDINARY_API_KEY, + api_secret=jellypy.CONFIG.CLOUDINARY_API_SECRET ) # Cloudinary library has very poor support for non-ASCII characters on Python 2 - if plexpy.PYTHON2: + if jellypy.PYTHON2: _img_title = latinToAscii(img_title, replace=True) else: _img_title = img_title @@ -844,14 +844,14 @@ def upload_to_cloudinary(img_data, img_title='', rating_key='', fallback=''): def delete_from_cloudinary(rating_key=None, delete_all=False): """ Deletes an image from Cloudinary """ - if not plexpy.CONFIG.CLOUDINARY_CLOUD_NAME or not plexpy.CONFIG.CLOUDINARY_API_KEY or not plexpy.CONFIG.CLOUDINARY_API_SECRET: + if not jellypy.CONFIG.CLOUDINARY_CLOUD_NAME or not jellypy.CONFIG.CLOUDINARY_API_KEY or not jellypy.CONFIG.CLOUDINARY_API_SECRET: logger.error("Tautulli Helpers :: Cannot delete image from Cloudinary. Cloudinary settings not specified in the settings.") return False cloudinary.config( - cloud_name=plexpy.CONFIG.CLOUDINARY_CLOUD_NAME, - api_key=plexpy.CONFIG.CLOUDINARY_API_KEY, - api_secret=plexpy.CONFIG.CLOUDINARY_API_SECRET + cloud_name=jellypy.CONFIG.CLOUDINARY_CLOUD_NAME, + api_key=jellypy.CONFIG.CLOUDINARY_API_KEY, + api_secret=jellypy.CONFIG.CLOUDINARY_API_SECRET ) if delete_all: @@ -870,14 +870,14 @@ def cloudinary_transform(rating_key=None, width=1000, height=1500, opacity=100, img_format='png', img_title='', fallback=None): url = '' - if not plexpy.CONFIG.CLOUDINARY_CLOUD_NAME or not plexpy.CONFIG.CLOUDINARY_API_KEY or not plexpy.CONFIG.CLOUDINARY_API_SECRET: + if not jellypy.CONFIG.CLOUDINARY_CLOUD_NAME or not jellypy.CONFIG.CLOUDINARY_API_KEY or not jellypy.CONFIG.CLOUDINARY_API_SECRET: logger.error("Tautulli Helpers :: Cannot transform image on Cloudinary. Cloudinary settings not specified in the settings.") return url cloudinary.config( - cloud_name=plexpy.CONFIG.CLOUDINARY_CLOUD_NAME, - api_key=plexpy.CONFIG.CLOUDINARY_API_KEY, - api_secret=plexpy.CONFIG.CLOUDINARY_API_SECRET + cloud_name=jellypy.CONFIG.CLOUDINARY_CLOUD_NAME, + api_key=jellypy.CONFIG.CLOUDINARY_API_KEY, + api_secret=jellypy.CONFIG.CLOUDINARY_API_SECRET ) img_options = {'format': img_format, @@ -914,7 +914,7 @@ def cache_image(url, image=None): If no image is provided, tries to return the image from the cache directory. 
""" # Create image directory if it doesn't exist - imgdir = os.path.join(plexpy.CONFIG.CACHE_DIR, 'images/') + imgdir = os.path.join(jellypy.CONFIG.CACHE_DIR, 'images/') if not os.path.exists(imgdir): logger.debug("Tautulli Helpers :: Creating image cache directory at %s" % imgdir) os.makedirs(imgdir) @@ -1122,12 +1122,12 @@ def eval_logic_groups_to_bool(logic_groups, eval_conds): def get_plexpy_url(hostname=None): - if plexpy.CONFIG.ENABLE_HTTPS: + if jellypy.CONFIG.ENABLE_HTTPS: scheme = 'https' else: scheme = 'http' - if hostname is None and plexpy.CONFIG.HTTP_HOST == '0.0.0.0': + if hostname is None and jellypy.CONFIG.HTTP_HOST == '0.0.0.0': import socket try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) @@ -1142,18 +1142,18 @@ def get_plexpy_url(hostname=None): if not hostname: hostname = 'localhost' - elif hostname == 'localhost' and plexpy.CONFIG.HTTP_HOST != '0.0.0.0': - hostname = plexpy.CONFIG.HTTP_HOST + elif hostname == 'localhost' and jellypy.CONFIG.HTTP_HOST != '0.0.0.0': + hostname = jellypy.CONFIG.HTTP_HOST else: - hostname = hostname or plexpy.CONFIG.HTTP_HOST + hostname = hostname or jellypy.CONFIG.HTTP_HOST - if plexpy.HTTP_PORT not in (80, 443): - port = ':' + str(plexpy.HTTP_PORT) + if jellypy.HTTP_PORT not in (80, 443): + port = ':' + str(jellypy.HTTP_PORT) else: port = '' - if plexpy.HTTP_ROOT is not None and plexpy.HTTP_ROOT.strip('/'): - root = '/' + plexpy.HTTP_ROOT.strip('/') + if jellypy.HTTP_ROOT is not None and jellypy.HTTP_ROOT.strip('/'): + root = '/' + jellypy.HTTP_ROOT.strip('/') else: root = '' @@ -1202,10 +1202,10 @@ def split_args(args=None): if isinstance(args, list): return args elif isinstance(args, str): - if plexpy.PYTHON2: + if jellypy.PYTHON2: args = args.encode('utf-8') args = shlex.split(args) - if plexpy.PYTHON2: + if jellypy.PYTHON2: args = [a.decode('utf-8') for a in args] return args return [] diff --git a/plexpy/http_handler.py b/jellypy/http_handler.py similarity index 91% rename from plexpy/http_handler.py rename to jellypy/http_handler.py index 910c1cfe..cfadb964 100644 --- a/plexpy/http_handler.py +++ b/jellypy/http_handler.py @@ -26,13 +26,13 @@ import certifi import requests import urllib3 -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import helpers import logger else: - from plexpy import helpers - from plexpy import logger + from jellypy import helpers + from jellypy import logger class HTTPHandler(object): @@ -53,14 +53,14 @@ class HTTPHandler(object): self.headers = headers else: self.headers = { - 'X-Plex-Product': plexpy.common.PRODUCT, - 'X-Plex-Version': plexpy.common.RELEASE, - 'X-Plex-Client-Identifier': plexpy.CONFIG.PMS_UUID, - 'X-Plex-Platform': plexpy.common.PLATFORM, - 'X-Plex-Platform-Version': plexpy.common.PLATFORM_RELEASE, - 'X-Plex-Device': '{} {}'.format(plexpy.common.PLATFORM, - plexpy.common.PLATFORM_RELEASE), - 'X-Plex-Device-Name': plexpy.common.PLATFORM_DEVICE_NAME + 'X-Plex-Product': jellypy.common.PRODUCT, + 'X-Plex-Version': jellypy.common.RELEASE, + 'X-Plex-Client-Identifier': jellypy.CONFIG.PMS_UUID, + 'X-Plex-Platform': jellypy.common.PLATFORM, + 'X-Plex-Platform-Version': jellypy.common.PLATFORM_RELEASE, + 'X-Plex-Device': '{} {}'.format(jellypy.common.PLATFORM, + jellypy.common.PLATFORM_RELEASE), + 'X-Plex-Device-Name': jellypy.common.PLATFORM_DEVICE_NAME } self.token = token diff --git a/plexpy/libraries.py b/jellypy/libraries.py similarity index 94% rename from plexpy/libraries.py rename to jellypy/libraries.py index 71930144..80ad13a7 100644 --- 
a/plexpy/libraries.py +++ b/jellypy/libraries.py @@ -23,8 +23,8 @@ from future.builtins import object import json import os -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import common import database import datatables @@ -36,22 +36,22 @@ if plexpy.PYTHON2: import users from plex import Plex else: - from plexpy import common - from plexpy import database - from plexpy import datatables - from plexpy import helpers - from plexpy import logger - from plexpy import plextv - from plexpy import pmsconnect - from plexpy import session - from plexpy import users - from plexpy.plex import Plex + from jellypy import common + from jellypy import database + from jellypy import datatables + from jellypy import helpers + from jellypy import logger + from jellypy import plextv + from jellypy import pmsconnect + from jellypy import session + from jellypy import users + from jellypy.plex import Plex def refresh_libraries(): logger.info("Tautulli Libraries :: Requesting libraries list refresh...") - server_id = plexpy.CONFIG.PMS_IDENTIFIER + server_id = jellypy.CONFIG.PMS_IDENTIFIER if not server_id: logger.error("Tautulli Libraries :: No PMS identifier, cannot refresh libraries. Verify server in settings.") return @@ -96,15 +96,15 @@ def refresh_libraries(): query = 'UPDATE library_sections SET is_active = 0 WHERE server_id != ? OR ' \ 'section_id NOT IN ({})'.format(', '.join(['?'] * len(section_ids))) - monitor_db.action(query=query, args=[plexpy.CONFIG.PMS_IDENTIFIER] + section_ids) + monitor_db.action(query=query, args=[jellypy.CONFIG.PMS_IDENTIFIER] + section_ids) - if plexpy.CONFIG.HOME_LIBRARY_CARDS == ['first_run_wizard']: - plexpy.CONFIG.__setattr__('HOME_LIBRARY_CARDS', library_keys) - plexpy.CONFIG.write() + if jellypy.CONFIG.HOME_LIBRARY_CARDS == ['first_run_wizard']: + jellypy.CONFIG.__setattr__('HOME_LIBRARY_CARDS', library_keys) + jellypy.CONFIG.write() else: - new_keys = plexpy.CONFIG.HOME_LIBRARY_CARDS + new_keys - plexpy.CONFIG.__setattr__('HOME_LIBRARY_CARDS', new_keys) - plexpy.CONFIG.write() + new_keys = jellypy.CONFIG.HOME_LIBRARY_CARDS + new_keys + jellypy.CONFIG.__setattr__('HOME_LIBRARY_CARDS', new_keys) + jellypy.CONFIG.write() logger.info("Tautulli Libraries :: Libraries list refreshed.") return True @@ -117,7 +117,7 @@ def add_live_tv_library(refresh=False): monitor_db = database.MonitorDatabase() result = monitor_db.select_single('SELECT * FROM library_sections ' 'WHERE section_id = ? 
and server_id = ?', - [common.LIVE_TV_SECTION_ID, plexpy.CONFIG.PMS_IDENTIFIER]) + [common.LIVE_TV_SECTION_ID, jellypy.CONFIG.PMS_IDENTIFIER]) if result and not refresh or not result and refresh: return @@ -125,9 +125,9 @@ def add_live_tv_library(refresh=False): if not refresh: logger.info("Tautulli Libraries :: Adding Live TV library to the database.") - section_keys = {'server_id': plexpy.CONFIG.PMS_IDENTIFIER, + section_keys = {'server_id': jellypy.CONFIG.PMS_IDENTIFIER, 'section_id': common.LIVE_TV_SECTION_ID} - section_values = {'server_id': plexpy.CONFIG.PMS_IDENTIFIER, + section_values = {'server_id': jellypy.CONFIG.PMS_IDENTIFIER, 'section_id': common.LIVE_TV_SECTION_ID, 'section_name': common.LIVE_TV_SECTION_NAME, 'section_type': 'live', @@ -148,7 +148,7 @@ def has_library_type(section_type): def get_collections(section_id=None): - plex = Plex(plexpy.CONFIG.PMS_URL, session.get_session_user_token()) + plex = Plex(jellypy.CONFIG.PMS_URL, session.get_session_user_token()) library = plex.get_library(section_id) if library.type not in ('movie', 'show', 'artist'): @@ -246,7 +246,7 @@ def get_playlists(section_id=None, user_id=None): if not plex_token: return [] - plex = Plex(plexpy.CONFIG.PMS_URL, plex_token) + plex = Plex(jellypy.CONFIG.PMS_URL, plex_token) if user_id: playlists = plex.plex.playlists() @@ -321,7 +321,7 @@ class Libraries(object): custom_where = [['library_sections.deleted_section', 0]] if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES if session.get_session_shared_libraries(): custom_where.append(['library_sections.section_id', session.get_session_shared_libraries()]) @@ -484,7 +484,7 @@ class Libraries(object): # Get play counts from the database monitor_db = database.MonitorDatabase() - if plexpy.CONFIG.GROUP_HISTORY_TABLES: + if jellypy.CONFIG.GROUP_HISTORY_TABLES: count_by = 'reference_id' else: count_by = 'id' @@ -517,7 +517,7 @@ class Libraries(object): # Import media info cache from json file if rating_key: try: - inFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR,'media_info_%s-%s.json' % (section_id, rating_key)) + inFilePath = os.path.join(jellypy.CONFIG.CACHE_DIR, 'media_info_%s-%s.json' % (section_id, rating_key)) with open(inFilePath, 'r') as inFile: rows = json.load(inFile) library_count = len(rows) @@ -527,7 +527,7 @@ class Libraries(object): pass elif section_id: try: - inFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR,'media_info_%s.json' % section_id) + inFilePath = os.path.join(jellypy.CONFIG.CACHE_DIR, 'media_info_%s.json' % section_id) with open(inFilePath, 'r') as inFile: rows = json.load(inFile) library_count = len(rows) @@ -594,14 +594,14 @@ class Libraries(object): # Cache the media info to a json file if rating_key: try: - outFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR,'media_info_%s-%s.json' % (section_id, rating_key)) + outFilePath = os.path.join(jellypy.CONFIG.CACHE_DIR, 'media_info_%s-%s.json' % (section_id, rating_key)) with open(outFilePath, 'w') as outFile: json.dump(rows, outFile) except IOError as e: logger.debug("Tautulli Libraries :: Unable to create cache file for rating_key %s." 
% rating_key) elif section_id: try: - outFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR,'media_info_%s.json' % section_id) + outFilePath = os.path.join(jellypy.CONFIG.CACHE_DIR, 'media_info_%s.json' % section_id) with open(outFilePath, 'w') as outFile: json.dump(rows, outFile) except IOError as e: @@ -694,7 +694,7 @@ class Libraries(object): if rating_key: #logger.debug("Tautulli Libraries :: Getting file sizes for rating_key %s." % rating_key) try: - inFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR,'media_info_%s-%s.json' % (section_id, rating_key)) + inFilePath = os.path.join(jellypy.CONFIG.CACHE_DIR, 'media_info_%s-%s.json' % (section_id, rating_key)) with open(inFilePath, 'r') as inFile: rows = json.load(inFile) except IOError as e: @@ -704,7 +704,7 @@ class Libraries(object): elif section_id: logger.debug("Tautulli Libraries :: Getting file sizes for section_id %s." % section_id) try: - inFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR,'media_info_%s.json' % section_id) + inFilePath = os.path.join(jellypy.CONFIG.CACHE_DIR, 'media_info_%s.json' % section_id) with open(inFilePath, 'r') as inFile: rows = json.load(inFile) except IOError as e: @@ -738,14 +738,14 @@ class Libraries(object): # Cache the media info to a json file if rating_key: try: - outFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR,'media_info_%s-%s.json' % (section_id, rating_key)) + outFilePath = os.path.join(jellypy.CONFIG.CACHE_DIR, 'media_info_%s-%s.json' % (section_id, rating_key)) with open(outFilePath, 'w') as outFile: json.dump(rows, outFile) except IOError as e: logger.debug("Tautulli Libraries :: Unable to create cache file with file sizes for rating_key %s." % rating_key) elif section_id: try: - outFilePath = os.path.join(plexpy.CONFIG.CACHE_DIR,'media_info_%s.json' % section_id) + outFilePath = os.path.join(jellypy.CONFIG.CACHE_DIR, 'media_info_%s.json' % section_id) with open(outFilePath, 'w') as outFile: json.dump(rows, outFile) except IOError as e: @@ -796,7 +796,7 @@ class Libraries(object): return default_return if server_id is None: - server_id = plexpy.CONFIG.PMS_IDENTIFIER + server_id = jellypy.CONFIG.PMS_IDENTIFIER def get_library_details(section_id=section_id, server_id=server_id): monitor_db = database.MonitorDatabase() @@ -877,7 +877,7 @@ class Libraries(object): return [] if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES if query_days and query_days is not None: query_days = map(helpers.cast_to_int, query_days.split(',')) @@ -941,7 +941,7 @@ class Libraries(object): return [] if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES monitor_db = database.MonitorDatabase() @@ -1088,13 +1088,13 @@ class Libraries(object): return all(success) elif str(section_id).isdigit(): - server_id = server_id or plexpy.CONFIG.PMS_IDENTIFIER - if server_id == plexpy.CONFIG.PMS_IDENTIFIER: + server_id = server_id or jellypy.CONFIG.PMS_IDENTIFIER + if server_id == jellypy.CONFIG.PMS_IDENTIFIER: delete_success = database.delete_library_history(section_id=section_id) else: logger.warn("Tautulli Libraries :: Library history not deleted for library section_id %s " "because library server_id %s does not match Plex server identifier %s." 
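The libraries hunks above cache the media-info table per section (and per rating key) as plain JSON files under CACHE_DIR so repeat loads skip the server round-trip. A small read-through sketch of the same pattern; the directory and the fetch callback are illustrative:

    import json
    import os

    CACHE_DIR = '/tmp/jellypy-cache'  # stand-in for jellypy.CONFIG.CACHE_DIR

    def load_media_info(section_id, fetch):
        path = os.path.join(CACHE_DIR, 'media_info_%s.json' % section_id)
        try:
            with open(path, 'r') as f:
                return json.load(f)        # cache hit
        except IOError:
            rows = fetch(section_id)       # cache miss: ask the server
            os.makedirs(CACHE_DIR, exist_ok=True)
            with open(path, 'w') as f:
                json.dump(rows, f)
            return rows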
- % (section_id, server_id, plexpy.CONFIG.PMS_IDENTIFIER)) + % (section_id, server_id, jellypy.CONFIG.PMS_IDENTIFIER)) delete_success = True if purge_only: @@ -1151,7 +1151,7 @@ class Libraries(object): try: if section_id.isdigit(): - [os.remove(os.path.join(plexpy.CONFIG.CACHE_DIR, f)) for f in os.listdir(plexpy.CONFIG.CACHE_DIR) + [os.remove(os.path.join(jellypy.CONFIG.CACHE_DIR, f)) for f in os.listdir(jellypy.CONFIG.CACHE_DIR) if f.startswith('media_info_%s' % section_id) and f.endswith('.json')] logger.debug("Tautulli Libraries :: Deleted media info table cache for section_id %s." % section_id) @@ -1167,7 +1167,7 @@ class Libraries(object): # Refresh the PMS_URL to make sure the server_id is updated plextv.get_server_resources() - server_id = plexpy.CONFIG.PMS_IDENTIFIER + server_id = jellypy.CONFIG.PMS_IDENTIFIER try: logger.debug("Tautulli Libraries :: Deleting libraries where server_id does not match %s." % server_id) diff --git a/plexpy/lock.py b/jellypy/lock.py similarity index 98% rename from plexpy/lock.py rename to jellypy/lock.py index 6cdc6800..a53b4ebf 100644 --- a/plexpy/lock.py +++ b/jellypy/lock.py @@ -22,11 +22,11 @@ import future.moves.queue as queue import time import threading -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import logger else: - from plexpy import logger + from jellypy import logger class TimedLock(object): diff --git a/plexpy/log_reader.py b/jellypy/log_reader.py similarity index 90% rename from plexpy/log_reader.py rename to jellypy/log_reader.py index 76d8b23c..e48508c3 100644 --- a/plexpy/log_reader.py +++ b/jellypy/log_reader.py @@ -20,23 +20,23 @@ from io import open import os -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import helpers import logger else: - from plexpy import helpers - from plexpy import logger + from jellypy import helpers + from jellypy import logger def get_log_tail(window=20, parsed=True, log_type="server"): - if plexpy.CONFIG.PMS_LOGS_FOLDER: + if jellypy.CONFIG.PMS_LOGS_FOLDER: log_file = "" if log_type == "server": - log_file = os.path.join(plexpy.CONFIG.PMS_LOGS_FOLDER, 'Plex Media Server.log') + log_file = os.path.join(jellypy.CONFIG.PMS_LOGS_FOLDER, 'Plex Media Server.log') elif log_type == "scanner": - log_file = os.path.join(plexpy.CONFIG.PMS_LOGS_FOLDER, 'Plex Media Scanner.log') + log_file = os.path.join(jellypy.CONFIG.PMS_LOGS_FOLDER, 'Plex Media Scanner.log') else: return [] diff --git a/plexpy/logger.py b/jellypy/logger.py similarity index 98% rename from plexpy/logger.py rename to jellypy/logger.py index 33e0d2b2..1a1e5263 100644 --- a/plexpy/logger.py +++ b/jellypy/logger.py @@ -32,13 +32,13 @@ import sys import threading import traceback -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import helpers from config import _BLACKLIST_KEYS, _WHITELIST_KEYS else: - from plexpy import helpers - from plexpy.config import _BLACKLIST_KEYS, _WHITELIST_KEYS + from jellypy import helpers + from jellypy.config import _BLACKLIST_KEYS, _WHITELIST_KEYS # These settings are for file logging only @@ -96,7 +96,7 @@ class BlacklistFilter(logging.Filter): super(BlacklistFilter, self).__init__() def filter(self, record): - if not plexpy.CONFIG.LOG_BLACKLIST: + if not jellypy.CONFIG.LOG_BLACKLIST: return True for item in _BLACKLIST_WORDS: @@ -131,7 +131,7 @@ class RegexFilter(logging.Filter): self.regex = re.compile(r'') def filter(self, record): - if not plexpy.CONFIG.LOG_BLACKLIST: + if not jellypy.CONFIG.LOG_BLACKLIST: return True try: @@ -349,7 +349,7 @@ 
def initLogger(console=False, log_dir=False, verbose=False): # Add filters to log handlers # Only add filters after the config file has been initialized # Nothing prior to initialization should contain sensitive information - if not plexpy.DEV and plexpy.CONFIG: + if not jellypy.DEV and jellypy.CONFIG: log_handlers = logger.handlers + \ logger_api.handlers + \ logger_plex_websocket.handlers + \ diff --git a/plexpy/macos.py b/jellypy/macos.py similarity index 81% rename from plexpy/macos.py rename to jellypy/macos.py index 4dafaa13..a6d00790 100644 --- a/plexpy/macos.py +++ b/jellypy/macos.py @@ -30,23 +30,23 @@ except ImportError: if HAS_PYOBJC: import rumps -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import common import logger import versioncheck else: - from plexpy import common - from plexpy import logger - from plexpy import versioncheck + from jellypy import common + from jellypy import logger + from jellypy import versioncheck class MacOSSystemTray(object): def __init__(self): - self.image_dir = os.path.join(plexpy.PROG_DIR, 'data/interfaces/', plexpy.CONFIG.INTERFACE, 'images') + self.image_dir = os.path.join(jellypy.PROG_DIR, 'data/interfaces/', jellypy.CONFIG.INTERFACE, 'images') self.icon = os.path.join(self.image_dir, 'logo-flat-white.ico') - if plexpy.UPDATE_AVAILABLE: + if jellypy.UPDATE_AVAILABLE: self.update_title = 'Check for Updates - Update Available!' else: self.update_title = 'Check for Updates' @@ -61,10 +61,10 @@ class MacOSSystemTray(object): rumps.MenuItem('Restart', callback=self.tray_restart), rumps.MenuItem('Quit', callback=self.tray_quit) ] - if not plexpy.FROZEN: + if not jellypy.FROZEN: self.menu.insert(6, rumps.MenuItem('Update', callback=self.tray_update)) - self.menu[2].state = plexpy.CONFIG.LAUNCH_STARTUP - self.menu[3].state = plexpy.CONFIG.LAUNCH_BROWSER + self.menu[2].state = jellypy.CONFIG.LAUNCH_STARTUP + self.menu[3].state = jellypy.CONFIG.LAUNCH_BROWSER self.tray_icon = rumps.App(common.PRODUCT, icon=self.icon, template=True, menu=self.menu, quit_button=None) @@ -84,50 +84,50 @@ class MacOSSystemTray(object): self.tray_icon.icon = kwargs['icon'] def tray_open(self, tray_icon): - plexpy.launch_browser(plexpy.CONFIG.HTTP_HOST, plexpy.HTTP_PORT, plexpy.HTTP_ROOT) + jellypy.launch_browser(jellypy.CONFIG.HTTP_HOST, jellypy.HTTP_PORT, jellypy.HTTP_ROOT) def tray_startup(self, tray_icon): - plexpy.CONFIG.LAUNCH_STARTUP = not plexpy.CONFIG.LAUNCH_STARTUP + jellypy.CONFIG.LAUNCH_STARTUP = not jellypy.CONFIG.LAUNCH_STARTUP set_startup() def tray_browser(self, tray_icon): - plexpy.CONFIG.LAUNCH_BROWSER = not plexpy.CONFIG.LAUNCH_BROWSER + jellypy.CONFIG.LAUNCH_BROWSER = not jellypy.CONFIG.LAUNCH_BROWSER set_startup() def tray_check_update(self, tray_icon): versioncheck.check_update() def tray_update(self, tray_icon): - if plexpy.UPDATE_AVAILABLE: - plexpy.SIGNAL = 'update' + if jellypy.UPDATE_AVAILABLE: + jellypy.SIGNAL = 'update' else: self.update_title = 'Check for Updates - No Update Available' self.menu[5].title = self.update_title def tray_restart(self, tray_icon): - plexpy.SIGNAL = 'restart' + jellypy.SIGNAL = 'restart' def tray_quit(self, tray_icon): - plexpy.SIGNAL = 'shutdown' + jellypy.SIGNAL = 'shutdown' def change_tray_update_icon(self): - if plexpy.UPDATE_AVAILABLE: + if jellypy.UPDATE_AVAILABLE: self.update_title = 'Check for Updates - Update Available!' 
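logger.py above only attaches its redaction filters once the config exists, and each filter returns early when LOG_BLACKLIST is disabled. A toy logging.Filter in the same shape, redacting a fixed word list (the real filters pull their words and regexes from the config):

    import logging

    BLACKLIST_WORDS = ['hunter2']  # illustrative; Tautulli builds this from config

    class BlacklistFilter(logging.Filter):
        def filter(self, record):
            for word in BLACKLIST_WORDS:
                if word in str(record.msg):
                    record.msg = str(record.msg).replace(word, 8 * '*')
            return True  # never drop the record, only redact it

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger('demo')
    log.addFilter(BlacklistFilter())
    log.info('password is hunter2')  # emits 'password is ********'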
else: self.update_title = 'Check for Updates' self.menu[5].title = self.update_title def change_tray_icons(self): - self.tray_icon.menu['Start Tautulli at Login'].state = plexpy.CONFIG.LAUNCH_STARTUP - self.tray_icon.menu['Open Browser when Tautulli Starts'].state = plexpy.CONFIG.LAUNCH_BROWSER + self.tray_icon.menu['Start Tautulli at Login'].state = jellypy.CONFIG.LAUNCH_STARTUP + self.tray_icon.menu['Open Browser when Tautulli Starts'].state = jellypy.CONFIG.LAUNCH_BROWSER def set_startup(): - if plexpy.MAC_SYS_TRAY_ICON: - plexpy.MAC_SYS_TRAY_ICON.change_tray_icons() + if jellypy.MAC_SYS_TRAY_ICON: + jellypy.MAC_SYS_TRAY_ICON.change_tray_icons() - if plexpy.INSTALL_TYPE == 'macos': - if plexpy.CONFIG.LAUNCH_STARTUP: + if jellypy.INSTALL_TYPE == 'macos': + if jellypy.CONFIG.LAUNCH_STARTUP: try: subprocess.Popen(['osascript', '-e', 'tell application "System Events"', @@ -162,11 +162,11 @@ def set_startup(): plist_file_path = os.path.join(launch_agents, plist_file) exe = sys.executable - run_args = [arg for arg in plexpy.ARGS if arg != '--nolaunch'] - if plexpy.FROZEN: + run_args = [arg for arg in jellypy.ARGS if arg != '--nolaunch'] + if jellypy.FROZEN: args = [exe] + run_args else: - args = [exe, plexpy.FULL_PATH] + run_args + args = [exe, jellypy.FULL_PATH] + run_args plist_dict = { 'Label': common.PRODUCT, @@ -174,7 +174,7 @@ def set_startup(): 'RunAtLoad': True } - if plexpy.CONFIG.LAUNCH_STARTUP: + if jellypy.CONFIG.LAUNCH_STARTUP: if not os.path.exists(launch_agents): try: os.makedirs(launch_agents) diff --git a/plexpy/mobile_app.py b/jellypy/mobile_app.py similarity index 97% rename from plexpy/mobile_app.py rename to jellypy/mobile_app.py index 3f784c40..bae39ef8 100644 --- a/plexpy/mobile_app.py +++ b/jellypy/mobile_app.py @@ -21,15 +21,15 @@ from future.builtins import str import requests import threading -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import database import helpers import logger else: - from plexpy import database - from plexpy import helpers - from plexpy import logger + from jellypy import database + from jellypy import helpers + from jellypy import logger TEMP_DEVICE_TOKEN = None diff --git a/plexpy/newsletter_handler.py b/jellypy/newsletter_handler.py similarity index 96% rename from plexpy/newsletter_handler.py rename to jellypy/newsletter_handler.py index 8458e144..acc1eb75 100644 --- a/plexpy/newsletter_handler.py +++ b/jellypy/newsletter_handler.py @@ -23,17 +23,17 @@ import os from apscheduler.triggers.cron import CronTrigger import email.utils -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import database import helpers import logger import newsletters else: - from plexpy import database - from plexpy import helpers - from plexpy import logger - from plexpy import newsletters + from jellypy import database + from jellypy import helpers + from jellypy import logger + from jellypy import newsletters NEWSLETTER_SCHED = None @@ -48,7 +48,7 @@ def add_newsletter_each(newsletter_id=None, notify_action=None, **kwargs): 'newsletter_id': newsletter_id, 'notify_action': notify_action} data.update(kwargs) - plexpy.NOTIFY_QUEUE.put(data) + jellypy.NOTIFY_QUEUE.put(data) def schedule_newsletters(newsletter_id=None): @@ -211,7 +211,7 @@ def get_newsletter(newsletter_uuid=None, newsletter_id_name=None): end_date.replace('-', ''), newsletter_uuid) - newsletter_folder = plexpy.CONFIG.NEWSLETTER_DIR or os.path.join(plexpy.DATA_DIR, 'newsletters') + newsletter_folder = jellypy.CONFIG.NEWSLETTER_DIR or 
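On macOS, set_startup above toggles launch-at-login by writing or removing a LaunchAgent property list whose ProgramArguments re-run the current executable. A sketch of the write path using the standard-library plistlib, under the assumption of a plain Python 3 install (the label and script path are illustrative):

    import os
    import plistlib
    import sys

    label = 'com.JellyPy.JellyPy'  # illustrative label
    launch_agents = os.path.expanduser('~/Library/LaunchAgents')
    plist_path = os.path.join(launch_agents, label + '.plist')

    plist = {
        'Label': label,
        'ProgramArguments': [sys.executable, '/path/to/JellyPy.py'],
        'RunAtLoad': True,
    }

    os.makedirs(launch_agents, exist_ok=True)
    with open(plist_path, 'wb') as f:
        plistlib.dump(plist, f)  # plistlib.dump is Python 3 only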
os.path.join(jellypy.DATA_DIR, 'newsletters') newsletter_file_fp = os.path.join(newsletter_folder, newsletter_file) if newsletter_file in os.listdir(newsletter_folder): diff --git a/plexpy/newsletters.py b/jellypy/newsletters.py similarity index 95% rename from plexpy/newsletters.py rename to jellypy/newsletters.py index c74f2a96..4c8c7db7 100644 --- a/plexpy/newsletters.py +++ b/jellypy/newsletters.py @@ -29,8 +29,8 @@ from mako import exceptions import os import re -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import common import database import helpers @@ -40,14 +40,14 @@ if plexpy.PYTHON2: import pmsconnect from notifiers import send_notification, EMAIL else: - from plexpy import common - from plexpy import database - from plexpy import helpers - from plexpy import libraries - from plexpy import logger - from plexpy import newsletter_handler - from plexpy import pmsconnect - from plexpy.notifiers import send_notification, EMAIL + from jellypy import common + from jellypy import database + from jellypy import helpers + from jellypy import libraries + from jellypy import logger + from jellypy import newsletter_handler + from jellypy import pmsconnect + from jellypy.notifiers import send_notification, EMAIL AGENT_IDS = { @@ -319,14 +319,14 @@ def blacklist_logger(): def serve_template(templatename, **kwargs): - if plexpy.CONFIG.NEWSLETTER_CUSTOM_DIR: + if jellypy.CONFIG.NEWSLETTER_CUSTOM_DIR: logger.info("Tautulli Newsletters :: Using custom newsletter template directory.") - template_dir = plexpy.CONFIG.NEWSLETTER_CUSTOM_DIR + template_dir = jellypy.CONFIG.NEWSLETTER_CUSTOM_DIR else: - interface_dir = os.path.join(str(plexpy.PROG_DIR), 'data/interfaces/') - template_dir = os.path.join(str(interface_dir), plexpy.CONFIG.NEWSLETTER_TEMPLATES) + interface_dir = os.path.join(str(jellypy.PROG_DIR), 'data/interfaces/') + template_dir = os.path.join(str(interface_dir), jellypy.CONFIG.NEWSLETTER_TEMPLATES) - if not plexpy.CONFIG.NEWSLETTER_INLINE_STYLES: + if not jellypy.CONFIG.NEWSLETTER_INLINE_STYLES: templatename = templatename.replace('.html', '.internal.html') _hplookup = TemplateLookup(directories=[template_dir], default_filters=['unicode', 'h']) @@ -344,7 +344,7 @@ def generate_newsletter_uuid(): db = database.MonitorDatabase() while not uuid or uuid_exists: - uuid = plexpy.generate_uuid()[:8] + uuid = jellypy.generate_uuid()[:8] result = db.select_single( 'SELECT EXISTS(SELECT uuid FROM newsletter_log WHERE uuid = ?) 
as uuid_exists', [uuid]) uuid_exists = result['uuid_exists'] @@ -528,7 +528,7 @@ class Newsletter(object): def _save(self): newsletter_file = self.filename_formatted - newsletter_folder = plexpy.CONFIG.NEWSLETTER_DIR or os.path.join(plexpy.DATA_DIR, 'newsletters') + newsletter_folder = jellypy.CONFIG.NEWSLETTER_DIR or os.path.join(jellypy.DATA_DIR, 'newsletters') newsletter_file_fp = os.path.join(newsletter_folder, newsletter_file) # In case the user has deleted it manually @@ -552,7 +552,7 @@ class Newsletter(object): newsletter_stripped = ''.join(l.strip() for l in self.newsletter.splitlines()) plaintext = 'HTML email support is required to view the newsletter.\n' - if plexpy.CONFIG.NEWSLETTER_SELF_HOSTED and plexpy.CONFIG.HTTP_BASE_URL: + if jellypy.CONFIG.NEWSLETTER_SELF_HOSTED and jellypy.CONFIG.HTTP_BASE_URL: plaintext += self._DEFAULT_BODY.format(**self.parameters) email_reply_msg_id = self.email_reply_msg_id if self.config['threaded'] else None @@ -589,15 +589,15 @@ class Newsletter(object): return parameters def _build_params(self): - date_format = helpers.momentjs_to_arrow(plexpy.CONFIG.DATE_FORMAT) + date_format = helpers.momentjs_to_arrow(jellypy.CONFIG.DATE_FORMAT) - if plexpy.CONFIG.NEWSLETTER_SELF_HOSTED and plexpy.CONFIG.HTTP_BASE_URL: - base_url = plexpy.CONFIG.HTTP_BASE_URL + plexpy.HTTP_ROOT + 'newsletter/' + if jellypy.CONFIG.NEWSLETTER_SELF_HOSTED and jellypy.CONFIG.HTTP_BASE_URL: + base_url = jellypy.CONFIG.HTTP_BASE_URL + jellypy.HTTP_ROOT + 'newsletter/' else: base_url = helpers.get_plexpy_url() + '/newsletter/' parameters = { - 'server_name': plexpy.CONFIG.PMS_NAME, + 'server_name': jellypy.CONFIG.PMS_NAME, 'start_date': self.start_date.format(date_format), 'end_date': self.end_date.format(date_format), 'current_year': self.start_date.year, @@ -616,13 +616,13 @@ class Newsletter(object): 'newsletter_uuid': self.uuid, 'newsletter_id': self.newsletter_id, 'newsletter_id_name': self.newsletter_id_name, - 'newsletter_password': plexpy.CONFIG.NEWSLETTER_PASSWORD + 'newsletter_password': jellypy.CONFIG.NEWSLETTER_PASSWORD } return parameters def build_text(self): - from plexpy.notification_handler import CustomFormatter + from jellypy.notification_handler import CustomFormatter custom_formatter = CustomFormatter() try: @@ -655,7 +655,7 @@ class Newsletter(object): return subject, body, message def build_filename(self): - from plexpy.notification_handler import CustomFormatter + from jellypy.notification_handler import CustomFormatter custom_formatter = CustomFormatter() try: @@ -702,7 +702,7 @@ class RecentlyAdded(Newsletter): _TEMPLATE = 'recently_added.html' def _get_recently_added(self, media_type=None): - from plexpy.notification_handler import format_group_index + from jellypy.notification_handler import format_group_index pms_connect = pmsconnect.PmsConnect() @@ -818,7 +818,7 @@ class RecentlyAdded(Newsletter): return recently_added def retrieve_data(self): - from plexpy.notification_handler import get_img_info, set_hash_image_info + from jellypy.notification_handler import get_img_info, set_hash_image_info if not self.config['incl_libraries']: logger.warn("Tautulli Newsletters :: Failed to retrieve %s newsletter data: no libraries selected." 
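generate_newsletter_uuid above loops until an eight-character id is unused, checking with a SELECT EXISTS against newsletter_log. A compact sketch of the retry loop, with uuid4 and an in-memory set standing in for the project's generate_uuid helper and the database check:

    import uuid

    def generate_unique_id(taken):
        new_id = None
        while not new_id or new_id in taken:
            new_id = uuid.uuid4().hex[:8]
        taken.add(new_id)
        return new_id

    seen = set()
    ids = [generate_unique_id(seen) for _ in range(3)]
    assert len(set(ids)) == 3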
% self.NAME) @@ -948,8 +948,8 @@ class RecentlyAdded(Newsletter): newsletter_libraries.append(s['section_name']) parameters['newsletter_libraries'] = ', '.join(sorted(newsletter_libraries)) - parameters['pms_identifier'] = plexpy.CONFIG.PMS_IDENTIFIER - parameters['pms_web_url'] = plexpy.CONFIG.PMS_WEB_URL + parameters['pms_identifier'] = jellypy.CONFIG.PMS_IDENTIFIER + parameters['pms_web_url'] = jellypy.CONFIG.PMS_WEB_URL return parameters diff --git a/plexpy/notification_handler.py b/jellypy/notification_handler.py similarity index 95% rename from plexpy/notification_handler.py rename to jellypy/notification_handler.py index 6a846316..0b89a39b 100644 --- a/plexpy/notification_handler.py +++ b/jellypy/notification_handler.py @@ -39,8 +39,8 @@ import time import musicbrainzngs -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import activity_processor import common import database @@ -52,20 +52,20 @@ if plexpy.PYTHON2: import request from newsletter_handler import notify as notify_newsletter else: - from plexpy import activity_processor - from plexpy import common - from plexpy import database - from plexpy import datafactory - from plexpy import logger - from plexpy import helpers - from plexpy import notifiers - from plexpy import pmsconnect - from plexpy import request - from plexpy.newsletter_handler import notify as notify_newsletter + from jellypy import activity_processor + from jellypy import common + from jellypy import database + from jellypy import datafactory + from jellypy import logger + from jellypy import helpers + from jellypy import notifiers + from jellypy import pmsconnect + from jellypy import request + from jellypy.newsletter_handler import notify as notify_newsletter def process_queue(): - queue = plexpy.NOTIFY_QUEUE + queue = jellypy.NOTIFY_QUEUE while True: params = queue.get() @@ -148,14 +148,14 @@ def add_notifier_each(notifier_id=None, notify_action=None, stream_data=None, ti 'timeline_data': timeline_data, 'parameters': parameters} data.update(kwargs) - plexpy.NOTIFY_QUEUE.put(data) + jellypy.NOTIFY_QUEUE.put(data) else: logger.debug("Tautulli NotificationHandler :: Custom notification conditions not satisfied, skipping notifier_id %s." 
% notifier['id']) # Add on_concurrent and on_newdevice to queue if action is on_play if notify_action == 'on_play': - plexpy.NOTIFY_QUEUE.put({'stream_data': stream_data.copy(), 'notify_action': 'on_concurrent'}) - plexpy.NOTIFY_QUEUE.put({'stream_data': stream_data.copy(), 'notify_action': 'on_newdevice'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': stream_data.copy(), 'notify_action': 'on_concurrent'}) + jellypy.NOTIFY_QUEUE.put({'stream_data': stream_data.copy(), 'notify_action': 'on_newdevice'}) def notify_conditions(notify_action=None, stream_data=None, timeline_data=None): @@ -186,27 +186,27 @@ def notify_conditions(notify_action=None, stream_data=None, timeline_data=None): if result: user_sessions = [s for s in result['sessions'] if s['user_id'] == stream_data['user_id']] - if plexpy.CONFIG.NOTIFY_CONCURRENT_BY_IP: - evaluated = len(Counter(s['ip_address'] for s in user_sessions)) >= plexpy.CONFIG.NOTIFY_CONCURRENT_THRESHOLD + if jellypy.CONFIG.NOTIFY_CONCURRENT_BY_IP: + evaluated = len(Counter(s['ip_address'] for s in user_sessions)) >= jellypy.CONFIG.NOTIFY_CONCURRENT_THRESHOLD else: - evaluated = len(user_sessions) >= plexpy.CONFIG.NOTIFY_CONCURRENT_THRESHOLD + evaluated = len(user_sessions) >= jellypy.CONFIG.NOTIFY_CONCURRENT_THRESHOLD elif notify_action == 'on_newdevice': data_factory = datafactory.DataFactory() user_devices = data_factory.get_user_devices(user_id=stream_data['user_id'], - history_only=not plexpy.CONFIG.NOTIFY_NEW_DEVICE_INITIAL_ONLY) + history_only=not jellypy.CONFIG.NOTIFY_NEW_DEVICE_INITIAL_ONLY) evaluated = stream_data['machine_id'] not in user_devices elif stream_data['media_type'] in ('movie', 'episode', 'clip'): progress_percent = helpers.get_percent(stream_data['view_offset'], stream_data['duration']) if notify_action == 'on_stop': - evaluated = (plexpy.CONFIG.NOTIFY_CONSECUTIVE or - (stream_data['media_type'] == 'movie' and progress_percent < plexpy.CONFIG.MOVIE_WATCHED_PERCENT) or - (stream_data['media_type'] == 'episode' and progress_percent < plexpy.CONFIG.TV_WATCHED_PERCENT)) + evaluated = (jellypy.CONFIG.NOTIFY_CONSECUTIVE or + (stream_data['media_type'] == 'movie' and progress_percent < jellypy.CONFIG.MOVIE_WATCHED_PERCENT) or + (stream_data['media_type'] == 'episode' and progress_percent < jellypy.CONFIG.TV_WATCHED_PERCENT)) elif notify_action == 'on_resume': - evaluated = plexpy.CONFIG.NOTIFY_CONSECUTIVE or progress_percent < 99 + evaluated = jellypy.CONFIG.NOTIFY_CONSECUTIVE or progress_percent < 99 # All other activity notify actions else: @@ -497,9 +497,9 @@ def set_notify_success(notification_id): def build_media_notify_params(notify_action=None, session=None, timeline=None, manual_trigger=False, **kwargs): # Get time formats - date_format = helpers.momentjs_to_arrow(plexpy.CONFIG.DATE_FORMAT) - time_format = helpers.momentjs_to_arrow(plexpy.CONFIG.TIME_FORMAT) - duration_format = helpers.momentjs_to_arrow(plexpy.CONFIG.TIME_FORMAT, duration=True) + date_format = helpers.momentjs_to_arrow(jellypy.CONFIG.DATE_FORMAT) + time_format = helpers.momentjs_to_arrow(jellypy.CONFIG.TIME_FORMAT) + duration_format = helpers.momentjs_to_arrow(jellypy.CONFIG.TIME_FORMAT, duration=True) # Get metadata for the item if session: @@ -598,8 +598,8 @@ def build_media_notify_params(notify_action=None, session=None, timeline=None, m plex_web_rating_key = notify_params['rating_key'] notify_params['plex_url'] = '{web_url}#!/server/{pms_identifier}/details?key=%2Flibrary%2Fmetadata%2F{rating_key}'.format( - web_url=plexpy.CONFIG.PMS_WEB_URL, - 
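notify_conditions above gates on_concurrent on either the raw session count or, when NOTIFY_CONCURRENT_BY_IP is set, the number of distinct client IPs, which collections.Counter reduces to a one-liner. A sketch with inline sample data:

    from collections import Counter

    sessions = [{'user_id': 1, 'ip_address': '10.0.0.2'},
                {'user_id': 1, 'ip_address': '10.0.0.2'},
                {'user_id': 1, 'ip_address': '10.0.0.7'}]

    threshold = 2
    by_ip = True

    if by_ip:
        evaluated = len(Counter(s['ip_address'] for s in sessions)) >= threshold
    else:
        evaluated = len(sessions) >= threshold

    assert evaluated  # two distinct IPs meet the threshold of 2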
pms_identifier=plexpy.CONFIG.PMS_IDENTIFIER, + web_url=jellypy.CONFIG.PMS_WEB_URL, + pms_identifier=jellypy.CONFIG.PMS_IDENTIFIER, rating_key=plex_web_rating_key) # Check external guids @@ -646,7 +646,7 @@ def build_media_notify_params(notify_action=None, session=None, timeline=None, m notify_params['lastfm_url'] = 'https://www.last.fm/music/' + notify_params['lastfm_id'] # Get TheMovieDB info (for movies and tv only) - if plexpy.CONFIG.THEMOVIEDB_LOOKUP and notify_params['media_type'] in ('movie', 'show', 'season', 'episode'): + if jellypy.CONFIG.THEMOVIEDB_LOOKUP and notify_params['media_type'] in ('movie', 'show', 'season', 'episode'): if notify_params.get('themoviedb_id'): themoveidb_json = get_themoviedb_info(rating_key=rating_key, media_type=notify_params['media_type'], @@ -689,7 +689,7 @@ def build_media_notify_params(notify_action=None, session=None, timeline=None, m notify_params['themoviedb_id'], 'show' if lookup_media_type == 'tv' else 'movie') # Get TVmaze info (for tv shows only) - if plexpy.CONFIG.TVMAZE_LOOKUP and notify_params['media_type'] in ('show', 'season', 'episode'): + if jellypy.CONFIG.TVMAZE_LOOKUP and notify_params['media_type'] in ('show', 'season', 'episode'): if notify_params.get('thetvdb_id') or notify_params.get('imdb_id') or notify_params.get('plex_id'): if notify_params['media_type'] == 'episode': lookup_key = notify_params['grandparent_rating_key'] @@ -716,7 +716,7 @@ def build_media_notify_params(notify_action=None, session=None, timeline=None, m notify_params['trakt_url'] = 'https://trakt.tv/search/imdb/' + notify_params['imdb_id'] # Get MusicBrainz info (for music only) - if plexpy.CONFIG.MUSICBRAINZ_LOOKUP and notify_params['media_type'] in ('artist', 'album', 'track'): + if jellypy.CONFIG.MUSICBRAINZ_LOOKUP and notify_params['media_type'] in ('artist', 'album', 'track'): artist = release = recording = tracks = tnum = None if notify_params['media_type'] == 'artist': musicbrainz_type = 'artist' @@ -772,13 +772,13 @@ def build_media_notify_params(notify_action=None, session=None, timeline=None, m img_info = get_img_info(img=poster_thumb, rating_key=poster_key, title=poster_title, fallback=fallback) poster_info = {'poster_title': img_info['img_title'], 'poster_url': img_info['img_url']} notify_params.update(poster_info) - elif img_service == 'self-hosted' and plexpy.CONFIG.HTTP_BASE_URL: + elif img_service == 'self-hosted' and jellypy.CONFIG.HTTP_BASE_URL: img_hash = set_hash_image_info(img=poster_thumb, fallback=fallback) poster_info = {'poster_title': poster_title, - 'poster_url': plexpy.CONFIG.HTTP_BASE_URL + plexpy.HTTP_ROOT + 'image/' + img_hash} + 'poster_url': jellypy.CONFIG.HTTP_BASE_URL + jellypy.HTTP_ROOT + 'image/' + img_hash} notify_params.update(poster_info) - if ((manual_trigger or plexpy.CONFIG.NOTIFY_GROUP_RECENTLY_ADDED_GRANDPARENT) + if ((manual_trigger or jellypy.CONFIG.NOTIFY_GROUP_RECENTLY_ADDED_GRANDPARENT) and notify_params['media_type'] in ('show', 'artist')): show_name = notify_params['title'] episode_name = '' @@ -797,7 +797,7 @@ def build_media_notify_params(notify_action=None, session=None, timeline=None, m child_count = len(child_num) grandchild_count = '' - elif ((manual_trigger or plexpy.CONFIG.NOTIFY_GROUP_RECENTLY_ADDED_PARENT) + elif ((manual_trigger or jellypy.CONFIG.NOTIFY_GROUP_RECENTLY_ADDED_PARENT) and notify_params['media_type'] in ('season', 'album')): show_name = notify_params['parent_title'] episode_name = '' @@ -850,16 +850,16 @@ def build_media_notify_params(notify_action=None, session=None, timeline=None, m 
available_params = { # Global paramaters 'tautulli_version': common.RELEASE, - 'tautulli_remote': plexpy.CONFIG.GIT_REMOTE, - 'tautulli_branch': plexpy.CONFIG.GIT_BRANCH, - 'tautulli_commit': plexpy.CURRENT_VERSION, - 'server_name': plexpy.CONFIG.PMS_NAME, - 'server_ip': plexpy.CONFIG.PMS_IP, - 'server_port': plexpy.CONFIG.PMS_PORT, - 'server_url': plexpy.CONFIG.PMS_URL, - 'server_machine_id': plexpy.CONFIG.PMS_IDENTIFIER, - 'server_platform': plexpy.CONFIG.PMS_PLATFORM, - 'server_version': plexpy.CONFIG.PMS_VERSION, + 'tautulli_remote': jellypy.CONFIG.GIT_REMOTE, + 'tautulli_branch': jellypy.CONFIG.GIT_BRANCH, + 'tautulli_commit': jellypy.CURRENT_VERSION, + 'server_name': jellypy.CONFIG.PMS_NAME, + 'server_ip': jellypy.CONFIG.PMS_IP, + 'server_port': jellypy.CONFIG.PMS_PORT, + 'server_url': jellypy.CONFIG.PMS_URL, + 'server_machine_id': jellypy.CONFIG.PMS_IDENTIFIER, + 'server_platform': jellypy.CONFIG.PMS_PLATFORM, + 'server_version': jellypy.CONFIG.PMS_VERSION, 'action': notify_action.split('on_')[-1], 'current_year': now.year, 'current_month': now.month, @@ -1096,8 +1096,8 @@ def build_media_notify_params(notify_action=None, session=None, timeline=None, m def build_server_notify_params(notify_action=None, **kwargs): # Get time formats - date_format = plexpy.CONFIG.DATE_FORMAT.replace('Do','') - time_format = plexpy.CONFIG.TIME_FORMAT.replace('Do','') + date_format = jellypy.CONFIG.DATE_FORMAT.replace('Do', '') + time_format = jellypy.CONFIG.TIME_FORMAT.replace('Do', '') update_channel = pmsconnect.PmsConnect().get_server_update_channel() @@ -1111,16 +1111,16 @@ def build_server_notify_params(notify_action=None, **kwargs): available_params = { # Global paramaters 'tautulli_version': common.RELEASE, - 'tautulli_remote': plexpy.CONFIG.GIT_REMOTE, - 'tautulli_branch': plexpy.CONFIG.GIT_BRANCH, - 'tautulli_commit': plexpy.CURRENT_VERSION, - 'server_name': plexpy.CONFIG.PMS_NAME, - 'server_ip': plexpy.CONFIG.PMS_IP, - 'server_port': plexpy.CONFIG.PMS_PORT, - 'server_url': plexpy.CONFIG.PMS_URL, - 'server_platform': plexpy.CONFIG.PMS_PLATFORM, - 'server_version': plexpy.CONFIG.PMS_VERSION, - 'server_machine_id': plexpy.CONFIG.PMS_IDENTIFIER, + 'tautulli_remote': jellypy.CONFIG.GIT_REMOTE, + 'tautulli_branch': jellypy.CONFIG.GIT_BRANCH, + 'tautulli_commit': jellypy.CURRENT_VERSION, + 'server_name': jellypy.CONFIG.PMS_NAME, + 'server_ip': jellypy.CONFIG.PMS_IP, + 'server_port': jellypy.CONFIG.PMS_PORT, + 'server_url': jellypy.CONFIG.PMS_URL, + 'server_platform': jellypy.CONFIG.PMS_PLATFORM, + 'server_version': jellypy.CONFIG.PMS_VERSION, + 'server_machine_id': jellypy.CONFIG.PMS_IDENTIFIER, 'action': notify_action.split('on_')[-1], 'current_year': now.year, 'current_month': now.month, @@ -1464,7 +1464,7 @@ def set_hash_image_info(img=None, rating_key=None, width=750, height=1000, rating_key = img_rating_key img_string = '{}.{}.{}.{}.{}.{}.{}.{}'.format( - plexpy.CONFIG.PMS_UUID, img, rating_key, width, height, opacity, background, blur, fallback) + jellypy.CONFIG.PMS_UUID, img, rating_key, width, height, opacity, background, blur, fallback) img_hash = hashlib.sha256(img_string.encode('utf-8')).hexdigest() if add_to_db: @@ -1572,7 +1572,7 @@ def lookup_themoviedb_by_id(rating_key=None, thetvdb_id=None, imdb_id=None, titl else: logger.debug("Tautulli NotificationHandler :: Looking up The Movie Database info for '{} ({})'.".format(title, year)) - params = {'api_key': plexpy.CONFIG.THEMOVIEDB_APIKEY} + params = {'api_key': jellypy.CONFIG.THEMOVIEDB_APIKEY} if thetvdb_id or imdb_id: 
params['external_source'] = 'tvdb_id' if thetvdb_id else 'imdb_id' @@ -1650,7 +1650,7 @@ def get_themoviedb_info(rating_key=None, media_type=None, themoviedb_id=None): logger.debug("Tautulli NotificationHandler :: Looking up The Movie Database info for themoviedb_id '{}'.".format(themoviedb_id)) - params = {'api_key': plexpy.CONFIG.THEMOVIEDB_APIKEY} + params = {'api_key': jellypy.CONFIG.THEMOVIEDB_APIKEY} response, err_msg, req_msg = request.request_response2('https://api.themoviedb.org/3/{}/{}'.format(media_type, themoviedb_id), params=params) if response and not err_msg: @@ -1870,7 +1870,7 @@ class CustomFormatter(Formatter): obj = self.convert_field(obj, conversion) # expand the format spec, if needed - if plexpy.PYTHON2: + if jellypy.PYTHON2: format_spec = self._vformat(format_spec, args, kwargs, used_args, recursion_depth - 1) else: @@ -1889,7 +1889,7 @@ class CustomFormatter(Formatter): result.append(suffix) # result.append(self.format_field(obj, format_spec)) - if plexpy.PYTHON2: + if jellypy.PYTHON2: return ''.join(result) else: return ''.join(result), auto_arg_index diff --git a/plexpy/notifiers.py b/jellypy/notifiers.py similarity index 98% rename from plexpy/notifiers.py rename to jellypy/notifiers.py index ebccce84..b477602f 100644 --- a/plexpy/notifiers.py +++ b/jellypy/notifiers.py @@ -57,8 +57,8 @@ import gntp.notifier import facebook import twitter -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import common import database import helpers @@ -68,14 +68,14 @@ if plexpy.PYTHON2: import request import users else: - from plexpy import common - from plexpy import database - from plexpy import helpers - from plexpy import logger - from plexpy import mobile_app - from plexpy import pmsconnect - from plexpy import request - from plexpy import users + from jellypy import common + from jellypy import database + from jellypy import helpers + from jellypy import logger + from jellypy import mobile_app + from jellypy import pmsconnect + from jellypy import request + from jellypy import users BROWSER_NOTIFIERS = {} @@ -943,7 +943,7 @@ class ANDROIDAPP(Notifier): 'cipher_text': base64.b64encode(encrypted_data), 'nonce': base64.b64encode(nonce), 'salt': base64.b64encode(salt), - 'server_id': plexpy.CONFIG.PMS_UUID} + 'server_id': jellypy.CONFIG.PMS_UUID} } else: logger.warn("Tautulli Notifiers :: PyCryptodome library is missing. " @@ -955,7 +955,7 @@ class ANDROIDAPP(Notifier): 'contents': {'en': 'Tautulli Notification'}, 'data': {'encrypted': False, 'plain_text': plaintext_data, - 'server_id': plexpy.CONFIG.PMS_UUID} + 'server_id': jellypy.CONFIG.PMS_UUID} } #logger.debug("OneSignal payload: {}".format(payload)) @@ -996,7 +996,7 @@ class ANDROIDAPP(Notifier): 'Instructions can be found in the ' 'FAQ.' , + % (jellypy.CONFIG.GIT_USER, jellypy.CONFIG.GIT_REPO)) + '" target="_blank">FAQ.' , 'input_type': 'help' }) else: @@ -1532,10 +1532,10 @@ class FACEBOOK(Notifier): def _get_authorization(self, app_id='', app_secret='', redirect_uri=''): # Temporarily store settings in the config so we can retrieve them in Facebook step 2. # Assume the user won't be requesting authorization for multiple Facebook notifiers at the same time. 
- plexpy.CONFIG.FACEBOOK_APP_ID = app_id - plexpy.CONFIG.FACEBOOK_APP_SECRET = app_secret - plexpy.CONFIG.FACEBOOK_REDIRECT_URI = redirect_uri - plexpy.CONFIG.FACEBOOK_TOKEN = 'temp' + jellypy.CONFIG.FACEBOOK_APP_ID = app_id + jellypy.CONFIG.FACEBOOK_APP_SECRET = app_secret + jellypy.CONFIG.FACEBOOK_REDIRECT_URI = redirect_uri + jellypy.CONFIG.FACEBOOK_TOKEN = 'temp' return facebook.auth_url(app_id=app_id, canvas_url=redirect_uri, @@ -1544,9 +1544,9 @@ class FACEBOOK(Notifier): def _get_credentials(self, code=''): logger.info("Tautulli Notifiers :: Requesting access token from {name}.".format(name=self.NAME)) - app_id = plexpy.CONFIG.FACEBOOK_APP_ID - app_secret = plexpy.CONFIG.FACEBOOK_APP_SECRET - redirect_uri = plexpy.CONFIG.FACEBOOK_REDIRECT_URI + app_id = jellypy.CONFIG.FACEBOOK_APP_ID + app_secret = jellypy.CONFIG.FACEBOOK_APP_SECRET + redirect_uri = jellypy.CONFIG.FACEBOOK_REDIRECT_URI try: # Request user access token @@ -1562,17 +1562,17 @@ class FACEBOOK(Notifier): response = api.extend_access_token(app_id=app_id, app_secret=app_secret) - plexpy.CONFIG.FACEBOOK_TOKEN = response['access_token'] + jellypy.CONFIG.FACEBOOK_TOKEN = response['access_token'] except Exception as e: logger.error("Tautulli Notifiers :: Error requesting {name} access token: {e}".format(name=self.NAME, e=e)) - plexpy.CONFIG.FACEBOOK_TOKEN = '' + jellypy.CONFIG.FACEBOOK_TOKEN = '' # Clear out temporary config values - plexpy.CONFIG.FACEBOOK_APP_ID = '' - plexpy.CONFIG.FACEBOOK_APP_SECRET = '' - plexpy.CONFIG.FACEBOOK_REDIRECT_URI = '' + jellypy.CONFIG.FACEBOOK_APP_ID = '' + jellypy.CONFIG.FACEBOOK_APP_SECRET = '' + jellypy.CONFIG.FACEBOOK_REDIRECT_URI = '' - return plexpy.CONFIG.FACEBOOK_TOKEN + return jellypy.CONFIG.FACEBOOK_TOKEN def _post_facebook(self, **data): if self.config['group_id']: @@ -1820,7 +1820,7 @@ class GROWL(Notifier): return False # Send it, including an image - image_file = os.path.join(str(plexpy.PROG_DIR), + image_file = os.path.join(str(jellypy.PROG_DIR), "data/interfaces/default/images/logo-circle.png") with open(image_file, 'rb') as f: @@ -2325,7 +2325,7 @@ class PLEX(Notifier): if self.config['image']: image = self.config['image'] else: - image = os.path.join(plexpy.DATA_DIR, os.path.abspath("data/interfaces/default/images/logo-circle.png")) + image = os.path.join(jellypy.DATA_DIR, os.path.abspath("data/interfaces/default/images/logo-circle.png")) for host in hosts: logger.info("Tautulli Notifiers :: Sending notification command to {name} @ {host}".format(name=self.NAME, host=host)) @@ -2430,8 +2430,8 @@ class PLEXMOBILEAPP(Notifier): 'to': self.config['user_ids'], 'data': { 'provider': { - 'identifier': plexpy.CONFIG.PMS_IDENTIFIER, - 'title': plexpy.CONFIG.PMS_NAME + 'identifier': jellypy.CONFIG.PMS_IDENTIFIER, + 'title': jellypy.CONFIG.PMS_NAME } } } @@ -2536,11 +2536,11 @@ class PLEXMOBILEAPP(Notifier): data['metadata'] = metadata data['uri'] = 'server://{}/com.plexapp.plugins.library/library/metadata/{}'.format( - plexpy.CONFIG.PMS_IDENTIFIER, uri_rating_key or pretty_metadata.parameters['rating_key'] + jellypy.CONFIG.PMS_IDENTIFIER, uri_rating_key or pretty_metadata.parameters['rating_key'] ) data['play'] = self.config['tap_action'] == 'play' - headers = {'X-Plex-Token': plexpy.CONFIG.PMS_TOKEN} + headers = {'X-Plex-Token': jellypy.CONFIG.PMS_TOKEN} return self.make_request(self.NOTIFICATION_URL, headers=headers, json=data) @@ -2977,7 +2977,7 @@ class SCRIPTS(Notifier): '.php': 'php', '.pl': 'perl', '.ps1': 'powershell -executionPolicy bypass -file', - '.py': 'python' if 
plexpy.FROZEN else sys.executable, + '.py': 'python' if jellypy.FROZEN else sys.executable, '.pyw': 'pythonw', '.rb': 'ruby', '.sh': '' @@ -3013,13 +3013,13 @@ class SCRIPTS(Notifier): def run_script(self, script, user_id): # Common environment variables custom_env = { - 'PLEX_URL': plexpy.CONFIG.PMS_URL, - 'PLEX_TOKEN': plexpy.CONFIG.PMS_TOKEN, + 'PLEX_URL': jellypy.CONFIG.PMS_URL, + 'PLEX_TOKEN': jellypy.CONFIG.PMS_TOKEN, 'PLEX_USER_TOKEN': '', 'TAUTULLI_URL': helpers.get_plexpy_url(hostname='localhost'), - 'TAUTULLI_PUBLIC_URL': plexpy.CONFIG.HTTP_BASE_URL + plexpy.HTTP_ROOT, - 'TAUTULLI_APIKEY': plexpy.CONFIG.API_KEY, - 'TAUTULLI_ENCODING': plexpy.SYS_ENCODING, + 'TAUTULLI_PUBLIC_URL': jellypy.CONFIG.HTTP_BASE_URL + jellypy.HTTP_ROOT, + 'TAUTULLI_APIKEY': jellypy.CONFIG.API_KEY, + 'TAUTULLI_ENCODING': jellypy.SYS_ENCODING, 'TAUTULLI_PYTHON_VERSION': common.PYTHON_VERSION } @@ -3027,10 +3027,10 @@ class SCRIPTS(Notifier): user_tokens = users.Users().get_tokens(user_id=user_id) custom_env['PLEX_USER_TOKEN'] = str(user_tokens['server_token']) - if self.pythonpath and plexpy.INSTALL_TYPE not in ('windows', 'macos'): + if self.pythonpath and jellypy.INSTALL_TYPE not in ('windows', 'macos'): custom_env['PYTHONPATH'] = os.pathsep.join([p for p in sys.path if p]) - if plexpy.PYTHON2: + if jellypy.PYTHON2: custom_env = {k.encode('utf-8'): v.encode('utf-8') for k, v in custom_env.items()} env = os.environ.copy() @@ -3137,8 +3137,8 @@ class SCRIPTS(Notifier): script.extend(script_args) - if plexpy.PYTHON2: - script = [s.encode(plexpy.SYS_ENCODING, 'ignore') for s in script] + if jellypy.PYTHON2: + script = [s.encode(jellypy.SYS_ENCODING, 'ignore') for s in script] logger.debug("Tautulli Notifiers :: Full script is: %s" % script) logger.debug("Tautulli Notifiers :: Executing script in a new thread.") @@ -3688,7 +3688,7 @@ class XBMC(Notifier): if self.config['image']: image = self.config['image'] else: - image = os.path.join(plexpy.DATA_DIR, os.path.abspath("data/interfaces/default/images/logo-circle.png")) + image = os.path.join(jellypy.DATA_DIR, os.path.abspath("data/interfaces/default/images/logo-circle.png")) for host in hosts: logger.info("Tautulli Notifiers :: Sending notification command to XMBC @ " + host) diff --git a/plexpy/plex.py b/jellypy/plex.py similarity index 94% rename from plexpy/plex.py rename to jellypy/plex.py index b46f0722..fb48a9c2 100644 --- a/plexpy/plex.py +++ b/jellypy/plex.py @@ -21,11 +21,11 @@ from future.builtins import str from plexapi.server import PlexServer -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import logger else: - from plexpy import logger + from jellypy import logger class Plex(object): diff --git a/plexpy/plexivity_import.py b/jellypy/plexivity_import.py similarity index 99% rename from plexpy/plexivity_import.py rename to jellypy/plexivity_import.py index 49e1d214..9103296b 100644 --- a/plexpy/plexivity_import.py +++ b/jellypy/plexivity_import.py @@ -22,19 +22,19 @@ import arrow import sqlite3 from xml.dom import minidom -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import activity_processor import database import helpers import logger import users else: - from plexpy import activity_processor - from plexpy import database - from plexpy import helpers - from plexpy import logger - from plexpy import users + from jellypy import activity_processor + from jellypy import database + from jellypy import helpers + from jellypy import logger + from jellypy import users def extract_plexivity_xml(xml=None): 
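The plexivity_import.py hunk above ends the same way nearly every module in this patch does: the Python 2 branch keeps its bare top-level imports, and the Python 3 branch switches `from plexpy import ...` to `from jellypy import ...`. Because the substitution is this mechanical, the easiest way to review it is to scan the tree for stragglers. The snippet below is a minimal sketch of such a check and is not part of the patch; the old package name comes from the diff, while the paths and file filter are illustrative. Note that the patch deliberately keeps some legacy identifiers (for example `plexpy_download_info` and the `TAUTULLI_*` environment variables), so hits want human review rather than automatic rejection.

#!/usr/bin/env python3
"""Sketch: flag leftover references to the old package name after a rename."""
import os
import re

OLD_NAME = "plexpy"
# Word-bounded, so embedded names such as get_plexpy_url() are not flagged.
PATTERN = re.compile(r"\b%s\b" % OLD_NAME)

def find_stale_references(root="."):
    """Yield (path, line_number, line) for every word-bounded OLD_NAME hit."""
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            if not name.endswith(".py"):
                continue
            path = os.path.join(dirpath, name)
            with open(path, encoding="utf-8", errors="ignore") as f:
                for lineno, line in enumerate(f, start=1):
                    if PATTERN.search(line):
                        yield path, lineno, line.rstrip()

if __name__ == "__main__":
    for path, lineno, line in find_stale_references():
        print("%s:%d: %s" % (path, lineno, line))
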
diff --git a/plexpy/plextv.py b/jellypy/plextv.py similarity index 94% rename from plexpy/plextv.py rename to jellypy/plextv.py index 0eb73652..85174c5a 100644 --- a/plexpy/plextv.py +++ b/jellypy/plextv.py @@ -23,8 +23,8 @@ from future.builtins import object import base64 import json -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import common import helpers import http_handler @@ -33,31 +33,31 @@ if plexpy.PYTHON2: import pmsconnect import session else: - from plexpy import common - from plexpy import helpers - from plexpy import http_handler - from plexpy import logger - from plexpy import users - from plexpy import pmsconnect - from plexpy import session + from jellypy import common + from jellypy import helpers + from jellypy import http_handler + from jellypy import logger + from jellypy import users + from jellypy import pmsconnect + from jellypy import session def get_server_resources(return_presence=False, return_server=False, return_info=False, **kwargs): if not return_presence and not return_info: logger.info("Tautulli PlexTV :: Requesting resources for server...") - server = {'pms_name': plexpy.CONFIG.PMS_NAME, - 'pms_version': plexpy.CONFIG.PMS_VERSION, - 'pms_platform': plexpy.CONFIG.PMS_PLATFORM, - 'pms_ip': plexpy.CONFIG.PMS_IP, - 'pms_port': plexpy.CONFIG.PMS_PORT, - 'pms_ssl': plexpy.CONFIG.PMS_SSL, - 'pms_is_remote': plexpy.CONFIG.PMS_IS_REMOTE, - 'pms_is_cloud': plexpy.CONFIG.PMS_IS_CLOUD, - 'pms_url': plexpy.CONFIG.PMS_URL, - 'pms_url_manual': plexpy.CONFIG.PMS_URL_MANUAL, - 'pms_identifier': plexpy.CONFIG.PMS_IDENTIFIER, - 'pms_plexpass': plexpy.CONFIG.PMS_PLEXPASS + server = {'pms_name': jellypy.CONFIG.PMS_NAME, + 'pms_version': jellypy.CONFIG.PMS_VERSION, + 'pms_platform': jellypy.CONFIG.PMS_PLATFORM, + 'pms_ip': jellypy.CONFIG.PMS_IP, + 'pms_port': jellypy.CONFIG.PMS_PORT, + 'pms_ssl': jellypy.CONFIG.PMS_SSL, + 'pms_is_remote': jellypy.CONFIG.PMS_IS_REMOTE, + 'pms_is_cloud': jellypy.CONFIG.PMS_IS_CLOUD, + 'pms_url': jellypy.CONFIG.PMS_URL, + 'pms_url_manual': jellypy.CONFIG.PMS_URL_MANUAL, + 'pms_identifier': jellypy.CONFIG.PMS_IDENTIFIER, + 'pms_plexpass': jellypy.CONFIG.PMS_PLEXPASS } if return_info: @@ -132,8 +132,8 @@ def get_server_resources(return_presence=False, return_server=False, return_info logger.info("Tautulli PlexTV :: Selected server: %s (%s) (%s - Version %s)", server['pms_name'], server['pms_url'], server['pms_platform'], server['pms_version']) - plexpy.CONFIG.process_kwargs(server) - plexpy.CONFIG.write() + jellypy.CONFIG.process_kwargs(server) + jellypy.CONFIG.write() class PlexTV(object): @@ -147,8 +147,8 @@ class PlexTV(object): self.token = token self.urls = 'https://plex.tv' - self.timeout = plexpy.CONFIG.PMS_TIMEOUT - self.ssl_verify = plexpy.CONFIG.VERIFY_SSL_CERT + self.timeout = jellypy.CONFIG.PMS_TIMEOUT + self.ssl_verify = jellypy.CONFIG.VERIFY_SSL_CERT if self.username is None and self.password is None: if not self.token: @@ -158,7 +158,7 @@ class PlexTV(object): user_tokens = user_data.get_tokens(user_id=session.get_session_user_id()) self.token = user_tokens['server_token'] else: - self.token = plexpy.CONFIG.PMS_TOKEN + self.token = jellypy.CONFIG.PMS_TOKEN if not self.token: logger.error("Tautulli PlexTV :: PlexTV called, but no token provided.") @@ -212,7 +212,7 @@ class PlexTV(object): if force: logger.debug("Tautulli PlexTV :: Forcing refresh of Plex.tv token.") devices_list = self.get_devices_list() - device_id = next((d for d in devices_list if d['device_identifier'] == plexpy.CONFIG.PMS_UUID), 
{}).get('device_id', None) + device_id = next((d for d in devices_list if d['device_identifier'] == jellypy.CONFIG.PMS_UUID), {}).get('device_id', None) if device_id: logger.debug("Tautulli PlexTV :: Removing Tautulli from Plex.tv devices.") @@ -228,8 +228,8 @@ class PlexTV(object): user = self.get_token() if user: token = user['auth_token'] - plexpy.CONFIG.__setattr__('PMS_TOKEN', token) - plexpy.CONFIG.write() + jellypy.CONFIG.__setattr__('PMS_TOKEN', token) + jellypy.CONFIG.write() logger.info("Tautulli PlexTV :: Updated Plex.tv token for Tautulli.") return token @@ -245,7 +245,7 @@ class PlexTV(object): return None for a in xml_head: - if helpers.get_xml_attr(a, 'clientIdentifier') == plexpy.CONFIG.PMS_IDENTIFIER \ + if helpers.get_xml_attr(a, 'clientIdentifier') == jellypy.CONFIG.PMS_IDENTIFIER \ and 'server' in helpers.get_xml_attr(a, 'provides'): server_token = helpers.get_xml_attr(a, 'accessToken') break @@ -412,7 +412,7 @@ class PlexTV(object): def get_full_users_list(self): own_account = self.get_plextv_user_details(output_format='xml') friends_list = self.get_plextv_friends(output_format='xml') - shared_servers = self.get_plextv_shared_servers(machine_id=plexpy.CONFIG.PMS_IDENTIFIER, + shared_servers = self.get_plextv_shared_servers(machine_id=jellypy.CONFIG.PMS_IDENTIFIER, output_format='xml') users_list = [] @@ -498,7 +498,7 @@ class PlexTV(object): rating_key_filter=None, sync_id_filter=None): if not machine_id: - machine_id = plexpy.CONFIG.PMS_IDENTIFIER + machine_id = jellypy.CONFIG.PMS_IDENTIFIER if isinstance(rating_key_filter, list): rating_key_filter = [str(k) for k in rating_key_filter] @@ -709,7 +709,7 @@ class PlexTV(object): return {} for a in xml_head: - if helpers.get_xml_attr(a, 'machineIdentifier') == plexpy.CONFIG.PMS_IDENTIFIER: + if helpers.get_xml_attr(a, 'machineIdentifier') == jellypy.CONFIG.PMS_IDENTIFIER: server_times = {"created_at": helpers.get_xml_attr(a, 'createdAt'), "updated_at": helpers.get_xml_attr(a, 'updatedAt'), "version": helpers.get_xml_attr(a, 'version') @@ -824,7 +824,7 @@ class PlexTV(object): return {} # Get the updates for the platform - pms_platform = common.PMS_PLATFORM_NAME_OVERRIDES.get(plexpy.CONFIG.PMS_PLATFORM, plexpy.CONFIG.PMS_PLATFORM) + pms_platform = common.PMS_PLATFORM_NAME_OVERRIDES.get(jellypy.CONFIG.PMS_PLATFORM, jellypy.CONFIG.PMS_PLATFORM) platform_downloads = available_downloads.get('computer').get(pms_platform) or \ available_downloads.get('nas').get(pms_platform) @@ -833,12 +833,12 @@ class PlexTV(object): % pms_platform) return {} - v_old = helpers.cast_to_int("".join(v.zfill(4) for v in plexpy.CONFIG.PMS_VERSION.split('-')[0].split('.')[:4])) + v_old = helpers.cast_to_int("".join(v.zfill(4) for v in jellypy.CONFIG.PMS_VERSION.split('-')[0].split('.')[:4])) v_new = helpers.cast_to_int("".join(v.zfill(4) for v in platform_downloads.get('version', '').split('-')[0].split('.')[:4])) if not v_old: logger.error("Tautulli PlexTV :: Unable to retrieve Plex updates: Invalid current server version: %s." - % plexpy.CONFIG.PMS_VERSION) + % jellypy.CONFIG.PMS_VERSION) return {} if not v_new: logger.error("Tautulli PlexTV :: Unable to retrieve Plex updates: Invalid new server version: %s." 
@@ -847,8 +847,8 @@ class PlexTV(object): # Get proper download releases = platform_downloads.get('releases', [{}]) - release = next((r for r in releases if r['distro'] == plexpy.CONFIG.PMS_UPDATE_DISTRO and - r['build'] == plexpy.CONFIG.PMS_UPDATE_DISTRO_BUILD), releases[0]) + release = next((r for r in releases if r['distro'] == jellypy.CONFIG.PMS_UPDATE_DISTRO and + r['build'] == jellypy.CONFIG.PMS_UPDATE_DISTRO_BUILD), releases[0]) download_info = {'update_available': v_new > v_old, 'platform': platform_downloads.get('name'), @@ -876,13 +876,13 @@ class PlexTV(object): return False if subscription and helpers.get_xml_attr(subscription[0], 'active') == '1': - plexpy.CONFIG.__setattr__('PMS_PLEXPASS', 1) - plexpy.CONFIG.write() + jellypy.CONFIG.__setattr__('PMS_PLEXPASS', 1) + jellypy.CONFIG.write() return True else: logger.debug("Tautulli PlexTV :: Plex Pass subscription not found.") - plexpy.CONFIG.__setattr__('PMS_PLEXPASS', 0) - plexpy.CONFIG.write() + jellypy.CONFIG.__setattr__('PMS_PLEXPASS', 0) + jellypy.CONFIG.write() return False def get_devices_list(self): @@ -925,7 +925,7 @@ class PlexTV(object): for info in status_info: servers = info.getElementsByTagName('server') for s in servers: - if helpers.get_xml_attr(s, 'address') == plexpy.CONFIG.PMS_IP: + if helpers.get_xml_attr(s, 'address') == jellypy.CONFIG.PMS_IP: if helpers.get_xml_attr(info, 'running') == '1': return True else: diff --git a/plexpy/plexwatch_import.py b/jellypy/plexwatch_import.py similarity index 99% rename from plexpy/plexwatch_import.py rename to jellypy/plexwatch_import.py index a942deb7..edd1aeae 100644 --- a/plexpy/plexwatch_import.py +++ b/jellypy/plexwatch_import.py @@ -21,19 +21,19 @@ from future.builtins import str import sqlite3 from xml.dom import minidom -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import activity_processor import database import helpers import logger import users else: - from plexpy import activity_processor - from plexpy import database - from plexpy import helpers - from plexpy import logger - from plexpy import users + from jellypy import activity_processor + from jellypy import database + from jellypy import helpers + from jellypy import logger + from jellypy import users def extract_plexwatch_xml(xml=None): diff --git a/plexpy/pmsconnect.py b/jellypy/pmsconnect.py similarity index 99% rename from plexpy/pmsconnect.py rename to jellypy/pmsconnect.py index cd51c3d8..6ad95a85 100644 --- a/plexpy/pmsconnect.py +++ b/jellypy/pmsconnect.py @@ -26,8 +26,8 @@ import time from future.moves.urllib.parse import quote, quote_plus, urlencode from xml.dom.minidom import Node -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import activity_processor import common import helpers @@ -38,15 +38,15 @@ if plexpy.PYTHON2: import session import users else: - from plexpy import activity_processor - from plexpy import common - from plexpy import helpers - from plexpy import http_handler - from plexpy import libraries - from plexpy import logger - from plexpy import plextv - from plexpy import session - from plexpy import users + from jellypy import activity_processor + from jellypy import common + from jellypy import helpers + from jellypy import http_handler + from jellypy import libraries + from jellypy import logger + from jellypy import plextv + from jellypy import session + from jellypy import users def get_server_friendly_name(): @@ -57,13 +57,13 @@ def get_server_friendly_name(): if not server_name: servers_info = 
PmsConnect().get_servers_info() for server in servers_info: - if server['machine_identifier'] == plexpy.CONFIG.PMS_IDENTIFIER: + if server['machine_identifier'] == jellypy.CONFIG.PMS_IDENTIFIER: server_name = server['name'] break - if server_name and server_name != plexpy.CONFIG.PMS_NAME: - plexpy.CONFIG.__setattr__('PMS_NAME', server_name) - plexpy.CONFIG.write() + if server_name and server_name != jellypy.CONFIG.PMS_NAME: + jellypy.CONFIG.__setattr__('PMS_NAME', server_name) + jellypy.CONFIG.write() logger.info("Tautulli Pmsconnect :: Server name retrieved.") return server_name @@ -78,12 +78,12 @@ class PmsConnect(object): self.url = url self.token = token - if not self.url and plexpy.CONFIG.PMS_URL: - self.url = plexpy.CONFIG.PMS_URL + if not self.url and jellypy.CONFIG.PMS_URL: + self.url = jellypy.CONFIG.PMS_URL elif not self.url: - self.url = 'http://{hostname}:{port}'.format(hostname=plexpy.CONFIG.PMS_IP, - port=plexpy.CONFIG.PMS_PORT) - self.timeout = plexpy.CONFIG.PMS_TIMEOUT + self.url = 'http://{hostname}:{port}'.format(hostname=jellypy.CONFIG.PMS_IP, + port=jellypy.CONFIG.PMS_PORT) + self.timeout = jellypy.CONFIG.PMS_TIMEOUT if not self.token: # Check if we should use the admin token, or the guest server token @@ -92,7 +92,7 @@ class PmsConnect(object): user_tokens = user_data.get_tokens(user_id=session.get_session_user_id()) self.token = user_tokens['server_token'] else: - self.token = plexpy.CONFIG.PMS_TOKEN + self.token = jellypy.CONFIG.PMS_TOKEN self.request_handler = http_handler.HTTPHandler(urls=self.url, token=self.token, @@ -625,7 +625,7 @@ class PmsConnect(object): metadata = {} if not skip_cache and cache_key: - in_file_folder = os.path.join(plexpy.CONFIG.CACHE_DIR, 'session_metadata') + in_file_folder = os.path.join(jellypy.CONFIG.CACHE_DIR, 'session_metadata') in_file_path = os.path.join(in_file_folder, 'metadata-sessionKey-%s.json' % cache_key) if not os.path.exists(in_file_folder): @@ -640,7 +640,7 @@ class PmsConnect(object): if metadata: _cache_time = metadata.pop('_cache_time', 0) # Return cached metadata if less than cache_seconds ago - if return_cache or helpers.timestamp() - _cache_time <= plexpy.CONFIG.METADATA_CACHE_SECONDS: + if return_cache or helpers.timestamp() - _cache_time <= jellypy.CONFIG.METADATA_CACHE_SECONDS: return metadata if rating_key: @@ -649,7 +649,7 @@ class PmsConnect(object): metadata_xml = self.get_sync_item(str(sync_id), output_format='xml') elif plex_guid.startswith(('plex://movie', 'plex://episode')): rating_key = plex_guid.rsplit('/', 1)[-1] - plextv_metadata = PmsConnect(url='https://metadata.provider.plex.tv', token=plexpy.CONFIG.PMS_TOKEN) + plextv_metadata = PmsConnect(url='https://metadata.provider.plex.tv', token=jellypy.CONFIG.PMS_TOKEN) metadata_xml = plextv_metadata.get_metadata(rating_key, output_format='xml') else: return metadata @@ -1474,7 +1474,7 @@ class PmsConnect(object): if cache_key: metadata['_cache_time'] = helpers.timestamp() - out_file_folder = os.path.join(plexpy.CONFIG.CACHE_DIR, 'session_metadata') + out_file_folder = os.path.join(jellypy.CONFIG.CACHE_DIR, 'session_metadata') out_file_path = os.path.join(out_file_folder, 'metadata-sessionKey-%s.json' % cache_key) if not os.path.exists(out_file_folder): @@ -1782,7 +1782,7 @@ class PmsConnect(object): and not session.getElementsByTagName('Session') \ and not session.getElementsByTagName('TranscodeSession') \ and helpers.get_xml_attr(session, 'ratingKey').isdigit() \ - and plexpy.CONFIG.PMS_PLEXPASS: + and jellypy.CONFIG.PMS_PLEXPASS: plex_tv = 
plextv.PlexTV() parent_rating_key = helpers.get_xml_attr(session, 'parentRatingKey') grandparent_rating_key = helpers.get_xml_attr(session, 'grandparentRatingKey') @@ -3101,9 +3101,9 @@ class PmsConnect(object): logger.info("Tautulli is unable to check for Plex updates. Disabling check for Plex updates.") # Disable check for Plex updates - plexpy.CONFIG.MONITOR_PMS_UPDATES = 0 - plexpy.initialize_scheduler() - plexpy.CONFIG.write() + jellypy.CONFIG.MONITOR_PMS_UPDATES = 0 + jellypy.initialize_scheduler() + jellypy.CONFIG.write() return {} @@ -3123,13 +3123,13 @@ class PmsConnect(object): def set_server_version(self): identity = self.get_server_identity() - version = identity.get('version', plexpy.CONFIG.PMS_VERSION) + version = identity.get('version', jellypy.CONFIG.PMS_VERSION) - plexpy.CONFIG.__setattr__('PMS_VERSION', version) - plexpy.CONFIG.write() + jellypy.CONFIG.__setattr__('PMS_VERSION', version) + jellypy.CONFIG.write() def get_server_update_channel(self): - if plexpy.CONFIG.PMS_UPDATE_CHANNEL == 'plex': + if jellypy.CONFIG.PMS_UPDATE_CHANNEL == 'plex': update_channel_value = self.get_server_pref('ButlerUpdateChannel') if update_channel_value == '8': @@ -3137,4 +3137,4 @@ class PmsConnect(object): else: return 'public' - return plexpy.CONFIG.PMS_UPDATE_CHANNEL + return jellypy.CONFIG.PMS_UPDATE_CHANNEL diff --git a/plexpy/request.py b/jellypy/request.py similarity index 97% rename from plexpy/request.py rename to jellypy/request.py index 50bb8a0a..0989c0ea 100644 --- a/plexpy/request.py +++ b/jellypy/request.py @@ -25,13 +25,13 @@ import collections import requests from requests.packages import urllib3 -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import lock import logger else: - from plexpy import lock - from plexpy import logger + from jellypy import lock + from jellypy import logger # Dictionary with last request times, for rate limiting. @@ -59,7 +59,7 @@ def request_response(url, method="get", auto_raise=True, # Disable verification of SSL certificates if requested. Note: this could # pose a security issue! - kwargs["verify"] = bool(plexpy.CONFIG.VERIFY_SSL_CERT) + kwargs["verify"] = bool(jellypy.CONFIG.VERIFY_SSL_CERT) if not kwargs['verify']: urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) @@ -123,7 +123,7 @@ def request_response(url, method="get", auto_raise=True, e.response.status_code, cause) # Debug response - if plexpy.VERBOSE: + if jellypy.VERBOSE: server_message(e.response) else: logger.error("Request raised HTTP error.") @@ -151,7 +151,7 @@ def request_response2(url, method="get", auto_raise=True, # Disable verification of SSL certificates if requested. Note: this could # pose a security issue! 
- kwargs['verify'] = bool(plexpy.CONFIG.VERIFY_SSL_CERT) + kwargs['verify'] = bool(jellypy.CONFIG.VERIFY_SSL_CERT) if not kwargs['verify']: urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) @@ -203,7 +203,7 @@ def request_response2(url, method="get", auto_raise=True, err_msg = "Request raised a HTTP error: {}".format(http_err) - if plexpy.VERBOSE: + if jellypy.VERBOSE: req_msg = server_message(e.response, return_msg=True) else: @@ -264,7 +264,7 @@ def request_json(url, **kwargs): logger.error("Response returned invalid JSON data") # Debug response - if plexpy.VERBOSE: + if jellypy.VERBOSE: server_message(response) diff --git a/plexpy/session.py b/jellypy/session.py similarity index 98% rename from plexpy/session.py rename to jellypy/session.py index 1a4b8d3b..fca5b415 100644 --- a/plexpy/session.py +++ b/jellypy/session.py @@ -20,13 +20,13 @@ from future.builtins import str import cherrypy -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import common import users else: - from plexpy import common - from plexpy import users + from jellypy import common + from jellypy import users def get_session_info(): @@ -68,7 +68,7 @@ def get_session_user_token(): session_user_tokens = users.Users().get_tokens(_session['user_id']) user_token = session_user_tokens['server_token'] else: - user_token = plexpy.CONFIG.PMS_TOKEN + user_token = jellypy.CONFIG.PMS_TOKEN return user_token diff --git a/plexpy/users.py b/jellypy/users.py similarity index 98% rename from plexpy/users.py rename to jellypy/users.py index cb8cb6f4..af4e2b22 100644 --- a/plexpy/users.py +++ b/jellypy/users.py @@ -23,8 +23,8 @@ from future.moves.urllib.parse import parse_qsl import httpagentparser -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import common import database import datatables @@ -34,21 +34,21 @@ if plexpy.PYTHON2: import plextv import session else: - from plexpy import common - from plexpy import database - from plexpy import datatables - from plexpy import helpers - from plexpy import libraries - from plexpy import logger - from plexpy import plextv - from plexpy import session + from jellypy import common + from jellypy import database + from jellypy import datatables + from jellypy import helpers + from jellypy import libraries + from jellypy import logger + from jellypy import plextv + from jellypy import session def refresh_users(): logger.info("Tautulli Users :: Requesting users list refresh...") result = plextv.PlexTV().get_full_users_list() - server_id = plexpy.CONFIG.PMS_IDENTIFIER + server_id = jellypy.CONFIG.PMS_IDENTIFIER if not server_id: logger.error("Tautulli Users :: No PMS identifier, cannot refresh users. 
Verify server in settings.") return @@ -111,7 +111,7 @@ class Users(object): custom_where = [['users.deleted_user', 0]] if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES if session.get_session_user_id(): custom_where.append(['users.user_id', session.get_session_user_id()]) @@ -486,7 +486,7 @@ class Users(object): return [] if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES if query_days and query_days is not None: query_days = map(helpers.cast_to_int, query_days.split(',')) @@ -548,7 +548,7 @@ class Users(object): return [] if grouping is None: - grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES + grouping = jellypy.CONFIG.GROUP_HISTORY_TABLES monitor_db = database.MonitorDatabase() diff --git a/plexpy/version.py b/jellypy/version.py similarity index 100% rename from plexpy/version.py rename to jellypy/version.py diff --git a/plexpy/versioncheck.py b/jellypy/versioncheck.py similarity index 70% rename from plexpy/versioncheck.py rename to jellypy/versioncheck.py index fcb8fce9..8040ce9f 100644 --- a/plexpy/versioncheck.py +++ b/jellypy/versioncheck.py @@ -27,23 +27,23 @@ import re import subprocess import tarfile -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import common import helpers import logger import request else: - from plexpy import common - from plexpy import helpers - from plexpy import logger - from plexpy import request + from jellypy import common + from jellypy import helpers + from jellypy import logger + from jellypy import request def runGit(args): - if plexpy.CONFIG.GIT_PATH: - git_locations = ['"' + plexpy.CONFIG.GIT_PATH + '"'] + if jellypy.CONFIG.GIT_PATH: + git_locations = ['"' + jellypy.CONFIG.GIT_PATH + '"'] else: git_locations = ['git'] @@ -56,8 +56,8 @@ def runGit(args): cmd = cur_git + ' ' + args try: - logger.debug('Trying to execute: "' + cmd + '" with shell in ' + plexpy.PROG_DIR) - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=plexpy.PROG_DIR) + logger.debug('Trying to execute: "' + cmd + '" with shell in ' + jellypy.PROG_DIR) + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=jellypy.PROG_DIR) output, err = p.communicate() output = output.strip().decode() @@ -80,18 +80,18 @@ def runGit(args): def get_version(): - if plexpy.FROZEN and common.PLATFORM == 'Windows': - plexpy.INSTALL_TYPE = 'windows' + if jellypy.FROZEN and common.PLATFORM == 'Windows': + jellypy.INSTALL_TYPE = 'windows' current_version, current_branch = get_version_from_file() return current_version, 'origin', current_branch - elif plexpy.FROZEN and common.PLATFORM == 'Darwin': - plexpy.INSTALL_TYPE = 'macos' + elif jellypy.FROZEN and common.PLATFORM == 'Darwin': + jellypy.INSTALL_TYPE = 'macos' current_version, current_branch = get_version_from_file() return current_version, 'origin', current_branch - elif os.path.isdir(os.path.join(plexpy.PROG_DIR, '.git')): - plexpy.INSTALL_TYPE = 'git' + elif os.path.isdir(os.path.join(jellypy.PROG_DIR, '.git')): + jellypy.INSTALL_TYPE = 'git' output, err = runGit('rev-parse HEAD') if not output: @@ -104,9 +104,9 @@ def get_version(): logger.error('Output does not look like a hash, not using it.') cur_commit_hash = None - if plexpy.CONFIG.DO_NOT_OVERRIDE_GIT_BRANCH and plexpy.CONFIG.GIT_BRANCH: + if jellypy.CONFIG.DO_NOT_OVERRIDE_GIT_BRANCH and jellypy.CONFIG.GIT_BRANCH: remote_name = None - branch_name = 
plexpy.CONFIG.GIT_BRANCH + branch_name = jellypy.CONFIG.GIT_BRANCH else: remote_branch, err = runGit('rev-parse --abbrev-ref --symbolic-full-name @{u}') @@ -116,16 +116,16 @@ def get_version(): else: remote_name = branch_name = None - if not remote_name and plexpy.CONFIG.GIT_REMOTE: - logger.error('Could not retrieve remote name from git. Falling back to %s.' % plexpy.CONFIG.GIT_REMOTE) - remote_name = plexpy.CONFIG.GIT_REMOTE + if not remote_name and jellypy.CONFIG.GIT_REMOTE: + logger.error('Could not retrieve remote name from git. Falling back to %s.' % jellypy.CONFIG.GIT_REMOTE) + remote_name = jellypy.CONFIG.GIT_REMOTE if not remote_name: logger.error('Could not retrieve remote name from git. Defaulting to origin.') branch_name = 'origin' - if not branch_name and plexpy.CONFIG.GIT_BRANCH: - logger.error('Could not retrieve branch name from git. Falling back to %s.' % plexpy.CONFIG.GIT_BRANCH) - branch_name = plexpy.CONFIG.GIT_BRANCH + if not branch_name and jellypy.CONFIG.GIT_BRANCH: + logger.error('Could not retrieve branch name from git. Falling back to %s.' % jellypy.CONFIG.GIT_BRANCH) + branch_name = jellypy.CONFIG.GIT_BRANCH if not branch_name: logger.error('Could not retrieve branch name from git. Defaulting to master.') branch_name = 'master' @@ -133,20 +133,20 @@ def get_version(): return cur_commit_hash, remote_name, branch_name else: - if plexpy.DOCKER: - plexpy.INSTALL_TYPE = 'docker' - elif plexpy.SNAP: - plexpy.INSTALL_TYPE = 'snap' + if jellypy.DOCKER: + jellypy.INSTALL_TYPE = 'docker' + elif jellypy.SNAP: + jellypy.INSTALL_TYPE = 'snap' else: - plexpy.INSTALL_TYPE = 'source' + jellypy.INSTALL_TYPE = 'source' current_version, current_branch = get_version_from_file() return current_version, 'origin', current_branch def get_version_from_file(): - version_file = os.path.join(plexpy.PROG_DIR, 'version.txt') - branch_file = os.path.join(plexpy.PROG_DIR, 'branch.txt') + version_file = os.path.join(jellypy.PROG_DIR, 'version.txt') + branch_file = os.path.join(jellypy.PROG_DIR, 'branch.txt') if os.path.isfile(version_file): with open(version_file, 'r') as f: @@ -166,30 +166,30 @@ def get_version_from_file(): def check_update(scheduler=False, notify=False, use_cache=False): check_github(scheduler=scheduler, notify=notify, use_cache=use_cache) - if not plexpy.CURRENT_VERSION: - plexpy.UPDATE_AVAILABLE = None - elif plexpy.COMMITS_BEHIND > 0 and \ - (plexpy.common.BRANCH in ('master', 'beta') or plexpy.SNAP or plexpy.FROZEN) and \ - plexpy.common.RELEASE != plexpy.LATEST_RELEASE: - plexpy.UPDATE_AVAILABLE = 'release' - elif plexpy.COMMITS_BEHIND > 0 and \ - not plexpy.SNAP and not plexpy.FROZEN and \ - plexpy.CURRENT_VERSION != plexpy.LATEST_VERSION: - plexpy.UPDATE_AVAILABLE = 'commit' + if not jellypy.CURRENT_VERSION: + jellypy.UPDATE_AVAILABLE = None + elif jellypy.COMMITS_BEHIND > 0 and \ + (jellypy.common.BRANCH in ('master', 'beta') or jellypy.SNAP or jellypy.FROZEN) and \ + jellypy.common.RELEASE != jellypy.LATEST_RELEASE: + jellypy.UPDATE_AVAILABLE = 'release' + elif jellypy.COMMITS_BEHIND > 0 and \ + not jellypy.SNAP and not jellypy.FROZEN and \ + jellypy.CURRENT_VERSION != jellypy.LATEST_VERSION: + jellypy.UPDATE_AVAILABLE = 'commit' else: - plexpy.UPDATE_AVAILABLE = False + jellypy.UPDATE_AVAILABLE = False - if plexpy.WIN_SYS_TRAY_ICON: - plexpy.WIN_SYS_TRAY_ICON.change_tray_update_icon() - elif plexpy.MAC_SYS_TRAY_ICON: - plexpy.MAC_SYS_TRAY_ICON.change_tray_update_icon() + if jellypy.WIN_SYS_TRAY_ICON: + jellypy.WIN_SYS_TRAY_ICON.change_tray_update_icon() + elif 
jellypy.MAC_SYS_TRAY_ICON: + jellypy.MAC_SYS_TRAY_ICON.change_tray_update_icon() def check_github(scheduler=False, notify=False, use_cache=False): - plexpy.COMMITS_BEHIND = 0 + jellypy.COMMITS_BEHIND = 0 - if plexpy.CONFIG.GIT_TOKEN: - headers = {'Authorization': 'token {}'.format(plexpy.CONFIG.GIT_TOKEN)} + if jellypy.CONFIG.GIT_TOKEN: + headers = {'Authorization': 'token {}'.format(jellypy.CONFIG.GIT_TOKEN)} else: headers = {} @@ -197,118 +197,118 @@ def check_github(scheduler=False, notify=False, use_cache=False): if not version: # Get the latest version available from github logger.info('Retrieving latest version information from GitHub') - url = 'https://api.github.com/repos/%s/%s/commits/%s' % (plexpy.CONFIG.GIT_USER, - plexpy.CONFIG.GIT_REPO, - plexpy.CONFIG.GIT_BRANCH) + url = 'https://api.github.com/repos/%s/%s/commits/%s' % (jellypy.CONFIG.GIT_USER, + jellypy.CONFIG.GIT_REPO, + jellypy.CONFIG.GIT_BRANCH) version = request.request_json(url, headers=headers, timeout=20, validator=lambda x: type(x) == dict) github_cache('version', github_data=version) if version is None: logger.warn('Could not get the latest version from GitHub. Are you running a local development version?') - return plexpy.CURRENT_VERSION + return jellypy.CURRENT_VERSION - plexpy.LATEST_VERSION = version['sha'] - logger.debug("Latest version is %s", plexpy.LATEST_VERSION) + jellypy.LATEST_VERSION = version['sha'] + logger.debug("Latest version is %s", jellypy.LATEST_VERSION) # See how many commits behind we are - if not plexpy.CURRENT_VERSION: + if not jellypy.CURRENT_VERSION: logger.info('You are running an unknown version of Tautulli. Run the updater to identify your version') - return plexpy.LATEST_VERSION + return jellypy.LATEST_VERSION - if plexpy.LATEST_VERSION == plexpy.CURRENT_VERSION: + if jellypy.LATEST_VERSION == jellypy.CURRENT_VERSION: logger.info('Tautulli is up to date') - return plexpy.LATEST_VERSION + return jellypy.LATEST_VERSION commits = github_cache('commits', use_cache=use_cache) if not commits: logger.info('Comparing currently installed version with latest GitHub version') - url = 'https://api.github.com/repos/%s/%s/compare/%s...%s' % (plexpy.CONFIG.GIT_USER, - plexpy.CONFIG.GIT_REPO, - plexpy.LATEST_VERSION, - plexpy.CURRENT_VERSION) + url = 'https://api.github.com/repos/%s/%s/compare/%s...%s' % (jellypy.CONFIG.GIT_USER, + jellypy.CONFIG.GIT_REPO, + jellypy.LATEST_VERSION, + jellypy.CURRENT_VERSION) commits = request.request_json(url, headers=headers, timeout=20, whitelist_status_code=404, validator=lambda x: type(x) == dict) github_cache('commits', github_data=commits) if commits is None: logger.warn('Could not get commits behind from GitHub.') - return plexpy.LATEST_VERSION + return jellypy.LATEST_VERSION try: - plexpy.COMMITS_BEHIND = int(commits['behind_by']) - logger.debug("In total, %d commits behind", plexpy.COMMITS_BEHIND) + jellypy.COMMITS_BEHIND = int(commits['behind_by']) + logger.debug("In total, %d commits behind", jellypy.COMMITS_BEHIND) except KeyError: logger.info('Cannot compare versions. Are you running a local development version?') - plexpy.COMMITS_BEHIND = 0 + jellypy.COMMITS_BEHIND = 0 - if plexpy.COMMITS_BEHIND > 0: - logger.info('New version is available. You are %s commits behind' % plexpy.COMMITS_BEHIND) + if jellypy.COMMITS_BEHIND > 0: + logger.info('New version is available. 
You are %s commits behind' % jellypy.COMMITS_BEHIND) releases = github_cache('releases', use_cache=use_cache) if not releases: - url = 'https://api.github.com/repos/%s/%s/releases' % (plexpy.CONFIG.GIT_USER, - plexpy.CONFIG.GIT_REPO) + url = 'https://api.github.com/repos/%s/%s/releases' % (jellypy.CONFIG.GIT_USER, + jellypy.CONFIG.GIT_REPO) releases = request.request_json(url, timeout=20, whitelist_status_code=404, validator=lambda x: type(x) == list) github_cache('releases', github_data=releases) if releases is None: logger.warn('Could not get releases from GitHub.') - return plexpy.LATEST_VERSION + return jellypy.LATEST_VERSION - if plexpy.CONFIG.GIT_BRANCH == 'master': + if jellypy.CONFIG.GIT_BRANCH == 'master': release = next((r for r in releases if not r['prerelease']), releases[0]) - elif plexpy.CONFIG.GIT_BRANCH == 'beta': + elif jellypy.CONFIG.GIT_BRANCH == 'beta': release = next((r for r in releases if not r['tag_name'].endswith('-nightly')), releases[0]) - elif plexpy.CONFIG.GIT_BRANCH == 'nightly': + elif jellypy.CONFIG.GIT_BRANCH == 'nightly': release = next((r for r in releases), releases[0]) else: release = releases[0] - plexpy.LATEST_RELEASE = release['tag_name'] + jellypy.LATEST_RELEASE = release['tag_name'] if notify: - plexpy.NOTIFY_QUEUE.put({'notify_action': 'on_plexpyupdate', + jellypy.NOTIFY_QUEUE.put({'notify_action': 'on_plexpyupdate', 'plexpy_download_info': release, - 'plexpy_update_commit': plexpy.LATEST_VERSION, - 'plexpy_update_behind': plexpy.COMMITS_BEHIND}) + 'plexpy_update_commit': jellypy.LATEST_VERSION, + 'plexpy_update_behind': jellypy.COMMITS_BEHIND}) - if plexpy.PYTHON2: + if jellypy.PYTHON2: logger.warn('Tautulli is running using Python 2. Unable to run automatic update.') - elif scheduler and plexpy.CONFIG.PLEXPY_AUTO_UPDATE and \ - not plexpy.DOCKER and not plexpy.SNAP and \ - not (plexpy.FROZEN and common.PLATFORM == 'Darwin'): + elif scheduler and jellypy.CONFIG.PLEXPY_AUTO_UPDATE and \ + not jellypy.DOCKER and not jellypy.SNAP and \ + not (jellypy.FROZEN and common.PLATFORM == 'Darwin'): logger.info('Running automatic update.') - plexpy.shutdown(restart=True, update=True) + jellypy.shutdown(restart=True, update=True) - elif plexpy.COMMITS_BEHIND == 0: + elif jellypy.COMMITS_BEHIND == 0: logger.info('Tautulli is up to date') - return plexpy.LATEST_VERSION + return jellypy.LATEST_VERSION def update(): - if plexpy.PYTHON2: + if jellypy.PYTHON2: logger.warn('Tautulli is running using Python 2. 
Unable to update.') return - if not plexpy.UPDATE_AVAILABLE: + if not jellypy.UPDATE_AVAILABLE: return - if plexpy.INSTALL_TYPE in ('docker', 'snap', 'macos'): + if jellypy.INSTALL_TYPE in ('docker', 'snap', 'macos'): return - elif plexpy.INSTALL_TYPE == 'windows': + elif jellypy.INSTALL_TYPE == 'windows': logger.info('Calling Windows scheduled task to update Tautulli') CREATE_NO_WINDOW = 0x08000000 subprocess.Popen(['SCHTASKS', '/Run', '/TN', 'TautulliUpdateTask'], creationflags=CREATE_NO_WINDOW) - elif plexpy.INSTALL_TYPE == 'git': - output, err = runGit('pull --ff-only {} {}'.format(plexpy.CONFIG.GIT_REMOTE, - plexpy.CONFIG.GIT_BRANCH)) + elif jellypy.INSTALL_TYPE == 'git': + output, err = runGit('pull --ff-only {} {}'.format(jellypy.CONFIG.GIT_REMOTE, + jellypy.CONFIG.GIT_BRANCH)) if not output: logger.error('Unable to download latest version') @@ -320,12 +320,12 @@ def update(): elif line.endswith(('Aborting', 'Aborting.')): logger.error('Unable to update from git: ' + line) - elif plexpy.INSTALL_TYPE == 'source': - tar_download_url = 'https://github.com/{}/{}/tarball/{}'.format(plexpy.CONFIG.GIT_USER, - plexpy.CONFIG.GIT_REPO, - plexpy.CONFIG.GIT_BRANCH) - update_dir = os.path.join(plexpy.DATA_DIR, 'update') - version_path = os.path.join(plexpy.PROG_DIR, 'version.txt') + elif jellypy.INSTALL_TYPE == 'source': + tar_download_url = 'https://github.com/{}/{}/tarball/{}'.format(jellypy.CONFIG.GIT_USER, + jellypy.CONFIG.GIT_REPO, + jellypy.CONFIG.GIT_BRANCH) + update_dir = os.path.join(jellypy.DATA_DIR, 'update') + version_path = os.path.join(jellypy.PROG_DIR, 'version.txt') logger.info('Downloading update from: ' + tar_download_url) data = request.request_content(tar_download_url) @@ -334,8 +334,8 @@ def update(): logger.error("Unable to retrieve new version from '%s', can't update", tar_download_url) return - download_name = plexpy.CONFIG.GIT_BRANCH + '-github' - tar_download_path = os.path.join(plexpy.DATA_DIR, download_name) + download_name = jellypy.CONFIG.GIT_BRANCH + '-github' + tar_download_path = os.path.join(jellypy.DATA_DIR, download_name) # Save tar to disk with open(tar_download_path, 'wb') as f: @@ -363,7 +363,7 @@ def update(): dirname = dirname[len(content_dir) + 1:] for curfile in filenames: old_path = os.path.join(content_dir, dirname, curfile) - new_path = os.path.join(plexpy.PROG_DIR, dirname, curfile) + new_path = os.path.join(jellypy.PROG_DIR, dirname, curfile) if os.path.isfile(new_path): os.remove(new_path) @@ -372,7 +372,7 @@ def update(): # Update version.txt try: with open(version_path, 'w') as f: - f.write(str(plexpy.LATEST_VERSION)) + f.write(str(jellypy.LATEST_VERSION)) except IOError as e: logger.error( "Unable to write current version to version.txt, update not complete: %s", @@ -382,18 +382,18 @@ def update(): def reset_git_install(): - if plexpy.INSTALL_TYPE == 'git': - logger.info('Attempting to reset git install to "{}/{}/{}"'.format(plexpy.CONFIG.GIT_REMOTE, - plexpy.CONFIG.GIT_BRANCH, + if jellypy.INSTALL_TYPE == 'git': + logger.info('Attempting to reset git install to "{}/{}/{}"'.format(jellypy.CONFIG.GIT_REMOTE, + jellypy.CONFIG.GIT_BRANCH, common.RELEASE)) - output, err = runGit('remote set-url {} https://github.com/{}/{}.git'.format(plexpy.CONFIG.GIT_REMOTE, - plexpy.CONFIG.GIT_USER, - plexpy.CONFIG.GIT_REPO)) - output, err = runGit('fetch {}'.format(plexpy.CONFIG.GIT_REMOTE)) - output, err = runGit('checkout {}'.format(plexpy.CONFIG.GIT_BRANCH)) - output, err = runGit('branch -u {}/{}'.format(plexpy.CONFIG.GIT_REMOTE, - plexpy.CONFIG.GIT_BRANCH)) 
+ output, err = runGit('remote set-url {} https://github.com/{}/{}.git'.format(jellypy.CONFIG.GIT_REMOTE, + jellypy.CONFIG.GIT_USER, + jellypy.CONFIG.GIT_REPO)) + output, err = runGit('fetch {}'.format(jellypy.CONFIG.GIT_REMOTE)) + output, err = runGit('checkout {}'.format(jellypy.CONFIG.GIT_BRANCH)) + output, err = runGit('branch -u {}/{}'.format(jellypy.CONFIG.GIT_REMOTE, + jellypy.CONFIG.GIT_BRANCH)) output, err = runGit('reset --hard {}'.format(common.RELEASE)) if not output: @@ -410,12 +410,12 @@ def reset_git_install(): def checkout_git_branch(): - if plexpy.INSTALL_TYPE == 'git': - logger.info('Attempting to checkout git branch "{}/{}"'.format(plexpy.CONFIG.GIT_REMOTE, - plexpy.CONFIG.GIT_BRANCH)) + if jellypy.INSTALL_TYPE == 'git': + logger.info('Attempting to checkout git branch "{}/{}"'.format(jellypy.CONFIG.GIT_REMOTE, + jellypy.CONFIG.GIT_BRANCH)) - output, err = runGit('fetch {}'.format(plexpy.CONFIG.GIT_REMOTE)) - output, err = runGit('checkout {}'.format(plexpy.CONFIG.GIT_BRANCH)) + output, err = runGit('fetch {}'.format(jellypy.CONFIG.GIT_REMOTE)) + output, err = runGit('checkout {}'.format(jellypy.CONFIG.GIT_BRANCH)) if not output: logger.error('Unable to change git branch.') @@ -426,13 +426,13 @@ def checkout_git_branch(): logger.error('Unable to checkout from git: ' + line) return - output, err = runGit('pull {} {}'.format(plexpy.CONFIG.GIT_REMOTE, - plexpy.CONFIG.GIT_BRANCH)) + output, err = runGit('pull {} {}'.format(jellypy.CONFIG.GIT_REMOTE, + jellypy.CONFIG.GIT_BRANCH)) def github_cache(cache, github_data=None, use_cache=True): timestamp = helpers.timestamp() - cache_filepath = os.path.join(plexpy.CONFIG.CACHE_DIR, 'github_{}.json'.format(cache)) + cache_filepath = os.path.join(jellypy.CONFIG.CACHE_DIR, 'github_{}.json'.format(cache)) if github_data: cache_data = {'github_data': github_data, '_cache_time': timestamp} @@ -447,7 +447,7 @@ def github_cache(cache, github_data=None, use_cache=True): try: with open(cache_filepath, 'r', encoding='utf-8') as cache_file: cache_data = json.load(cache_file) - if timestamp - cache_data['_cache_time'] < plexpy.CONFIG.CHECK_GITHUB_CACHE_SECONDS: + if timestamp - cache_data['_cache_time'] < jellypy.CONFIG.CHECK_GITHUB_CACHE_SECONDS: logger.debug('Using cached GitHub %s data', cache) return cache_data['github_data'] except: @@ -455,7 +455,7 @@ def github_cache(cache, github_data=None, use_cache=True): def read_changelog(latest_only=False, since_prev_release=False): - changelog_file = os.path.join(plexpy.PROG_DIR, 'CHANGELOG.md') + changelog_file = os.path.join(jellypy.PROG_DIR, 'CHANGELOG.md') if not os.path.isfile(changelog_file): return '
<h4>Missing changelog file</h4>
' @@ -470,7 +470,7 @@ def read_changelog(latest_only=False, since_prev_release=False): list_pattern = re.compile(r'(^[ \t]*\*\s)(.+)') beta_release = False - prev_release = str(plexpy.PREV_RELEASE) + prev_release = str(jellypy.PREV_RELEASE) with open(changelog_file, "r") as logfile: for line in logfile: diff --git a/plexpy/web_socket.py b/jellypy/web_socket.py similarity index 79% rename from plexpy/web_socket.py rename to jellypy/web_socket.py index 58ad2db4..b0372ec5 100644 --- a/plexpy/web_socket.py +++ b/jellypy/web_socket.py @@ -28,19 +28,19 @@ import time import certifi import websocket -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import activity_handler import activity_pinger import activity_processor import database import logger else: - from plexpy import activity_handler - from plexpy import activity_pinger - from plexpy import activity_processor - from plexpy import database - from plexpy import logger + from jellypy import activity_handler + from jellypy import activity_pinger + from jellypy import activity_processor + from jellypy import database + from jellypy import logger name = 'websocket' @@ -66,30 +66,30 @@ def start_thread(): def on_connect(): - if plexpy.PLEX_SERVER_UP is None: - plexpy.PLEX_SERVER_UP = True + if jellypy.PLEX_SERVER_UP is None: + jellypy.PLEX_SERVER_UP = True - if not plexpy.PLEX_SERVER_UP: + if not jellypy.PLEX_SERVER_UP: logger.info("Tautulli WebSocket :: The Plex Media Server is back up.") - plexpy.NOTIFY_QUEUE.put({'notify_action': 'on_intup'}) - plexpy.PLEX_SERVER_UP = True + jellypy.NOTIFY_QUEUE.put({'notify_action': 'on_intup'}) + jellypy.PLEX_SERVER_UP = True - plexpy.initialize_scheduler() - if plexpy.CONFIG.WEBSOCKET_MONITOR_PING_PONG: + jellypy.initialize_scheduler() + if jellypy.CONFIG.WEBSOCKET_MONITOR_PING_PONG: send_ping() def on_disconnect(): - if plexpy.PLEX_SERVER_UP is None: - plexpy.PLEX_SERVER_UP = False + if jellypy.PLEX_SERVER_UP is None: + jellypy.PLEX_SERVER_UP = False - if plexpy.PLEX_SERVER_UP: + if jellypy.PLEX_SERVER_UP: logger.info("Tautulli WebSocket :: Unable to get a response from the server, Plex server is down.") - plexpy.NOTIFY_QUEUE.put({'notify_action': 'on_intdown'}) - plexpy.PLEX_SERVER_UP = False + jellypy.NOTIFY_QUEUE.put({'notify_action': 'on_intdown'}) + jellypy.PLEX_SERVER_UP = False activity_processor.ActivityProcessor().set_temp_stopped() - plexpy.initialize_scheduler() + jellypy.initialize_scheduler() def reconnect(): @@ -106,14 +106,14 @@ def shutdown(): def close(): logger.info("Tautulli WebSocket :: Disconnecting websocket...") - plexpy.WEBSOCKET.close() - plexpy.WS_CONNECTED = False + jellypy.WEBSOCKET.close() + jellypy.WS_CONNECTED = False def send_ping(): - if plexpy.WS_CONNECTED: + if jellypy.WS_CONNECTED: # logger.debug("Tautulli WebSocket :: Sending ping.") - plexpy.WEBSOCKET.ping("Hi?") + jellypy.WEBSOCKET.ping("Hi?") global pong_timer pong_timer = threading.Timer(5.0, wait_pong) @@ -127,7 +127,7 @@ def wait_pong(): logger.warn("Tautulli WebSocket :: Failed to receive pong from websocket, ping attempt %s." 
% str(pong_count)) - if pong_count >= plexpy.CONFIG.WEBSOCKET_CONNECTION_ATTEMPTS: + if pong_count >= jellypy.CONFIG.WEBSOCKET_CONNECTION_ATTEMPTS: pong_count = 0 close() @@ -144,24 +144,24 @@ def receive_pong(): def run(): from websocket import create_connection - if plexpy.CONFIG.PMS_SSL and plexpy.CONFIG.PMS_URL[:5] == 'https': - uri = plexpy.CONFIG.PMS_URL.replace('https://', 'wss://') + '/:/websockets/notifications' + if jellypy.CONFIG.PMS_SSL and jellypy.CONFIG.PMS_URL[:5] == 'https': + uri = jellypy.CONFIG.PMS_URL.replace('https://', 'wss://') + '/:/websockets/notifications' secure = 'secure ' - if plexpy.CONFIG.VERIFY_SSL_CERT: + if jellypy.CONFIG.VERIFY_SSL_CERT: sslopt = {'ca_certs': certifi.where()} else: sslopt = {'cert_reqs': ssl.CERT_NONE} else: uri = 'ws://%s:%s/:/websockets/notifications' % ( - plexpy.CONFIG.PMS_IP, - plexpy.CONFIG.PMS_PORT + jellypy.CONFIG.PMS_IP, + jellypy.CONFIG.PMS_PORT ) secure = '' sslopt = None # Set authentication token (if one is available) - if plexpy.CONFIG.PMS_TOKEN: - header = ["X-Plex-Token: %s" % plexpy.CONFIG.PMS_TOKEN] + if jellypy.CONFIG.PMS_TOKEN: + header = ["X-Plex-Token: %s" % jellypy.CONFIG.PMS_TOKEN] else: header = [] @@ -172,18 +172,18 @@ def run(): # Try an open the websocket connection logger.info("Tautulli WebSocket :: Opening %swebsocket." % secure) try: - plexpy.WEBSOCKET = create_connection(uri, header=header, sslopt=sslopt) + jellypy.WEBSOCKET = create_connection(uri, header=header, sslopt=sslopt) logger.info("Tautulli WebSocket :: Ready") - plexpy.WS_CONNECTED = True + jellypy.WS_CONNECTED = True except (websocket.WebSocketException, IOError, Exception) as e: logger.error("Tautulli WebSocket :: %s.", e) - if plexpy.WS_CONNECTED: + if jellypy.WS_CONNECTED: on_connect() - while plexpy.WS_CONNECTED: + while jellypy.WS_CONNECTED: try: - process(*receive(plexpy.WEBSOCKET)) + process(*receive(jellypy.WEBSOCKET)) # successfully received data, reset reconnects counter reconnects = 0 @@ -195,19 +195,19 @@ def run(): if reconnects == 0: logger.warn("Tautulli WebSocket :: Connection has closed.") - if not plexpy.CONFIG.PMS_IS_CLOUD and reconnects < plexpy.CONFIG.WEBSOCKET_CONNECTION_ATTEMPTS: + if not jellypy.CONFIG.PMS_IS_CLOUD and reconnects < jellypy.CONFIG.WEBSOCKET_CONNECTION_ATTEMPTS: reconnects += 1 # Sleep 5 between connection attempts if reconnects > 1: - time.sleep(plexpy.CONFIG.WEBSOCKET_CONNECTION_TIMEOUT) + time.sleep(jellypy.CONFIG.WEBSOCKET_CONNECTION_TIMEOUT) logger.warn("Tautulli WebSocket :: Reconnection attempt %s." 
% str(reconnects)) try: - plexpy.WEBSOCKET = create_connection(uri, header=header) + jellypy.WEBSOCKET = create_connection(uri, header=header) logger.info("Tautulli WebSocket :: Ready") - plexpy.WS_CONNECTED = True + jellypy.WS_CONNECTED = True except (websocket.WebSocketException, IOError, Exception) as e: logger.error("Tautulli WebSocket :: %s.", e) @@ -223,7 +223,7 @@ def run(): close() break - if not plexpy.WS_CONNECTED and not ws_shutdown: + if not jellypy.WS_CONNECTED and not ws_shutdown: on_disconnect() logger.debug("Tautulli WebSocket :: Leaving thread.") diff --git a/plexpy/webauth.py b/jellypy/webauth.py similarity index 86% rename from plexpy/webauth.py rename to jellypy/webauth.py index ca27755c..1e5a8df7 100644 --- a/plexpy/webauth.py +++ b/jellypy/webauth.py @@ -29,19 +29,19 @@ import cherrypy from hashing_passwords import check_hash import jwt -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import logger from database import MonitorDatabase from helpers import timestamp from users import Users, refresh_users from plextv import PlexTV else: - from plexpy import logger - from plexpy.database import MonitorDatabase - from plexpy.helpers import timestamp - from plexpy.users import Users, refresh_users - from plexpy.plextv import PlexTV + from jellypy import logger + from jellypy.database import MonitorDatabase + from jellypy.helpers import timestamp + from jellypy.users import Users, refresh_users + from jellypy.plextv import PlexTV # Monkey patch SameSite support into cookies. # https://stackoverflow.com/a/50813092 @@ -83,7 +83,7 @@ def plex_user_login(username=None, password=None, token=None, headers=None): if user_id != str(user_details['user_id']): # The user is not in the database. return None - elif plexpy.CONFIG.HTTP_PLEX_ADMIN and user_details['is_admin']: + elif jellypy.CONFIG.HTTP_PLEX_ADMIN and user_details['is_admin']: # Plex admin login return user_details, 'admin' elif not user_details['allow_guest'] or user_details['deleted_user']: @@ -91,7 +91,7 @@ def plex_user_login(username=None, password=None, token=None, headers=None): return None # Stop here if guest access is not enabled - if not plexpy.CONFIG.ALLOW_GUEST_ACCESS: + if not jellypy.CONFIG.ALLOW_GUEST_ACCESS: return None # The user is in the database, and guest access is enabled, so try to retrieve a server token. 
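# ---- Editorial sketch (illustration only; not part of the patch) ----
# Every module touched by this rename repeats the same dual import
# pattern: flat imports on the legacy Python 2 path, package imports
# from the renamed 'jellypy' package on Python 3. A minimal sketch:

import jellypy

if jellypy.PYTHON2:
    import logger               # legacy flat import (Python 2 path)
else:
    from jellypy import logger  # package import (Python 3 path)

# A module renamed here but still imported under the old 'plexpy' name
# elsewhere would raise ImportError, which is why the rename has to
# land across all files in a single commit.
# ----------------------------------------------------------------------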
@@ -139,17 +139,17 @@ def check_credentials(username=None, password=None, token=None, admin_login='0', Returns True and the user group on success or False and no user group""" if username and password: - if plexpy.CONFIG.HTTP_PASSWORD: + if jellypy.CONFIG.HTTP_PASSWORD: user_details = {'user_id': None, 'username': username} - if plexpy.CONFIG.HTTP_HASHED_PASSWORD and \ - username == plexpy.CONFIG.HTTP_USERNAME and check_hash(password, plexpy.CONFIG.HTTP_PASSWORD): + if jellypy.CONFIG.HTTP_HASHED_PASSWORD and \ + username == jellypy.CONFIG.HTTP_USERNAME and check_hash(password, jellypy.CONFIG.HTTP_PASSWORD): return True, user_details, 'admin' - elif not plexpy.CONFIG.HTTP_HASHED_PASSWORD and \ - username == plexpy.CONFIG.HTTP_USERNAME and password == plexpy.CONFIG.HTTP_PASSWORD: + elif not jellypy.CONFIG.HTTP_HASHED_PASSWORD and \ + username == jellypy.CONFIG.HTTP_USERNAME and password == jellypy.CONFIG.HTTP_PASSWORD: return True, user_details, 'admin' - if plexpy.CONFIG.HTTP_PLEX_ADMIN or (not admin_login == '1' and plexpy.CONFIG.ALLOW_GUEST_ACCESS): + if jellypy.CONFIG.HTTP_PLEX_ADMIN or (not admin_login == '1' and jellypy.CONFIG.ALLOW_GUEST_ACCESS): plex_login = plex_user_login(token=token, headers=headers) if plex_login is not None: return True, plex_login[0], plex_login[1] @@ -158,13 +158,13 @@ def check_credentials(username=None, password=None, token=None, admin_login='0', def check_jwt_token(): - jwt_cookie = str(JWT_COOKIE_NAME + plexpy.CONFIG.PMS_UUID) + jwt_cookie = str(JWT_COOKIE_NAME + jellypy.CONFIG.PMS_UUID) jwt_token = cherrypy.request.cookie.get(jwt_cookie) if jwt_token: try: payload = jwt.decode( - jwt_token.value, plexpy.CONFIG.JWT_SECRET, leeway=timedelta(seconds=10), algorithms=[JWT_ALGORITHM] + jwt_token.value, jellypy.CONFIG.JWT_SECRET, leeway=timedelta(seconds=10), algorithms=[JWT_ALGORITHM] ) except (jwt.DecodeError, jwt.ExpiredSignatureError): return None @@ -186,14 +186,14 @@ def check_auth(*args, **kwargs): for condition in conditions: # A condition is just a callable that returns true or false if not condition(): - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT) + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT) else: redirect_uri = cherrypy.request.wsgi_environ['REQUEST_URI'] if redirect_uri: redirect_uri = '?redirect_uri=' + quote(redirect_uri) - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT + "auth/logout" + redirect_uri) + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT + "auth/logout" + redirect_uri) def requireAuth(*conditions): @@ -268,11 +268,11 @@ def check_rate_limit(ip_address): except ValueError: last_success = 0 - max_timestamp = max(last_success, last_timestamp - plexpy.CONFIG.HTTP_RATE_LIMIT_ATTEMPTS_INTERVAL) + max_timestamp = max(last_success, last_timestamp - jellypy.CONFIG.HTTP_RATE_LIMIT_ATTEMPTS_INTERVAL) attempts = [login for login in result if login['timestamp'] >= max_timestamp and not login['success']] - if len(attempts) >= plexpy.CONFIG.HTTP_RATE_LIMIT_ATTEMPTS: - return max(last_timestamp - (timestamp() - plexpy.CONFIG.HTTP_RATE_LIMIT_LOCKOUT_TIME), 0) + if len(attempts) >= jellypy.CONFIG.HTTP_RATE_LIMIT_ATTEMPTS: + return max(last_timestamp - (timestamp() - jellypy.CONFIG.HTTP_RATE_LIMIT_LOCKOUT_TIME), 0) # Controller to provide login and logout actions @@ -280,9 +280,9 @@ def check_rate_limit(ip_address): class AuthController(object): def check_auth_enabled(self): - if not plexpy.CONFIG.HTTP_BASIC_AUTH and plexpy.CONFIG.HTTP_PASSWORD: + if not jellypy.CONFIG.HTTP_BASIC_AUTH and jellypy.CONFIG.HTTP_PASSWORD: return - raise 
cherrypy.HTTPRedirect(plexpy.HTTP_ROOT) + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT) def on_login(self, username=None, user_id=None, user_group=None, success=False, oauth=False): """Called on successful login""" @@ -310,12 +310,12 @@ class AuthController(object): logger.debug("Tautulli WebAuth :: %s user '%s' logged out of Tautulli." % (user_group.capitalize(), username)) def get_loginform(self, redirect_uri=''): - from plexpy.webserve import serve_template + from jellypy.webserve import serve_template return serve_template(templatename="login.html", title="Login", redirect_uri=unquote(redirect_uri)) @cherrypy.expose def index(self, *args, **kwargs): - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT + "auth/login") + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT + "auth/login") @cherrypy.expose def login(self, redirect_uri='', *args, **kwargs): @@ -331,12 +331,12 @@ class AuthController(object): if payload: self.on_logout(payload['user'], payload['user_group']) - jwt_cookie = str(JWT_COOKIE_NAME + plexpy.CONFIG.PMS_UUID) + jwt_cookie = str(JWT_COOKIE_NAME + jellypy.CONFIG.PMS_UUID) cherrypy.response.cookie[jwt_cookie] = '' cherrypy.response.cookie[jwt_cookie]['expires'] = 0 - cherrypy.response.cookie[jwt_cookie]['path'] = plexpy.HTTP_ROOT.rstrip('/') or '/' + cherrypy.response.cookie[jwt_cookie]['path'] = jellypy.HTTP_ROOT.rstrip('/') or '/' - if plexpy.HTTP_ROOT != '/': + if jellypy.HTTP_ROOT != '/': # Also expire the JWT on the root path cherrypy.response.headers['Set-Cookie'] = jwt_cookie + '=""; expires=Thu, 01 Jan 1970 12:00:00 GMT; path=/' @@ -345,7 +345,7 @@ class AuthController(object): if redirect_uri: redirect_uri = '?redirect_uri=' + redirect_uri - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT + "auth/login" + redirect_uri) + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT + "auth/login" + redirect_uri) @cherrypy.expose @cherrypy.tools.json_out() @@ -383,7 +383,7 @@ class AuthController(object): 'exp': expiry } - jwt_token = jwt.encode(payload, plexpy.CONFIG.JWT_SECRET, algorithm=JWT_ALGORITHM).decode('utf-8') + jwt_token = jwt.encode(payload, jellypy.CONFIG.JWT_SECRET, algorithm=JWT_ALGORITHM).decode('utf-8') self.on_login(username=user_details['username'], user_id=user_details['user_id'], @@ -391,16 +391,16 @@ class AuthController(object): success=True, oauth=bool(token)) - jwt_cookie = str(JWT_COOKIE_NAME + plexpy.CONFIG.PMS_UUID) + jwt_cookie = str(JWT_COOKIE_NAME + jellypy.CONFIG.PMS_UUID) cherrypy.response.cookie[jwt_cookie] = jwt_token cherrypy.response.cookie[jwt_cookie]['expires'] = int(time_delta.total_seconds()) - cherrypy.response.cookie[jwt_cookie]['path'] = plexpy.HTTP_ROOT.rstrip('/') or '/' + cherrypy.response.cookie[jwt_cookie]['path'] = jellypy.HTTP_ROOT.rstrip('/') or '/' cherrypy.response.cookie[jwt_cookie]['httponly'] = True cherrypy.response.cookie[jwt_cookie]['samesite'] = 'lax' cherrypy.request.login = payload cherrypy.response.status = 200 - return {'status': 'success', 'token': jwt_token, 'uuid': plexpy.CONFIG.PMS_UUID} + return {'status': 'success', 'token': jwt_token, 'uuid': jellypy.CONFIG.PMS_UUID} elif admin_login == '1' and username: self.on_login(username=username) diff --git a/plexpy/webserve.py b/jellypy/webserve.py similarity index 92% rename from plexpy/webserve.py rename to jellypy/webserve.py index 5461e67f..cb8324b8 100644 --- a/plexpy/webserve.py +++ b/jellypy/webserve.py @@ -43,8 +43,8 @@ import mako.exceptions import websocket -import plexpy -if plexpy.PYTHON2: +import jellypy +if jellypy.PYTHON2: import activity_pinger import common 
import config @@ -79,51 +79,51 @@ if plexpy.PYTHON2: elif common.PLATFORM == 'Darwin': import macos else: - from plexpy import activity_pinger - from plexpy import common - from plexpy import config - from plexpy import database - from plexpy import datafactory - from plexpy import exporter - from plexpy import graphs - from plexpy import helpers - from plexpy import http_handler - from plexpy import libraries - from plexpy import log_reader - from plexpy import logger - from plexpy import newsletter_handler - from plexpy import newsletters - from plexpy import mobile_app - from plexpy import notification_handler - from plexpy import notifiers - from plexpy import plextv - from plexpy import plexivity_import - from plexpy import plexwatch_import - from plexpy import pmsconnect - from plexpy import users - from plexpy import versioncheck - from plexpy import web_socket - from plexpy import webstart - from plexpy.api2 import API2 - from plexpy.helpers import checked, addtoapi, get_ip, create_https_certificates, build_datatables_json, sanitize_out - from plexpy.session import get_session_info, get_session_user_id, allow_session_user, allow_session_library - from plexpy.webauth import AuthController, requireAuth, member_of, check_auth + from jellypy import activity_pinger + from jellypy import common + from jellypy import config + from jellypy import database + from jellypy import datafactory + from jellypy import exporter + from jellypy import graphs + from jellypy import helpers + from jellypy import http_handler + from jellypy import libraries + from jellypy import log_reader + from jellypy import logger + from jellypy import newsletter_handler + from jellypy import newsletters + from jellypy import mobile_app + from jellypy import notification_handler + from jellypy import notifiers + from jellypy import plextv + from jellypy import plexivity_import + from jellypy import plexwatch_import + from jellypy import pmsconnect + from jellypy import users + from jellypy import versioncheck + from jellypy import web_socket + from jellypy import webstart + from jellypy.api2 import API2 + from jellypy.helpers import checked, addtoapi, get_ip, create_https_certificates, build_datatables_json, sanitize_out + from jellypy.session import get_session_info, get_session_user_id, allow_session_user, allow_session_library + from jellypy.webauth import AuthController, requireAuth, member_of, check_auth if common.PLATFORM == 'Windows': - from plexpy import windows + from jellypy import windows elif common.PLATFORM == 'Darwin': - from plexpy import macos + from jellypy import macos def serve_template(templatename, **kwargs): - interface_dir = os.path.join(str(plexpy.PROG_DIR), 'data/interfaces/') - template_dir = os.path.join(str(interface_dir), plexpy.CONFIG.INTERFACE) + interface_dir = os.path.join(str(jellypy.PROG_DIR), 'data/interfaces/') + template_dir = os.path.join(str(interface_dir), jellypy.CONFIG.INTERFACE) _hplookup = TemplateLookup(directories=[template_dir], default_filters=['unicode', 'h'], error_handler=mako_error_handler) - http_root = plexpy.HTTP_ROOT - server_name = plexpy.CONFIG.PMS_NAME - cache_param = '?' + (plexpy.CURRENT_VERSION or common.RELEASE) + http_root = jellypy.HTTP_ROOT + server_name = jellypy.CONFIG.PMS_NAME + cache_param = '?' 
+ (jellypy.CURRENT_VERSION or common.RELEASE) _session = get_session_info() @@ -171,13 +171,13 @@ def mako_error_handler(context, error): class BaseRedirect(object): @cherrypy.expose def index(self): - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT) + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT) @cherrypy.expose def status(self, *args, **kwargs): path = '/' + '/'.join(args) if args else '' query = '?' + urlencode(kwargs) if kwargs else '' - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT + 'status' + path + query) + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT + 'status' + path + query) class WebInterface(object): @@ -185,15 +185,15 @@ class WebInterface(object): auth = AuthController() def __init__(self): - self.interface_dir = os.path.join(str(plexpy.PROG_DIR), 'data/') + self.interface_dir = os.path.join(str(jellypy.PROG_DIR), 'data/') @cherrypy.expose @requireAuth() def index(self, **kwargs): - if plexpy.CONFIG.FIRST_RUN_COMPLETE: - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT + "home") + if jellypy.CONFIG.FIRST_RUN_COMPLETE: + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT + "home") else: - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT + "welcome") + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT + "welcome") ##### Welcome ##### @@ -202,22 +202,22 @@ class WebInterface(object): @requireAuth(member_of("admin")) def welcome(self, **kwargs): config = { - "pms_identifier": plexpy.CONFIG.PMS_IDENTIFIER, - "pms_ip": plexpy.CONFIG.PMS_IP, - "pms_port": plexpy.CONFIG.PMS_PORT, - "pms_is_remote": plexpy.CONFIG.PMS_IS_REMOTE, - "pms_ssl": plexpy.CONFIG.PMS_SSL, - "pms_is_cloud": plexpy.CONFIG.PMS_IS_CLOUD, - "pms_token": plexpy.CONFIG.PMS_TOKEN, - "pms_uuid": plexpy.CONFIG.PMS_UUID, - "pms_name": plexpy.CONFIG.PMS_NAME, - "logging_ignore_interval": plexpy.CONFIG.LOGGING_IGNORE_INTERVAL + "pms_identifier": jellypy.CONFIG.PMS_IDENTIFIER, + "pms_ip": jellypy.CONFIG.PMS_IP, + "pms_port": jellypy.CONFIG.PMS_PORT, + "pms_is_remote": jellypy.CONFIG.PMS_IS_REMOTE, + "pms_ssl": jellypy.CONFIG.PMS_SSL, + "pms_is_cloud": jellypy.CONFIG.PMS_IS_CLOUD, + "pms_token": jellypy.CONFIG.PMS_TOKEN, + "pms_uuid": jellypy.CONFIG.PMS_UUID, + "pms_name": jellypy.CONFIG.PMS_NAME, + "logging_ignore_interval": jellypy.CONFIG.LOGGING_IGNORE_INTERVAL } # The setup wizard just refreshes the page on submit so we must redirect to home if config set. 
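# ---- Editorial sketch (illustration only; not part of the patch) ----
# The hunks below repeat the config persistence idiom used throughout
# webserve.py: set an attribute on the module-level CONFIG object, then
# flush it to the config file on disk. Sketch, with a placeholder token:

import jellypy

token = 'xxxxxxxxxxxxxxxxxxxx'                  # hypothetical Plex token value
jellypy.CONFIG.__setattr__('PMS_TOKEN', token)  # update the in-memory config
jellypy.CONFIG.write()                          # persist it to the config file
# ----------------------------------------------------------------------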
- if plexpy.CONFIG.FIRST_RUN_COMPLETE: - plexpy.initialize_scheduler() - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT + "home") + if jellypy.CONFIG.FIRST_RUN_COMPLETE: + jellypy.initialize_scheduler() + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT + "home") else: return serve_template(templatename="welcome.html", title="Welcome", config=config) @@ -252,8 +252,8 @@ class WebInterface(object): """ if token: # Need to set token so result doesn't return http 401 - plexpy.CONFIG.__setattr__('PMS_TOKEN', token) - plexpy.CONFIG.write() + jellypy.CONFIG.__setattr__('PMS_TOKEN', token) + jellypy.CONFIG.write() include_cloud = not (include_cloud == 'false') all_servers = not (all_servers == 'false') @@ -272,12 +272,12 @@ class WebInterface(object): @requireAuth() def home(self, **kwargs): config = { - "home_sections": plexpy.CONFIG.HOME_SECTIONS, - "home_refresh_interval": plexpy.CONFIG.HOME_REFRESH_INTERVAL, - "pms_name": plexpy.CONFIG.PMS_NAME, - "pms_is_cloud": plexpy.CONFIG.PMS_IS_CLOUD, - "update_show_changelog": plexpy.CONFIG.UPDATE_SHOW_CHANGELOG, - "first_run_complete": plexpy.CONFIG.FIRST_RUN_COMPLETE + "home_sections": jellypy.CONFIG.HOME_SECTIONS, + "home_refresh_interval": jellypy.CONFIG.HOME_REFRESH_INTERVAL, + "pms_name": jellypy.CONFIG.PMS_NAME, + "pms_is_cloud": jellypy.CONFIG.PMS_IS_CLOUD, + "update_show_changelog": jellypy.CONFIG.UPDATE_SHOW_CHANGELOG, + "first_run_complete": jellypy.CONFIG.FIRST_RUN_COMPLETE } return serve_template(templatename="index.html", title="Home", config=config) @@ -302,12 +302,12 @@ class WebInterface(object): } ``` """ - if plexpy.CONFIG.DATE_FORMAT: - date_format = plexpy.CONFIG.DATE_FORMAT + if jellypy.CONFIG.DATE_FORMAT: + date_format = jellypy.CONFIG.DATE_FORMAT else: date_format = 'YYYY-MM-DD' - if plexpy.CONFIG.TIME_FORMAT: - time_format = plexpy.CONFIG.TIME_FORMAT + if jellypy.CONFIG.TIME_FORMAT: + time_format = jellypy.CONFIG.TIME_FORMAT else: time_format = 'HH:mm' @@ -320,7 +320,7 @@ class WebInterface(object): @requireAuth() def get_current_activity(self, **kwargs): - pms_connect = pmsconnect.PmsConnect(token=plexpy.CONFIG.PMS_TOKEN) + pms_connect = pmsconnect.PmsConnect(token=jellypy.CONFIG.PMS_TOKEN) result = pms_connect.get_current_activity() if result: @@ -333,7 +333,7 @@ class WebInterface(object): @requireAuth() def get_current_activity_instance(self, session_key=None, **kwargs): - pms_connect = pmsconnect.PmsConnect(token=plexpy.CONFIG.PMS_TOKEN) + pms_connect = pmsconnect.PmsConnect(token=jellypy.CONFIG.PMS_TOKEN) result = pms_connect.get_current_activity() if result: @@ -375,18 +375,18 @@ class WebInterface(object): @cherrypy.tools.json_out() @requireAuth(member_of("admin")) def return_plex_xml_url(self, endpoint='', plextv=False, **kwargs): - kwargs['X-Plex-Token'] = plexpy.CONFIG.PMS_TOKEN + kwargs['X-Plex-Token'] = jellypy.CONFIG.PMS_TOKEN if helpers.bool_true(plextv): base_url = 'https://plex.tv' else: - if plexpy.CONFIG.PMS_URL_OVERRIDE: - base_url = plexpy.CONFIG.PMS_URL_OVERRIDE + if jellypy.CONFIG.PMS_URL_OVERRIDE: + base_url = jellypy.CONFIG.PMS_URL_OVERRIDE else: - base_url = plexpy.CONFIG.PMS_URL + base_url = jellypy.CONFIG.PMS_URL if '{machine_id}' in endpoint: - endpoint = endpoint.format(machine_id=plexpy.CONFIG.PMS_IDENTIFIER) + endpoint = endpoint.format(machine_id=jellypy.CONFIG.PMS_IDENTIFIER) return base_url + endpoint + '?' 
+ urlencode(kwargs) @@ -405,7 +405,7 @@ class WebInterface(object): def library_stats(self, **kwargs): data_factory = datafactory.DataFactory() - library_cards = plexpy.CONFIG.HOME_LIBRARY_CARDS + library_cards = jellypy.CONFIG.HOME_LIBRARY_CARDS stats_data = data_factory.get_library_stats(library_cards=library_cards) @@ -601,11 +601,11 @@ class WebInterface(object): @requireAuth() def library(self, section_id=None, **kwargs): if not allow_session_library(section_id): - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT) + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT) config = { - "get_file_sizes": plexpy.CONFIG.GET_FILE_SIZES, - "get_file_sizes_hold": plexpy.CONFIG.GET_FILE_SIZES_HOLD + "get_file_sizes": jellypy.CONFIG.GET_FILE_SIZES, + "get_file_sizes_hold": jellypy.CONFIG.GET_FILE_SIZES_HOLD } if section_id: @@ -633,7 +633,7 @@ class WebInterface(object): status_message = 'An error occured.' return serve_template(templatename="edit_library.html", title="Edit Library", - data=result, server_id=plexpy.CONFIG.PMS_IDENTIFIER, status_message=status_message) + data=result, server_id=jellypy.CONFIG.PMS_IDENTIFIER, status_message=status_message) @cherrypy.expose @requireAuth(member_of("admin")) @@ -923,7 +923,7 @@ class WebInterface(object): @cherrypy.tools.json_out() @requireAuth(member_of("admin")) def get_media_info_file_sizes(self, section_id=None, rating_key=None, **kwargs): - get_file_sizes_hold = plexpy.CONFIG.GET_FILE_SIZES_HOLD + get_file_sizes_hold = jellypy.CONFIG.GET_FILE_SIZES_HOLD section_ids = set(get_file_sizes_hold['section_ids']) rating_keys = set(get_file_sizes_hold['rating_keys']) @@ -932,7 +932,7 @@ class WebInterface(object): section_ids.add(section_id) elif rating_key: rating_keys.add(rating_key) - plexpy.CONFIG.GET_FILE_SIZES_HOLD = {'section_ids': list(section_ids), 'rating_keys': list(rating_keys)} + jellypy.CONFIG.GET_FILE_SIZES_HOLD = {'section_ids': list(section_ids), 'rating_keys': list(rating_keys)} library_data = libraries.Libraries() result = library_data.get_media_info_file_sizes(section_id=section_id, @@ -942,7 +942,7 @@ class WebInterface(object): section_ids.remove(section_id) elif rating_key: rating_keys.remove(rating_key) - plexpy.CONFIG.GET_FILE_SIZES_HOLD = {'section_ids': list(section_ids), 'rating_keys': list(rating_keys)} + jellypy.CONFIG.GET_FILE_SIZES_HOLD = {'section_ids': list(section_ids), 'rating_keys': list(rating_keys)} else: result = False @@ -1194,7 +1194,7 @@ class WebInterface(object): None ``` """ - get_file_sizes_hold = plexpy.CONFIG.GET_FILE_SIZES_HOLD + get_file_sizes_hold = jellypy.CONFIG.GET_FILE_SIZES_HOLD section_ids = set(get_file_sizes_hold['section_ids']) if section_id not in section_ids: @@ -1330,7 +1330,7 @@ class WebInterface(object): @requireAuth() def user(self, user_id=None, **kwargs): if not allow_session_user(user_id): - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT) + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT) if user_id: try: @@ -2636,7 +2636,7 @@ class WebInterface(object): if get_session_user_id(): user_id = get_session_user_id() - plex_tv = plextv.PlexTV(token=plexpy.CONFIG.PMS_TOKEN) + plex_tv = plextv.PlexTV(token=jellypy.CONFIG.PMS_TOKEN) result = plex_tv.get_synced_items(machine_id=machine_id, user_id_filter=user_id) if result: @@ -2704,7 +2704,7 @@ class WebInterface(object): else: filename = logger.FILENAME - with open(os.path.join(plexpy.CONFIG.LOG_DIR, filename), 'r', encoding='utf-8') as f: + with open(os.path.join(jellypy.CONFIG.LOG_DIR, filename), 'r', encoding='utf-8') as f: for l in f.readlines(): try: 
temp_loglevel_and_time = l.split(' - ', 1) @@ -2773,7 +2773,7 @@ class WebInterface(object): ] ``` """ - window = int(kwargs.get('window', plexpy.CONFIG.PMS_LOGS_LINE_CAP)) + window = int(kwargs.get('window', jellypy.CONFIG.PMS_LOGS_LINE_CAP)) log_lines = [] log_type = kwargs.get('log_type', 'server') @@ -2997,7 +2997,7 @@ class WebInterface(object): filename = logger.FILENAME try: - open(os.path.join(plexpy.CONFIG.LOG_DIR, filename), 'w').close() + open(os.path.join(jellypy.CONFIG.LOG_DIR, filename), 'w').close() result = 'success' msg = 'Cleared the %s file.' % filename logger.info(msg) @@ -3011,15 +3011,15 @@ class WebInterface(object): @cherrypy.expose @requireAuth(member_of("admin")) def toggleVerbose(self, **kwargs): - plexpy.VERBOSE = not plexpy.VERBOSE + jellypy.VERBOSE = not jellypy.VERBOSE - plexpy.CONFIG.__setattr__('VERBOSE_LOGS', plexpy.VERBOSE) - plexpy.CONFIG.write() + jellypy.CONFIG.__setattr__('VERBOSE_LOGS', jellypy.VERBOSE) + jellypy.CONFIG.write() - logger.initLogger(console=not plexpy.QUIET, log_dir=plexpy.CONFIG.LOG_DIR, verbose=plexpy.VERBOSE) - logger.info("Verbose toggled, set to %s", plexpy.VERBOSE) + logger.initLogger(console=not jellypy.QUIET, log_dir=jellypy.CONFIG.LOG_DIR, verbose=jellypy.VERBOSE) + logger.info("Verbose toggled, set to %s", jellypy.VERBOSE) logger.debug("If you read this message, debug logging is available") - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT + "logs") + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT + "logs") @cherrypy.expose @requireAuth() @@ -3042,7 +3042,7 @@ class WebInterface(object): filename = logger.FILENAME try: - with open(os.path.join(plexpy.CONFIG.LOG_DIR, filename), 'r', encoding='utf-8') as f: + with open(os.path.join(jellypy.CONFIG.LOG_DIR, filename), 'r', encoding='utf-8') as f: return '
<pre>%s</pre>
' % f.read() except IOError as e: return "Log file not found." @@ -3053,122 +3053,122 @@ class WebInterface(object): @cherrypy.expose @requireAuth(member_of("admin")) def settings(self, **kwargs): - interface_dir = os.path.join(plexpy.PROG_DIR, 'data/interfaces/') + interface_dir = os.path.join(jellypy.PROG_DIR, 'data/interfaces/') interface_list = [name for name in os.listdir(interface_dir) if os.path.isdir(os.path.join(interface_dir, name))] # Initialise blank passwords so we do not expose them in the html forms # but users are still able to clear them - if plexpy.CONFIG.HTTP_PASSWORD != '': + if jellypy.CONFIG.HTTP_PASSWORD != '': http_password = ' ' else: http_password = '' config = { - "allow_guest_access": checked(plexpy.CONFIG.ALLOW_GUEST_ACCESS), - "history_table_activity": checked(plexpy.CONFIG.HISTORY_TABLE_ACTIVITY), - "http_basic_auth": checked(plexpy.CONFIG.HTTP_BASIC_AUTH), - "http_hash_password": checked(plexpy.CONFIG.HTTP_HASH_PASSWORD), - "http_hashed_password": plexpy.CONFIG.HTTP_HASHED_PASSWORD, - "http_host": plexpy.CONFIG.HTTP_HOST, - "http_username": plexpy.CONFIG.HTTP_USERNAME, - "http_port": plexpy.CONFIG.HTTP_PORT, + "allow_guest_access": checked(jellypy.CONFIG.ALLOW_GUEST_ACCESS), + "history_table_activity": checked(jellypy.CONFIG.HISTORY_TABLE_ACTIVITY), + "http_basic_auth": checked(jellypy.CONFIG.HTTP_BASIC_AUTH), + "http_hash_password": checked(jellypy.CONFIG.HTTP_HASH_PASSWORD), + "http_hashed_password": jellypy.CONFIG.HTTP_HASHED_PASSWORD, + "http_host": jellypy.CONFIG.HTTP_HOST, + "http_username": jellypy.CONFIG.HTTP_USERNAME, + "http_port": jellypy.CONFIG.HTTP_PORT, "http_password": http_password, - "http_root": plexpy.CONFIG.HTTP_ROOT, - "http_proxy": checked(plexpy.CONFIG.HTTP_PROXY), - "http_plex_admin": checked(plexpy.CONFIG.HTTP_PLEX_ADMIN), - "launch_browser": checked(plexpy.CONFIG.LAUNCH_BROWSER), - "launch_startup": checked(plexpy.CONFIG.LAUNCH_STARTUP), - "enable_https": checked(plexpy.CONFIG.ENABLE_HTTPS), - "https_create_cert": checked(plexpy.CONFIG.HTTPS_CREATE_CERT), - "https_cert": plexpy.CONFIG.HTTPS_CERT, - "https_cert_chain": plexpy.CONFIG.HTTPS_CERT_CHAIN, - "https_key": plexpy.CONFIG.HTTPS_KEY, - "https_domain": plexpy.CONFIG.HTTPS_DOMAIN, - "https_ip": plexpy.CONFIG.HTTPS_IP, - "http_base_url": plexpy.CONFIG.HTTP_BASE_URL, - "anon_redirect": plexpy.CONFIG.ANON_REDIRECT, - "api_enabled": checked(plexpy.CONFIG.API_ENABLED), - "api_key": plexpy.CONFIG.API_KEY, - "update_db_interval": plexpy.CONFIG.UPDATE_DB_INTERVAL, - "freeze_db": checked(plexpy.CONFIG.FREEZE_DB), - "backup_days": plexpy.CONFIG.BACKUP_DAYS, - "backup_dir": plexpy.CONFIG.BACKUP_DIR, - "backup_interval": plexpy.CONFIG.BACKUP_INTERVAL, - "cache_dir": plexpy.CONFIG.CACHE_DIR, - "export_dir": plexpy.CONFIG.EXPORT_DIR, - "log_dir": plexpy.CONFIG.LOG_DIR, - "log_blacklist": checked(plexpy.CONFIG.LOG_BLACKLIST), - "check_github": checked(plexpy.CONFIG.CHECK_GITHUB), + "http_root": jellypy.CONFIG.HTTP_ROOT, + "http_proxy": checked(jellypy.CONFIG.HTTP_PROXY), + "http_plex_admin": checked(jellypy.CONFIG.HTTP_PLEX_ADMIN), + "launch_browser": checked(jellypy.CONFIG.LAUNCH_BROWSER), + "launch_startup": checked(jellypy.CONFIG.LAUNCH_STARTUP), + "enable_https": checked(jellypy.CONFIG.ENABLE_HTTPS), + "https_create_cert": checked(jellypy.CONFIG.HTTPS_CREATE_CERT), + "https_cert": jellypy.CONFIG.HTTPS_CERT, + "https_cert_chain": jellypy.CONFIG.HTTPS_CERT_CHAIN, + "https_key": jellypy.CONFIG.HTTPS_KEY, + "https_domain": jellypy.CONFIG.HTTPS_DOMAIN, + "https_ip": jellypy.CONFIG.HTTPS_IP, + 
"http_base_url": jellypy.CONFIG.HTTP_BASE_URL, + "anon_redirect": jellypy.CONFIG.ANON_REDIRECT, + "api_enabled": checked(jellypy.CONFIG.API_ENABLED), + "api_key": jellypy.CONFIG.API_KEY, + "update_db_interval": jellypy.CONFIG.UPDATE_DB_INTERVAL, + "freeze_db": checked(jellypy.CONFIG.FREEZE_DB), + "backup_days": jellypy.CONFIG.BACKUP_DAYS, + "backup_dir": jellypy.CONFIG.BACKUP_DIR, + "backup_interval": jellypy.CONFIG.BACKUP_INTERVAL, + "cache_dir": jellypy.CONFIG.CACHE_DIR, + "export_dir": jellypy.CONFIG.EXPORT_DIR, + "log_dir": jellypy.CONFIG.LOG_DIR, + "log_blacklist": checked(jellypy.CONFIG.LOG_BLACKLIST), + "check_github": checked(jellypy.CONFIG.CHECK_GITHUB), "interface_list": interface_list, - "cache_sizemb": plexpy.CONFIG.CACHE_SIZEMB, - "pms_identifier": plexpy.CONFIG.PMS_IDENTIFIER, - "pms_ip": plexpy.CONFIG.PMS_IP, - "pms_logs_folder": plexpy.CONFIG.PMS_LOGS_FOLDER, - "pms_port": plexpy.CONFIG.PMS_PORT, - "pms_token": plexpy.CONFIG.PMS_TOKEN, - "pms_ssl": plexpy.CONFIG.PMS_SSL, - "pms_is_remote": plexpy.CONFIG.PMS_IS_REMOTE, - "pms_is_cloud": plexpy.CONFIG.PMS_IS_CLOUD, - "pms_url": plexpy.CONFIG.PMS_URL, - "pms_url_manual": checked(plexpy.CONFIG.PMS_URL_MANUAL), - "pms_uuid": plexpy.CONFIG.PMS_UUID, - "pms_web_url": plexpy.CONFIG.PMS_WEB_URL, - "pms_name": plexpy.CONFIG.PMS_NAME, - "pms_update_check_interval": plexpy.CONFIG.PMS_UPDATE_CHECK_INTERVAL, - "date_format": plexpy.CONFIG.DATE_FORMAT, - "time_format": plexpy.CONFIG.TIME_FORMAT, - "week_start_monday": checked(plexpy.CONFIG.WEEK_START_MONDAY), - "get_file_sizes": checked(plexpy.CONFIG.GET_FILE_SIZES), - "monitor_pms_updates": checked(plexpy.CONFIG.MONITOR_PMS_UPDATES), - "refresh_libraries_interval": plexpy.CONFIG.REFRESH_LIBRARIES_INTERVAL, - "refresh_libraries_on_startup": checked(plexpy.CONFIG.REFRESH_LIBRARIES_ON_STARTUP), - "refresh_users_interval": plexpy.CONFIG.REFRESH_USERS_INTERVAL, - "refresh_users_on_startup": checked(plexpy.CONFIG.REFRESH_USERS_ON_STARTUP), - "logging_ignore_interval": plexpy.CONFIG.LOGGING_IGNORE_INTERVAL, - "notify_consecutive": checked(plexpy.CONFIG.NOTIFY_CONSECUTIVE), - "notify_upload_posters": plexpy.CONFIG.NOTIFY_UPLOAD_POSTERS, - "notify_recently_added_upgrade": checked(plexpy.CONFIG.NOTIFY_RECENTLY_ADDED_UPGRADE), - "notify_group_recently_added_grandparent": checked(plexpy.CONFIG.NOTIFY_GROUP_RECENTLY_ADDED_GRANDPARENT), - "notify_group_recently_added_parent": checked(plexpy.CONFIG.NOTIFY_GROUP_RECENTLY_ADDED_PARENT), - "notify_recently_added_delay": plexpy.CONFIG.NOTIFY_RECENTLY_ADDED_DELAY, - "notify_remote_access_threshold": plexpy.CONFIG.NOTIFY_REMOTE_ACCESS_THRESHOLD, - "notify_concurrent_by_ip": checked(plexpy.CONFIG.NOTIFY_CONCURRENT_BY_IP), - "notify_concurrent_threshold": plexpy.CONFIG.NOTIFY_CONCURRENT_THRESHOLD, - "notify_continued_session_threshold": plexpy.CONFIG.NOTIFY_CONTINUED_SESSION_THRESHOLD, - "notify_new_device_initial_only": checked(plexpy.CONFIG.NOTIFY_NEW_DEVICE_INITIAL_ONLY), - "home_sections": json.dumps(plexpy.CONFIG.HOME_SECTIONS), - "home_stats_cards": json.dumps(plexpy.CONFIG.HOME_STATS_CARDS), - "home_library_cards": json.dumps(plexpy.CONFIG.HOME_LIBRARY_CARDS), - "home_refresh_interval": plexpy.CONFIG.HOME_REFRESH_INTERVAL, - "buffer_threshold": plexpy.CONFIG.BUFFER_THRESHOLD, - "buffer_wait": plexpy.CONFIG.BUFFER_WAIT, - "group_history_tables": checked(plexpy.CONFIG.GROUP_HISTORY_TABLES), - "git_token": plexpy.CONFIG.GIT_TOKEN, - "imgur_client_id": plexpy.CONFIG.IMGUR_CLIENT_ID, - "cloudinary_cloud_name": plexpy.CONFIG.CLOUDINARY_CLOUD_NAME, - 
"cloudinary_api_key": plexpy.CONFIG.CLOUDINARY_API_KEY, - "cloudinary_api_secret": plexpy.CONFIG.CLOUDINARY_API_SECRET, - "cache_images": checked(plexpy.CONFIG.CACHE_IMAGES), - "pms_version": plexpy.CONFIG.PMS_VERSION, - "plexpy_auto_update": checked(plexpy.CONFIG.PLEXPY_AUTO_UPDATE), - "git_branch": plexpy.CONFIG.GIT_BRANCH, - "git_path": plexpy.CONFIG.GIT_PATH, - "git_remote": plexpy.CONFIG.GIT_REMOTE, - "movie_watched_percent": plexpy.CONFIG.MOVIE_WATCHED_PERCENT, - "tv_watched_percent": plexpy.CONFIG.TV_WATCHED_PERCENT, - "music_watched_percent": plexpy.CONFIG.MUSIC_WATCHED_PERCENT, - "themoviedb_lookup": checked(plexpy.CONFIG.THEMOVIEDB_LOOKUP), - "tvmaze_lookup": checked(plexpy.CONFIG.TVMAZE_LOOKUP), - "musicbrainz_lookup": checked(plexpy.CONFIG.MUSICBRAINZ_LOOKUP), - "show_advanced_settings": plexpy.CONFIG.SHOW_ADVANCED_SETTINGS, - "newsletter_dir": plexpy.CONFIG.NEWSLETTER_DIR, - "newsletter_self_hosted": checked(plexpy.CONFIG.NEWSLETTER_SELF_HOSTED), - "newsletter_auth": plexpy.CONFIG.NEWSLETTER_AUTH, - "newsletter_password": plexpy.CONFIG.NEWSLETTER_PASSWORD, - "newsletter_inline_styles": checked(plexpy.CONFIG.NEWSLETTER_INLINE_STYLES), - "newsletter_custom_dir": plexpy.CONFIG.NEWSLETTER_CUSTOM_DIR, - "sys_tray_icon": checked(plexpy.CONFIG.SYS_TRAY_ICON) + "cache_sizemb": jellypy.CONFIG.CACHE_SIZEMB, + "pms_identifier": jellypy.CONFIG.PMS_IDENTIFIER, + "pms_ip": jellypy.CONFIG.PMS_IP, + "pms_logs_folder": jellypy.CONFIG.PMS_LOGS_FOLDER, + "pms_port": jellypy.CONFIG.PMS_PORT, + "pms_token": jellypy.CONFIG.PMS_TOKEN, + "pms_ssl": jellypy.CONFIG.PMS_SSL, + "pms_is_remote": jellypy.CONFIG.PMS_IS_REMOTE, + "pms_is_cloud": jellypy.CONFIG.PMS_IS_CLOUD, + "pms_url": jellypy.CONFIG.PMS_URL, + "pms_url_manual": checked(jellypy.CONFIG.PMS_URL_MANUAL), + "pms_uuid": jellypy.CONFIG.PMS_UUID, + "pms_web_url": jellypy.CONFIG.PMS_WEB_URL, + "pms_name": jellypy.CONFIG.PMS_NAME, + "pms_update_check_interval": jellypy.CONFIG.PMS_UPDATE_CHECK_INTERVAL, + "date_format": jellypy.CONFIG.DATE_FORMAT, + "time_format": jellypy.CONFIG.TIME_FORMAT, + "week_start_monday": checked(jellypy.CONFIG.WEEK_START_MONDAY), + "get_file_sizes": checked(jellypy.CONFIG.GET_FILE_SIZES), + "monitor_pms_updates": checked(jellypy.CONFIG.MONITOR_PMS_UPDATES), + "refresh_libraries_interval": jellypy.CONFIG.REFRESH_LIBRARIES_INTERVAL, + "refresh_libraries_on_startup": checked(jellypy.CONFIG.REFRESH_LIBRARIES_ON_STARTUP), + "refresh_users_interval": jellypy.CONFIG.REFRESH_USERS_INTERVAL, + "refresh_users_on_startup": checked(jellypy.CONFIG.REFRESH_USERS_ON_STARTUP), + "logging_ignore_interval": jellypy.CONFIG.LOGGING_IGNORE_INTERVAL, + "notify_consecutive": checked(jellypy.CONFIG.NOTIFY_CONSECUTIVE), + "notify_upload_posters": jellypy.CONFIG.NOTIFY_UPLOAD_POSTERS, + "notify_recently_added_upgrade": checked(jellypy.CONFIG.NOTIFY_RECENTLY_ADDED_UPGRADE), + "notify_group_recently_added_grandparent": checked(jellypy.CONFIG.NOTIFY_GROUP_RECENTLY_ADDED_GRANDPARENT), + "notify_group_recently_added_parent": checked(jellypy.CONFIG.NOTIFY_GROUP_RECENTLY_ADDED_PARENT), + "notify_recently_added_delay": jellypy.CONFIG.NOTIFY_RECENTLY_ADDED_DELAY, + "notify_remote_access_threshold": jellypy.CONFIG.NOTIFY_REMOTE_ACCESS_THRESHOLD, + "notify_concurrent_by_ip": checked(jellypy.CONFIG.NOTIFY_CONCURRENT_BY_IP), + "notify_concurrent_threshold": jellypy.CONFIG.NOTIFY_CONCURRENT_THRESHOLD, + "notify_continued_session_threshold": jellypy.CONFIG.NOTIFY_CONTINUED_SESSION_THRESHOLD, + "notify_new_device_initial_only": 
checked(jellypy.CONFIG.NOTIFY_NEW_DEVICE_INITIAL_ONLY), + "home_sections": json.dumps(jellypy.CONFIG.HOME_SECTIONS), + "home_stats_cards": json.dumps(jellypy.CONFIG.HOME_STATS_CARDS), + "home_library_cards": json.dumps(jellypy.CONFIG.HOME_LIBRARY_CARDS), + "home_refresh_interval": jellypy.CONFIG.HOME_REFRESH_INTERVAL, + "buffer_threshold": jellypy.CONFIG.BUFFER_THRESHOLD, + "buffer_wait": jellypy.CONFIG.BUFFER_WAIT, + "group_history_tables": checked(jellypy.CONFIG.GROUP_HISTORY_TABLES), + "git_token": jellypy.CONFIG.GIT_TOKEN, + "imgur_client_id": jellypy.CONFIG.IMGUR_CLIENT_ID, + "cloudinary_cloud_name": jellypy.CONFIG.CLOUDINARY_CLOUD_NAME, + "cloudinary_api_key": jellypy.CONFIG.CLOUDINARY_API_KEY, + "cloudinary_api_secret": jellypy.CONFIG.CLOUDINARY_API_SECRET, + "cache_images": checked(jellypy.CONFIG.CACHE_IMAGES), + "pms_version": jellypy.CONFIG.PMS_VERSION, + "plexpy_auto_update": checked(jellypy.CONFIG.PLEXPY_AUTO_UPDATE), + "git_branch": jellypy.CONFIG.GIT_BRANCH, + "git_path": jellypy.CONFIG.GIT_PATH, + "git_remote": jellypy.CONFIG.GIT_REMOTE, + "movie_watched_percent": jellypy.CONFIG.MOVIE_WATCHED_PERCENT, + "tv_watched_percent": jellypy.CONFIG.TV_WATCHED_PERCENT, + "music_watched_percent": jellypy.CONFIG.MUSIC_WATCHED_PERCENT, + "themoviedb_lookup": checked(jellypy.CONFIG.THEMOVIEDB_LOOKUP), + "tvmaze_lookup": checked(jellypy.CONFIG.TVMAZE_LOOKUP), + "musicbrainz_lookup": checked(jellypy.CONFIG.MUSICBRAINZ_LOOKUP), + "show_advanced_settings": jellypy.CONFIG.SHOW_ADVANCED_SETTINGS, + "newsletter_dir": jellypy.CONFIG.NEWSLETTER_DIR, + "newsletter_self_hosted": checked(jellypy.CONFIG.NEWSLETTER_SELF_HOSTED), + "newsletter_auth": jellypy.CONFIG.NEWSLETTER_AUTH, + "newsletter_password": jellypy.CONFIG.NEWSLETTER_PASSWORD, + "newsletter_inline_styles": checked(jellypy.CONFIG.NEWSLETTER_INLINE_STYLES), + "newsletter_custom_dir": jellypy.CONFIG.NEWSLETTER_CUSTOM_DIR, + "sys_tray_icon": checked(jellypy.CONFIG.SYS_TRAY_ICON) } return serve_template(templatename="settings.html", title="Settings", config=config, kwargs=kwargs) @@ -3216,12 +3216,12 @@ class WebInterface(object): # If http password exists in config, do not overwrite when blank value received if kwargs.get('http_password'): - if kwargs['http_password'] == ' ' and plexpy.CONFIG.HTTP_PASSWORD != '': - if kwargs.get('http_hash_password') and not plexpy.CONFIG.HTTP_HASHED_PASSWORD: - kwargs['http_password'] = make_hash(plexpy.CONFIG.HTTP_PASSWORD) + if kwargs['http_password'] == ' ' and jellypy.CONFIG.HTTP_PASSWORD != '': + if kwargs.get('http_hash_password') and not jellypy.CONFIG.HTTP_HASHED_PASSWORD: + kwargs['http_password'] = make_hash(jellypy.CONFIG.HTTP_PASSWORD) kwargs['http_hashed_password'] = 1 else: - kwargs['http_password'] = plexpy.CONFIG.HTTP_PASSWORD + kwargs['http_password'] = jellypy.CONFIG.HTTP_PASSWORD elif kwargs['http_password'] and kwargs.get('http_hash_password'): kwargs['http_password'] = make_hash(kwargs['http_password']) @@ -3244,31 +3244,31 @@ class WebInterface(object): kwargs[plain_config] = kwargs[use_config] del kwargs[use_config] - if kwargs.get('launch_startup') != plexpy.CONFIG.LAUNCH_STARTUP or \ - kwargs.get('launch_browser') != plexpy.CONFIG.LAUNCH_BROWSER: + if kwargs.get('launch_startup') != jellypy.CONFIG.LAUNCH_STARTUP or \ + kwargs.get('launch_browser') != jellypy.CONFIG.LAUNCH_BROWSER: startup_changed = True # If we change any monitoring settings, make sure we reschedule tasks. 
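# ---- Editorial sketch (illustration only; not part of the patch) ----
# In the comparisons below, values posted from the settings form arrive
# as strings, so integer-valued config options are cast with str() first;
# without the cast every save would register as a changed interval.

import jellypy

kwargs = {'refresh_users_interval': '12'}  # hypothetical form input (strings)

reschedule = (
    kwargs.get('refresh_users_interval') != str(jellypy.CONFIG.REFRESH_USERS_INTERVAL)
)
# ----------------------------------------------------------------------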
- if kwargs.get('check_github') != plexpy.CONFIG.CHECK_GITHUB or \ - kwargs.get('refresh_libraries_interval') != str(plexpy.CONFIG.REFRESH_LIBRARIES_INTERVAL) or \ - kwargs.get('refresh_users_interval') != str(plexpy.CONFIG.REFRESH_USERS_INTERVAL) or \ - kwargs.get('pms_update_check_interval') != str(plexpy.CONFIG.PMS_UPDATE_CHECK_INTERVAL) or \ - kwargs.get('monitor_pms_updates') != plexpy.CONFIG.MONITOR_PMS_UPDATES or \ - kwargs.get('pms_url_manual') != plexpy.CONFIG.PMS_URL_MANUAL: + if kwargs.get('check_github') != jellypy.CONFIG.CHECK_GITHUB or \ + kwargs.get('refresh_libraries_interval') != str(jellypy.CONFIG.REFRESH_LIBRARIES_INTERVAL) or \ + kwargs.get('refresh_users_interval') != str(jellypy.CONFIG.REFRESH_USERS_INTERVAL) or \ + kwargs.get('pms_update_check_interval') != str(jellypy.CONFIG.PMS_UPDATE_CHECK_INTERVAL) or \ + kwargs.get('monitor_pms_updates') != jellypy.CONFIG.MONITOR_PMS_UPDATES or \ + kwargs.get('pms_url_manual') != jellypy.CONFIG.PMS_URL_MANUAL: reschedule = True # If we change the SSL setting for PMS or PMS remote setting, make sure we grab the new url. - if kwargs.get('pms_ssl') != str(plexpy.CONFIG.PMS_SSL) or \ - kwargs.get('pms_is_remote') != str(plexpy.CONFIG.PMS_IS_REMOTE) or \ - kwargs.get('pms_url_manual') != plexpy.CONFIG.PMS_URL_MANUAL: + if kwargs.get('pms_ssl') != str(jellypy.CONFIG.PMS_SSL) or \ + kwargs.get('pms_is_remote') != str(jellypy.CONFIG.PMS_IS_REMOTE) or \ + kwargs.get('pms_url_manual') != jellypy.CONFIG.PMS_URL_MANUAL: server_changed = True # If we change the HTTPS setting, make sure we generate a new certificate. if kwargs.get('enable_https') and kwargs.get('https_create_cert'): - if kwargs.get('https_domain') != plexpy.CONFIG.HTTPS_DOMAIN or \ - kwargs.get('https_ip') != plexpy.CONFIG.HTTPS_IP or \ - kwargs.get('https_cert') != plexpy.CONFIG.HTTPS_CERT or \ - kwargs.get('https_key') != plexpy.CONFIG.HTTPS_KEY: + if kwargs.get('https_domain') != jellypy.CONFIG.HTTPS_DOMAIN or \ + kwargs.get('https_ip') != jellypy.CONFIG.HTTPS_IP or \ + kwargs.get('https_cert') != jellypy.CONFIG.HTTPS_CERT or \ + kwargs.get('https_key') != jellypy.CONFIG.HTTPS_KEY: https_changed = True # Remove config with 'hsec-' prefix and change home_sections to list @@ -3286,7 +3286,7 @@ class WebInterface(object): kwargs['home_stats_cards'] = kwargs['home_stats_cards'].split(',') if kwargs['home_stats_cards'] == ['first_run_wizard']: - kwargs['home_stats_cards'] = plexpy.CONFIG.HOME_STATS_CARDS + kwargs['home_stats_cards'] = jellypy.CONFIG.HOME_STATS_CARDS # Remove config with 'hlcard-' prefix and change home_library_cards to list if kwargs.get('home_library_cards'): @@ -3308,10 +3308,10 @@ class WebInterface(object): if kwargs.pop('auth_changed', None): refresh_users = True - plexpy.CONFIG.process_kwargs(kwargs) + jellypy.CONFIG.process_kwargs(kwargs) # Write the config - plexpy.CONFIG.write() + jellypy.CONFIG.write() # Enable or disable system startup if startup_changed: @@ -3323,7 +3323,7 @@ class WebInterface(object): # Get new server URLs for SSL communications and get new server friendly name if server_changed: plextv.get_server_resources() - if plexpy.WS_CONNECTED: + if jellypy.WS_CONNECTED: web_socket.reconnect() # If first run, start websocket @@ -3333,11 +3333,11 @@ class WebInterface(object): # Reconfigure scheduler if intervals changed if reschedule: - plexpy.initialize_scheduler() + jellypy.initialize_scheduler() # Generate a new HTTPS certificate if https_changed: - create_https_certificates(plexpy.CONFIG.HTTPS_CERT, plexpy.CONFIG.HTTPS_KEY) + 
create_https_certificates(jellypy.CONFIG.HTTPS_CERT, jellypy.CONFIG.HTTPS_KEY) # Refresh users table if our server IP changes. if refresh_libraries: @@ -3359,7 +3359,7 @@ class WebInterface(object): @cherrypy.tools.json_out() @requireAuth(member_of("admin")) def backup_config(self, **kwargs): - """ Creates a manual backup of the plexpy.db file """ + """ Creates a manual backup of the jellypy.db file """ result = config.make_backup() @@ -3394,17 +3394,17 @@ class WebInterface(object): return {'plexpass': plexpass, 'pms_platform': common.PMS_PLATFORM_NAME_OVERRIDES.get( - plexpy.CONFIG.PMS_PLATFORM, plexpy.CONFIG.PMS_PLATFORM), - 'pms_update_channel': plexpy.CONFIG.PMS_UPDATE_CHANNEL, - 'pms_update_distro': plexpy.CONFIG.PMS_UPDATE_DISTRO, - 'pms_update_distro_build': plexpy.CONFIG.PMS_UPDATE_DISTRO_BUILD, + jellypy.CONFIG.PMS_PLATFORM, jellypy.CONFIG.PMS_PLATFORM), + 'pms_update_channel': jellypy.CONFIG.PMS_UPDATE_CHANNEL, + 'pms_update_distro': jellypy.CONFIG.PMS_UPDATE_DISTRO, + 'pms_update_distro_build': jellypy.CONFIG.PMS_UPDATE_DISTRO_BUILD, 'plex_update_channel': 'plexpass' if update_channel == 'beta' else 'public'} @cherrypy.expose @cherrypy.tools.json_out() @requireAuth(member_of("admin")) def backup_db(self, **kwargs): - """ Creates a manual backup of the plexpy.db file """ + """ Creates a manual backup of the jellypy.db file """ result = database.make_backup() @@ -3736,11 +3736,11 @@ class WebInterface(object): @cherrypy.tools.json_out() @requireAuth(member_of("admin")) def facebook_retrieve_token(self, **kwargs): - if plexpy.CONFIG.FACEBOOK_TOKEN == 'temp': + if jellypy.CONFIG.FACEBOOK_TOKEN == 'temp': return {'result': 'waiting'} - elif plexpy.CONFIG.FACEBOOK_TOKEN: - token = plexpy.CONFIG.FACEBOOK_TOKEN - plexpy.CONFIG.FACEBOOK_TOKEN = '' + elif jellypy.CONFIG.FACEBOOK_TOKEN: + token = jellypy.CONFIG.FACEBOOK_TOKEN + jellypy.CONFIG.FACEBOOK_TOKEN = '' return {'result': 'success', 'msg': 'Authorization successful.', 'access_token': token} else: return {'result': 'error', 'msg': 'Failed to request authorization.'} @@ -3779,10 +3779,10 @@ class WebInterface(object): kwargs[plain_config] = kwargs[use_config] del kwargs[use_config] - plexpy.CONFIG.process_kwargs(kwargs) + jellypy.CONFIG.process_kwargs(kwargs) # Write the config - plexpy.CONFIG.write() + jellypy.CONFIG.write() cherrypy.response.status = 200 @@ -3902,7 +3902,7 @@ class WebInterface(object): return {'result': 'error', 'message': 'No app specified for import'} if database_file: - database_path = os.path.join(plexpy.CONFIG.CACHE_DIR, database_file.filename + '.import.db') + database_path = os.path.join(jellypy.CONFIG.CACHE_DIR, database_file.filename + '.import.db') logger.info("Received database file '%s' for import. Saving to cache '%s'.", database_file.filename, database_path) with open(database_path, 'wb') as f: @@ -3993,7 +3993,7 @@ class WebInterface(object): 'message': 'Database import is in progress. Please wait until it is finished to import a config.'} if config_file: - config_path = os.path.join(plexpy.CONFIG.CACHE_DIR, config_file.filename + '.import.ini') + config_path = os.path.join(jellypy.CONFIG.CACHE_DIR, config_file.filename + '.import.ini') logger.info("Received config file '%s' for import. 
Saving to cache '%s'.", config_file.filename, config_path) with open(config_path, 'wb') as f: @@ -4037,7 +4037,7 @@ class WebInterface(object): if key: path = base64.b64decode(key).decode('UTF-8') if not path: - path = plexpy.DATA_DIR + path = jellypy.DATA_DIR data = helpers.browse_path(path=path, filter_ext=filter_ext) if data: @@ -4160,7 +4160,7 @@ class WebInterface(object): if helpers.bool_true(test_websocket): # Quick test websocket connection ws_url = result['url'].replace('http', 'ws', 1) + '/:/websockets/notifications' - header = ['X-Plex-Token: %s' % plexpy.CONFIG.PMS_TOKEN] + header = ['X-Plex-Token: %s' % jellypy.CONFIG.PMS_TOKEN] logger.debug("Testing websocket connection...") try: @@ -4240,8 +4240,8 @@ class WebInterface(object): @requireAuth(member_of("admin")) def generate_api_key(self, device=None, **kwargs): apikey = '' - while not apikey or apikey == plexpy.CONFIG.API_KEY or mobile_app.get_mobile_device_by_token(device_token=apikey): - apikey = plexpy.generate_uuid() + while not apikey or apikey == jellypy.CONFIG.API_KEY or mobile_app.get_mobile_device_by_token(device_token=apikey): + apikey = jellypy.generate_uuid() logger.info("New API key generated.") logger._BLACKLIST_WORDS.add(apikey) @@ -4275,40 +4275,40 @@ class WebInterface(object): """ versioncheck.check_update() - if plexpy.UPDATE_AVAILABLE is None: + if jellypy.UPDATE_AVAILABLE is None: update = {'result': 'error', 'update': None, 'message': 'You are running an unknown version of Tautulli.' } - elif plexpy.UPDATE_AVAILABLE == 'release': + elif jellypy.UPDATE_AVAILABLE == 'release': update = {'result': 'success', 'update': True, 'release': True, - 'message': 'A new release (%s) of Tautulli is available.' % plexpy.LATEST_RELEASE, - 'current_release': plexpy.common.RELEASE, - 'latest_release': plexpy.LATEST_RELEASE, + 'message': 'A new release (%s) of Tautulli is available.' % jellypy.LATEST_RELEASE, + 'current_release': jellypy.common.RELEASE, + 'latest_release': jellypy.LATEST_RELEASE, 'release_url': helpers.anon_url( 'https://github.com/%s/%s/releases/tag/%s' - % (plexpy.CONFIG.GIT_USER, - plexpy.CONFIG.GIT_REPO, - plexpy.LATEST_RELEASE)) + % (jellypy.CONFIG.GIT_USER, + jellypy.CONFIG.GIT_REPO, + jellypy.LATEST_RELEASE)) } - elif plexpy.UPDATE_AVAILABLE == 'commit': + elif jellypy.UPDATE_AVAILABLE == 'commit': update = {'result': 'success', 'update': True, 'release': False, 'message': 'A newer version of Tautulli is available.', - 'current_version': plexpy.CURRENT_VERSION, - 'latest_version': plexpy.LATEST_VERSION, - 'commits_behind': plexpy.COMMITS_BEHIND, + 'current_version': jellypy.CURRENT_VERSION, + 'latest_version': jellypy.LATEST_VERSION, + 'commits_behind': jellypy.COMMITS_BEHIND, 'compare_url': helpers.anon_url( 'https://github.com/%s/%s/compare/%s...%s' - % (plexpy.CONFIG.GIT_USER, - plexpy.CONFIG.GIT_REPO, - plexpy.CURRENT_VERSION, - plexpy.LATEST_VERSION)) + % (jellypy.CONFIG.GIT_USER, + jellypy.CONFIG.GIT_REPO, + jellypy.CURRENT_VERSION, + jellypy.LATEST_VERSION)) } else: @@ -4317,8 +4317,8 @@ class WebInterface(object): 'message': 'Tautulli is up to date.' 
} - if plexpy.DOCKER or plexpy.SNAP or plexpy.FROZEN: - update['install_type'] = plexpy.INSTALL_TYPE + if jellypy.DOCKER or jellypy.SNAP or jellypy.FROZEN: + update['install_type'] = jellypy.INSTALL_TYPE return update @@ -4328,10 +4328,10 @@ class WebInterface(object): message = title quote = self.random_arnold_quotes() if signal: - plexpy.SIGNAL = signal + jellypy.SIGNAL = signal - if plexpy.CONFIG.HTTP_ROOT.strip('/'): - new_http_root = '/' + plexpy.CONFIG.HTTP_ROOT.strip('/') + '/' + if jellypy.CONFIG.HTTP_ROOT.strip('/'): + new_http_root = '/' + jellypy.CONFIG.HTTP_ROOT.strip('/') + '/' else: new_http_root = '/' @@ -4351,27 +4351,27 @@ class WebInterface(object): @cherrypy.expose @requireAuth(member_of("admin")) def update(self, **kwargs): - if plexpy.PYTHON2: - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT + "home?update=python2") - if plexpy.DOCKER or plexpy.SNAP: - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT + "home") + if jellypy.PYTHON2: + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT + "home?update=python2") + if jellypy.DOCKER or jellypy.SNAP: + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT + "home") # Show changelog after updating - plexpy.CONFIG.__setattr__('UPDATE_SHOW_CHANGELOG', 1) - plexpy.CONFIG.write() + jellypy.CONFIG.__setattr__('UPDATE_SHOW_CHANGELOG', 1) + jellypy.CONFIG.write() return self.do_state_change('update', 'Updating', 120) @cherrypy.expose @requireAuth(member_of("admin")) def checkout_git_branch(self, git_remote=None, git_branch=None, **kwargs): - if git_branch == plexpy.CONFIG.GIT_BRANCH: + if git_branch == jellypy.CONFIG.GIT_BRANCH: logger.error("Already on the %s branch" % git_branch) - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT + "home") + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT + "home") # Set the new git remote and branch - plexpy.CONFIG.__setattr__('GIT_REMOTE', git_remote) - plexpy.CONFIG.__setattr__('GIT_BRANCH', git_branch) - plexpy.CONFIG.write() + jellypy.CONFIG.__setattr__('GIT_REMOTE', git_remote) + jellypy.CONFIG.__setattr__('GIT_BRANCH', git_branch) + jellypy.CONFIG.write() return self.do_state_change('checkout', 'Switching Git Branches', 120) @cherrypy.expose @@ -4392,14 +4392,14 @@ class WebInterface(object): latest_only = helpers.bool_true(latest_only) since_prev_release = helpers.bool_true(since_prev_release) - if since_prev_release and plexpy.PREV_RELEASE == common.RELEASE: + if since_prev_release and jellypy.PREV_RELEASE == common.RELEASE: latest_only = True since_prev_release = False # Set update changelog shown status if helpers.bool_true(update_shown): - plexpy.CONFIG.__setattr__('UPDATE_SHOW_CHANGELOG', 0) - plexpy.CONFIG.write() + jellypy.CONFIG.__setattr__('UPDATE_SHOW_CHANGELOG', 0) + jellypy.CONFIG.write() return versioncheck.read_changelog(latest_only=latest_only, since_prev_release=since_prev_release) @@ -4409,13 +4409,13 @@ class WebInterface(object): @requireAuth() def info(self, rating_key=None, guid=None, source=None, section_id=None, user_id=None, **kwargs): if rating_key and not str(rating_key).isdigit(): - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT) + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT) metadata = None config = { - "pms_identifier": plexpy.CONFIG.PMS_IDENTIFIER, - "pms_web_url": plexpy.CONFIG.PMS_WEB_URL + "pms_identifier": jellypy.CONFIG.PMS_IDENTIFIER, + "pms_web_url": jellypy.CONFIG.PMS_WEB_URL } if user_id: @@ -4443,13 +4443,13 @@ class WebInterface(object): if metadata: if metadata['section_id'] and not allow_session_library(metadata['section_id']): - raise 
cherrypy.HTTPRedirect(plexpy.HTTP_ROOT) + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT) return serve_template(templatename="info.html", metadata=metadata, title="Info", config=config, source=source, user_info=user_info) else: if get_session_user_id(): - raise cherrypy.HTTPRedirect(plexpy.HTTP_ROOT) + raise cherrypy.HTTPRedirect(jellypy.HTTP_ROOT) else: return self.update_metadata(rating_key) @@ -4514,7 +4514,7 @@ class WebInterface(object): if notifier_id: data['notifier_id'] = notifier_id - plexpy.NOTIFY_QUEUE.put(data) + jellypy.NOTIFY_QUEUE.put(data) return {'result': 'success', 'message': 'Notification queued.'} else: @@ -4560,13 +4560,13 @@ class WebInterface(object): ``` """ if isinstance(img, str) and img.startswith('interfaces/default/images'): - fp = os.path.join(plexpy.PROG_DIR, 'data', img) + fp = os.path.join(jellypy.PROG_DIR, 'data', img) return serve_file(path=fp, content_type='image/png') if not img and not rating_key: if fallback in common.DEFAULT_IMAGES: fbi = common.DEFAULT_IMAGES[fallback] - fp = os.path.join(plexpy.PROG_DIR, 'data', fbi) + fp = os.path.join(jellypy.PROG_DIR, 'data', fbi) return serve_file(path=fp, content_type='image/png') logger.warn('No image input received.') return @@ -4596,7 +4596,7 @@ class WebInterface(object): return {'img_hash': img_hash} fp = '{}.{}'.format(img_hash, img_format) # we want to be able to preview the thumbs - c_dir = os.path.join(plexpy.CONFIG.CACHE_DIR, 'images') + c_dir = os.path.join(jellypy.CONFIG.CACHE_DIR, 'images') ffp = os.path.join(c_dir, fp) if not os.path.exists(c_dir): @@ -4605,7 +4605,7 @@ class WebInterface(object): clip = helpers.bool_true(clip) try: - if not plexpy.CONFIG.CACHE_IMAGES or refresh or 'indexes' in img: + if not jellypy.CONFIG.CACHE_IMAGES or refresh or 'indexes' in img: raise NotFound return serve_file(path=ffp, content_type='image/png') @@ -4626,7 +4626,7 @@ class WebInterface(object): if result and result[0]: cherrypy.response.headers['Content-type'] = result[1] - if plexpy.CONFIG.CACHE_IMAGES and 'indexes' not in img: + if jellypy.CONFIG.CACHE_IMAGES and 'indexes' not in img: with open(ffp, 'wb') as f: f.write(result[0]) @@ -4638,7 +4638,7 @@ class WebInterface(object): logger.warn("Failed to get image %s, falling back to %s." 
% (img, fallback))
 
         if fallback in common.DEFAULT_IMAGES:
             fbi = common.DEFAULT_IMAGES[fallback]
-            fp = os.path.join(plexpy.PROG_DIR, 'data', fbi)
+            fp = os.path.join(jellypy.PROG_DIR, 'data', fbi)
             return serve_file(path=fp, content_type='image/png')
 
     @cherrypy.expose
@@ -4647,7 +4647,7 @@ class WebInterface(object):
         cherrypy.response.headers['Cache-Control'] = 'max-age=3600'  # 1 hour
 
         if len(args) >= 2 and args[0] == 'images':
-            resource_dir = os.path.join(str(plexpy.PROG_DIR), 'data/interfaces/default/')
+            resource_dir = os.path.join(str(jellypy.PROG_DIR), 'data/interfaces/default/')
             try:
                 return serve_file(path=os.path.join(resource_dir, *args), content_type='image/png')
             except NotFound:
@@ -4657,7 +4657,7 @@ class WebInterface(object):
 
         if img_hash in common.DEFAULT_IMAGES:
             fbi = common.DEFAULT_IMAGES[img_hash]
-            fp = os.path.join(plexpy.PROG_DIR, 'data', fbi)
+            fp = os.path.join(jellypy.PROG_DIR, 'data', fbi)
             return serve_file(path=fp, content_type='image/png')
 
         img_info = notification_handler.get_hash_image_info(img_hash=img_hash)
 
@@ -4676,11 +4676,11 @@ class WebInterface(object):
         config_file = config.FILENAME
 
         try:
-            plexpy.CONFIG.write()
+            jellypy.CONFIG.write()
         except:
             pass
 
-        return serve_download(plexpy.CONFIG_FILE, name=config_file)
+        return serve_download(jellypy.CONFIG_FILE, name=config_file)
 
     @cherrypy.expose
     @requireAuth(member_of("admin"))
@@ -4692,12 +4692,12 @@ class WebInterface(object):
         try:
             db = database.MonitorDatabase()
             db.connection.execute('begin immediate')
-            shutil.copyfile(plexpy.DB_FILE, os.path.join(plexpy.CONFIG.CACHE_DIR, database_file))
+            shutil.copyfile(jellypy.DB_FILE, os.path.join(jellypy.CONFIG.CACHE_DIR, database_file))
             db.connection.rollback()
         except:
             pass
 
-        return serve_download(os.path.join(plexpy.CONFIG.CACHE_DIR, database_file), name=database_file)
+        return serve_download(os.path.join(jellypy.CONFIG.CACHE_DIR, database_file), name=database_file)
 
     @cherrypy.expose
     @requireAuth(member_of("admin"))
@@ -4719,7 +4719,7 @@ class WebInterface(object):
         except:
             pass
 
-        return serve_download(os.path.join(plexpy.CONFIG.LOG_DIR, filename), name=filename)
+        return serve_download(os.path.join(jellypy.CONFIG.LOG_DIR, filename), name=filename)
 
     @cherrypy.expose
     @requireAuth(member_of("admin"))
@@ -4729,13 +4729,13 @@ class WebInterface(object):
         log_type = kwargs.get('log_type', 'server')
         log_file = ""
-        if plexpy.CONFIG.PMS_LOGS_FOLDER:
+        if jellypy.CONFIG.PMS_LOGS_FOLDER:
             if log_type == "server":
                 log_file = 'Plex Media Server.log'
-                log_file_path = os.path.join(plexpy.CONFIG.PMS_LOGS_FOLDER, log_file)
+                log_file_path = os.path.join(jellypy.CONFIG.PMS_LOGS_FOLDER, log_file)
             elif log_type == "scanner":
                 log_file = 'Plex Media Scanner.log'
-                log_file_path = os.path.join(plexpy.CONFIG.PMS_LOGS_FOLDER, log_file)
+                log_file_path = os.path.join(jellypy.CONFIG.PMS_LOGS_FOLDER, log_file)
         else:
             return "Plex log folder not set in the settings."
 
@@ -4759,7 +4759,7 @@ class WebInterface(object):
     @addtoapi()
     def delete_cache(self, folder='', **kwargs):
         """ Delete and recreate the cache directory. """
-        cache_dir = os.path.join(plexpy.CONFIG.CACHE_DIR, folder)
+        cache_dir = os.path.join(jellypy.CONFIG.CACHE_DIR, folder)
         result = 'success'
         msg = 'Cleared the %scache.' % (folder + ' ' if folder else '')
         try:
@@ -5734,7 +5734,7 @@ class WebInterface(object):
             ```
         """
         try:
-            pms_connect = pmsconnect.PmsConnect(token=plexpy.CONFIG.PMS_TOKEN)
+            pms_connect = pmsconnect.PmsConnect(token=jellypy.CONFIG.PMS_TOKEN)
             result = pms_connect.get_current_activity()
 
             if result:
@@ -6423,14 +6423,14 @@ class WebInterface(object):
     @cherrypy.expose
     def newsletter(self, *args, **kwargs):
         request_uri = cherrypy.request.wsgi_environ['REQUEST_URI']
-        if plexpy.CONFIG.NEWSLETTER_AUTH == 2:
+        if jellypy.CONFIG.NEWSLETTER_AUTH == 2:
             redirect_uri = request_uri.replace('/newsletter', '/newsletter_auth')
             raise cherrypy.HTTPRedirect(redirect_uri)
 
-        elif plexpy.CONFIG.NEWSLETTER_AUTH == 1 and plexpy.CONFIG.NEWSLETTER_PASSWORD:
+        elif jellypy.CONFIG.NEWSLETTER_AUTH == 1 and jellypy.CONFIG.NEWSLETTER_PASSWORD:
             if len(args) >= 2 and args[0] == 'image':
                 return self.newsletter_auth(*args, **kwargs)
-            elif kwargs.pop('key', None) == plexpy.CONFIG.NEWSLETTER_PASSWORD:
+            elif kwargs.pop('key', None) == jellypy.CONFIG.NEWSLETTER_PASSWORD:
                 return self.newsletter_auth(*args, **kwargs)
             else:
                 return serve_template(templatename="newsletter_auth.html",
@@ -6447,7 +6447,7 @@ class WebInterface(object):
         # Keep this for backwards compatibility for images through /newsletter/image
         if len(args) >= 2 and args[0] == 'image':
             if args[1] == 'images':
-                resource_dir = os.path.join(str(plexpy.PROG_DIR), 'data/interfaces/default/')
+                resource_dir = os.path.join(str(jellypy.PROG_DIR), 'data/interfaces/default/')
                 try:
                     return serve_file(path=os.path.join(resource_dir, *args[1:]), content_type='image/png')
                 except NotFound:
@@ -6535,7 +6535,7 @@ class WebInterface(object):
         status = {'result': 'success', 'message': 'Ok'}
 
         if args or kwargs:
-            if not cherrypy.request.path_info == '/api/v2' and plexpy.AUTH_ENABLED:
+            if not cherrypy.request.path_info == '/api/v2' and jellypy.AUTH_ENABLED:
                 cherrypy.request.config['auth.require'] = []
                 check_auth()
 
@@ -6571,7 +6571,7 @@ class WebInterface(object):
             ```
         """
         cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
-        status = {'result': 'success', 'connected': plexpy.PLEX_SERVER_UP}
+        status = {'result': 'success', 'connected': jellypy.PLEX_SERVER_UP}
 
         return status
 
diff --git a/plexpy/webstart.py b/jellypy/webstart.py
similarity index 85%
rename from plexpy/webstart.py
rename to jellypy/webstart.py
index 1818d4f8..783fa4a1 100644
--- a/plexpy/webstart.py
+++ b/jellypy/webstart.py
@@ -20,34 +20,34 @@ import sys
 
 import cherrypy
 
-import plexpy
-if plexpy.PYTHON2:
+import jellypy
+if jellypy.PYTHON2:
     import logger
     import webauth
     from helpers import create_https_certificates
     from webserve import WebInterface, BaseRedirect
 else:
-    from plexpy import logger
-    from plexpy import webauth
-    from plexpy.helpers import create_https_certificates
-    from plexpy.webserve import WebInterface, BaseRedirect
+    from jellypy import logger
+    from jellypy import webauth
+    from jellypy.helpers import create_https_certificates
+    from jellypy.webserve import WebInterface, BaseRedirect
 
 
 def start():
     logger.info("Tautulli WebStart :: Initializing Tautulli web server...")
     web_config = {
-        'http_port': plexpy.HTTP_PORT,
-        'http_host': plexpy.CONFIG.HTTP_HOST,
-        'http_root': plexpy.CONFIG.HTTP_ROOT,
-        'http_environment': plexpy.CONFIG.HTTP_ENVIRONMENT,
-        'http_proxy': plexpy.CONFIG.HTTP_PROXY,
-        'enable_https': plexpy.CONFIG.ENABLE_HTTPS,
-        'https_cert': plexpy.CONFIG.HTTPS_CERT,
-        'https_cert_chain': plexpy.CONFIG.HTTPS_CERT_CHAIN,
-        'https_key': plexpy.CONFIG.HTTPS_KEY,
-        'http_username': plexpy.CONFIG.HTTP_USERNAME,
-        'http_password': plexpy.CONFIG.HTTP_PASSWORD,
-        'http_basic_auth': plexpy.CONFIG.HTTP_BASIC_AUTH
+        'http_port': jellypy.HTTP_PORT,
+        'http_host': jellypy.CONFIG.HTTP_HOST,
+        'http_root': jellypy.CONFIG.HTTP_ROOT,
+        'http_environment': jellypy.CONFIG.HTTP_ENVIRONMENT,
+        'http_proxy': jellypy.CONFIG.HTTP_PROXY,
+        'enable_https': jellypy.CONFIG.ENABLE_HTTPS,
+        'https_cert': jellypy.CONFIG.HTTPS_CERT,
+        'https_cert_chain': jellypy.CONFIG.HTTPS_CERT_CHAIN,
+        'https_key': jellypy.CONFIG.HTTPS_KEY,
+        'http_username': jellypy.CONFIG.HTTP_USERNAME,
+        'http_password': jellypy.CONFIG.HTTP_PASSWORD,
+        'http_basic_auth': jellypy.CONFIG.HTTP_BASIC_AUTH
     }
 
     initialize(web_config)
@@ -73,7 +73,7 @@ def initialize(options):
 
     if enable_https:
         # If either the HTTPS certificate or key do not exist, try to make self-signed ones.
-        if plexpy.CONFIG.HTTPS_CREATE_CERT and \
+        if jellypy.CONFIG.HTTPS_CREATE_CERT and \
                 (not (https_cert and os.path.exists(https_cert)) or
                  not (https_key and os.path.exists(https_key))):
             if not create_https_certificates(https_cert, https_key):
@@ -96,7 +96,7 @@ def initialize(options):
         'tools.decode.on': True
     }
 
-    if plexpy.DEV:
+    if jellypy.DEV:
         options_dict['environment'] = "test_suite"
         options_dict['engine.autoreload.on'] = True
 
@@ -114,39 +114,39 @@ def initialize(options):
     if options['http_password']:
         login_allowed = ["Tautulli admin (username is '%s')" % options['http_username']]
-        if plexpy.CONFIG.HTTP_PLEX_ADMIN:
+        if jellypy.CONFIG.HTTP_PLEX_ADMIN:
             login_allowed.append("Plex admin")
 
         logger.info("Tautulli WebStart :: Web server authentication is enabled: %s.", ' and '.join(login_allowed))
 
         if options['http_basic_auth']:
-            plexpy.AUTH_ENABLED = False
+            jellypy.AUTH_ENABLED = False
             basic_auth_enabled = True
         else:
-            plexpy.AUTH_ENABLED = True
+            jellypy.AUTH_ENABLED = True
            basic_auth_enabled = False
             cherrypy.tools.auth = cherrypy.Tool('before_handler', webauth.check_auth, priority=2)
     else:
-        plexpy.AUTH_ENABLED = False
+        jellypy.AUTH_ENABLED = False
         basic_auth_enabled = False
 
     if options['http_root'].strip('/'):
-        plexpy.HTTP_ROOT = options['http_root'] = '/' + str(options['http_root'].strip('/')) + '/'
+        jellypy.HTTP_ROOT = options['http_root'] = '/' + str(options['http_root'].strip('/')) + '/'
     else:
-        plexpy.HTTP_ROOT = options['http_root'] = '/'
+        jellypy.HTTP_ROOT = options['http_root'] = '/'
 
     cherrypy.config.update(options_dict)
 
     conf = {
         '/': {
             'engine.timeout_monitor.on': False,
-            'tools.staticdir.root': os.path.join(plexpy.PROG_DIR, 'data'),
+            'tools.staticdir.root': os.path.join(jellypy.PROG_DIR, 'data'),
             'tools.proxy.on': bool(options['http_proxy']),
             'tools.gzip.on': True,
             'tools.gzip.mime_types': ['text/html', 'text/plain', 'text/css',
                                       'text/javascript', 'application/json',
                                       'application/javascript'],
-            'tools.auth.on': plexpy.AUTH_ENABLED,
+            'tools.auth.on': jellypy.AUTH_ENABLED,
             'tools.auth_basic.on': basic_auth_enabled,
             'tools.auth_basic.realm': 'Tautulli web server',
             'tools.auth_basic.checkpassword': cherrypy.lib.auth_basic.checkpassword_dict({
@@ -216,7 +216,7 @@ def initialize(options):
         },
         '/cache': {
             'tools.staticdir.on': True,
-            'tools.staticdir.dir': plexpy.CONFIG.CACHE_DIR,
+            'tools.staticdir.dir': jellypy.CONFIG.CACHE_DIR,
             'tools.caching.on': True,
             'tools.caching.force': True,
             'tools.caching.delay': 0,
@@ -227,7 +227,7 @@ def initialize(options):
         },
         #'/pms_image_proxy': {
         #    'tools.staticdir.on': True,
-        #    'tools.staticdir.dir': os.path.join(plexpy.CONFIG.CACHE_DIR, 'images'),
+        #    'tools.staticdir.dir': os.path.join(jellypy.CONFIG.CACHE_DIR, 'images'),
         #    'tools.caching.on': True,
         #    'tools.caching.force': True,
         #    'tools.caching.delay': 0,
@@ -238,7 +238,7 @@ def initialize(options):
         #},
         '/favicon.ico': {
             'tools.staticfile.on': True,
-            'tools.staticfile.filename': os.path.abspath(os.path.join(plexpy.PROG_DIR, 'data/interfaces/default/images/favicon/favicon.ico')),
+            'tools.staticfile.filename': os.path.abspath(os.path.join(jellypy.PROG_DIR, 'data/interfaces/default/images/favicon/favicon.ico')),
             'tools.caching.on': True,
             'tools.caching.force': True,
             'tools.caching.delay': 0,
@@ -250,14 +250,14 @@ def initialize(options):
     }
 
     cherrypy.tree.mount(WebInterface(), options['http_root'], config=conf)
-    if plexpy.HTTP_ROOT != '/':
+    if jellypy.HTTP_ROOT != '/':
         cherrypy.tree.mount(BaseRedirect(), '/')
 
     try:
         logger.info("Tautulli WebStart :: Starting Tautulli web server on %s://%s:%d%s", protocol,
                     options['http_host'], options['http_port'], options['http_root'])
         #cherrypy.process.servers.check_port(str(options['http_host']), options['http_port'])
-        if not plexpy.DEV:
+        if not jellypy.DEV:
             cherrypy.server.start()
         else:
             cherrypy.engine.signals.subscribe()
diff --git a/plexpy/windows.py b/jellypy/windows.py
similarity index 83%
rename from plexpy/windows.py
rename to jellypy/windows.py
index 35adc2ff..6b709c9b 100644
--- a/plexpy/windows.py
+++ b/jellypy/windows.py
@@ -29,34 +29,34 @@ try:
 except ImportError:
     import _winreg as winreg
 
-import plexpy
-if plexpy.PYTHON2:
+import jellypy
+if jellypy.PYTHON2:
     import common
     import logger
     import versioncheck
 else:
-    from plexpy import common
-    from plexpy import logger
-    from plexpy import versioncheck
+    from jellypy import common
+    from jellypy import logger
+    from jellypy import versioncheck
 
 
 class WindowsSystemTray(object):
     def __init__(self):
-        self.image_dir = os.path.join(plexpy.PROG_DIR, 'data/interfaces/', plexpy.CONFIG.INTERFACE, 'images')
+        self.image_dir = os.path.join(jellypy.PROG_DIR, 'data/interfaces/', jellypy.CONFIG.INTERFACE, 'images')
         self.icon = os.path.join(self.image_dir, 'logo-circle.ico')
 
-        if plexpy.UPDATE_AVAILABLE:
+        if jellypy.UPDATE_AVAILABLE:
             self.hover_text = common.PRODUCT + ' - Update Available!'
             self.update_title = 'Check for Updates - Update Available!'
         else:
             self.hover_text = common.PRODUCT
             self.update_title = 'Check for Updates'
 
-        if plexpy.CONFIG.LAUNCH_STARTUP:
+        if jellypy.CONFIG.LAUNCH_STARTUP:
             launch_start_icon = os.path.join(self.image_dir, 'check-solid.ico')
         else:
             launch_start_icon = None
-        if plexpy.CONFIG.LAUNCH_BROWSER:
+        if jellypy.CONFIG.LAUNCH_BROWSER:
             launch_browser_icon = os.path.join(self.image_dir, 'check-solid.ico')
         else:
             launch_browser_icon = None
@@ -70,7 +70,7 @@ class WindowsSystemTray(object):
             [self.update_title, None, self.tray_check_update, None],
             ['Restart', None, self.tray_restart, None]
         ]
-        if not plexpy.FROZEN:
+        if not jellypy.FROZEN:
             self.menu.insert(6, ['Update', None, self.tray_update, None])
 
         self.tray_icon = SysTrayIcon(self.icon, self.hover_text, self.menu, on_quit=self.tray_quit)
@@ -89,22 +89,22 @@ class WindowsSystemTray(object):
         self.tray_icon.update(**kwargs)
 
     def tray_open(self, tray_icon):
-        plexpy.launch_browser(plexpy.CONFIG.HTTP_HOST, plexpy.HTTP_PORT, plexpy.HTTP_ROOT)
+        jellypy.launch_browser(jellypy.CONFIG.HTTP_HOST, jellypy.HTTP_PORT, jellypy.HTTP_ROOT)
 
     def tray_startup(self, tray_icon):
-        plexpy.CONFIG.LAUNCH_STARTUP = not plexpy.CONFIG.LAUNCH_STARTUP
+        jellypy.CONFIG.LAUNCH_STARTUP = not jellypy.CONFIG.LAUNCH_STARTUP
         set_startup()
 
     def tray_browser(self, tray_icon):
-        plexpy.CONFIG.LAUNCH_BROWSER = not plexpy.CONFIG.LAUNCH_BROWSER
+        jellypy.CONFIG.LAUNCH_BROWSER = not jellypy.CONFIG.LAUNCH_BROWSER
         set_startup()
 
     def tray_check_update(self, tray_icon):
         versioncheck.check_update()
 
     def tray_update(self, tray_icon):
-        if plexpy.UPDATE_AVAILABLE:
-            plexpy.SIGNAL = 'update'
+        if jellypy.UPDATE_AVAILABLE:
+            jellypy.SIGNAL = 'update'
         else:
             self.hover_text = common.PRODUCT + ' - No Update Available'
             self.update_title = 'Check for Updates - No Update Available'
@@ -112,13 +112,13 @@ class WindowsSystemTray(object):
             self.update(hover_text=self.hover_text, menu_options=self.menu)
 
     def tray_restart(self, tray_icon):
-        plexpy.SIGNAL = 'restart'
+        jellypy.SIGNAL = 'restart'
 
     def tray_quit(self, tray_icon):
-        plexpy.SIGNAL = 'shutdown'
+        jellypy.SIGNAL = 'shutdown'
 
     def change_tray_update_icon(self):
-        if plexpy.UPDATE_AVAILABLE:
+        if jellypy.UPDATE_AVAILABLE:
             self.hover_text = common.PRODUCT + ' - Update Available!'
             self.update_title = 'Check for Updates - Update Available!'
         else:
@@ -128,11 +128,11 @@ class WindowsSystemTray(object):
             self.update(hover_text=self.hover_text, menu_options=self.menu)
 
     def change_tray_icons(self):
-        if plexpy.CONFIG.LAUNCH_STARTUP:
+        if jellypy.CONFIG.LAUNCH_STARTUP:
             launch_start_icon = os.path.join(self.image_dir, 'check-solid.ico')
         else:
             launch_start_icon = None
-        if plexpy.CONFIG.LAUNCH_BROWSER:
+        if jellypy.CONFIG.LAUNCH_BROWSER:
             launch_browser_icon = os.path.join(self.image_dir, 'check-solid.ico')
         else:
             launch_browser_icon = None
@@ -142,23 +142,23 @@ class WindowsSystemTray(object):
 
 def set_startup():
-    if plexpy.WIN_SYS_TRAY_ICON:
-        plexpy.WIN_SYS_TRAY_ICON.change_tray_icons()
+    if jellypy.WIN_SYS_TRAY_ICON:
+        jellypy.WIN_SYS_TRAY_ICON.change_tray_icons()
 
     startup_reg_path = "Software\\Microsoft\\Windows\\CurrentVersion\\Run"
 
     exe = sys.executable
-    run_args = [arg for arg in plexpy.ARGS if arg != '--nolaunch']
-    if plexpy.FROZEN:
+    run_args = [arg for arg in jellypy.ARGS if arg != '--nolaunch']
+    if jellypy.FROZEN:
         args = [exe] + run_args
     else:
-        args = [exe, plexpy.FULL_PATH] + run_args
+        args = [exe, jellypy.FULL_PATH] + run_args
 
-    registry_key_name = '{}_{}'.format(common.PRODUCT, plexpy.CONFIG.PMS_UUID)
+    registry_key_name = '{}_{}'.format(common.PRODUCT, jellypy.CONFIG.PMS_UUID)
 
     cmd = ' '.join(cmd_quote(arg) for arg in args).replace('python.exe', 'pythonw.exe').replace("'", '"')
 
-    if plexpy.CONFIG.LAUNCH_STARTUP:
+    if jellypy.CONFIG.LAUNCH_STARTUP:
         # Rename old Tautulli registry key
         try:
             registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, startup_reg_path, 0, winreg.KEY_ALL_ACCESS)
diff --git a/lib/IPy.py b/lib/IPy.py
deleted file mode 100644
index d1ea8457..00000000
--- a/lib/IPy.py
+++ /dev/null
@@ -1,1652 +0,0 @@
-"""
-IPy - class and tools for handling of IPv4 and IPv6 addresses and networks.
-See README file for learn how to use IPy.
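
[Editor's note: the download_database handler in webserve.py above wraps the file copy in a 'begin immediate' transaction so no write can land mid-copy. A minimal, self-contained sketch of that backup pattern, stdlib only; the paths in the usage comment are illustrative, not values from this patch.]

import shutil
import sqlite3

def backup_sqlite(src_path, dest_path):
    # 'BEGIN IMMEDIATE' takes SQLite's reserved write lock: other
    # connections may still read, but none can modify the file while
    # it is being copied.
    conn = sqlite3.connect(src_path, isolation_level=None)
    try:
        conn.execute('begin immediate')
        shutil.copyfile(src_path, dest_path)
        conn.execute('rollback')  # release the lock; nothing was changed
    finally:
        conn.close()

# Example (paths are hypothetical):
# backup_sqlite('jellypy.db', 'cache/jellypy.backup.db')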
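[Editor's note: webstart.initialize() above follows the usual CherryPy recipe: update the global config, build a per-path conf dict (static dirs, gzip, auth tools), then mount the application with cherrypy.tree.mount. A runnable sketch of that pattern, assuming CherryPy is installed; the app class, paths, and port are illustrative and not taken from the patch.]

import os
import cherrypy

class Root(object):
    @cherrypy.expose
    def index(self):
        return "Hello from a JellyPy-style CherryPy mount"

conf = {
    '/': {
        # staticdir.root must be an absolute path
        'tools.staticdir.root': os.path.abspath('data'),
        'tools.gzip.on': True,
        'tools.gzip.mime_types': ['text/html', 'application/json'],
    },
    '/cache': {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': os.path.abspath('cache'),
    },
}

if __name__ == '__main__':
    cherrypy.config.update({'server.socket_host': '0.0.0.0',
                            'server.socket_port': 8181})
    cherrypy.tree.mount(Root(), '/', config=conf)
    cherrypy.engine.start()
    cherrypy.engine.block()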
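[Editor's note: set_startup() in windows.py above implements launch-at-login by writing the assembled command line under the per-user Run key, named after the product and server UUID. A minimal sketch of the same winreg calls, Windows-only, Python 3; the entry name and command below are placeholders.]

import winreg

RUN_KEY = r"Software\Microsoft\Windows\CurrentVersion\Run"

def set_launch_at_startup(name, command, enable=True):
    # Per-user startup entries live under HKCU, so no admin rights needed.
    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, RUN_KEY, 0, winreg.KEY_ALL_ACCESS)
    try:
        if enable:
            # REG_SZ value: the full command line to run at login.
            winreg.SetValueEx(key, name, 0, winreg.REG_SZ, command)
        else:
            try:
                winreg.DeleteValue(key, name)
            except FileNotFoundError:
                pass  # nothing to remove
    finally:
        winreg.CloseKey(key)

# Example (name and command are hypothetical):
# set_launch_at_startup('JellyPy_example', r'"C:\Python39\pythonw.exe" JellyPy.py')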
- -Further Information might be available at: -https://github.com/haypo/python-ipy -""" - -__version__ = '0.83' - -import bisect -import collections -import sys -import types - -# Definition of the Ranges for IPv4 IPs -# this should include www.iana.org/assignments/ipv4-address-space -# and www.iana.org/assignments/multicast-addresses -IPv4ranges = { - '0': 'PUBLIC', # fall back - '00000000': 'PRIVATE', # 0/8 - '00001010': 'PRIVATE', # 10/8 - '0110010001': 'CARRIER_GRADE_NAT', #100.64/10 - '01111111': 'PRIVATE', # 127.0/8 - '1': 'PUBLIC', # fall back - '1010100111111110': 'PRIVATE', # 169.254/16 - '101011000001': 'PRIVATE', # 172.16/12 - '1100000010101000': 'PRIVATE', # 192.168/16 - '111': 'RESERVED', # 224/3 - } - -# Definition of the Ranges for IPv6 IPs -# http://www.iana.org/assignments/ipv6-address-space/ -# http://www.iana.org/assignments/ipv6-unicast-address-assignments/ -# http://www.iana.org/assignments/ipv6-multicast-addresses/ -IPv6ranges = { - '00000000' : 'RESERVED', # ::/8 - '0' * 96 : 'RESERVED', # ::/96 Formerly IPV4COMP [RFC4291] - '0' * 128 : 'UNSPECIFIED', # ::/128 - '0' * 127 + '1' : 'LOOPBACK', # ::1/128 - '0' * 80 + '1' * 16 : 'IPV4MAP', # ::ffff:0:0/96 - '00000000011001001111111110011011' + '0' * 64 : 'WKP46TRANS', # 0064:ff9b::/96 Well-Known-Prefix [RFC6052] - '00000001' : 'UNASSIGNED', # 0100::/8 - '0000001' : 'RESERVED', # 0200::/7 Formerly NSAP [RFC4048] - '0000010' : 'RESERVED', # 0400::/7 Formerly IPX [RFC3513] - '0000011' : 'RESERVED', # 0600::/7 - '00001' : 'RESERVED', # 0800::/5 - '0001' : 'RESERVED', # 1000::/4 - '001' : 'GLOBAL-UNICAST', # 2000::/3 [RFC4291] - '00100000000000010000000' : 'SPECIALPURPOSE', # 2001::/23 [RFC4773] - '00100000000000010000000000000000' : 'TEREDO', # 2001::/32 [RFC4380] - '00100000000000010000000000000010' + '0' * 16 : 'BMWG', # 2001:0002::/48 Benchmarking [RFC5180] - '0010000000000001000000000001' : 'ORCHID', # 2001:0010::/28 (Temp until 2014-03-21) [RFC4843] - '00100000000000010000001' : 'ALLOCATED APNIC', # 2001:0200::/23 - '00100000000000010000010' : 'ALLOCATED ARIN', # 2001:0400::/23 - '00100000000000010000011' : 'ALLOCATED RIPE NCC', # 2001:0600::/23 - '00100000000000010000100' : 'ALLOCATED RIPE NCC', # 2001:0800::/23 - '00100000000000010000101' : 'ALLOCATED RIPE NCC', # 2001:0a00::/23 - '00100000000000010000110' : 'ALLOCATED APNIC', # 2001:0c00::/23 - '00100000000000010000110110111000' : 'DOCUMENTATION', # 2001:0db8::/32 [RFC3849] - '00100000000000010000111' : 'ALLOCATED APNIC', # 2001:0e00::/23 - '00100000000000010001001' : 'ALLOCATED LACNIC', # 2001:1200::/23 - '00100000000000010001010' : 'ALLOCATED RIPE NCC', # 2001:1400::/23 - '00100000000000010001011' : 'ALLOCATED RIPE NCC', # 2001:1600::/23 - '00100000000000010001100' : 'ALLOCATED ARIN', # 2001:1800::/23 - '00100000000000010001101' : 'ALLOCATED RIPE NCC', # 2001:1a00::/23 - '0010000000000001000111' : 'ALLOCATED RIPE NCC', # 2001:1c00::/22 - '00100000000000010010' : 'ALLOCATED RIPE NCC', # 2001:2000::/20 - '001000000000000100110' : 'ALLOCATED RIPE NCC', # 2001:3000::/21 - '0010000000000001001110' : 'ALLOCATED RIPE NCC', # 2001:3800::/22 - '0010000000000001001111' : 'RESERVED', # 2001:3c00::/22 Possible future allocation to RIPE NCC - '00100000000000010100000' : 'ALLOCATED RIPE NCC', # 2001:4000::/23 - '00100000000000010100001' : 'ALLOCATED AFRINIC', # 2001:4200::/23 - '00100000000000010100010' : 'ALLOCATED APNIC', # 2001:4400::/23 - '00100000000000010100011' : 'ALLOCATED RIPE NCC', # 2001:4600::/23 - '00100000000000010100100' : 'ALLOCATED ARIN', # 2001:4800::/23 - 
'00100000000000010100101' : 'ALLOCATED RIPE NCC', # 2001:4a00::/23 - '00100000000000010100110' : 'ALLOCATED RIPE NCC', # 2001:4c00::/23 - '00100000000000010101' : 'ALLOCATED RIPE NCC', # 2001:5000::/20 - '0010000000000001100' : 'ALLOCATED APNIC', # 2001:8000::/19 - '00100000000000011010' : 'ALLOCATED APNIC', # 2001:a000::/20 - '00100000000000011011' : 'ALLOCATED APNIC', # 2001:b000::/20 - '0010000000000010' : '6TO4', # 2002::/16 "6to4" [RFC3056] - '001000000000001100' : 'ALLOCATED RIPE NCC', # 2003::/18 - '001001000000' : 'ALLOCATED APNIC', # 2400::/12 - '001001100000' : 'ALLOCATED ARIN', # 2600::/12 - '00100110000100000000000' : 'ALLOCATED ARIN', # 2610::/23 - '00100110001000000000000' : 'ALLOCATED ARIN', # 2620::/23 - '001010000000' : 'ALLOCATED LACNIC', # 2800::/12 - '001010100000' : 'ALLOCATED RIPE NCC', # 2a00::/12 - '001011000000' : 'ALLOCATED AFRINIC', # 2c00::/12 - '00101101' : 'RESERVED', # 2d00::/8 - '0010111' : 'RESERVED', # 2e00::/7 - '0011' : 'RESERVED', # 3000::/4 - '010' : 'RESERVED', # 4000::/3 - '011' : 'RESERVED', # 6000::/3 - '100' : 'RESERVED', # 8000::/3 - '101' : 'RESERVED', # a000::/3 - '110' : 'RESERVED', # c000::/3 - '1110' : 'RESERVED', # e000::/4 - '11110' : 'RESERVED', # f000::/5 - '111110' : 'RESERVED', # f800::/6 - '1111110' : 'ULA', # fc00::/7 [RFC4193] - '111111100' : 'RESERVED', # fe00::/9 - '1111111010' : 'LINKLOCAL', # fe80::/10 - '1111111011' : 'RESERVED', # fec0::/10 Formerly SITELOCAL [RFC4291] - '11111111' : 'MULTICAST', # ff00::/8 - '1111111100000001' : 'NODE-LOCAL MULTICAST', # ff01::/16 - '1111111100000010' : 'LINK-LOCAL MULTICAST', # ff02::/16 - '1111111100000100' : 'ADMIN-LOCAL MULTICAST', # ff04::/16 - '1111111100000101' : 'SITE-LOCAL MULTICAST', # ff05::/16 - '1111111100001000' : 'ORG-LOCAL MULTICAST', # ff08::/16 - '1111111100001110' : 'GLOBAL MULTICAST', # ff0e::/16 - '1111111100001111' : 'RESERVED MULTICAST', # ff0f::/16 - '111111110011' : 'PREFIX-BASED MULTICAST', # ff30::/12 [RFC3306] - '111111110111' : 'RP-EMBEDDED MULTICAST', # ff70::/12 [RFC3956] - } - -MAX_IPV4_ADDRESS = 0xffffffff -MAX_IPV6_ADDRESS = 0xffffffffffffffffffffffffffffffff -IPV6_TEST_MAP = 0xffffffffffffffffffffffff00000000 -IPV6_MAP_MASK = 0x00000000000000000000ffff00000000 - -if sys.version_info >= (3,): - INT_TYPES = (int,) - STR_TYPES = (str,) - xrange = range -else: - INT_TYPES = (int, long) - STR_TYPES = (str, unicode) - - -class IPint(object): - """Handling of IP addresses returning integers. - - Use class IP instead because some features are not implemented for - IPint.""" - - def __init__(self, data, ipversion=0, make_net=0): - """Create an instance of an IP object. - - Data can be a network specification or a single IP. IP - addresses can be specified in all forms understood by - parseAddress(). The size of a network can be specified as - - /prefixlen a.b.c.0/24 2001:658:22a:cafe::/64 - -lastIP a.b.c.0-a.b.c.255 2001:658:22a:cafe::-2001:658:22a:cafe:ffff:ffff:ffff:ffff - /decimal netmask a.b.c.d/255.255.255.0 not supported for IPv6 - - If no size specification is given a size of 1 address (/32 for - IPv4 and /128 for IPv6) is assumed. - - If make_net is True, an IP address will be transformed into the network - address by applying the specified netmask. - - >>> print(IP('127.0.0.0/8')) - 127.0.0.0/8 - >>> print(IP('127.0.0.0/255.0.0.0')) - 127.0.0.0/8 - >>> print(IP('127.0.0.0-127.255.255.255')) - 127.0.0.0/8 - >>> print(IP('127.0.0.1/255.0.0.0', make_net=True)) - 127.0.0.0/8 - - See module documentation for more examples. 
- """ - - # Print no Prefixlen for /32 and /128 - self.NoPrefixForSingleIp = 1 - - # Do we want prefix printed by default? see _printPrefix() - self.WantPrefixLen = None - - netbits = 0 - prefixlen = -1 - - # handling of non string values in constructor - if isinstance(data, INT_TYPES): - self.ip = int(data) - if ipversion == 0: - if self.ip <= MAX_IPV4_ADDRESS: - ipversion = 4 - else: - ipversion = 6 - if ipversion == 4: - if self.ip > MAX_IPV4_ADDRESS: - raise ValueError("IPv4 Address can't be larger than %x: %x" % (MAX_IPV4_ADDRESS, self.ip)) - prefixlen = 32 - elif ipversion == 6: - if self.ip > MAX_IPV6_ADDRESS: - raise ValueError("IPv6 Address can't be larger than %x: %x" % (MAX_IPV6_ADDRESS, self.ip)) - prefixlen = 128 - else: - raise ValueError("only IPv4 and IPv6 supported") - self._ipversion = ipversion - self._prefixlen = prefixlen - # handle IP instance as an parameter - elif isinstance(data, IPint): - self._ipversion = data._ipversion - self._prefixlen = data._prefixlen - self.ip = data.ip - elif isinstance(data, STR_TYPES): - # TODO: refactor me! - # splitting of a string into IP and prefixlen et. al. - x = data.split('-') - if len(x) == 2: - # a.b.c.0-a.b.c.255 specification ? - (ip, last) = x - (self.ip, parsedVersion) = parseAddress(ip) - if parsedVersion != 4: - raise ValueError("first-last notation only allowed for IPv4") - (last, lastversion) = parseAddress(last) - if lastversion != 4: - raise ValueError("last address should be IPv4, too") - if last < self.ip: - raise ValueError("last address should be larger than first") - size = last - self.ip - netbits = _count1Bits(size) - # make sure the broadcast is the same as the last ip - # otherwise it will return /16 for something like: - # 192.168.0.0-192.168.191.255 - if IP('%s/%s' % (ip, 32-netbits)).broadcast().int() != last: - raise ValueError("the range %s is not on a network boundary." % data) - elif len(x) == 1: - x = data.split('/') - # if no prefix is given use defaults - if len(x) == 1: - ip = x[0] - prefixlen = -1 - elif len(x) > 2: - raise ValueError("only one '/' allowed in IP Address") - else: - (ip, prefixlen) = x - if prefixlen.find('.') != -1: - # check if the user might have used a netmask like - # a.b.c.d/255.255.255.0 - (netmask, vers) = parseAddress(prefixlen) - if vers != 4: - raise ValueError("netmask must be IPv4") - prefixlen = _netmaskToPrefixlen(netmask) - elif len(x) > 2: - raise ValueError("only one '-' allowed in IP Address") - else: - raise ValueError("can't parse") - - (self.ip, parsedVersion) = parseAddress(ip) - if ipversion == 0: - ipversion = parsedVersion - if prefixlen == -1: - bits = _ipVersionToLen(ipversion) - prefixlen = bits - netbits - self._ipversion = ipversion - self._prefixlen = int(prefixlen) - - if make_net: - self.ip = self.ip & _prefixlenToNetmask(self._prefixlen, self._ipversion) - - if not _checkNetaddrWorksWithPrefixlen(self.ip, - self._prefixlen, self._ipversion): - raise ValueError("%s has invalid prefix length (%s)" % (repr(self), self._prefixlen)) - else: - raise TypeError("Unsupported data type: %s" % type(data)) - - def int(self): - """Return the first / base / network addess as an (long) integer. - - The same as IP[0]. - - >>> "%X" % IP('10.0.0.0/8').int() - 'A000000' - """ - return self.ip - - def version(self): - """Return the IP version of this Object. - - >>> IP('10.0.0.0/8').version() - 4 - >>> IP('::1').version() - 6 - """ - return self._ipversion - - def prefixlen(self): - """Returns Network Prefixlen. 
- - >>> IP('10.0.0.0/8').prefixlen() - 8 - """ - return self._prefixlen - - def net(self): - """ - Return the base (first) address of a network as an (long) integer. - """ - return self.int() - - def broadcast(self): - """ - Return the broadcast (last) address of a network as an (long) integer. - - The same as IP[-1].""" - return self.int() + self.len() - 1 - - def _printPrefix(self, want): - """Prints Prefixlen/Netmask. - - Not really. In fact it is our universal Netmask/Prefixlen printer. - This is considered an internal function. - - want == 0 / None don't return anything 1.2.3.0 - want == 1 /prefix 1.2.3.0/24 - want == 2 /netmask 1.2.3.0/255.255.255.0 - want == 3 -lastip 1.2.3.0-1.2.3.255 - """ - - if (self._ipversion == 4 and self._prefixlen == 32) or \ - (self._ipversion == 6 and self._prefixlen == 128): - if self.NoPrefixForSingleIp: - want = 0 - if want == None: - want = self.WantPrefixLen - if want == None: - want = 1 - if want: - if want == 2: - # this should work with IP and IPint - netmask = self.netmask() - if not isinstance(netmask, INT_TYPES): - netmask = netmask.int() - return "/%s" % (intToIp(netmask, self._ipversion)) - elif want == 3: - return "-%s" % (intToIp(self.ip + self.len() - 1, self._ipversion)) - else: - # default - return "/%d" % (self._prefixlen) - else: - return '' - - # We have different flavours to convert to: - # strFullsize 127.0.0.1 2001:0658:022a:cafe:0200:c0ff:fe8d:08fa - # strNormal 127.0.0.1 2001:658:22a:cafe:200:c0ff:fe8d:08fa - # strCompressed 127.0.0.1 2001:658:22a:cafe::1 - # strHex 0x7F000001 0x20010658022ACAFE0200C0FFFE8D08FA - # strDec 2130706433 42540616829182469433547974687817795834 - - def strBin(self, wantprefixlen = None): - """Return a string representation as a binary value. - - >>> print(IP('127.0.0.1').strBin()) - 01111111000000000000000000000001 - >>> print(IP('2001:0658:022a:cafe:0200::1').strBin()) - 00100000000000010000011001011000000000100010101011001010111111100000001000000000000000000000000000000000000000000000000000000001 - """ - - bits = _ipVersionToLen(self._ipversion) - if self.WantPrefixLen == None and wantprefixlen == None: - wantprefixlen = 0 - ret = _intToBin(self.ip) - return '0' * (bits - len(ret)) + ret + self._printPrefix(wantprefixlen) - - def strCompressed(self, wantprefixlen = None): - """Return a string representation in compressed format using '::' Notation. 
- - >>> IP('127.0.0.1').strCompressed() - '127.0.0.1' - >>> IP('2001:0658:022a:cafe:0200::1').strCompressed() - '2001:658:22a:cafe:200::1' - >>> IP('ffff:ffff:ffff:ffff:ffff:f:f:fffc/127').strCompressed() - 'ffff:ffff:ffff:ffff:ffff:f:f:fffc/127' - """ - - if self.WantPrefixLen == None and wantprefixlen == None: - wantprefixlen = 1 - - if self._ipversion == 4: - return self.strFullsize(wantprefixlen) - else: - if self.ip >> 32 == 0xffff: - ipv4 = intToIp(self.ip & MAX_IPV4_ADDRESS, 4) - text = "::ffff:" + ipv4 + self._printPrefix(wantprefixlen) - return text - # find the longest sequence of '0' - hextets = [int(x, 16) for x in self.strFullsize(0).split(':')] - # every element of followingzeros will contain the number of zeros - # following the corresponding element of hextets - followingzeros = [0] * 8 - for i in xrange(len(hextets)): - followingzeros[i] = _countFollowingZeros(hextets[i:]) - # compressionpos is the position where we can start removing zeros - compressionpos = followingzeros.index(max(followingzeros)) - if max(followingzeros) > 1: - # genererate string with the longest number of zeros cut out - # now we need hextets as strings - hextets = [x for x in self.strNormal(0).split(':')] - while compressionpos < len(hextets) and hextets[compressionpos] == '0': - del(hextets[compressionpos]) - hextets.insert(compressionpos, '') - if compressionpos + 1 >= len(hextets): - hextets.append('') - if compressionpos == 0: - hextets = [''] + hextets - return ':'.join(hextets) + self._printPrefix(wantprefixlen) - else: - return self.strNormal(0) + self._printPrefix(wantprefixlen) - - def strNormal(self, wantprefixlen = None): - """Return a string representation in the usual format. - - >>> print(IP('127.0.0.1').strNormal()) - 127.0.0.1 - >>> print(IP('2001:0658:022a:cafe:0200::1').strNormal()) - 2001:658:22a:cafe:200:0:0:1 - """ - - if self.WantPrefixLen == None and wantprefixlen == None: - wantprefixlen = 1 - - if self._ipversion == 4: - ret = self.strFullsize(0) - elif self._ipversion == 6: - ret = ':'.join(["%x" % x for x in [int(x, 16) for x in self.strFullsize(0).split(':')]]) - else: - raise ValueError("only IPv4 and IPv6 supported") - - - - return ret + self._printPrefix(wantprefixlen) - - def strFullsize(self, wantprefixlen = None): - """Return a string representation in the non-mangled format. - - >>> print(IP('127.0.0.1').strFullsize()) - 127.0.0.1 - >>> print(IP('2001:0658:022a:cafe:0200::1').strFullsize()) - 2001:0658:022a:cafe:0200:0000:0000:0001 - """ - - if self.WantPrefixLen == None and wantprefixlen == None: - wantprefixlen = 1 - - return intToIp(self.ip, self._ipversion) + self._printPrefix(wantprefixlen) - - def strHex(self, wantprefixlen = None): - """Return a string representation in hex format in lower case. - - >>> print(IP('127.0.0.1').strHex()) - 0x7f000001 - >>> print(IP('2001:0658:022a:cafe:0200::1').strHex()) - 0x20010658022acafe0200000000000001 - """ - - if self.WantPrefixLen == None and wantprefixlen == None: - wantprefixlen = 0 - - x = '0x%x' % self.ip - return x + self._printPrefix(wantprefixlen) - - def strDec(self, wantprefixlen = None): - """Return a string representation in decimal format. 
- - >>> print(IP('127.0.0.1').strDec()) - 2130706433 - >>> print(IP('2001:0658:022a:cafe:0200::1').strDec()) - 42540616829182469433547762482097946625 - """ - - if self.WantPrefixLen == None and wantprefixlen == None: - wantprefixlen = 0 - - x = '%d' % self.ip - return x + self._printPrefix(wantprefixlen) - - def iptype(self): - """Return a description of the IP type ('PRIVATE', 'RESERVED', etc). - - >>> print(IP('127.0.0.1').iptype()) - PRIVATE - >>> print(IP('192.168.1.1').iptype()) - PRIVATE - >>> print(IP('195.185.1.2').iptype()) - PUBLIC - >>> print(IP('::1').iptype()) - LOOPBACK - >>> print(IP('2001:0658:022a:cafe:0200::1').iptype()) - ALLOCATED RIPE NCC - - The type information for IPv6 is out of sync with reality. - """ - - # this could be greatly improved - - if self._ipversion == 4: - iprange = IPv4ranges - elif self._ipversion == 6: - iprange = IPv6ranges - else: - raise ValueError("only IPv4 and IPv6 supported") - - bits = self.strBin() - for i in xrange(len(bits), 0, -1): - if bits[:i] in iprange: - return iprange[bits[:i]] - return "unknown" - - - def netmask(self): - """Return netmask as an integer. - - >>> "%X" % IP('195.185.0.0/16').netmask().int() - 'FFFF0000' - """ - - # TODO: unify with prefixlenToNetmask? - bits = _ipVersionToLen(self._ipversion) - locallen = bits - self._prefixlen - - return ((2 ** self._prefixlen) - 1) << locallen - - - def strNetmask(self): - """Return netmask as an string. Mostly useful for IPv6. - - >>> print(IP('195.185.0.0/16').strNetmask()) - 255.255.0.0 - >>> print(IP('2001:0658:022a:cafe::0/64').strNetmask()) - /64 - """ - - # TODO: unify with prefixlenToNetmask? - # Note: call to _ipVersionToLen() also validates version is 4 or 6 - bits = _ipVersionToLen(self._ipversion) - if self._ipversion == 4: - locallen = bits - self._prefixlen - return intToIp(((2 ** self._prefixlen) - 1) << locallen, 4) - elif self._ipversion == 6: - return "/%d" % self._prefixlen - - def len(self): - """Return the length of a subnet. - - >>> print(IP('195.185.1.0/28').len()) - 16 - >>> print(IP('195.185.1.0/24').len()) - 256 - """ - - bits = _ipVersionToLen(self._ipversion) - locallen = bits - self._prefixlen - return 2 ** locallen - - - def __nonzero__(self): - """All IPy objects should evaluate to true in boolean context. - Ordinarily they do, but if handling a default route expressed as - 0.0.0.0/0, the __len__() of the object becomes 0, which is used - as the boolean value of the object. - """ - return True - - - def __len__(self): - """ - Return the length of a subnet. - - Called to implement the built-in function len(). - It will break with large IPv6 Networks. - Use the object's len() instead. 
- """ - return self.len() - - def __add__(self, other): - """Emulate numeric objects through network aggregation""" - if self._ipversion != other._ipversion: - raise ValueError("Only networks with the same IP version can be added.") - if self._prefixlen != other._prefixlen: - raise ValueError("Only networks with the same prefixlen can be added.") - if self._prefixlen < 1: - raise ValueError("Networks with a prefixlen longer than /1 can't be added.") - if self > other: - # fixed by Skinny Puppy - return other.__add__(self) - if other.int() - self[-1].int() != 1: - raise ValueError("Only adjacent networks can be added together.") - ret = IP(self.int(), ipversion=self._ipversion) - ret._prefixlen = self.prefixlen() - 1 - if not _checkNetaddrWorksWithPrefixlen(ret.ip, ret._prefixlen, - ret._ipversion): - raise ValueError("The resulting %s has invalid prefix length (%s)" - % (repr(ret), ret._prefixlen)) - return ret - - def __sub__(self, other): - """Return the prefixes that are in this IP but not in the other""" - return _remove_subprefix(self, other) - - def __getitem__(self, key): - """Called to implement evaluation of self[key]. - - >>> ip=IP('127.0.0.0/30') - >>> for x in ip: - ... print(repr(x)) - ... - IP('127.0.0.0') - IP('127.0.0.1') - IP('127.0.0.2') - IP('127.0.0.3') - >>> ip[2] - IP('127.0.0.2') - >>> ip[-1] - IP('127.0.0.3') - """ - - if isinstance(key, slice): - return [self.ip + int(x) for x in xrange(*key.indices(len(self)))] - if not isinstance(key, INT_TYPES): - raise TypeError - if key < 0: - if abs(key) <= self.len(): - key = self.len() - abs(key) - else: - raise IndexError - else: - if key >= self.len(): - raise IndexError - - return self.ip + int(key) - - - - def __contains__(self, item): - """Called to implement membership test operators. - - Should return true if item is in self, false otherwise. Item - can be other IP-objects, strings or ints. - - >>> IP('195.185.1.1').strHex() - '0xc3b90101' - >>> 0xC3B90101 in IP('195.185.1.0/24') - True - >>> '127.0.0.1' in IP('127.0.0.0/24') - True - >>> IP('127.0.0.0/24') in IP('127.0.0.0/25') - False - """ - - if isinstance(item, IP): - if item._ipversion != self._ipversion: - return False - else: - item = IP(item) - if item.ip >= self.ip and item.ip < self.ip + self.len() - item.len() + 1: - return True - else: - return False - - - def overlaps(self, item): - """Check if two IP address ranges overlap. - - Returns 0 if the two ranges don't overlap, 1 if the given - range overlaps at the end and -1 if it does at the beginning. - - >>> IP('192.168.0.0/23').overlaps('192.168.1.0/24') - 1 - >>> IP('192.168.0.0/23').overlaps('192.168.1.255') - 1 - >>> IP('192.168.0.0/23').overlaps('192.168.2.0') - 0 - >>> IP('192.168.1.0/24').overlaps('192.168.0.0/23') - -1 - """ - - if not isinstance(item, IP): - item = IP(item) - if item.ip >= self.ip and item.ip < self.ip + self.len(): - return 1 - elif self.ip >= item.ip and self.ip < item.ip + item.len(): - return -1 - else: - return 0 - - - def __str__(self): - """Dispatch to the prefered String Representation. - - Used to implement str(IP).""" - - return self.strCompressed() - - - def __repr__(self): - """Print a representation of the Object. - - Used to implement repr(IP). Returns a string which evaluates - to an identical Object (without the wantprefixlen stuff - see - module docstring. - - >>> print(repr(IP('10.0.0.0/24'))) - IP('10.0.0.0/24') - """ - - return("IPint('%s')" % (self.strCompressed(1))) - - - def __cmp__(self, other): - """Called by comparison operations. 
- - Should return a negative integer if self < other, zero if self - == other, a positive integer if self > other. - - Order is first determined by the address family. IPv4 addresses - are always smaller than IPv6 addresses: - - >>> IP('10.0.0.0') < IP('2001:db8::') - 1 - - Then the first address is compared. Lower addresses are - always smaller: - - >>> IP('10.0.0.0') > IP('10.0.0.1') - 0 - >>> IP('10.0.0.0/24') > IP('10.0.0.1') - 0 - >>> IP('10.0.1.0') > IP('10.0.0.0/24') - 1 - >>> IP('10.0.1.0/24') > IP('10.0.0.0/24') - 1 - >>> IP('10.0.1.0/24') > IP('10.0.0.0') - 1 - - Then the prefix length is compared. Shorter prefixes are - considered smaller than longer prefixes: - - >>> IP('10.0.0.0/24') > IP('10.0.0.0') - 0 - >>> IP('10.0.0.0/24') > IP('10.0.0.0/25') - 0 - >>> IP('10.0.0.0/24') > IP('10.0.0.0/23') - 1 - - """ - if not isinstance(other, IPint): - raise TypeError - - # Lower version -> lower result - if self._ipversion != other._ipversion: - return self._ipversion < other._ipversion and -1 or 1 - - # Lower start address -> lower result - if self.ip != other.ip: - return self.ip < other.ip and -1 or 1 - - # Shorter prefix length -> lower result - if self._prefixlen != other._prefixlen: - return self._prefixlen < other._prefixlen and -1 or 1 - - # No differences found - return 0 - - def __eq__(self, other): - if not isinstance(other, IPint): - return False - return self.__cmp__(other) == 0 - - def __ne__(self, other): - return not self.__eq__(other) - - def __lt__(self, other): - return self.__cmp__(other) < 0 - - def __hash__(self): - """Called for the key object for dictionary operations, and by - the built-in function hash(). Should return a 32-bit integer - usable as a hash value for dictionary operations. The only - required property is that objects which compare equal have the - same hash value - - >>> IP('10.0.0.0/24').__hash__() - -167772185 - """ - - thehash = int(-1) - ip = self.ip - while ip > 0: - thehash = thehash ^ (ip & 0x7fffffff) - ip = ip >> 32 - thehash = thehash ^ self._prefixlen - return int(thehash) - - -class IP(IPint): - """Class for handling IP addresses and networks.""" - - def net(self): - """Return the base (first) address of a network as an IP object. - - The same as IP[0]. - - >>> IP('10.0.0.0/8').net() - IP('10.0.0.0') - """ - return IP(IPint.net(self), ipversion=self._ipversion) - - def broadcast(self): - """Return the broadcast (last) address of a network as an IP object. - - The same as IP[-1]. - - >>> IP('10.0.0.0/8').broadcast() - IP('10.255.255.255') - """ - return IP(IPint.broadcast(self)) - - def netmask(self): - """Return netmask as an IP object. - - >>> IP('10.0.0.0/8').netmask() - IP('255.0.0.0') - """ - return IP(IPint.netmask(self), ipversion=self._ipversion) - - def _getIPv4Map(self): - if self._ipversion != 6: - return None - if (self.ip >> 32) != 0xffff: - return None - ipv4 = self.ip & MAX_IPV4_ADDRESS - if self._prefixlen != 128: - ipv4 = '%s/%s' % (ipv4, 32-(128-self._prefixlen)) - return IP(ipv4, ipversion=4) - - def reverseNames(self): - """Return a list with values forming the reverse lookup. 
- - >>> IP('213.221.113.87/32').reverseNames() - ['87.113.221.213.in-addr.arpa.'] - >>> IP('213.221.112.224/30').reverseNames() - ['224.112.221.213.in-addr.arpa.', '225.112.221.213.in-addr.arpa.', '226.112.221.213.in-addr.arpa.', '227.112.221.213.in-addr.arpa.'] - >>> IP('127.0.0.0/24').reverseNames() - ['0.0.127.in-addr.arpa.'] - >>> IP('127.0.0.0/23').reverseNames() - ['0.0.127.in-addr.arpa.', '1.0.127.in-addr.arpa.'] - >>> IP('127.0.0.0/16').reverseNames() - ['0.127.in-addr.arpa.'] - >>> IP('127.0.0.0/15').reverseNames() - ['0.127.in-addr.arpa.', '1.127.in-addr.arpa.'] - >>> IP('128.0.0.0/8').reverseNames() - ['128.in-addr.arpa.'] - >>> IP('128.0.0.0/7').reverseNames() - ['128.in-addr.arpa.', '129.in-addr.arpa.'] - >>> IP('::1:2').reverseNames() - ['2.0.0.0.1.ip6.arpa.'] - """ - - if self._ipversion == 4: - ret = [] - # TODO: Refactor. Add support for IPint objects - if self.len() < 2**8: - for x in self: - ret.append(x.reverseName()) - elif self.len() < 2**16: - for i in xrange(0, self.len(), 2**8): - ret.append(self[i].reverseName()[2:]) - elif self.len() < 2**24: - for i in xrange(0, self.len(), 2**16): - ret.append(self[i].reverseName()[4:]) - else: - for i in xrange(0, self.len(), 2**24): - ret.append(self[i].reverseName()[6:]) - return ret - elif self._ipversion == 6: - ipv4 = self._getIPv4Map() - if ipv4 is not None: - return ipv4.reverseNames() - s = "%x" % self.ip - if self._prefixlen % 4 != 0: - raise NotImplementedError("can't create IPv6 reverse names at sub nibble level") - s = list(s) - s.reverse() - s = '.'.join(s) - first_nibble_index = int(32 - (self._prefixlen // 4)) * 2 - return ["%s.ip6.arpa." % s[first_nibble_index:]] - else: - raise ValueError("only IPv4 and IPv6 supported") - - def reverseName(self): - """Return the value for reverse lookup/PTR records as RFC 2317 look alike. - - RFC 2317 is an ugly hack which only works for sub-/24 e.g. not - for /23. Do not use it. Better set up a zone for every - address. See reverseName for a way to achieve that. - - >>> print(IP('195.185.1.1').reverseName()) - 1.1.185.195.in-addr.arpa. - >>> print(IP('195.185.1.0/28').reverseName()) - 0-15.1.185.195.in-addr.arpa. - >>> IP('::1:2').reverseName() - '2.0.0.0.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.' - >>> IP('ff02::/64').reverseName() - '0.0.0.0.0.0.0.0.0.0.0.0.2.0.f.f.ip6.arpa.' - """ - - if self._ipversion == 4: - s = self.strFullsize(0) - s = s.split('.') - s.reverse() - first_byte_index = int(4 - (self._prefixlen // 8)) - if self._prefixlen % 8 != 0: - nibblepart = "%s-%s" % (s[3-(self._prefixlen // 8)], intToIp(self.ip + self.len() - 1, 4).split('.')[-1]) - nibblepart += '.' - else: - nibblepart = "" - - s = '.'.join(s[first_byte_index:]) - return "%s%s.in-addr.arpa." % (nibblepart, s) - - elif self._ipversion == 6: - ipv4 = self._getIPv4Map() - if ipv4 is not None: - return ipv4.reverseName() - s = '%032x' % self.ip - if self._prefixlen % 4 != 0: - nibblepart = "%s-%x" % (s[self._prefixlen:], self.ip + self.len() - 1) - nibblepart += '.' - else: - nibblepart = "" - s = list(s) - s.reverse() - s = '.'.join(s) - first_nibble_index = int(32 - (self._prefixlen // 4)) * 2 - return "%s%s.ip6.arpa." % (nibblepart, s[first_nibble_index:]) - else: - raise ValueError("only IPv4 and IPv6 supported") - - def make_net(self, netmask): - """Transform a single IP address into a network specification by - applying the given netmask. - - Returns a new IP instance. 
- - >>> print(IP('127.0.0.1').make_net('255.0.0.0')) - 127.0.0.0/8 - """ - if '/' in str(netmask): - raise ValueError("invalid netmask (%s)" % netmask) - return IP('%s/%s' % (self, netmask), make_net=True) - - def __getitem__(self, key): - """Called to implement evaluation of self[key]. - - >>> ip=IP('127.0.0.0/30') - >>> for x in ip: - ... print(str(x)) - ... - 127.0.0.0 - 127.0.0.1 - 127.0.0.2 - 127.0.0.3 - >>> print(str(ip[2])) - 127.0.0.2 - >>> print(str(ip[-1])) - 127.0.0.3 - """ - if isinstance(key, slice): - return [IP(IPint.__getitem__(self, x), ipversion=self._ipversion) for x in xrange(*key.indices(len(self)))] - return IP(IPint.__getitem__(self, key), ipversion=self._ipversion) - - def __repr__(self): - """Print a representation of the Object. - - >>> IP('10.0.0.0/8') - IP('10.0.0.0/8') - """ - - return("IP('%s')" % (self.strCompressed(1))) - - def get_mac(self): - """ - Get the 802.3 MAC address from IPv6 RFC 2464 address, in lower case. - Return None if the address is an IPv4 or not a IPv6 RFC 2464 address. - - >>> IP('fe80::f66d:04ff:fe47:2fae').get_mac() - 'f4:6d:04:47:2f:ae' - """ - if self._ipversion != 6: - return None - if (self.ip & 0x20000ffff000000) != 0x20000fffe000000: - return None - return '%02x:%02x:%02x:%02x:%02x:%02x' % ( - (((self.ip >> 56) & 0xff) & 0xfd), - (self.ip >> 48) & 0xff, - (self.ip >> 40) & 0xff, - (self.ip >> 16) & 0xff, - (self.ip >> 8) & 0xff, - self.ip & 0xff, - ) - - def v46map(self): - """ - Returns the IPv6 mapped address of an IPv4 address, or the corresponding - IPv4 address if the IPv6 address is in the appropriate range. - Raises a ValueError if the IPv6 address is not translatable. See RFC 4291. - - >>> IP('192.168.1.1').v46map() - IP('::ffff:192.168.1.1') - >>> IP('::ffff:192.168.1.1').v46map() - IP('192.168.1.1') - """ - if self._ipversion == 4: - return IP(str(IPV6_MAP_MASK + self.ip) + - "/%s" % (self._prefixlen + 96)) - else: - if self.ip & IPV6_TEST_MAP == IPV6_MAP_MASK: - return IP(str(self.ip - IPV6_MAP_MASK) + - "/%s" % (self._prefixlen - 96)) - raise ValueError("%s cannot be converted to an IPv4 address." - % repr(self)) - -class IPSet(collections.MutableSet): - def __init__(self, iterable=[]): - # Make sure it's iterable, otherwise wrap - if not isinstance(iterable, collections.Iterable): - raise TypeError("'%s' object is not iterable" % type(iterable).__name__) - - # Make sure we only accept IP objects - for prefix in iterable: - if not isinstance(prefix, IP): - raise ValueError('Only IP objects can be added to an IPSet') - - # Store and optimize - self.prefixes = iterable[:] - self.optimize() - - def __contains__(self, ip): - valid_masks = self.prefixtable.keys() - if isinstance(ip, IP): - #Don't dig through more-specific ranges - ip_mask = ip._prefixlen - valid_masks = [x for x in valid_masks if x <= ip_mask] - for mask in sorted(valid_masks): - i = bisect.bisect(self.prefixtable[mask], ip) - # Because of sorting order, a match can only occur in the prefix - # that comes before the result of the search. 
- if i and ip in self.prefixtable[mask][i - 1]: - return True - - def __iter__(self): - for prefix in self.prefixes: - yield prefix - - def __len__(self): - return self.len() - - def __add__(self, other): - return IPSet(self.prefixes + other.prefixes) - - def __sub__(self, other): - new = IPSet(self.prefixes) - for prefix in other: - new.discard(prefix) - return new - - def __and__(self, other): - left = iter(self.prefixes) - right = iter(other.prefixes) - result = [] - try: - l = next(left) - r = next(right) - while True: - # iterate over prefixes in order, keeping the smaller of the - # two if they overlap - if l in r: - result.append(l) - l = next(left) - continue - elif r in l: - result.append(r) - r = next(right) - continue - if l < r: - l = next(left) - else: - r = next(right) - except StopIteration: - return IPSet(result) - - def __repr__(self): - return '%s([' % self.__class__.__name__ + ', '.join(map(repr, self.prefixes)) + '])' - - def len(self): - return sum(prefix.len() for prefix in self.prefixes) - - def add(self, value): - # Make sure it's iterable, otherwise wrap - if not isinstance(value, collections.Iterable): - value = [value] - - # Check type - for prefix in value: - if not isinstance(prefix, IP): - raise ValueError('Only IP objects can be added to an IPSet') - - # Append and optimize - self.prefixes.extend(value) - self.optimize() - - def discard(self, value): - # Make sure it's iterable, otherwise wrap - if not isinstance(value, collections.Iterable): - value = [value] - - # This is much faster than iterating over the addresses - if isinstance(value, IPSet): - value = value.prefixes - - # Remove - for del_prefix in value: - if not isinstance(del_prefix, IP): - raise ValueError('Only IP objects can be removed from an IPSet') - - # First check if this prefix contains anything in our list - found = False - d = 0 - for i in range(len(self.prefixes)): - if self.prefixes[i - d] in del_prefix: - self.prefixes.pop(i - d) - d = d + 1 - found = True - - if found: - # If the prefix was bigger than an existing prefix, then it's - # certainly not a subset of one, so skip the rest - continue - - # Maybe one of our prefixes contains this prefix - found = False - for i in range(len(self.prefixes)): - if del_prefix in self.prefixes[i]: - self.prefixes[i:i+1] = self.prefixes[i] - del_prefix - break - - self.optimize() - - def isdisjoint(self, other): - left = iter(self.prefixes) - right = iter(other.prefixes) - try: - l = next(left) - r = next(right) - while True: - if l in r or r in l: - return False - if l < r: - l = next(left) - else: - r = next(right) - except StopIteration: - return True - - def optimize(self): - # The algorithm below *depends* on the sort order - self.prefixes.sort() - - # First eliminate all values that are a subset of other values - addrlen = len(self.prefixes) - i = 0 - while i < addrlen: - # Everything that might be inside this prefix follows - # directly behind it - j = i+1 - while j < addrlen and self.prefixes[j] in self.prefixes[i]: - # Mark for deletion by overwriting with None - self.prefixes[j] = None - j += 1 - - # Continue where we left off - i = j - - # Try to merge as many prefixes as possible - run_again = True - while run_again: - # Filter None values. 
This happens when a subset is eliminated - # above, or when two prefixes are merged below - self.prefixes = [a for a in self.prefixes if a is not None] - - # We'll set run_again to True when we make changes that require - # re-evaluation of the whole list - run_again = False - - # We can merge two prefixes that have the same version, same - # prefix length and differ only on the last bit of the prefix - addrlen = len(self.prefixes) - i = 0 - while i < addrlen-1: - j = i + 1 - - try: - # The next line will throw an exception when merging - # is not possible - self.prefixes[i] += self.prefixes[j] - self.prefixes[j] = None - i = j + 1 - run_again = True - except ValueError: - # Can't be merged, see if position j can be merged - i = j - - # O(n) insertion now by prefix means faster searching on __contains__ - # when lots of ranges with the same length exist - self.prefixtable = {} - for address in self.prefixes: - try: - self.prefixtable[address._prefixlen].append(address) - except KeyError: - self.prefixtable[address._prefixlen] = [address] - -def _parseAddressIPv6(ipstr): - """ - Internal function used by parseAddress() to parse IPv6 address with ':'. - - >>> print(_parseAddressIPv6('::')) - 0 - >>> print(_parseAddressIPv6('::1')) - 1 - >>> print(_parseAddressIPv6('0:0:0:0:0:0:0:1')) - 1 - >>> print(_parseAddressIPv6('0:0:0::0:0:1')) - 1 - >>> print(_parseAddressIPv6('0:0:0:0:0:0:0:0')) - 0 - >>> print(_parseAddressIPv6('0:0:0::0:0:0')) - 0 - - >>> print(_parseAddressIPv6('FEDC:BA98:7654:3210:FEDC:BA98:7654:3210')) - 338770000845734292534325025077361652240 - >>> print(_parseAddressIPv6('1080:0000:0000:0000:0008:0800:200C:417A')) - 21932261930451111902915077091070067066 - >>> print(_parseAddressIPv6('1080:0:0:0:8:800:200C:417A')) - 21932261930451111902915077091070067066 - >>> print(_parseAddressIPv6('1080:0::8:800:200C:417A')) - 21932261930451111902915077091070067066 - >>> print(_parseAddressIPv6('1080::8:800:200C:417A')) - 21932261930451111902915077091070067066 - >>> print(_parseAddressIPv6('FF01:0:0:0:0:0:0:43')) - 338958331222012082418099330867817087043 - >>> print(_parseAddressIPv6('FF01:0:0::0:0:43')) - 338958331222012082418099330867817087043 - >>> print(_parseAddressIPv6('FF01::43')) - 338958331222012082418099330867817087043 - >>> print(_parseAddressIPv6('0:0:0:0:0:0:13.1.68.3')) - 218186755 - >>> print(_parseAddressIPv6('::13.1.68.3')) - 218186755 - >>> print(_parseAddressIPv6('0:0:0:0:0:FFFF:129.144.52.38')) - 281472855454758 - >>> print(_parseAddressIPv6('::FFFF:129.144.52.38')) - 281472855454758 - >>> print(_parseAddressIPv6('1080:0:0:0:8:800:200C:417A')) - 21932261930451111902915077091070067066 - >>> print(_parseAddressIPv6('1080::8:800:200C:417A')) - 21932261930451111902915077091070067066 - >>> print(_parseAddressIPv6('::1:2:3:4:5:6')) - 1208962713947218704138246 - >>> print(_parseAddressIPv6('1:2:3:4:5:6::')) - 5192455318486707404433266432802816 - """ - - # Split string into a list, example: - # '1080:200C::417A' => ['1080', '200C', '417A'] and fill_pos=2 - # and fill_pos is the position of '::' in the list - items = [] - index = 0 - fill_pos = None - while index < len(ipstr): - text = ipstr[index:] - if text.startswith("::"): - if fill_pos is not None: - # Invalid IPv6, eg. '1::2::' - raise ValueError("%r: Invalid IPv6 address: more than one '::'" % ipstr) - fill_pos = len(items) - index += 2 - continue - pos = text.find(':') - if pos == 0: - # Invalid IPv6, eg. 
'1::2:' - raise ValueError("%r: Invalid IPv6 address" % ipstr) - if pos != -1: - items.append(text[:pos]) - if text[pos:pos+2] == "::": - index += pos - else: - index += pos+1 - - if index == len(ipstr): - # Invalid IPv6, eg. '1::2:' - raise ValueError("%r: Invalid IPv6 address" % ipstr) - else: - items.append(text) - break - - if items and '.' in items[-1]: - # IPv6 ending with IPv4 like '::ffff:192.168.0.1' - if (fill_pos is not None) and not (fill_pos <= len(items)-1): - # Invalid IPv6: 'ffff:192.168.0.1::' - raise ValueError("%r: Invalid IPv6 address: '::' after IPv4" % ipstr) - value = parseAddress(items[-1])[0] - items = items[:-1] + ["%04x" % (value >> 16), "%04x" % (value & 0xffff)] - - # Expand fill_pos to fill with '0' - # ['1','2'] with fill_pos=1 => ['1', '0', '0', '0', '0', '0', '0', '2'] - if fill_pos is not None: - diff = 8 - len(items) - if diff <= 0: - raise ValueError("%r: Invalid IPv6 address: '::' is not needed" % ipstr) - items = items[:fill_pos] + ['0']*diff + items[fill_pos:] - - # Here we have a list of 8 strings - if len(items) != 8: - # Invalid IPv6, eg. '1:2:3' - raise ValueError("%r: Invalid IPv6 address: should have 8 hextets" % ipstr) - - # Convert strings to long integer - value = 0 - index = 0 - for item in items: - try: - item = int(item, 16) - error = not(0 <= item <= 0xffff) - except ValueError: - error = True - if error: - raise ValueError("%r: Invalid IPv6 address: invalid hexlet %r" % (ipstr, item)) - value = (value << 16) + item - index += 1 - return value - -def parseAddress(ipstr): - """ - Parse a string and return the corresponding IP address (as integer) - and a guess of the IP version. - - Following address formats are recognized: - - >>> def testParseAddress(address): - ... ip, version = parseAddress(address) - ... print(("%s (IPv%s)" % (ip, version))) - ... 
- >>> testParseAddress('0x0123456789abcdef') # IPv4 if <= 0xffffffff else IPv6 - 81985529216486895 (IPv6) - >>> testParseAddress('123.123.123.123') # IPv4 - 2071690107 (IPv4) - >>> testParseAddress('123.123') # 0-padded IPv4 - 2071658496 (IPv4) - >>> testParseAddress('127') - 2130706432 (IPv4) - >>> testParseAddress('255') - 4278190080 (IPv4) - >>> testParseAddress('256') - 256 (IPv4) - >>> testParseAddress('108000000000000000080800200C417A') - 21932261930451111902915077091070067066 (IPv6) - >>> testParseAddress('0x108000000000000000080800200C417A') - 21932261930451111902915077091070067066 (IPv6) - >>> testParseAddress('1080:0000:0000:0000:0008:0800:200C:417A') - 21932261930451111902915077091070067066 (IPv6) - >>> testParseAddress('1080:0:0:0:8:800:200C:417A') - 21932261930451111902915077091070067066 (IPv6) - >>> testParseAddress('1080:0::8:800:200C:417A') - 21932261930451111902915077091070067066 (IPv6) - >>> testParseAddress('::1') - 1 (IPv6) - >>> testParseAddress('::') - 0 (IPv6) - >>> testParseAddress('0:0:0:0:0:FFFF:129.144.52.38') - 281472855454758 (IPv6) - >>> testParseAddress('::13.1.68.3') - 218186755 (IPv6) - >>> testParseAddress('::FFFF:129.144.52.38') - 281472855454758 (IPv6) - """ - - try: - hexval = int(ipstr, 16) - except ValueError: - hexval = None - try: - intval = int(ipstr, 10) - except ValueError: - intval = None - - if ipstr.startswith('0x') and hexval is not None: - if hexval > MAX_IPV6_ADDRESS: - raise ValueError("IP Address can't be larger than %x: %x" % (MAX_IPV6_ADDRESS, hexval)) - if hexval <= MAX_IPV4_ADDRESS: - return (hexval, 4) - else: - return (hexval, 6) - - if ipstr.find(':') != -1: - return (_parseAddressIPv6(ipstr), 6) - - elif len(ipstr) == 32 and hexval is not None: - # assume IPv6 in pure hexadecimal notation - return (hexval, 6) - - elif ipstr.find('.') != -1 or (intval is not None and intval < 256): - # assume IPv4 ('127' gets interpreted as '127.0.0.0') - bytes = ipstr.split('.') - if len(bytes) > 4: - raise ValueError("IPv4 Address with more than 4 bytes") - bytes += ['0'] * (4 - len(bytes)) - bytes = [int(x) for x in bytes] - for x in bytes: - if x > 255 or x < 0: - raise ValueError("%r: single byte must be 0 <= byte < 256" % (ipstr)) - return ((bytes[0] << 24) + (bytes[1] << 16) + (bytes[2] << 8) + bytes[3], 4) - - elif intval is not None: - # we try to interprete it as a decimal digit - - # this ony works for numbers > 255 ... others - # will be interpreted as IPv4 first byte - if intval > MAX_IPV6_ADDRESS: - raise ValueError("IP Address can't be larger than %x: %x" % (MAX_IPV6_ADDRESS, intval)) - if intval <= MAX_IPV4_ADDRESS: - return (intval, 4) - else: - return (intval, 6) - - raise ValueError("IP Address format was invalid: %s" % ipstr) - - -def intToIp(ip, version): - """Transform an integer string into an IP address.""" - - # just to be sure and hoping for Python 2.2 - ip = int(ip) - - if ip < 0: - raise ValueError("IPs can't be negative: %d" % (ip)) - - ret = '' - if version == 4: - if ip > MAX_IPV4_ADDRESS: - raise ValueError("IPv4 Address can't be larger than %x: %x" % (MAX_IPV4_ADDRESS, ip)) - for l in xrange(4): - ret = str(ip & 0xff) + '.' 
+ ret - ip = ip >> 8 - ret = ret[:-1] - elif version == 6: - if ip > MAX_IPV6_ADDRESS: - raise ValueError("IPv6 Address can't be larger than %x: %x" % (MAX_IPV6_ADDRESS, ip)) - l = "%032x" % ip - for x in xrange(1, 33): - ret = l[-x] + ret - if x % 4 == 0: - ret = ':' + ret - ret = ret[1:] - else: - raise ValueError("only IPv4 and IPv6 supported") - - return ret - -def _ipVersionToLen(version): - """Return number of bits in address for a certain IP version. - - >>> _ipVersionToLen(4) - 32 - >>> _ipVersionToLen(6) - 128 - >>> _ipVersionToLen(5) - Traceback (most recent call last): - File "", line 1, in ? - File "IPy.py", line 1076, in _ipVersionToLen - raise ValueError("only IPv4 and IPv6 supported") - ValueError: only IPv4 and IPv6 supported - """ - - if version == 4: - return 32 - elif version == 6: - return 128 - else: - raise ValueError("only IPv4 and IPv6 supported") - - -def _countFollowingZeros(l): - """Return number of elements containing 0 at the beginning of the list.""" - if len(l) == 0: - return 0 - elif l[0] != 0: - return 0 - else: - return 1 + _countFollowingZeros(l[1:]) - - -_BitTable = {'0': '0000', '1': '0001', '2': '0010', '3': '0011', - '4': '0100', '5': '0101', '6': '0110', '7': '0111', - '8': '1000', '9': '1001', 'a': '1010', 'b': '1011', - 'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111'} - -def _intToBin(val): - """Return the binary representation of an integer as string.""" - - if val < 0: - raise ValueError("Only positive values allowed") - s = "%x" % val - ret = '' - for x in s: - ret += _BitTable[x] - # remove leading zeros - while ret[0] == '0' and len(ret) > 1: - ret = ret[1:] - return ret - -def _count1Bits(num): - """Find the highest bit set to 1 in an integer.""" - ret = 0 - while num > 0: - num = num >> 1 - ret += 1 - return ret - -def _count0Bits(num): - """Find the highest bit set to 0 in an integer.""" - - # this could be so easy if _count1Bits(~int(num)) would work as excepted - num = int(num) - if num < 0: - raise ValueError("Only positive Numbers please: %s" % (num)) - ret = 0 - while num > 0: - if num & 1 == 1: - break - num = num >> 1 - ret += 1 - return ret - - -def _checkPrefix(ip, prefixlen, version): - """Check the validity of a prefix - - Checks if the variant part of a prefix only has 0s, and the length is - correct. - - >>> _checkPrefix(0x7f000000, 24, 4) - 1 - >>> _checkPrefix(0x7f000001, 24, 4) - 0 - >>> repr(_checkPrefix(0x7f000001, -1, 4)) - 'None' - >>> repr(_checkPrefix(0x7f000001, 33, 4)) - 'None' - """ - - # TODO: unify this v4/v6/invalid code in a function - bits = _ipVersionToLen(version) - - if prefixlen < 0 or prefixlen > bits: - return None - - if ip == 0: - zbits = bits + 1 - else: - zbits = _count0Bits(ip) - if zbits < bits - prefixlen: - return 0 - else: - return 1 - - -def _checkNetmask(netmask, masklen): - """Checks if a netmask is expressable as a prefixlen.""" - - num = int(netmask) - bits = masklen - - # remove zero bits at the end - while (num & 1) == 0 and bits != 0: - num = num >> 1 - bits -= 1 - if bits == 0: - break - # now check if the rest consists only of ones - while bits > 0: - if (num & 1) == 0: - raise ValueError("Netmask 0x%x can't be expressed as an prefix." 
% netmask) - num = num >> 1 - bits -= 1 - - -def _checkNetaddrWorksWithPrefixlen(net, prefixlen, version): - """Check if a base addess of a network is compatible with a prefixlen""" - try: - return (net & _prefixlenToNetmask(prefixlen, version) == net) - except ValueError: - return False - - -def _netmaskToPrefixlen(netmask): - """Convert an Integer representing a netmask to a prefixlen. - - E.g. 0xffffff00 (255.255.255.0) returns 24 - """ - - netlen = _count0Bits(netmask) - masklen = _count1Bits(netmask) - _checkNetmask(netmask, masklen) - return masklen - netlen - - -def _prefixlenToNetmask(prefixlen, version): - """Return a mask of n bits as a long integer. - - From 'IP address conversion functions with the builtin socket module' - by Alex Martelli - http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66517 - """ - if prefixlen == 0: - return 0 - elif prefixlen < 0: - raise ValueError("Prefixlen must be > 0") - return ((2<= 0: - self._linepending.append(lines[ last ]) - - - def __del__(self): - if len(self._linepending): - self.output.write(self.translate(''.join(self._linepending))) - - -class HTTPTranslator(LineBufferTranslator): - """ Translates output from |urllib2| HTTPHandler(debuglevel = 1) into - HTTP-compatible, readible text structures for human analysis. - """ - - RE_LINE_PARSER = re.compile(r'^(?:([a-z]+):)\s*(\'?)([^\r\n]*)\2(?:[\r\n]*)$') - RE_LINE_BREAK = re.compile(r'(\r?\n|(?:\\r)?\\n)') - RE_HTTP_METHOD = re.compile(r'^(POST|GET|HEAD|DELETE|PUT|TRACE|OPTIONS)') - RE_PARAMETER_SPACER = re.compile(r'&([a-z0-9]+)=') - - @classmethod - def spacer(cls, line): - return cls.RE_PARAMETER_SPACER.sub(r' &\1= ', line) - - def translate(self, line): - - parsed = self.RE_LINE_PARSER.match(line) - - if parsed: - value = parsed.group(3) - stage = parsed.group(1) - - if stage == 'send': # query string is rendered here - return '\n# HTTP Request:\n' + self.stripslashes(value) - elif stage == 'reply': - return '\n\n# HTTP Response:\n' + self.stripslashes(value) - elif stage == 'header': - return value + '\n' - else: - return value - - - return line - - -def consume(outbuffer = None): # Capture standard output - sys.stdout = HTTPTranslator(outbuffer or sys.stdout) - return sys.stdout - - -if __name__ == '__main__': - consume(sys.stdout).write(sys.stdin.read()) - print('\n') - -# vim: set nowrap tabstop=4 shiftwidth=4 softtabstop=0 expandtab textwidth=0 filetype=python foldmethod=indent foldcolumn=4 diff --git a/lib/UniversalAnalytics/Tracker.py b/lib/UniversalAnalytics/Tracker.py deleted file mode 100644 index b7d9476e..00000000 --- a/lib/UniversalAnalytics/Tracker.py +++ /dev/null @@ -1,424 +0,0 @@ -from future.moves.urllib.request import urlopen, build_opener, install_opener -from future.moves.urllib.request import Request, HTTPSHandler -from future.moves.urllib.error import URLError, HTTPError -from future.moves.urllib.parse import urlencode - -import random -import datetime -import time -import uuid -import hashlib -import socket - - -def generate_uuid(basedata=None): - """ Provides a _random_ UUID with no input, or a UUID4-format MD5 checksum of any input data provided """ - if basedata is None: - return str(uuid.uuid4()) - elif isinstance(basedata, str): - checksum = hashlib.md5(str(basedata).encode('utf-8')).hexdigest() - return '%8s-%4s-%4s-%4s-%12s' % ( - checksum[0:8], checksum[8:12], checksum[12:16], checksum[16:20], checksum[20:32]) - - -class Time(datetime.datetime): - """ Wrappers and convenience methods for processing various time representations """ - - @classmethod - 
def from_unix(cls, seconds, milliseconds=0): - """ Produce a full |datetime.datetime| object from a Unix timestamp """ - base = list(time.gmtime(seconds))[0:6] - base.append(milliseconds * 1000) # microseconds - return cls(*base) - - @classmethod - def to_unix(cls, timestamp): - """ Wrapper over time module to produce Unix epoch time as a float """ - if not isinstance(timestamp, datetime.datetime): - raise TypeError('Time.to_unix expects a datetime object') - base = time.mktime(timestamp.timetuple()) - return base - - @classmethod - def milliseconds_offset(cls, timestamp, now=None): - """ Offset time (in milliseconds) from a |datetime.datetime| object to now """ - if isinstance(timestamp, (int, float)): - base = timestamp - else: - base = cls.to_unix(timestamp) - base = base + (timestamp.microsecond / 1000000) - if now is None: - now = time.time() - return (now - base) * 1000 - - -class HTTPRequest(object): - """ URL Construction and request handling abstraction. - This is not intended to be used outside this module. - - Automates mapping of persistent state (i.e. query parameters) - onto transient datasets for each query. - """ - - endpoint = 'https://www.google-analytics.com/collect' - - @staticmethod - def debug(): - """ Activate debugging on urllib2 """ - handler = HTTPSHandler(debuglevel=1) - opener = build_opener(handler) - install_opener(opener) - - # Store properties for all requests - def __init__(self, user_agent=None, *args, **opts): - self.user_agent = user_agent or 'Analytics Pros - Universal Analytics (Python)' - - @classmethod - def fixUTF8(cls, data): # Ensure proper encoding for UA's servers... - """ Convert all strings to UTF-8 """ - for key in data: - if isinstance(data[key], str): - data[key] = data[key].encode('utf-8') - return data - - # Apply stored properties to the given dataset & POST to the configured endpoint - def send(self, data): - request = Request( - self.endpoint + '?' 
+ urlencode(self.fixUTF8(data)).encode('utf-8'), - headers={ - 'User-Agent': self.user_agent - } - ) - self.open(request) - - def open(self, request): - try: - return urlopen(request) - except HTTPError as e: - return False - except URLError as e: - self.cache_request(request) - return False - - def cache_request(self, request): - # TODO: implement a proper caching mechanism here for re-transmitting hits - # record = (Time.now(), request.get_full_url(), request.get_data(), request.headers) - pass - - -class HTTPPost(HTTPRequest): - - # Apply stored properties to the given dataset & POST to the configured endpoint - def send(self, data): - request = Request( - self.endpoint, - data=urlencode(self.fixUTF8(data)).encode('utf-8'), - headers={ - 'User-Agent': self.user_agent - } - ) - self.open(request) - - -class Tracker(object): - """ Primary tracking interface for Universal Analytics """ - params = None - parameter_alias = {} - valid_hittypes = ('pageview', 'event', 'social', 'screenview', 'transaction', 'item', 'exception', 'timing') - - @classmethod - def alias(cls, typemap, base, *names): - """ Declare an alternate (humane) name for a measurement protocol parameter """ - cls.parameter_alias[base] = (typemap, base) - for i in names: - cls.parameter_alias[i] = (typemap, base) - - @classmethod - def coerceParameter(cls, name, value=None): - if isinstance(name, str) and name[0] == '&': - return name[1:], str(value) - elif name in cls.parameter_alias: - typecast, param_name = cls.parameter_alias.get(name) - return param_name, typecast(value) - else: - raise KeyError('Parameter "{0}" is not recognized'.format(name)) - - def payload(self, data): - for key, value in data.items(): - try: - yield self.coerceParameter(key, value) - except KeyError: - continue - - option_sequence = { - 'pageview': [(str, 'dp')], - 'event': [(str, 'ec'), (str, 'ea'), (str, 'el'), (int, 'ev')], - 'social': [(str, 'sn'), (str, 'sa'), (str, 'st')], - 'timing': [(str, 'utc'), (str, 'utv'), (str, 'utt'), (str, 'utl')] - } - - @classmethod - def consume_options(cls, data, hittype, args): - """ Interpret sequential arguments related to known hittypes based on declared structures """ - opt_position = 0 - data['t'] = hittype # integrate hit type parameter - if hittype in cls.option_sequence: - for expected_type, optname in cls.option_sequence[hittype]: - if opt_position < len(args) and isinstance(args[opt_position], expected_type): - data[optname] = args[opt_position] - opt_position += 1 - - @classmethod - def hittime(cls, timestamp=None, age=None, milliseconds=None): - """ Returns an integer representing the milliseconds offset for a given hit (relative to now) """ - if isinstance(timestamp, (int, float)): - return int(Time.milliseconds_offset(Time.from_unix(timestamp, milliseconds=milliseconds))) - if isinstance(timestamp, datetime.datetime): - return int(Time.milliseconds_offset(timestamp)) - if isinstance(age, (int, float)): - return int(age * 1000) + (milliseconds or 0) - - @property - def account(self): - return self.params.get('tid', None) - - def __init__(self, account, name=None, client_id=None, hash_client_id=False, user_id=None, user_agent=None, - use_post=True): - - if use_post is False: - self.http = HTTPRequest(user_agent=user_agent) - else: - self.http = HTTPPost(user_agent=user_agent) - - self.params = {'v': 1, 'tid': account} - - if client_id is None: - client_id = generate_uuid() - - self.params['cid'] = client_id - - self.hash_client_id = hash_client_id - - if user_id is not None: - self.params['uid'] = 
user_id - - def set_timestamp(self, data): - """ Interpret time-related options, apply queue-time parameter as needed """ - if 'hittime' in data: # an absolute timestamp - data['qt'] = self.hittime(timestamp=data.pop('hittime', None)) - if 'hitage' in data: # a relative age (in seconds) - data['qt'] = self.hittime(age=data.pop('hitage', None)) - - def send(self, hittype, *args, **data): - """ Transmit HTTP requests to Google Analytics using the measurement protocol """ - - if hittype not in self.valid_hittypes: - raise KeyError('Unsupported Universal Analytics Hit Type: {0}'.format(repr(hittype))) - - self.set_timestamp(data) - self.consume_options(data, hittype, args) - - for item in args: # process dictionary-object arguments of transient data - if isinstance(item, dict): - for key, val in self.payload(item): - data[key] = val - - for k, v in self.params.items(): # update only absent parameters - if k not in data: - data[k] = v - - data = dict(self.payload(data)) - - if self.hash_client_id: - data['cid'] = generate_uuid(data['cid']) - - # Transmit the hit to Google... - self.http.send(data) - - # Setting persistent attributes of the session/hit/etc (inc. custom dimensions/metrics) - def set(self, name, value=None): - if isinstance(name, dict): - for key, value in name.items(): - try: - param, value = self.coerceParameter(key, value) - self.params[param] = value - except KeyError: - pass - elif isinstance(name, str): - try: - param, value = self.coerceParameter(name, value) - self.params[param] = value - except KeyError: - pass - - def __getitem__(self, name): - param, value = self.coerceParameter(name, None) - return self.params.get(param, None) - - def __setitem__(self, name, value): - param, value = self.coerceParameter(name, value) - self.params[param] = value - - def __delitem__(self, name): - param, value = self.coerceParameter(name, None) - if param in self.params: - del self.params[param] - - -def safe_unicode(obj): - """ Safe conversion to the Unicode string version of the object """ - try: - return str(obj) - except UnicodeDecodeError: - return obj.decode('utf-8') - - -# Declaring name mappings for Measurement Protocol parameters -MAX_CUSTOM_DEFINITIONS = 200 -MAX_EC_LISTS = 11 # 1-based index -MAX_EC_PRODUCTS = 11 # 1-based index -MAX_EC_PROMOTIONS = 11 # 1-based index - -Tracker.alias(int, 'v', 'protocol-version') -Tracker.alias(safe_unicode, 'cid', 'client-id', 'clientId', 'clientid') -Tracker.alias(safe_unicode, 'tid', 'trackingId', 'account') -Tracker.alias(safe_unicode, 'uid', 'user-id', 'userId', 'userid') -Tracker.alias(safe_unicode, 'uip', 'user-ip', 'userIp', 'ipaddr') -Tracker.alias(safe_unicode, 'ua', 'userAgent', 'userAgentOverride', 'user-agent') -Tracker.alias(safe_unicode, 'dp', 'page', 'path') -Tracker.alias(safe_unicode, 'dt', 'title', 'pagetitle', 'pageTitle', 'page-title') -Tracker.alias(safe_unicode, 'dl', 'location') -Tracker.alias(safe_unicode, 'dh', 'hostname') -Tracker.alias(safe_unicode, 'sc', 'sessioncontrol', 'session-control', 'sessionControl') -Tracker.alias(safe_unicode, 'dr', 'referrer', 'referer') -Tracker.alias(int, 'qt', 'queueTime', 'queue-time') -Tracker.alias(safe_unicode, 't', 'hitType', 'hittype') -Tracker.alias(int, 'aip', 'anonymizeIp', 'anonIp', 'anonymize-ip') -Tracker.alias(safe_unicode, 'ds', 'dataSource', 'data-source') - -# Campaign attribution -Tracker.alias(safe_unicode, 'cn', 'campaign', 'campaignName', 'campaign-name') -Tracker.alias(safe_unicode, 'cs', 'source', 'campaignSource', 'campaign-source') -Tracker.alias(safe_unicode, 
'cm', 'medium', 'campaignMedium', 'campaign-medium') -Tracker.alias(safe_unicode, 'ck', 'keyword', 'campaignKeyword', 'campaign-keyword') -Tracker.alias(safe_unicode, 'cc', 'content', 'campaignContent', 'campaign-content') -Tracker.alias(safe_unicode, 'ci', 'campaignId', 'campaignID', 'campaign-id') - -# Technical specs -Tracker.alias(safe_unicode, 'sr', 'screenResolution', 'screen-resolution', 'resolution') -Tracker.alias(safe_unicode, 'vp', 'viewport', 'viewportSize', 'viewport-size') -Tracker.alias(safe_unicode, 'de', 'encoding', 'documentEncoding', 'document-encoding') -Tracker.alias(int, 'sd', 'colors', 'screenColors', 'screen-colors') -Tracker.alias(safe_unicode, 'ul', 'language', 'user-language', 'userLanguage') - -# Mobile app -Tracker.alias(safe_unicode, 'an', 'appName', 'app-name', 'app') -Tracker.alias(safe_unicode, 'cd', 'contentDescription', 'screenName', 'screen-name', 'content-description') -Tracker.alias(safe_unicode, 'av', 'appVersion', 'app-version', 'version') -Tracker.alias(safe_unicode, 'aid', 'appID', 'appId', 'application-id', 'app-id', 'applicationId') -Tracker.alias(safe_unicode, 'aiid', 'appInstallerId', 'app-installer-id') - -# Ecommerce -Tracker.alias(safe_unicode, 'ta', 'affiliation', 'transactionAffiliation', 'transaction-affiliation') -Tracker.alias(safe_unicode, 'ti', 'transaction', 'transactionId', 'transaction-id') -Tracker.alias(float, 'tr', 'revenue', 'transactionRevenue', 'transaction-revenue') -Tracker.alias(float, 'ts', 'shipping', 'transactionShipping', 'transaction-shipping') -Tracker.alias(float, 'tt', 'tax', 'transactionTax', 'transaction-tax') -Tracker.alias(safe_unicode, 'cu', 'currency', 'transactionCurrency', - 'transaction-currency') # Currency code, e.g. USD, EUR -Tracker.alias(safe_unicode, 'in', 'item-name', 'itemName') -Tracker.alias(float, 'ip', 'item-price', 'itemPrice') -Tracker.alias(float, 'iq', 'item-quantity', 'itemQuantity') -Tracker.alias(safe_unicode, 'ic', 'item-code', 'sku', 'itemCode') -Tracker.alias(safe_unicode, 'iv', 'item-variation', 'item-category', 'itemCategory', 'itemVariation') - -# Events -Tracker.alias(safe_unicode, 'ec', 'event-category', 'eventCategory', 'category') -Tracker.alias(safe_unicode, 'ea', 'event-action', 'eventAction', 'action') -Tracker.alias(safe_unicode, 'el', 'event-label', 'eventLabel', 'label') -Tracker.alias(int, 'ev', 'event-value', 'eventValue', 'value') -Tracker.alias(int, 'ni', 'noninteractive', 'nonInteractive', 'noninteraction', 'nonInteraction') - -# Social -Tracker.alias(safe_unicode, 'sa', 'social-action', 'socialAction') -Tracker.alias(safe_unicode, 'sn', 'social-network', 'socialNetwork') -Tracker.alias(safe_unicode, 'st', 'social-target', 'socialTarget') - -# Exceptions -Tracker.alias(safe_unicode, 'exd', 'exception-description', 'exceptionDescription', 'exDescription') -Tracker.alias(int, 'exf', 'exception-fatal', 'exceptionFatal', 'exFatal') - -# User Timing -Tracker.alias(safe_unicode, 'utc', 'timingCategory', 'timing-category') -Tracker.alias(safe_unicode, 'utv', 'timingVariable', 'timing-variable') -Tracker.alias(float, 'utt', 'time', 'timingTime', 'timing-time') -Tracker.alias(safe_unicode, 'utl', 'timingLabel', 'timing-label') -Tracker.alias(float, 'dns', 'timingDNS', 'timing-dns') -Tracker.alias(float, 'pdt', 'timingPageLoad', 'timing-page-load') -Tracker.alias(float, 'rrt', 'timingRedirect', 'timing-redirect') -Tracker.alias(safe_unicode, 'tcp', 'timingTCPConnect', 'timing-tcp-connect') -Tracker.alias(safe_unicode, 'srt', 'timingServerResponse', 'timing-server-response') - 
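The alias table above is what lets callers use humane parameter names; coerceParameter() resolves each one to its terse measurement-protocol key and applies the declared typecast, while consume_options() binds positional arguments per hit type. A minimal usage sketch for this deleted module (the property ID and all values are placeholders, and lib/ is assumed to be on sys.path):

from UniversalAnalytics import Tracker

# create() is the module-level shortcut defined at the bottom of this file.
tracker = Tracker.create('UA-XXXXXXX-Y', client_id='stable-client-id')
tracker.set('campaign', 'spring-launch')      # persisted via the 'cn' alias
tracker.send('event', 'Playback', 'start',    # positional args are consumed
             'Some Title', 1)                 # per option_sequence['event']
tracker.send('pageview', '/library/movies',   # the path binds to 'dp'
             title='Movies')                  # keyword 'title' maps to 'dt'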
-# Custom dimensions and metrics -for i in range(0, 200): - Tracker.alias(safe_unicode, 'cd{0}'.format(i), 'dimension{0}'.format(i)) - Tracker.alias(int, 'cm{0}'.format(i), 'metric{0}'.format(i)) - -# Content groups -for i in range(0, 5): - Tracker.alias(safe_unicode, 'cg{0}'.format(i), 'contentGroup{0}'.format(i)) - -# Enhanced Ecommerce -Tracker.alias(str, 'pa') # Product action -Tracker.alias(str, 'tcc') # Coupon code -Tracker.alias(str, 'pal') # Product action list -Tracker.alias(int, 'cos') # Checkout step -Tracker.alias(str, 'col') # Checkout step option - -Tracker.alias(str, 'promoa') # Promotion action - -for product_index in range(1, MAX_EC_PRODUCTS): - Tracker.alias(str, 'pr{0}id'.format(product_index)) # Product SKU - Tracker.alias(str, 'pr{0}nm'.format(product_index)) # Product name - Tracker.alias(str, 'pr{0}br'.format(product_index)) # Product brand - Tracker.alias(str, 'pr{0}ca'.format(product_index)) # Product category - Tracker.alias(str, 'pr{0}va'.format(product_index)) # Product variant - Tracker.alias(str, 'pr{0}pr'.format(product_index)) # Product price - Tracker.alias(int, 'pr{0}qt'.format(product_index)) # Product quantity - Tracker.alias(str, 'pr{0}cc'.format(product_index)) # Product coupon code - Tracker.alias(int, 'pr{0}ps'.format(product_index)) # Product position - - for custom_index in range(MAX_CUSTOM_DEFINITIONS): - Tracker.alias(str, 'pr{0}cd{1}'.format(product_index, custom_index)) # Product custom dimension - Tracker.alias(int, 'pr{0}cm{1}'.format(product_index, custom_index)) # Product custom metric - - for list_index in range(1, MAX_EC_LISTS): - Tracker.alias(str, 'il{0}pi{1}id'.format(list_index, product_index)) # Product impression SKU - Tracker.alias(str, 'il{0}pi{1}nm'.format(list_index, product_index)) # Product impression name - Tracker.alias(str, 'il{0}pi{1}br'.format(list_index, product_index)) # Product impression brand - Tracker.alias(str, 'il{0}pi{1}ca'.format(list_index, product_index)) # Product impression category - Tracker.alias(str, 'il{0}pi{1}va'.format(list_index, product_index)) # Product impression variant - Tracker.alias(int, 'il{0}pi{1}ps'.format(list_index, product_index)) # Product impression position - Tracker.alias(int, 'il{0}pi{1}pr'.format(list_index, product_index)) # Product impression price - - for custom_index in range(MAX_CUSTOM_DEFINITIONS): - Tracker.alias(str, 'il{0}pi{1}cd{2}'.format(list_index, product_index, - custom_index)) # Product impression custom dimension - Tracker.alias(int, 'il{0}pi{1}cm{2}'.format(list_index, product_index, - custom_index)) # Product impression custom metric - -for list_index in range(1, MAX_EC_LISTS): - Tracker.alias(str, 'il{0}nm'.format(list_index)) # Product impression list name - -for promotion_index in range(1, MAX_EC_PROMOTIONS): - Tracker.alias(str, 'promo{0}id'.format(promotion_index)) # Promotion ID - Tracker.alias(str, 'promo{0}nm'.format(promotion_index)) # Promotion name - Tracker.alias(str, 'promo{0}cr'.format(promotion_index)) # Promotion creative - Tracker.alias(str, 'promo{0}ps'.format(promotion_index)) # Promotion position - - -# Shortcut for creating trackers -def create(account, *args, **kwargs): - return Tracker(account, *args, **kwargs) - -# vim: set nowrap tabstop=4 shiftwidth=4 softtabstop=0 expandtab textwidth=0 filetype=python foldmethod=indent foldcolumn=4 diff --git a/lib/UniversalAnalytics/__init__.py b/lib/UniversalAnalytics/__init__.py deleted file mode 100644 index 0d8817d6..00000000 --- a/lib/UniversalAnalytics/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from 
. import Tracker \ No newline at end of file diff --git a/lib/appdirs.py b/lib/appdirs.py deleted file mode 100644 index ae67001a..00000000 --- a/lib/appdirs.py +++ /dev/null @@ -1,608 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright (c) 2005-2010 ActiveState Software Inc. -# Copyright (c) 2013 Eddy Petrișor - -"""Utilities for determining application-specific dirs. - -See <http://github.com/ActiveState/appdirs> for details and usage. -""" -# Dev Notes: -# - MSDN on where to store app data files: -# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 -# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html -# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - -__version_info__ = (1, 4, 3) -__version__ = '.'.join(map(str, __version_info__)) - - -import sys -import os - -PY3 = sys.version_info[0] == 3 - -if PY3: - unicode = str - -if sys.platform.startswith('java'): - import platform - os_name = platform.java_ver()[3][0] - if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. - system = 'win32' - elif os_name.startswith('Mac'): # "Mac OS X", etc. - system = 'darwin' - else: # "Linux", "SunOS", "FreeBSD", etc. - # Setting this to "linux2" is not ideal, but only Windows or Mac - # are actually checked for and the rest of the module expects - # *sys.platform* style strings. - system = 'linux2' -else: - system = sys.platform - - - -def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user data directories are: - Mac OS X: ~/Library/Application Support/<AppName> - Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined - Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName> - Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName> - Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName> - Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName> - - For Unix, we follow the XDG spec and support $XDG_DATA_HOME. - That means, by default "~/.local/share/<AppName>". 
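A small sketch of what the XDG branch of user_data_dir() (body below) yields on a Linux host; the app and author names are placeholders, and appauthor is ignored on non-Windows platforms:

import os
from appdirs import user_data_dir

os.environ.pop('XDG_DATA_HOME', None)          # force the XDG default
print(user_data_dir('MyApp', 'Acme'))          # ~/.local/share/MyApp
os.environ['XDG_DATA_HOME'] = '/srv/data'      # the env var wins when set
print(user_data_dir('MyApp', version='1.0'))   # /srv/data/MyApp/1.0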
- """ - if system == "win32": - if appauthor is None: - appauthor = appname - const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" - path = os.path.normpath(_get_win_folder(const)) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('~/Library/Application Support/') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of data dirs should be - returned. By default, the first item from XDG_DATA_DIRS is - returned, or '/usr/local/share/', - if XDG_DATA_DIRS is not set - - Typical site data directories are: - Mac OS X: /Library/Application Support/ - Unix: /usr/local/share/ or /usr/share/ - Win XP: C:\Documents and Settings\All Users\Application Data\\ - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. - - For Unix, this is using the $XDG_DATA_DIRS[0] default. - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('/Library/Application Support') - if appname: - path = os.path.join(path, appname) - else: - # XDG default for $XDG_DATA_DIRS - # only first, if multipath is False - path = os.getenv('XDG_DATA_DIRS', - os.pathsep.join(['/usr/local/share', '/usr/share'])) - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - if appname and version: - path = os.path.join(path, version) - return path - - -def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. 
You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user config directories are: - Mac OS X: same as user_data_dir - Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by default "~/.config/<AppName>". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of config dirs should be - returned. By default, the first item from XDG_CONFIG_DIRS is - returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set - - Typical site config directories are: - Mac OS X: same as site_data_dir - Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in - $XDG_CONFIG_DIRS - Win *: same as site_data_dir - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - - For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system in ["win32", "darwin"]: - path = site_data_dir(appname, appauthor) - if appname and version: - path = os.path.join(path, version) - else: - # XDG default for $XDG_CONFIG_DIRS - # only first, if multipath is False - path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - -def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. 
You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Cache" to the base app data dir for Windows. See - discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Caches/<AppName> - Unix: ~/.cache/<AppName> (XDG default) - Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache - Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go in - the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming - app data dir (the default returned by `user_data_dir` above). Apps typically - put cache data somewhere *under* the given dir here. Some examples: - ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache - ...\Acme\SuperApp\Cache\1.0 - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - This can be disabled with the `opinion=False` option. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - if opinion: - path = os.path.join(path, "Cache") - elif system == 'darwin': - path = os.path.expanduser('~/Library/Caches') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific state dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user state directories are: - Mac OS X: same as user_data_dir - Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow this Debian proposal - <https://wiki.debian.org/XDGBaseDirectorySpecification#state> - to extend the XDG spec and support $XDG_STATE_HOME. - - That means, by default "~/.local/state/<AppName>". 
- """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific log dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Logs" to the base app data dir for Windows, and "log" to the - base cache dir for Unix. See discussion below. - - Typical user log directories are: - Mac OS X: ~/Library/Logs/ - Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs - Vista: C:\Users\\AppData\Local\\\Logs - - On Windows the only suggestion in the MSDN docs is that local settings - go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in - examples of what some windows apps use for a logs dir.) - - OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` - value for Windows and appends "log" to the user cache dir for Unix. - This can be disabled with the `opinion=False` option. 
- """ - if system == "darwin": - path = os.path.join( - os.path.expanduser('~/Library/Logs'), - appname) - elif system == "win32": - path = user_data_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "Logs") - else: - path = user_cache_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "log") - if appname and version: - path = os.path.join(path, version) - return path - - -class AppDirs(object): - """Convenience wrapper for getting application dirs.""" - def __init__(self, appname=None, appauthor=None, version=None, - roaming=False, multipath=False): - self.appname = appname - self.appauthor = appauthor - self.version = version - self.roaming = roaming - self.multipath = multipath - - @property - def user_data_dir(self): - return user_data_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_data_dir(self): - return site_data_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_config_dir(self): - return user_config_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_config_dir(self): - return site_config_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_cache_dir(self): - return user_cache_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_state_dir(self): - return user_state_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_log_dir(self): - return user_log_dir(self.appname, self.appauthor, - version=self.version) - - -#---- internal support stuff - -def _get_win_folder_from_registry(csidl_name): - """This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - if PY3: - import winreg as _winreg - else: - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - dir, type = _winreg.QueryValueEx(key, shell_folder_name) - return dir - - -def _get_win_folder_with_pywin32(csidl_name): - from win32com.shell import shellcon, shell - dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) - # Try to make this a unicode path because SHGetFolderPath does - # not return unicode strings when there is unicode data in the - # path. - try: - dir = unicode(dir) - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - try: - import win32api - dir = win32api.GetShortPathName(dir) - except ImportError: - pass - except UnicodeError: - pass - return dir - - -def _get_win_folder_with_ctypes(csidl_name): - import ctypes - - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. See - # . 
- has_high_char = False - for c in buf: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf2 = ctypes.create_unicode_buffer(1024) - if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - -def _get_win_folder_with_jna(csidl_name): - import array - from com.sun import jna - from com.sun.jna.platform import win32 - - buf_size = win32.WinDef.MAX_PATH * 2 - buf = array.zeros('c', buf_size) - shell = win32.Shell32.INSTANCE - shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - # Downgrade to short path name if have highbit chars. See - # <http://bugs.activestate.com/show_bug.cgi?id=85099>. - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf = array.zeros('c', buf_size) - kernel = win32.Kernel32.INSTANCE - if kernel.GetShortPathName(dir, buf, buf_size): - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - return dir - -if system == "win32": - try: - import win32com.shell - _get_win_folder = _get_win_folder_with_pywin32 - except ImportError: - try: - from ctypes import windll - _get_win_folder = _get_win_folder_with_ctypes - except ImportError: - try: - import com.sun.jna - _get_win_folder = _get_win_folder_with_jna - except ImportError: - _get_win_folder = _get_win_folder_from_registry - - -#---- self test code - -if __name__ == "__main__": - appname = "MyApp" - appauthor = "MyCompany" - - props = ("user_data_dir", - "user_config_dir", - "user_cache_dir", - "user_state_dir", - "user_log_dir", - "site_data_dir", - "site_config_dir") - - print("-- app dirs %s --" % __version__) - - print("-- app dirs (with optional 'version')") - dirs = AppDirs(appname, appauthor, version="1.0") - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'version')") - dirs = AppDirs(appname, appauthor) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'appauthor')") - dirs = AppDirs(appname) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (with disabled 'appauthor')") - dirs = AppDirs(appname, appauthor=False) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/lib/apscheduler/__init__.py b/lib/apscheduler/__init__.py deleted file mode 100644 index 968169a9..00000000 --- a/lib/apscheduler/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from pkg_resources import get_distribution, DistributionNotFound - -try: - release = get_distribution('APScheduler').version.split('-')[0] -except DistributionNotFound: - release = '3.5.0' - -version_info = tuple(int(x) if x.isdigit() else x for x in release.split('.')) -version = __version__ = '.'.join(str(x) for x in version_info[:3]) -del get_distribution, DistributionNotFound diff --git a/lib/apscheduler/events.py b/lib/apscheduler/events.py deleted file mode 100644 index 016da03c..00000000 --- a/lib/apscheduler/events.py +++ /dev/null @@ -1,94 +0,0 @@ -__all__ = ('EVENT_SCHEDULER_STARTED', 'EVENT_SCHEDULER_SHUTDOWN', 'EVENT_SCHEDULER_PAUSED', - 'EVENT_SCHEDULER_RESUMED', 'EVENT_EXECUTOR_ADDED', 'EVENT_EXECUTOR_REMOVED', - 'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED', 'EVENT_ALL_JOBS_REMOVED', - 'EVENT_JOB_ADDED', 'EVENT_JOB_REMOVED', 'EVENT_JOB_MODIFIED', 'EVENT_JOB_EXECUTED', - 'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED', 'EVENT_JOB_SUBMITTED', 'EVENT_JOB_MAX_INSTANCES', - 'SchedulerEvent', 
'JobEvent', 'JobExecutionEvent', 'JobSubmissionEvent') - - -EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2 ** 0 -EVENT_SCHEDULER_SHUTDOWN = 2 ** 1 -EVENT_SCHEDULER_PAUSED = 2 ** 2 -EVENT_SCHEDULER_RESUMED = 2 ** 3 -EVENT_EXECUTOR_ADDED = 2 ** 4 -EVENT_EXECUTOR_REMOVED = 2 ** 5 -EVENT_JOBSTORE_ADDED = 2 ** 6 -EVENT_JOBSTORE_REMOVED = 2 ** 7 -EVENT_ALL_JOBS_REMOVED = 2 ** 8 -EVENT_JOB_ADDED = 2 ** 9 -EVENT_JOB_REMOVED = 2 ** 10 -EVENT_JOB_MODIFIED = 2 ** 11 -EVENT_JOB_EXECUTED = 2 ** 12 -EVENT_JOB_ERROR = 2 ** 13 -EVENT_JOB_MISSED = 2 ** 14 -EVENT_JOB_SUBMITTED = 2 ** 15 -EVENT_JOB_MAX_INSTANCES = 2 ** 16 -EVENT_ALL = (EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN | EVENT_SCHEDULER_PAUSED | - EVENT_SCHEDULER_RESUMED | EVENT_EXECUTOR_ADDED | EVENT_EXECUTOR_REMOVED | - EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED | EVENT_ALL_JOBS_REMOVED | - EVENT_JOB_ADDED | EVENT_JOB_REMOVED | EVENT_JOB_MODIFIED | EVENT_JOB_EXECUTED | - EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_SUBMITTED | EVENT_JOB_MAX_INSTANCES) - - -class SchedulerEvent(object): - """ - An event that concerns the scheduler itself. - - :ivar code: the type code of this event - :ivar alias: alias of the job store or executor that was added or removed (if applicable) - """ - - def __init__(self, code, alias=None): - super(SchedulerEvent, self).__init__() - self.code = code - self.alias = alias - - def __repr__(self): - return '<%s (code=%d)>' % (self.__class__.__name__, self.code) - - -class JobEvent(SchedulerEvent): - """ - An event that concerns a job. - - :ivar code: the type code of this event - :ivar job_id: identifier of the job in question - :ivar jobstore: alias of the job store containing the job in question - """ - - def __init__(self, code, job_id, jobstore): - super(JobEvent, self).__init__(code) - self.code = code - self.job_id = job_id - self.jobstore = jobstore - - -class JobSubmissionEvent(JobEvent): - """ - An event that concerns the submission of a job to its executor. - - :ivar scheduled_run_times: a list of datetimes when the job was intended to run - """ - - def __init__(self, code, job_id, jobstore, scheduled_run_times): - super(JobSubmissionEvent, self).__init__(code, job_id, jobstore) - self.scheduled_run_times = scheduled_run_times - - -class JobExecutionEvent(JobEvent): - """ - An event that concerns the running of a job within its executor. 
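These constants are bit flags, so a listener subscribes with an OR-ed mask and receives only matching events. A sketch against the public APScheduler API; the scheduler setup here is illustrative:

from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED
from apscheduler.schedulers.background import BackgroundScheduler

def on_job_done(event):
    # A JobExecutionEvent carries retval or exception, depending on outcome.
    if event.exception:
        print('job %s failed: %r' % (event.job_id, event.exception))
    else:
        print('job %s returned %r' % (event.job_id, event.retval))

scheduler = BackgroundScheduler()
scheduler.add_listener(on_job_done, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)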
- - :ivar scheduled_run_time: the time when the job was scheduled to be run - :ivar retval: the return value of the successfully executed job - :ivar exception: the exception raised by the job - :ivar traceback: a formatted traceback for the exception - """ - - def __init__(self, code, job_id, jobstore, scheduled_run_time, retval=None, exception=None, - traceback=None): - super(JobExecutionEvent, self).__init__(code, job_id, jobstore) - self.scheduled_run_time = scheduled_run_time - self.retval = retval - self.exception = exception - self.traceback = traceback diff --git a/lib/apscheduler/executors/__init__.py b/lib/apscheduler/executors/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/lib/apscheduler/executors/asyncio.py b/lib/apscheduler/executors/asyncio.py deleted file mode 100644 index 06fc7f96..00000000 --- a/lib/apscheduler/executors/asyncio.py +++ /dev/null @@ -1,59 +0,0 @@ -from __future__ import absolute_import - -import sys - -from apscheduler.executors.base import BaseExecutor, run_job -from apscheduler.util import iscoroutinefunction_partial - -try: - from apscheduler.executors.base_py3 import run_coroutine_job -except ImportError: - run_coroutine_job = None - - -class AsyncIOExecutor(BaseExecutor): - """ - Runs jobs in the default executor of the event loop. - - If the job function is a native coroutine function, it is scheduled to be run directly in the - event loop as soon as possible. All other functions are run in the event loop's default - executor which is usually a thread pool. - - Plugin alias: ``asyncio`` - """ - - def start(self, scheduler, alias): - super(AsyncIOExecutor, self).start(scheduler, alias) - self._eventloop = scheduler._eventloop - self._pending_futures = set() - - def shutdown(self, wait=True): - # There is no way to honor wait=True without converting this method into a coroutine method - for f in self._pending_futures: - if not f.done(): - f.cancel() - - self._pending_futures.clear() - - def _do_submit_job(self, job, run_times): - def callback(f): - self._pending_futures.discard(f) - try: - events = f.result() - except BaseException: - self._run_job_error(job.id, *sys.exc_info()[1:]) - else: - self._run_job_success(job.id, events) - - if iscoroutinefunction_partial(job.func): - if run_coroutine_job is not None: - coro = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name) - f = self._eventloop.create_task(coro) - else: - raise Exception('Executing coroutine based jobs is not supported with Trollius') - else: - f = self._eventloop.run_in_executor(None, run_job, job, job._jobstore_alias, run_times, - self._logger.name) - - f.add_done_callback(callback) - self._pending_futures.add(f) diff --git a/lib/apscheduler/executors/base.py b/lib/apscheduler/executors/base.py deleted file mode 100644 index 4c09fc11..00000000 --- a/lib/apscheduler/executors/base.py +++ /dev/null @@ -1,146 +0,0 @@ -from abc import ABCMeta, abstractmethod -from collections import defaultdict -from datetime import datetime, timedelta -from traceback import format_tb -import logging -import sys - -from pytz import utc -import six - -from apscheduler.events import ( - JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED) - - -class MaxInstancesReachedError(Exception): - def __init__(self, job): - super(MaxInstancesReachedError, self).__init__( - 'Job "%s" has already reached its maximum number of instances (%d)' % - (job.id, job.max_instances)) - - -class BaseExecutor(six.with_metaclass(ABCMeta, object)): - """Abstract 
base class that defines the interface that every executor must implement.""" - - _scheduler = None - _lock = None - _logger = logging.getLogger('apscheduler.executors') - - def __init__(self): - super(BaseExecutor, self).__init__() - self._instances = defaultdict(lambda: 0) - - def start(self, scheduler, alias): - """ - Called by the scheduler when the scheduler is being started or when the executor is being - added to an already running scheduler. - - :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting - this executor - :param str|unicode alias: alias of this executor as it was assigned to the scheduler - - """ - self._scheduler = scheduler - self._lock = scheduler._create_lock() - self._logger = logging.getLogger('apscheduler.executors.%s' % alias) - - def shutdown(self, wait=True): - """ - Shuts down this executor. - - :param bool wait: ``True`` to wait until all submitted jobs - have been executed - """ - - def submit_job(self, job, run_times): - """ - Submits job for execution. - - :param Job job: job to execute - :param list[datetime] run_times: list of datetimes specifying - when the job should have been run - :raises MaxInstancesReachedError: if the maximum number of - allowed instances for this job has been reached - - """ - assert self._lock is not None, 'This executor has not been started yet' - with self._lock: - if self._instances[job.id] >= job.max_instances: - raise MaxInstancesReachedError(job) - - self._do_submit_job(job, run_times) - self._instances[job.id] += 1 - - @abstractmethod - def _do_submit_job(self, job, run_times): - """Performs the actual task of scheduling `run_job` to be called.""" - - def _run_job_success(self, job_id, events): - """ - Called by the executor with the list of generated events when :func:`run_job` has been - successfully called. - - """ - with self._lock: - self._instances[job_id] -= 1 - if self._instances[job_id] == 0: - del self._instances[job_id] - - for event in events: - self._scheduler._dispatch_event(event) - - def _run_job_error(self, job_id, exc, traceback=None): - """Called by the executor with the exception if there is an error calling `run_job`.""" - with self._lock: - self._instances[job_id] -= 1 - if self._instances[job_id] == 0: - del self._instances[job_id] - - exc_info = (exc.__class__, exc, traceback) - self._logger.error('Error running job %s', job_id, exc_info=exc_info) - - -def run_job(job, jobstore_alias, run_times, logger_name): - """ - Called by executors to run the job. Returns a list of scheduler events to be dispatched by the - scheduler. 
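submit_job() above is the choke point for per-job concurrency: once a job has max_instances submissions in flight, a further run raises MaxInstancesReachedError, which the scheduler reports as a skipped run. An illustrative configuration that triggers the guard:

import time
from apscheduler.schedulers.background import BackgroundScheduler

def slow_job():
    time.sleep(5)          # deliberately outlives the 1-second interval

scheduler = BackgroundScheduler()
# With max_instances=2, a third overlapping run is rejected with a warning.
scheduler.add_job(slow_job, 'interval', seconds=1, max_instances=2)
scheduler.start()
time.sleep(10)
scheduler.shutdown()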
- - """ - events = [] - logger = logging.getLogger(logger_name) - for run_time in run_times: - # See if the job missed its run time window, and handle - # possible misfires accordingly - if job.misfire_grace_time is not None: - difference = datetime.now(utc) - run_time - grace_time = timedelta(seconds=job.misfire_grace_time) - if difference > grace_time: - events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias, - run_time)) - logger.warning('Run time of job "%s" was missed by %s', job, difference) - continue - - logger.info('Running job "%s" (scheduled at %s)', job, run_time) - try: - retval = job.func(*job.args, **job.kwargs) - except BaseException: - exc, tb = sys.exc_info()[1:] - formatted_tb = ''.join(format_tb(tb)) - events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time, - exception=exc, traceback=formatted_tb)) - logger.exception('Job "%s" raised an exception', job) - - # This is to prevent cyclic references that would lead to memory leaks - if six.PY2: - sys.exc_clear() - del tb - else: - import traceback - traceback.clear_frames(tb) - del tb - else: - events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, - retval=retval)) - logger.info('Job "%s" executed successfully', job) - - return events diff --git a/lib/apscheduler/executors/base_py3.py b/lib/apscheduler/executors/base_py3.py deleted file mode 100644 index 61abd842..00000000 --- a/lib/apscheduler/executors/base_py3.py +++ /dev/null @@ -1,41 +0,0 @@ -import logging -import sys -from datetime import datetime, timedelta -from traceback import format_tb - -from pytz import utc - -from apscheduler.events import ( - JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED) - - -async def run_coroutine_job(job, jobstore_alias, run_times, logger_name): - """Coroutine version of run_job().""" - events = [] - logger = logging.getLogger(logger_name) - for run_time in run_times: - # See if the job missed its run time window, and handle possible misfires accordingly - if job.misfire_grace_time is not None: - difference = datetime.now(utc) - run_time - grace_time = timedelta(seconds=job.misfire_grace_time) - if difference > grace_time: - events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias, - run_time)) - logger.warning('Run time of job "%s" was missed by %s', job, difference) - continue - - logger.info('Running job "%s" (scheduled at %s)', job, run_time) - try: - retval = await job.func(*job.args, **job.kwargs) - except BaseException: - exc, tb = sys.exc_info()[1:] - formatted_tb = ''.join(format_tb(tb)) - events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time, - exception=exc, traceback=formatted_tb)) - logger.exception('Job "%s" raised an exception', job) - else: - events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, - retval=retval)) - logger.info('Job "%s" executed successfully', job) - - return events diff --git a/lib/apscheduler/executors/debug.py b/lib/apscheduler/executors/debug.py deleted file mode 100644 index ac739aeb..00000000 --- a/lib/apscheduler/executors/debug.py +++ /dev/null @@ -1,20 +0,0 @@ -import sys - -from apscheduler.executors.base import BaseExecutor, run_job - - -class DebugExecutor(BaseExecutor): - """ - A special executor that executes the target callable directly instead of deferring it to a - thread or process. 
- - Plugin alias: ``debug`` - """ - - def _do_submit_job(self, job, run_times): - try: - events = run_job(job, job._jobstore_alias, run_times, self._logger.name) - except BaseException: - self._run_job_error(job.id, *sys.exc_info()[1:]) - else: - self._run_job_success(job.id, events) diff --git a/lib/apscheduler/executors/gevent.py b/lib/apscheduler/executors/gevent.py deleted file mode 100644 index 1235bb6e..00000000 --- a/lib/apscheduler/executors/gevent.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import absolute_import -import sys - -from apscheduler.executors.base import BaseExecutor, run_job - - -try: - import gevent -except ImportError: # pragma: nocover - raise ImportError('GeventExecutor requires gevent installed') - - -class GeventExecutor(BaseExecutor): - """ - Runs jobs as greenlets. - - Plugin alias: ``gevent`` - """ - - def _do_submit_job(self, job, run_times): - def callback(greenlet): - try: - events = greenlet.get() - except BaseException: - self._run_job_error(job.id, *sys.exc_info()[1:]) - else: - self._run_job_success(job.id, events) - - gevent.spawn(run_job, job, job._jobstore_alias, run_times, self._logger.name).\ - link(callback) diff --git a/lib/apscheduler/executors/pool.py b/lib/apscheduler/executors/pool.py deleted file mode 100644 index 2f4ef455..00000000 --- a/lib/apscheduler/executors/pool.py +++ /dev/null @@ -1,54 +0,0 @@ -from abc import abstractmethod -import concurrent.futures - -from apscheduler.executors.base import BaseExecutor, run_job - - -class BasePoolExecutor(BaseExecutor): - @abstractmethod - def __init__(self, pool): - super(BasePoolExecutor, self).__init__() - self._pool = pool - - def _do_submit_job(self, job, run_times): - def callback(f): - exc, tb = (f.exception_info() if hasattr(f, 'exception_info') else - (f.exception(), getattr(f.exception(), '__traceback__', None))) - if exc: - self._run_job_error(job.id, exc, tb) - else: - self._run_job_success(job.id, f.result()) - - f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name) - f.add_done_callback(callback) - - def shutdown(self, wait=True): - self._pool.shutdown(wait) - - -class ThreadPoolExecutor(BasePoolExecutor): - """ - An executor that runs jobs in a concurrent.futures thread pool. - - Plugin alias: ``threadpool`` - - :param max_workers: the maximum number of spawned threads. - """ - - def __init__(self, max_workers=10): - pool = concurrent.futures.ThreadPoolExecutor(int(max_workers)) - super(ThreadPoolExecutor, self).__init__(pool) - - -class ProcessPoolExecutor(BasePoolExecutor): - """ - An executor that runs jobs in a concurrent.futures process pool. - - Plugin alias: ``processpool`` - - :param max_workers: the maximum number of spawned processes. 
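These pool executors are usually wired in by alias when the scheduler is built, and individual jobs select one by name. A sketch; the alias names and workload are placeholders:

from apscheduler.executors.pool import ProcessPoolExecutor, ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler

def crunch():
    pass                      # CPU-bound work goes here (must be picklable)

executors = {
    'default': ThreadPoolExecutor(max_workers=20),
    'processpool': ProcessPoolExecutor(max_workers=4),
}
scheduler = BackgroundScheduler(executors=executors)
scheduler.add_job(crunch, 'interval', minutes=5, executor='processpool')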
- """ - - def __init__(self, max_workers=10): - pool = concurrent.futures.ProcessPoolExecutor(int(max_workers)) - super(ProcessPoolExecutor, self).__init__(pool) diff --git a/lib/apscheduler/executors/tornado.py b/lib/apscheduler/executors/tornado.py deleted file mode 100644 index 3b97eec9..00000000 --- a/lib/apscheduler/executors/tornado.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import absolute_import - -import sys -from concurrent.futures import ThreadPoolExecutor - -from tornado.gen import convert_yielded - -from apscheduler.executors.base import BaseExecutor, run_job - -try: - from apscheduler.executors.base_py3 import run_coroutine_job - from apscheduler.util import iscoroutinefunction_partial -except ImportError: - def iscoroutinefunction_partial(func): - return False - - -class TornadoExecutor(BaseExecutor): - """ - Runs jobs either in a thread pool or directly on the I/O loop. - - If the job function is a native coroutine function, it is scheduled to be run directly in the - I/O loop as soon as possible. All other functions are run in a thread pool. - - Plugin alias: ``tornado`` - - :param int max_workers: maximum number of worker threads in the thread pool - """ - - def __init__(self, max_workers=10): - super(TornadoExecutor, self).__init__() - self.executor = ThreadPoolExecutor(max_workers) - - def start(self, scheduler, alias): - super(TornadoExecutor, self).start(scheduler, alias) - self._ioloop = scheduler._ioloop - - def _do_submit_job(self, job, run_times): - def callback(f): - try: - events = f.result() - except BaseException: - self._run_job_error(job.id, *sys.exc_info()[1:]) - else: - self._run_job_success(job.id, events) - - if iscoroutinefunction_partial(job.func): - f = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name) - else: - f = self.executor.submit(run_job, job, job._jobstore_alias, run_times, - self._logger.name) - - f = convert_yielded(f) - f.add_done_callback(callback) diff --git a/lib/apscheduler/executors/twisted.py b/lib/apscheduler/executors/twisted.py deleted file mode 100644 index c7bcf647..00000000 --- a/lib/apscheduler/executors/twisted.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import absolute_import - -from apscheduler.executors.base import BaseExecutor, run_job - - -class TwistedExecutor(BaseExecutor): - """ - Runs jobs in the reactor's thread pool. 
- - Plugin alias: ``twisted`` - """ - - def start(self, scheduler, alias): - super(TwistedExecutor, self).start(scheduler, alias) - self._reactor = scheduler._reactor - - def _do_submit_job(self, job, run_times): - def callback(success, result): - if success: - self._run_job_success(job.id, result) - else: - self._run_job_error(job.id, result.value, result.tb) - - self._reactor.getThreadPool().callInThreadWithCallback( - callback, run_job, job, job._jobstore_alias, run_times, self._logger.name) diff --git a/lib/apscheduler/job.py b/lib/apscheduler/job.py deleted file mode 100644 index d676ca89..00000000 --- a/lib/apscheduler/job.py +++ /dev/null @@ -1,301 +0,0 @@ -from inspect import ismethod, isclass -from uuid import uuid4 - -import six - -from apscheduler.triggers.base import BaseTrigger -from apscheduler.util import ( - ref_to_obj, obj_to_ref, datetime_repr, repr_escape, get_callable_name, check_callable_args, - convert_to_datetime) - -try: - from collections.abc import Iterable, Mapping -except ImportError: - from collections import Iterable, Mapping - - -class Job(object): - """ - Contains the options given when scheduling callables and its current schedule and other state. - This class should never be instantiated by the user. - - :var str id: the unique identifier of this job - :var str name: the description of this job - :var func: the callable to execute - :var tuple|list args: positional arguments to the callable - :var dict kwargs: keyword arguments to the callable - :var bool coalesce: whether to only run the job once when several run times are due - :var trigger: the trigger object that controls the schedule of this job - :var str executor: the name of the executor that will run this job - :var int misfire_grace_time: the time (in seconds) how much this job's execution is allowed to - be late - :var int max_instances: the maximum number of concurrently executing instances allowed for this - job - :var datetime.datetime next_run_time: the next scheduled run time of this job - - .. note:: - The ``misfire_grace_time`` has some non-obvious effects on job execution. See the - :ref:`missed-job-executions` section in the documentation for an in-depth explanation. - """ - - __slots__ = ('_scheduler', '_jobstore_alias', 'id', 'trigger', 'executor', 'func', 'func_ref', - 'args', 'kwargs', 'name', 'misfire_grace_time', 'coalesce', 'max_instances', - 'next_run_time') - - def __init__(self, scheduler, id=None, **kwargs): - super(Job, self).__init__() - self._scheduler = scheduler - self._jobstore_alias = None - self._modify(id=id or uuid4().hex, **kwargs) - - def modify(self, **changes): - """ - Makes the given changes to this job and saves it in the associated job store. - - Accepted keyword arguments are the same as the variables on this class. - - .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.modify_job` - - :return Job: this job instance - - """ - self._scheduler.modify_job(self.id, self._jobstore_alias, **changes) - return self - - def reschedule(self, trigger, **trigger_args): - """ - Shortcut for switching the trigger on this job. - - .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.reschedule_job` - - :return Job: this job instance - - """ - self._scheduler.reschedule_job(self.id, self._jobstore_alias, trigger, **trigger_args) - return self - - def pause(self): - """ - Temporarily suspend the execution of this job. - - .. 
seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.pause_job` - - :return Job: this job instance - - """ - self._scheduler.pause_job(self.id, self._jobstore_alias) - return self - - def resume(self): - """ - Resume the schedule of this job if previously paused. - - .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.resume_job` - - :return Job: this job instance - - """ - self._scheduler.resume_job(self.id, self._jobstore_alias) - return self - - def remove(self): - """ - Unschedules this job and removes it from its associated job store. - - .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job` - - """ - self._scheduler.remove_job(self.id, self._jobstore_alias) - - @property - def pending(self): - """ - Returns ``True`` if the referenced job is still waiting to be added to its designated job - store. - - """ - return self._jobstore_alias is None - - # - # Private API - # - - def _get_run_times(self, now): - """ - Computes the scheduled run times between ``next_run_time`` and ``now`` (inclusive). - - :type now: datetime.datetime - :rtype: list[datetime.datetime] - - """ - run_times = [] - next_run_time = self.next_run_time - while next_run_time and next_run_time <= now: - run_times.append(next_run_time) - next_run_time = self.trigger.get_next_fire_time(next_run_time, now) - - return run_times - - def _modify(self, **changes): - """ - Validates the changes to the Job and makes the modifications if and only if all of them - validate. - - """ - approved = {} - - if 'id' in changes: - value = changes.pop('id') - if not isinstance(value, six.string_types): - raise TypeError("id must be a nonempty string") - if hasattr(self, 'id'): - raise ValueError('The job ID may not be changed') - approved['id'] = value - - if 'func' in changes or 'args' in changes or 'kwargs' in changes: - func = changes.pop('func') if 'func' in changes else self.func - args = changes.pop('args') if 'args' in changes else self.args - kwargs = changes.pop('kwargs') if 'kwargs' in changes else self.kwargs - - if isinstance(func, six.string_types): - func_ref = func - func = ref_to_obj(func) - elif callable(func): - try: - func_ref = obj_to_ref(func) - except ValueError: - # If this happens, this Job won't be serializable - func_ref = None - else: - raise TypeError('func must be a callable or a textual reference to one') - - if not hasattr(self, 'name') and changes.get('name', None) is None: - changes['name'] = get_callable_name(func) - - if isinstance(args, six.string_types) or not isinstance(args, Iterable): - raise TypeError('args must be a non-string iterable') - if isinstance(kwargs, six.string_types) or not isinstance(kwargs, Mapping): - raise TypeError('kwargs must be a dict-like object') - - check_callable_args(func, args, kwargs) - - approved['func'] = func - approved['func_ref'] = func_ref - approved['args'] = args - approved['kwargs'] = kwargs - - if 'name' in changes: - value = changes.pop('name') - if not value or not isinstance(value, six.string_types): - raise TypeError("name must be a nonempty string") - approved['name'] = value - - if 'misfire_grace_time' in changes: - value = changes.pop('misfire_grace_time') - if value is not None and (not isinstance(value, six.integer_types) or value <= 0): - raise TypeError('misfire_grace_time must be either None or a positive integer') - approved['misfire_grace_time'] = value - - if 'coalesce' in changes: - value = bool(changes.pop('coalesce')) - approved['coalesce'] = value - - if 'max_instances' in changes: - value = 
changes.pop('max_instances')
-            if not isinstance(value, six.integer_types) or value <= 0:
-                raise TypeError('max_instances must be a positive integer')
-            approved['max_instances'] = value
-
-        if 'trigger' in changes:
-            trigger = changes.pop('trigger')
-            if not isinstance(trigger, BaseTrigger):
-                raise TypeError('Expected a trigger instance, got %s instead' %
-                                trigger.__class__.__name__)
-
-            approved['trigger'] = trigger
-
-        if 'executor' in changes:
-            value = changes.pop('executor')
-            if not isinstance(value, six.string_types):
-                raise TypeError('executor must be a string')
-            approved['executor'] = value
-
-        if 'next_run_time' in changes:
-            value = changes.pop('next_run_time')
-            approved['next_run_time'] = convert_to_datetime(value, self._scheduler.timezone,
-                                                            'next_run_time')
-
-        if changes:
-            raise AttributeError('The following are not modifiable attributes of Job: %s' %
-                                 ', '.join(changes))
-
-        for key, value in six.iteritems(approved):
-            setattr(self, key, value)
-
-    def __getstate__(self):
-        # Don't allow this Job to be serialized if the function reference could not be determined
-        if not self.func_ref:
-            raise ValueError(
-                'This Job cannot be serialized since the reference to its callable (%r) could not '
-                'be determined. Consider giving a textual reference (module:function name) '
-                'instead.' % (self.func,))
-
-        # Instance methods cannot survive serialization as-is, so store the "self" argument
-        # explicitly
-        if ismethod(self.func) and not isclass(self.func.__self__):
-            args = (self.func.__self__,) + tuple(self.args)
-        else:
-            args = self.args
-
-        return {
-            'version': 1,
-            'id': self.id,
-            'func': self.func_ref,
-            'trigger': self.trigger,
-            'executor': self.executor,
-            'args': args,
-            'kwargs': self.kwargs,
-            'name': self.name,
-            'misfire_grace_time': self.misfire_grace_time,
-            'coalesce': self.coalesce,
-            'max_instances': self.max_instances,
-            'next_run_time': self.next_run_time
-        }
-
-    def __setstate__(self, state):
-        if state.get('version', 1) > 1:
-            raise ValueError('Job has version %s, but only version 1 can be handled' %
-                             state['version'])
-
-        self.id = state['id']
-        self.func_ref = state['func']
-        self.func = ref_to_obj(self.func_ref)
-        self.trigger = state['trigger']
-        self.executor = state['executor']
-        self.args = state['args']
-        self.kwargs = state['kwargs']
-        self.name = state['name']
-        self.misfire_grace_time = state['misfire_grace_time']
-        self.coalesce = state['coalesce']
-        self.max_instances = state['max_instances']
-        self.next_run_time = state['next_run_time']
-
-    def __eq__(self, other):
-        if isinstance(other, Job):
-            return self.id == other.id
-        return NotImplemented
-
-    def __repr__(self):
-        return '<Job (id=%s name=%s)>' % (repr_escape(self.id), repr_escape(self.name))
-
-    def __str__(self):
-        return repr_escape(self.__unicode__())
-
-    def __unicode__(self):
-        if hasattr(self, 'next_run_time'):
-            status = ('next run at: ' + datetime_repr(self.next_run_time) if
-                      self.next_run_time else 'paused')
-        else:
-            status = 'pending'
-
-        return u'%s (trigger: %s, %s)' % (self.name, self.trigger, status)
diff --git a/lib/apscheduler/jobstores/__init__.py b/lib/apscheduler/jobstores/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/lib/apscheduler/jobstores/base.py b/lib/apscheduler/jobstores/base.py
deleted file mode 100644
index 9cff66c4..00000000
--- a/lib/apscheduler/jobstores/base.py
+++ /dev/null
@@ -1,143 +0,0 @@
-from abc import ABCMeta, abstractmethod
-import logging
-
-import six
-
-
-class JobLookupError(KeyError):
-    """Raised when the job store
cannot find a job for update or removal.""" - - def __init__(self, job_id): - super(JobLookupError, self).__init__(u'No job by the id of %s was found' % job_id) - - -class ConflictingIdError(KeyError): - """Raised when the uniqueness of job IDs is being violated.""" - - def __init__(self, job_id): - super(ConflictingIdError, self).__init__( - u'Job identifier (%s) conflicts with an existing job' % job_id) - - -class TransientJobError(ValueError): - """ - Raised when an attempt to add transient (with no func_ref) job to a persistent job store is - detected. - """ - - def __init__(self, job_id): - super(TransientJobError, self).__init__( - u'Job (%s) cannot be added to this job store because a reference to the callable ' - u'could not be determined.' % job_id) - - -class BaseJobStore(six.with_metaclass(ABCMeta)): - """Abstract base class that defines the interface that every job store must implement.""" - - _scheduler = None - _alias = None - _logger = logging.getLogger('apscheduler.jobstores') - - def start(self, scheduler, alias): - """ - Called by the scheduler when the scheduler is being started or when the job store is being - added to an already running scheduler. - - :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting - this job store - :param str|unicode alias: alias of this job store as it was assigned to the scheduler - """ - - self._scheduler = scheduler - self._alias = alias - self._logger = logging.getLogger('apscheduler.jobstores.%s' % alias) - - def shutdown(self): - """Frees any resources still bound to this job store.""" - - def _fix_paused_jobs_sorting(self, jobs): - for i, job in enumerate(jobs): - if job.next_run_time is not None: - if i > 0: - paused_jobs = jobs[:i] - del jobs[:i] - jobs.extend(paused_jobs) - break - - @abstractmethod - def lookup_job(self, job_id): - """ - Returns a specific job, or ``None`` if it isn't found.. - - The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of - the returned job to point to the scheduler and itself, respectively. - - :param str|unicode job_id: identifier of the job - :rtype: Job - """ - - @abstractmethod - def get_due_jobs(self, now): - """ - Returns the list of jobs that have ``next_run_time`` earlier or equal to ``now``. - The returned jobs must be sorted by next run time (ascending). - - :param datetime.datetime now: the current (timezone aware) datetime - :rtype: list[Job] - """ - - @abstractmethod - def get_next_run_time(self): - """ - Returns the earliest run time of all the jobs stored in this job store, or ``None`` if - there are no active jobs. - - :rtype: datetime.datetime - """ - - @abstractmethod - def get_all_jobs(self): - """ - Returns a list of all jobs in this job store. - The returned jobs should be sorted by next run time (ascending). - Paused jobs (next_run_time == None) should be sorted last. - - The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of - the returned jobs to point to the scheduler and itself, respectively. - - :rtype: list[Job] - """ - - @abstractmethod - def add_job(self, job): - """ - Adds the given job to this store. - - :param Job job: the job to add - :raises ConflictingIdError: if there is another job in this store with the same ID - """ - - @abstractmethod - def update_job(self, job): - """ - Replaces the job in the store with the given newer version. 
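BaseJobStore above is the entire contract a custom store has to satisfy. As a rough sketch (a hypothetical dict-backed store, not part of this repo; the real in-repo reference implementation is MemoryJobStore, which follows below), the abstract methods map onto plain dictionary operations:

    from datetime import datetime

    from pytz import utc

    from apscheduler.jobstores.base import (
        BaseJobStore, ConflictingIdError, JobLookupError)

    PAUSED_SORT_KEY = datetime(9999, 12, 31, tzinfo=utc)  # paused jobs sort last

    class DictJobStore(BaseJobStore):
        def __init__(self):
            super(DictJobStore, self).__init__()
            self._jobs = {}  # job id -> Job

        def lookup_job(self, job_id):
            return self._jobs.get(job_id)

        def get_due_jobs(self, now):
            # Contract: only jobs due at or before `now`, ascending by run time.
            due = [job for job in self._jobs.values()
                   if job.next_run_time and job.next_run_time <= now]
            return sorted(due, key=lambda job: job.next_run_time)

        def get_next_run_time(self):
            times = [job.next_run_time for job in self._jobs.values()
                     if job.next_run_time]
            return min(times) if times else None

        def get_all_jobs(self):
            # Contract: ascending by run time, paused jobs (None) last.
            return sorted(self._jobs.values(),
                          key=lambda job: job.next_run_time or PAUSED_SORT_KEY)

        def add_job(self, job):
            if job.id in self._jobs:
                raise ConflictingIdError(job.id)
            self._jobs[job.id] = job

        def update_job(self, job):
            if job.id not in self._jobs:
                raise JobLookupError(job.id)
            self._jobs[job.id] = job

        def remove_job(self, job_id):
            if job_id not in self._jobs:
                raise JobLookupError(job_id)
            del self._jobs[job_id]

        def remove_all_jobs(self):
            self._jobs.clear()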
- - :param Job job: the job to update - :raises JobLookupError: if the job does not exist - """ - - @abstractmethod - def remove_job(self, job_id): - """ - Removes the given job from this store. - - :param str|unicode job_id: identifier of the job - :raises JobLookupError: if the job does not exist - """ - - @abstractmethod - def remove_all_jobs(self): - """Removes all jobs from this store.""" - - def __repr__(self): - return '<%s>' % self.__class__.__name__ diff --git a/lib/apscheduler/jobstores/memory.py b/lib/apscheduler/jobstores/memory.py deleted file mode 100644 index abfe7c6c..00000000 --- a/lib/apscheduler/jobstores/memory.py +++ /dev/null @@ -1,108 +0,0 @@ -from __future__ import absolute_import - -from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError -from apscheduler.util import datetime_to_utc_timestamp - - -class MemoryJobStore(BaseJobStore): - """ - Stores jobs in an array in RAM. Provides no persistence support. - - Plugin alias: ``memory`` - """ - - def __init__(self): - super(MemoryJobStore, self).__init__() - # list of (job, timestamp), sorted by next_run_time and job id (ascending) - self._jobs = [] - self._jobs_index = {} # id -> (job, timestamp) lookup table - - def lookup_job(self, job_id): - return self._jobs_index.get(job_id, (None, None))[0] - - def get_due_jobs(self, now): - now_timestamp = datetime_to_utc_timestamp(now) - pending = [] - for job, timestamp in self._jobs: - if timestamp is None or timestamp > now_timestamp: - break - pending.append(job) - - return pending - - def get_next_run_time(self): - return self._jobs[0][0].next_run_time if self._jobs else None - - def get_all_jobs(self): - return [j[0] for j in self._jobs] - - def add_job(self, job): - if job.id in self._jobs_index: - raise ConflictingIdError(job.id) - - timestamp = datetime_to_utc_timestamp(job.next_run_time) - index = self._get_job_index(timestamp, job.id) - self._jobs.insert(index, (job, timestamp)) - self._jobs_index[job.id] = (job, timestamp) - - def update_job(self, job): - old_job, old_timestamp = self._jobs_index.get(job.id, (None, None)) - if old_job is None: - raise JobLookupError(job.id) - - # If the next run time has not changed, simply replace the job in its present index. - # Otherwise, reinsert the job to the list to preserve the ordering. - old_index = self._get_job_index(old_timestamp, old_job.id) - new_timestamp = datetime_to_utc_timestamp(job.next_run_time) - if old_timestamp == new_timestamp: - self._jobs[old_index] = (job, new_timestamp) - else: - del self._jobs[old_index] - new_index = self._get_job_index(new_timestamp, job.id) - self._jobs.insert(new_index, (job, new_timestamp)) - - self._jobs_index[old_job.id] = (job, new_timestamp) - - def remove_job(self, job_id): - job, timestamp = self._jobs_index.get(job_id, (None, None)) - if job is None: - raise JobLookupError(job_id) - - index = self._get_job_index(timestamp, job_id) - del self._jobs[index] - del self._jobs_index[job.id] - - def remove_all_jobs(self): - self._jobs = [] - self._jobs_index = {} - - def shutdown(self): - self.remove_all_jobs() - - def _get_job_index(self, timestamp, job_id): - """ - Returns the index of the given job, or if it's not found, the index where the job should be - inserted based on the given timestamp. 
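The _get_job_index helper documented here is a hand-rolled binary search over (timestamp, job id) pairs, with None timestamps treated as infinity so paused jobs sort last. The same lookup could be expressed with the standard library (a hypothetical helper over a parallel list of sort keys, shown only to clarify the ordering):

    import bisect

    def job_index(sort_keys, timestamp, job_id):
        # sort_keys: ascending list of (timestamp, job_id) tuples mirroring
        # the store's internal job list; paused jobs use +inf to land last.
        key = (float('inf') if timestamp is None else timestamp, job_id)
        return bisect.bisect_left(sort_keys, key)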
- - :type timestamp: int - :type job_id: str - - """ - lo, hi = 0, len(self._jobs) - timestamp = float('inf') if timestamp is None else timestamp - while lo < hi: - mid = (lo + hi) // 2 - mid_job, mid_timestamp = self._jobs[mid] - mid_timestamp = float('inf') if mid_timestamp is None else mid_timestamp - if mid_timestamp > timestamp: - hi = mid - elif mid_timestamp < timestamp: - lo = mid + 1 - elif mid_job.id > job_id: - hi = mid - elif mid_job.id < job_id: - lo = mid + 1 - else: - return mid - - return lo diff --git a/lib/apscheduler/jobstores/mongodb.py b/lib/apscheduler/jobstores/mongodb.py deleted file mode 100644 index 7dbc3b12..00000000 --- a/lib/apscheduler/jobstores/mongodb.py +++ /dev/null @@ -1,141 +0,0 @@ -from __future__ import absolute_import -import warnings - -from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError -from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime -from apscheduler.job import Job - -try: - import cPickle as pickle -except ImportError: # pragma: nocover - import pickle - -try: - from bson.binary import Binary - from pymongo.errors import DuplicateKeyError - from pymongo import MongoClient, ASCENDING -except ImportError: # pragma: nocover - raise ImportError('MongoDBJobStore requires PyMongo installed') - - -class MongoDBJobStore(BaseJobStore): - """ - Stores jobs in a MongoDB database. Any leftover keyword arguments are directly passed to - pymongo's `MongoClient - `_. - - Plugin alias: ``mongodb`` - - :param str database: database to store jobs in - :param str collection: collection to store jobs in - :param client: a :class:`~pymongo.mongo_client.MongoClient` instance to use instead of - providing connection arguments - :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the - highest available - """ - - def __init__(self, database='apscheduler', collection='jobs', client=None, - pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): - super(MongoDBJobStore, self).__init__() - self.pickle_protocol = pickle_protocol - - if not database: - raise ValueError('The "database" parameter must not be empty') - if not collection: - raise ValueError('The "collection" parameter must not be empty') - - if client: - self.client = maybe_ref(client) - else: - connect_args.setdefault('w', 1) - self.client = MongoClient(**connect_args) - - self.collection = self.client[database][collection] - - def start(self, scheduler, alias): - super(MongoDBJobStore, self).start(scheduler, alias) - self.collection.ensure_index('next_run_time', sparse=True) - - @property - def connection(self): - warnings.warn('The "connection" member is deprecated -- use "client" instead', - DeprecationWarning) - return self.client - - def lookup_job(self, job_id): - document = self.collection.find_one(job_id, ['job_state']) - return self._reconstitute_job(document['job_state']) if document else None - - def get_due_jobs(self, now): - timestamp = datetime_to_utc_timestamp(now) - return self._get_jobs({'next_run_time': {'$lte': timestamp}}) - - def get_next_run_time(self): - document = self.collection.find_one({'next_run_time': {'$ne': None}}, - projection=['next_run_time'], - sort=[('next_run_time', ASCENDING)]) - return utc_timestamp_to_datetime(document['next_run_time']) if document else None - - def get_all_jobs(self): - jobs = self._get_jobs({}) - self._fix_paused_jobs_sorting(jobs) - return jobs - - def add_job(self, job): - try: - self.collection.insert({ - '_id': job.id, - 
'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) - }) - except DuplicateKeyError: - raise ConflictingIdError(job.id) - - def update_job(self, job): - changes = { - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) - } - result = self.collection.update({'_id': job.id}, {'$set': changes}) - if result and result['n'] == 0: - raise JobLookupError(job.id) - - def remove_job(self, job_id): - result = self.collection.remove(job_id) - if result and result['n'] == 0: - raise JobLookupError(job_id) - - def remove_all_jobs(self): - self.collection.remove() - - def shutdown(self): - self.client.close() - - def _reconstitute_job(self, job_state): - job_state = pickle.loads(job_state) - job = Job.__new__(Job) - job.__setstate__(job_state) - job._scheduler = self._scheduler - job._jobstore_alias = self._alias - return job - - def _get_jobs(self, conditions): - jobs = [] - failed_job_ids = [] - for document in self.collection.find(conditions, ['_id', 'job_state'], - sort=[('next_run_time', ASCENDING)]): - try: - jobs.append(self._reconstitute_job(document['job_state'])) - except BaseException: - self._logger.exception('Unable to restore job "%s" -- removing it', - document['_id']) - failed_job_ids.append(document['_id']) - - # Remove all the jobs we failed to restore - if failed_job_ids: - self.collection.remove({'_id': {'$in': failed_job_ids}}) - - return jobs - - def __repr__(self): - return '<%s (client=%s)>' % (self.__class__.__name__, self.client) diff --git a/lib/apscheduler/jobstores/redis.py b/lib/apscheduler/jobstores/redis.py deleted file mode 100644 index 5bb69d63..00000000 --- a/lib/apscheduler/jobstores/redis.py +++ /dev/null @@ -1,150 +0,0 @@ -from __future__ import absolute_import -from datetime import datetime - -from pytz import utc -import six - -from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError -from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime -from apscheduler.job import Job - -try: - import cPickle as pickle -except ImportError: # pragma: nocover - import pickle - -try: - from redis import Redis -except ImportError: # pragma: nocover - raise ImportError('RedisJobStore requires redis installed') - - -class RedisJobStore(BaseJobStore): - """ - Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's - :class:`~redis.StrictRedis`. 
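For reference, the Mongo-backed store above (and the Redis store that follows, in the same way) is normally wired in through the scheduler's ``jobstores`` option rather than instantiated by hand. A sketch, assuming a reachable local mongod; the database and collection names are illustrative:

    from apscheduler.jobstores.mongodb import MongoDBJobStore
    from apscheduler.schedulers.background import BackgroundScheduler

    scheduler = BackgroundScheduler(jobstores={
        'default': MongoDBJobStore(database='apscheduler', collection='jobs')
    })
    scheduler.start()  # jobs added from here on are pickled into MongoDB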
- - Plugin alias: ``redis`` - - :param int db: the database number to store jobs in - :param str jobs_key: key to store jobs in - :param str run_times_key: key to store the jobs' run times in - :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the - highest available - """ - - def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times', - pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): - super(RedisJobStore, self).__init__() - - if db is None: - raise ValueError('The "db" parameter must not be empty') - if not jobs_key: - raise ValueError('The "jobs_key" parameter must not be empty') - if not run_times_key: - raise ValueError('The "run_times_key" parameter must not be empty') - - self.pickle_protocol = pickle_protocol - self.jobs_key = jobs_key - self.run_times_key = run_times_key - self.redis = Redis(db=int(db), **connect_args) - - def lookup_job(self, job_id): - job_state = self.redis.hget(self.jobs_key, job_id) - return self._reconstitute_job(job_state) if job_state else None - - def get_due_jobs(self, now): - timestamp = datetime_to_utc_timestamp(now) - job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp) - if job_ids: - job_states = self.redis.hmget(self.jobs_key, *job_ids) - return self._reconstitute_jobs(six.moves.zip(job_ids, job_states)) - return [] - - def get_next_run_time(self): - next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True) - if next_run_time: - return utc_timestamp_to_datetime(next_run_time[0][1]) - - def get_all_jobs(self): - job_states = self.redis.hgetall(self.jobs_key) - jobs = self._reconstitute_jobs(six.iteritems(job_states)) - paused_sort_key = datetime(9999, 12, 31, tzinfo=utc) - return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key) - - def add_job(self, job): - if self.redis.hexists(self.jobs_key, job.id): - raise ConflictingIdError(job.id) - - with self.redis.pipeline() as pipe: - pipe.multi() - pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), - self.pickle_protocol)) - if job.next_run_time: - pipe.zadd(self.run_times_key, - {job.id: datetime_to_utc_timestamp(job.next_run_time)}) - - pipe.execute() - - def update_job(self, job): - if not self.redis.hexists(self.jobs_key, job.id): - raise JobLookupError(job.id) - - with self.redis.pipeline() as pipe: - pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), - self.pickle_protocol)) - if job.next_run_time: - pipe.zadd(self.run_times_key, - {job.id: datetime_to_utc_timestamp(job.next_run_time)}) - else: - pipe.zrem(self.run_times_key, job.id) - - pipe.execute() - - def remove_job(self, job_id): - if not self.redis.hexists(self.jobs_key, job_id): - raise JobLookupError(job_id) - - with self.redis.pipeline() as pipe: - pipe.hdel(self.jobs_key, job_id) - pipe.zrem(self.run_times_key, job_id) - pipe.execute() - - def remove_all_jobs(self): - with self.redis.pipeline() as pipe: - pipe.delete(self.jobs_key) - pipe.delete(self.run_times_key) - pipe.execute() - - def shutdown(self): - self.redis.connection_pool.disconnect() - - def _reconstitute_job(self, job_state): - job_state = pickle.loads(job_state) - job = Job.__new__(Job) - job.__setstate__(job_state) - job._scheduler = self._scheduler - job._jobstore_alias = self._alias - return job - - def _reconstitute_jobs(self, job_states): - jobs = [] - failed_job_ids = [] - for job_id, job_state in job_states: - try: - jobs.append(self._reconstitute_job(job_state)) - except BaseException: - 
self._logger.exception('Unable to restore job "%s" -- removing it', job_id) - failed_job_ids.append(job_id) - - # Remove all the jobs we failed to restore - if failed_job_ids: - with self.redis.pipeline() as pipe: - pipe.hdel(self.jobs_key, *failed_job_ids) - pipe.zrem(self.run_times_key, *failed_job_ids) - pipe.execute() - - return jobs - - def __repr__(self): - return '<%s>' % self.__class__.__name__ diff --git a/lib/apscheduler/jobstores/rethinkdb.py b/lib/apscheduler/jobstores/rethinkdb.py deleted file mode 100644 index d8a78cde..00000000 --- a/lib/apscheduler/jobstores/rethinkdb.py +++ /dev/null @@ -1,155 +0,0 @@ -from __future__ import absolute_import - -from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError -from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime -from apscheduler.job import Job - -try: - import cPickle as pickle -except ImportError: # pragma: nocover - import pickle - -try: - from rethinkdb import RethinkDB -except ImportError: # pragma: nocover - raise ImportError('RethinkDBJobStore requires rethinkdb installed') - - -class RethinkDBJobStore(BaseJobStore): - """ - Stores jobs in a RethinkDB database. Any leftover keyword arguments are directly passed to - rethinkdb's `RethinkdbClient `_. - - Plugin alias: ``rethinkdb`` - - :param str database: database to store jobs in - :param str collection: collection to store jobs in - :param client: a :class:`rethinkdb.net.Connection` instance to use instead of providing - connection arguments - :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the - highest available - """ - - def __init__(self, database='apscheduler', table='jobs', client=None, - pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): - super(RethinkDBJobStore, self).__init__() - - if not database: - raise ValueError('The "database" parameter must not be empty') - if not table: - raise ValueError('The "table" parameter must not be empty') - - self.database = database - self.table_name = table - self.table = None - self.client = client - self.pickle_protocol = pickle_protocol - self.connect_args = connect_args - self.r = RethinkDB() - self.conn = None - - def start(self, scheduler, alias): - super(RethinkDBJobStore, self).start(scheduler, alias) - - if self.client: - self.conn = maybe_ref(self.client) - else: - self.conn = self.r.connect(db=self.database, **self.connect_args) - - if self.database not in self.r.db_list().run(self.conn): - self.r.db_create(self.database).run(self.conn) - - if self.table_name not in self.r.table_list().run(self.conn): - self.r.table_create(self.table_name).run(self.conn) - - if 'next_run_time' not in self.r.table(self.table_name).index_list().run(self.conn): - self.r.table(self.table_name).index_create('next_run_time').run(self.conn) - - self.table = self.r.db(self.database).table(self.table_name) - - def lookup_job(self, job_id): - results = list(self.table.get_all(job_id).pluck('job_state').run(self.conn)) - return self._reconstitute_job(results[0]['job_state']) if results else None - - def get_due_jobs(self, now): - return self._get_jobs(self.r.row['next_run_time'] <= datetime_to_utc_timestamp(now)) - - def get_next_run_time(self): - results = list( - self.table - .filter(self.r.row['next_run_time'] != None) # noqa - .order_by(self.r.asc('next_run_time')) - .map(lambda x: x['next_run_time']) - .limit(1) - .run(self.conn) - ) - return utc_timestamp_to_datetime(results[0]) if results else None - - def 
get_all_jobs(self): - jobs = self._get_jobs() - self._fix_paused_jobs_sorting(jobs) - return jobs - - def add_job(self, job): - job_dict = { - 'id': job.id, - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) - } - results = self.table.insert(job_dict).run(self.conn) - if results['errors'] > 0: - raise ConflictingIdError(job.id) - - def update_job(self, job): - changes = { - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) - } - results = self.table.get_all(job.id).update(changes).run(self.conn) - skipped = False in map(lambda x: results[x] == 0, results.keys()) - if results['skipped'] > 0 or results['errors'] > 0 or not skipped: - raise JobLookupError(job.id) - - def remove_job(self, job_id): - results = self.table.get_all(job_id).delete().run(self.conn) - if results['deleted'] + results['skipped'] != 1: - raise JobLookupError(job_id) - - def remove_all_jobs(self): - self.table.delete().run(self.conn) - - def shutdown(self): - self.conn.close() - - def _reconstitute_job(self, job_state): - job_state = pickle.loads(job_state) - job = Job.__new__(Job) - job.__setstate__(job_state) - job._scheduler = self._scheduler - job._jobstore_alias = self._alias - return job - - def _get_jobs(self, predicate=None): - jobs = [] - failed_job_ids = [] - query = (self.table.filter(self.r.row['next_run_time'] != None).filter(predicate) # noqa - if predicate else self.table) - query = query.order_by('next_run_time', 'id').pluck('id', 'job_state') - - for document in query.run(self.conn): - try: - jobs.append(self._reconstitute_job(document['job_state'])) - except Exception: - self._logger.exception('Unable to restore job "%s" -- removing it', document['id']) - failed_job_ids.append(document['id']) - - # Remove all the jobs we failed to restore - if failed_job_ids: - self.r.expr(failed_job_ids).for_each( - lambda job_id: self.table.get_all(job_id).delete()).run(self.conn) - - return jobs - - def __repr__(self): - connection = self.conn - return '<%s (connection=%s)>' % (self.__class__.__name__, connection) diff --git a/lib/apscheduler/jobstores/sqlalchemy.py b/lib/apscheduler/jobstores/sqlalchemy.py deleted file mode 100644 index fecbd834..00000000 --- a/lib/apscheduler/jobstores/sqlalchemy.py +++ /dev/null @@ -1,154 +0,0 @@ -from __future__ import absolute_import - -from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError -from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime -from apscheduler.job import Job - -try: - import cPickle as pickle -except ImportError: # pragma: nocover - import pickle - -try: - from sqlalchemy import ( - create_engine, Table, Column, MetaData, Unicode, Float, LargeBinary, select) - from sqlalchemy.exc import IntegrityError - from sqlalchemy.sql.expression import null -except ImportError: # pragma: nocover - raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed') - - -class SQLAlchemyJobStore(BaseJobStore): - """ - Stores jobs in a database table using SQLAlchemy. - The table will be created if it doesn't exist in the database. 
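All of the persistent stores in this section share one deserialization pattern: load the pickled ``__getstate__`` dict, allocate a Job without running ``__init__``, restore its state, then re-attach the runtime-only references. Condensed into a standalone sketch (mirroring the ``_reconstitute_job`` methods above, not new behavior):

    import pickle

    from apscheduler.job import Job

    def reconstitute(job_state_blob, scheduler, alias):
        job = Job.__new__(Job)                     # bypass __init__
        job.__setstate__(pickle.loads(job_state_blob))
        job._scheduler = scheduler                 # runtime-only, never pickled
        job._jobstore_alias = alias
        return job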
- - Plugin alias: ``sqlalchemy`` - - :param str url: connection string (see - :ref:`SQLAlchemy documentation ` on this) - :param engine: an SQLAlchemy :class:`~sqlalchemy.engine.Engine` to use instead of creating a - new one based on ``url`` - :param str tablename: name of the table to store jobs in - :param metadata: a :class:`~sqlalchemy.schema.MetaData` instance to use instead of creating a - new one - :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the - highest available - :param str tableschema: name of the (existing) schema in the target database where the table - should be - :param dict engine_options: keyword arguments to :func:`~sqlalchemy.create_engine` - (ignored if ``engine`` is given) - """ - - def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None, - pickle_protocol=pickle.HIGHEST_PROTOCOL, tableschema=None, engine_options=None): - super(SQLAlchemyJobStore, self).__init__() - self.pickle_protocol = pickle_protocol - metadata = maybe_ref(metadata) or MetaData() - - if engine: - self.engine = maybe_ref(engine) - elif url: - self.engine = create_engine(url, **(engine_options or {})) - else: - raise ValueError('Need either "engine" or "url" defined') - - # 191 = max key length in MySQL for InnoDB/utf8mb4 tables, - # 25 = precision that translates to an 8-byte float - self.jobs_t = Table( - tablename, metadata, - Column('id', Unicode(191, _warn_on_bytestring=False), primary_key=True), - Column('next_run_time', Float(25), index=True), - Column('job_state', LargeBinary, nullable=False), - schema=tableschema - ) - - def start(self, scheduler, alias): - super(SQLAlchemyJobStore, self).start(scheduler, alias) - self.jobs_t.create(self.engine, True) - - def lookup_job(self, job_id): - selectable = select([self.jobs_t.c.job_state]).where(self.jobs_t.c.id == job_id) - job_state = self.engine.execute(selectable).scalar() - return self._reconstitute_job(job_state) if job_state else None - - def get_due_jobs(self, now): - timestamp = datetime_to_utc_timestamp(now) - return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp) - - def get_next_run_time(self): - selectable = select([self.jobs_t.c.next_run_time]).\ - where(self.jobs_t.c.next_run_time != null()).\ - order_by(self.jobs_t.c.next_run_time).limit(1) - next_run_time = self.engine.execute(selectable).scalar() - return utc_timestamp_to_datetime(next_run_time) - - def get_all_jobs(self): - jobs = self._get_jobs() - self._fix_paused_jobs_sorting(jobs) - return jobs - - def add_job(self, job): - insert = self.jobs_t.insert().values(**{ - 'id': job.id, - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol) - }) - try: - self.engine.execute(insert) - except IntegrityError: - raise ConflictingIdError(job.id) - - def update_job(self, job): - update = self.jobs_t.update().values(**{ - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol) - }).where(self.jobs_t.c.id == job.id) - result = self.engine.execute(update) - if result.rowcount == 0: - raise JobLookupError(job.id) - - def remove_job(self, job_id): - delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id) - result = self.engine.execute(delete) - if result.rowcount == 0: - raise JobLookupError(job_id) - - def remove_all_jobs(self): - delete = self.jobs_t.delete() - self.engine.execute(delete) - - def shutdown(self): - self.engine.dispose() - - def 
_reconstitute_job(self, job_state): - job_state = pickle.loads(job_state) - job_state['jobstore'] = self - job = Job.__new__(Job) - job.__setstate__(job_state) - job._scheduler = self._scheduler - job._jobstore_alias = self._alias - return job - - def _get_jobs(self, *conditions): - jobs = [] - selectable = select([self.jobs_t.c.id, self.jobs_t.c.job_state]).\ - order_by(self.jobs_t.c.next_run_time) - selectable = selectable.where(*conditions) if conditions else selectable - failed_job_ids = set() - for row in self.engine.execute(selectable): - try: - jobs.append(self._reconstitute_job(row.job_state)) - except BaseException: - self._logger.exception('Unable to restore job "%s" -- removing it', row.id) - failed_job_ids.add(row.id) - - # Remove all the jobs we failed to restore - if failed_job_ids: - delete = self.jobs_t.delete().where(self.jobs_t.c.id.in_(failed_job_ids)) - self.engine.execute(delete) - - return jobs - - def __repr__(self): - return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url) diff --git a/lib/apscheduler/jobstores/zookeeper.py b/lib/apscheduler/jobstores/zookeeper.py deleted file mode 100644 index 2cca83e8..00000000 --- a/lib/apscheduler/jobstores/zookeeper.py +++ /dev/null @@ -1,179 +0,0 @@ -from __future__ import absolute_import - -import os -from datetime import datetime - -from pytz import utc -from kazoo.exceptions import NoNodeError, NodeExistsError - -from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError -from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime -from apscheduler.job import Job - -try: - import cPickle as pickle -except ImportError: # pragma: nocover - import pickle - -try: - from kazoo.client import KazooClient -except ImportError: # pragma: nocover - raise ImportError('ZooKeeperJobStore requires Kazoo installed') - - -class ZooKeeperJobStore(BaseJobStore): - """ - Stores jobs in a ZooKeeper tree. Any leftover keyword arguments are directly passed to - kazoo's `KazooClient - `_. 
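The SQLAlchemy store above is the most common persistence choice; in the simplest case it only needs a database URL. A sketch (the SQLite file name is illustrative):

    from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
    from apscheduler.schedulers.background import BackgroundScheduler

    scheduler = BackgroundScheduler(jobstores={
        'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
    })
    scheduler.start()  # creates the jobs table on first start if missing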
- - Plugin alias: ``zookeeper`` - - :param str path: path to store jobs in - :param client: a :class:`~kazoo.client.KazooClient` instance to use instead of - providing connection arguments - :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the - highest available - """ - - def __init__(self, path='/apscheduler', client=None, close_connection_on_exit=False, - pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): - super(ZooKeeperJobStore, self).__init__() - self.pickle_protocol = pickle_protocol - self.close_connection_on_exit = close_connection_on_exit - - if not path: - raise ValueError('The "path" parameter must not be empty') - - self.path = path - - if client: - self.client = maybe_ref(client) - else: - self.client = KazooClient(**connect_args) - self._ensured_path = False - - def _ensure_paths(self): - if not self._ensured_path: - self.client.ensure_path(self.path) - self._ensured_path = True - - def start(self, scheduler, alias): - super(ZooKeeperJobStore, self).start(scheduler, alias) - if not self.client.connected: - self.client.start() - - def lookup_job(self, job_id): - self._ensure_paths() - node_path = os.path.join(self.path, job_id) - try: - content, _ = self.client.get(node_path) - doc = pickle.loads(content) - job = self._reconstitute_job(doc['job_state']) - return job - except BaseException: - return None - - def get_due_jobs(self, now): - timestamp = datetime_to_utc_timestamp(now) - jobs = [job_def['job'] for job_def in self._get_jobs() - if job_def['next_run_time'] is not None and job_def['next_run_time'] <= timestamp] - return jobs - - def get_next_run_time(self): - next_runs = [job_def['next_run_time'] for job_def in self._get_jobs() - if job_def['next_run_time'] is not None] - return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None - - def get_all_jobs(self): - jobs = [job_def['job'] for job_def in self._get_jobs()] - self._fix_paused_jobs_sorting(jobs) - return jobs - - def add_job(self, job): - self._ensure_paths() - node_path = os.path.join(self.path, str(job.id)) - value = { - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': job.__getstate__() - } - data = pickle.dumps(value, self.pickle_protocol) - try: - self.client.create(node_path, value=data) - except NodeExistsError: - raise ConflictingIdError(job.id) - - def update_job(self, job): - self._ensure_paths() - node_path = os.path.join(self.path, str(job.id)) - changes = { - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': job.__getstate__() - } - data = pickle.dumps(changes, self.pickle_protocol) - try: - self.client.set(node_path, value=data) - except NoNodeError: - raise JobLookupError(job.id) - - def remove_job(self, job_id): - self._ensure_paths() - node_path = os.path.join(self.path, str(job_id)) - try: - self.client.delete(node_path) - except NoNodeError: - raise JobLookupError(job_id) - - def remove_all_jobs(self): - try: - self.client.delete(self.path, recursive=True) - except NoNodeError: - pass - self._ensured_path = False - - def shutdown(self): - if self.close_connection_on_exit: - self.client.stop() - self.client.close() - - def _reconstitute_job(self, job_state): - job_state = job_state - job = Job.__new__(Job) - job.__setstate__(job_state) - job._scheduler = self._scheduler - job._jobstore_alias = self._alias - return job - - def _get_jobs(self): - self._ensure_paths() - jobs = [] - failed_job_ids = [] - all_ids = self.client.get_children(self.path) - for node_name in 
all_ids: - try: - node_path = os.path.join(self.path, node_name) - content, _ = self.client.get(node_path) - doc = pickle.loads(content) - job_def = { - 'job_id': node_name, - 'next_run_time': doc['next_run_time'] if doc['next_run_time'] else None, - 'job_state': doc['job_state'], - 'job': self._reconstitute_job(doc['job_state']), - 'creation_time': _.ctime - } - jobs.append(job_def) - except BaseException: - self._logger.exception('Unable to restore job "%s" -- removing it' % node_name) - failed_job_ids.append(node_name) - - # Remove all the jobs we failed to restore - if failed_job_ids: - for failed_id in failed_job_ids: - self.remove_job(failed_id) - paused_sort_key = datetime(9999, 12, 31, tzinfo=utc) - return sorted(jobs, key=lambda job_def: (job_def['job'].next_run_time or paused_sort_key, - job_def['creation_time'])) - - def __repr__(self): - self._logger.exception('<%s (client=%s)>' % (self.__class__.__name__, self.client)) - return '<%s (client=%s)>' % (self.__class__.__name__, self.client) diff --git a/lib/apscheduler/schedulers/__init__.py b/lib/apscheduler/schedulers/__init__.py deleted file mode 100644 index bd8a7900..00000000 --- a/lib/apscheduler/schedulers/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -class SchedulerAlreadyRunningError(Exception): - """Raised when attempting to start or configure the scheduler when it's already running.""" - - def __str__(self): - return 'Scheduler is already running' - - -class SchedulerNotRunningError(Exception): - """Raised when attempting to shutdown the scheduler when it's not running.""" - - def __str__(self): - return 'Scheduler is not running' diff --git a/lib/apscheduler/schedulers/asyncio.py b/lib/apscheduler/schedulers/asyncio.py deleted file mode 100644 index 289ef13f..00000000 --- a/lib/apscheduler/schedulers/asyncio.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import absolute_import -from functools import wraps, partial - -from apscheduler.schedulers.base import BaseScheduler -from apscheduler.util import maybe_ref - -try: - import asyncio -except ImportError: # pragma: nocover - try: - import trollius as asyncio - except ImportError: - raise ImportError( - 'AsyncIOScheduler requires either Python 3.4 or the asyncio package installed') - - -def run_in_event_loop(func): - @wraps(func) - def wrapper(self, *args, **kwargs): - wrapped = partial(func, self, *args, **kwargs) - self._eventloop.call_soon_threadsafe(wrapped) - return wrapper - - -class AsyncIOScheduler(BaseScheduler): - """ - A scheduler that runs on an asyncio (:pep:`3156`) event loop. - - The default executor can run jobs based on native coroutines (``async def``). 
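A minimal sketch of driving the AsyncIOScheduler above with a coroutine job (Python 3 with asyncio assumed; the job itself is illustrative):

    import asyncio

    from apscheduler.schedulers.asyncio import AsyncIOScheduler

    async def tick():
        print('tick')

    scheduler = AsyncIOScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()                      # schedules work on the event loop
    asyncio.get_event_loop().run_forever()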
- - Extra options: - - ============== ============================================================= - ``event_loop`` AsyncIO event loop to use (defaults to the global event loop) - ============== ============================================================= - """ - - _eventloop = None - _timeout = None - - @run_in_event_loop - def shutdown(self, wait=True): - super(AsyncIOScheduler, self).shutdown(wait) - self._stop_timer() - - def _configure(self, config): - self._eventloop = maybe_ref(config.pop('event_loop', None)) or asyncio.get_event_loop() - super(AsyncIOScheduler, self)._configure(config) - - def _start_timer(self, wait_seconds): - self._stop_timer() - if wait_seconds is not None: - self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup) - - def _stop_timer(self): - if self._timeout: - self._timeout.cancel() - del self._timeout - - @run_in_event_loop - def wakeup(self): - self._stop_timer() - wait_seconds = self._process_jobs() - self._start_timer(wait_seconds) - - def _create_default_executor(self): - from apscheduler.executors.asyncio import AsyncIOExecutor - return AsyncIOExecutor() diff --git a/lib/apscheduler/schedulers/background.py b/lib/apscheduler/schedulers/background.py deleted file mode 100644 index 03f29822..00000000 --- a/lib/apscheduler/schedulers/background.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import absolute_import - -from threading import Thread, Event - -from apscheduler.schedulers.base import BaseScheduler -from apscheduler.schedulers.blocking import BlockingScheduler -from apscheduler.util import asbool - - -class BackgroundScheduler(BlockingScheduler): - """ - A scheduler that runs in the background using a separate thread - (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately). 
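Because ``start()`` returns immediately here, the calling thread has to keep the process alive itself. The canonical usage sketch (the job is illustrative):

    import time

    from apscheduler.schedulers.background import BackgroundScheduler

    def heartbeat():
        print('still alive')

    scheduler = BackgroundScheduler()
    scheduler.add_job(heartbeat, 'interval', seconds=10)
    scheduler.start()   # returns at once; jobs run on the APScheduler thread
    try:
        while True:
            time.sleep(1)
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()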
- - Extra options: - - ========== ============================================================================= - ``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``, see - `the documentation - `_ - for further details) - ========== ============================================================================= - """ - - _thread = None - - def _configure(self, config): - self._daemon = asbool(config.pop('daemon', True)) - super(BackgroundScheduler, self)._configure(config) - - def start(self, *args, **kwargs): - self._event = Event() - BaseScheduler.start(self, *args, **kwargs) - self._thread = Thread(target=self._main_loop, name='APScheduler') - self._thread.daemon = self._daemon - self._thread.start() - - def shutdown(self, *args, **kwargs): - super(BackgroundScheduler, self).shutdown(*args, **kwargs) - self._thread.join() - del self._thread diff --git a/lib/apscheduler/schedulers/base.py b/lib/apscheduler/schedulers/base.py deleted file mode 100644 index 8e711549..00000000 --- a/lib/apscheduler/schedulers/base.py +++ /dev/null @@ -1,1022 +0,0 @@ -from __future__ import print_function - -from abc import ABCMeta, abstractmethod -from threading import RLock -from datetime import datetime, timedelta -from logging import getLogger -import warnings -import sys - -from pkg_resources import iter_entry_points -from tzlocal import get_localzone -import six - -from apscheduler.schedulers import SchedulerAlreadyRunningError, SchedulerNotRunningError -from apscheduler.executors.base import MaxInstancesReachedError, BaseExecutor -from apscheduler.executors.pool import ThreadPoolExecutor -from apscheduler.jobstores.base import ConflictingIdError, JobLookupError, BaseJobStore -from apscheduler.jobstores.memory import MemoryJobStore -from apscheduler.job import Job -from apscheduler.triggers.base import BaseTrigger -from apscheduler.util import ( - asbool, asint, astimezone, maybe_ref, timedelta_seconds, undefined, TIMEOUT_MAX) -from apscheduler.events import ( - SchedulerEvent, JobEvent, JobSubmissionEvent, EVENT_SCHEDULER_START, EVENT_SCHEDULER_SHUTDOWN, - EVENT_JOBSTORE_ADDED, EVENT_JOBSTORE_REMOVED, EVENT_ALL, EVENT_JOB_MODIFIED, EVENT_JOB_REMOVED, - EVENT_JOB_ADDED, EVENT_EXECUTOR_ADDED, EVENT_EXECUTOR_REMOVED, EVENT_ALL_JOBS_REMOVED, - EVENT_JOB_SUBMITTED, EVENT_JOB_MAX_INSTANCES, EVENT_SCHEDULER_RESUMED, EVENT_SCHEDULER_PAUSED) - -try: - from collections.abc import MutableMapping -except ImportError: - from collections import MutableMapping - -#: constant indicating a scheduler's stopped state -STATE_STOPPED = 0 -#: constant indicating a scheduler's running state (started and processing jobs) -STATE_RUNNING = 1 -#: constant indicating a scheduler's paused state (started but not processing jobs) -STATE_PAUSED = 2 - - -class BaseScheduler(six.with_metaclass(ABCMeta)): - """ - Abstract base class for all schedulers. 
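The three state constants above describe the full scheduler lifecycle and are observable on any scheduler instance. A sketch of the transitions, using BackgroundScheduler purely as a concrete subclass:

    from apscheduler.schedulers.background import BackgroundScheduler
    from apscheduler.schedulers.base import (
        STATE_PAUSED, STATE_RUNNING, STATE_STOPPED)

    scheduler = BackgroundScheduler()
    assert scheduler.state == STATE_STOPPED
    scheduler.start(paused=True)    # started, but not processing jobs yet
    assert scheduler.state == STATE_PAUSED
    scheduler.resume()
    assert scheduler.state == STATE_RUNNING
    scheduler.shutdown()
    assert scheduler.state == STATE_STOPPED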
- - Takes the following keyword arguments: - - :param str|logging.Logger logger: logger to use for the scheduler's logging (defaults to - apscheduler.scheduler) - :param str|datetime.tzinfo timezone: the default time zone (defaults to the local timezone) - :param int|float jobstore_retry_interval: the minimum number of seconds to wait between - retries in the scheduler's main loop if the job store raises an exception when getting - the list of due jobs - :param dict job_defaults: default values for newly added jobs - :param dict jobstores: a dictionary of job store alias -> job store instance or configuration - dict - :param dict executors: a dictionary of executor alias -> executor instance or configuration - dict - - :ivar int state: current running state of the scheduler (one of the following constants from - ``apscheduler.schedulers.base``: ``STATE_STOPPED``, ``STATE_RUNNING``, ``STATE_PAUSED``) - - .. seealso:: :ref:`scheduler-config` - """ - - _trigger_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.triggers')) - _trigger_classes = {} - _executor_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.executors')) - _executor_classes = {} - _jobstore_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.jobstores')) - _jobstore_classes = {} - - # - # Public API - # - - def __init__(self, gconfig={}, **options): - super(BaseScheduler, self).__init__() - self._executors = {} - self._executors_lock = self._create_lock() - self._jobstores = {} - self._jobstores_lock = self._create_lock() - self._listeners = [] - self._listeners_lock = self._create_lock() - self._pending_jobs = [] - self.state = STATE_STOPPED - self.configure(gconfig, **options) - - def configure(self, gconfig={}, prefix='apscheduler.', **options): - """ - Reconfigures the scheduler with the given options. - - Can only be done when the scheduler isn't running. - - :param dict gconfig: a "global" configuration dictionary whose values can be overridden by - keyword arguments to this method - :param str|unicode prefix: pick only those keys from ``gconfig`` that are prefixed with - this string (pass an empty string or ``None`` to use all keys) - :raises SchedulerAlreadyRunningError: if the scheduler is already running - - """ - if self.state != STATE_STOPPED: - raise SchedulerAlreadyRunningError - - # If a non-empty prefix was given, strip it from the keys in the - # global configuration dict - if prefix: - prefixlen = len(prefix) - gconfig = dict((key[prefixlen:], value) for key, value in six.iteritems(gconfig) - if key.startswith(prefix)) - - # Create a structure from the dotted options - # (e.g. "a.b.c = d" -> {'a': {'b': {'c': 'd'}}}) - config = {} - for key, value in six.iteritems(gconfig): - parts = key.split('.') - parent = config - key = parts.pop(0) - while parts: - parent = parent.setdefault(key, {}) - key = parts.pop(0) - parent[key] = value - - # Override any options with explicit keyword arguments - config.update(options) - self._configure(config) - - def start(self, paused=False): - """ - Start the configured executors and job stores and begin processing scheduled jobs. 
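The dotted-key unpacking in ``configure()`` above means a flat, prefixed dict and a nested options structure are interchangeable. A sketch of the flat spelling (values are illustrative):

    from apscheduler.schedulers.background import BackgroundScheduler

    # 'apscheduler.' is stripped, then 'job_defaults.coalesce' becomes
    # {'job_defaults': {'coalesce': 'false'}} via the dotted-key loop.
    scheduler = BackgroundScheduler({
        'apscheduler.executors.default': {'type': 'threadpool', 'max_workers': '20'},
        'apscheduler.job_defaults.coalesce': 'false',
        'apscheduler.job_defaults.max_instances': '3',
        'apscheduler.timezone': 'UTC',
    })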
- - :param bool paused: if ``True``, don't start job processing until :meth:`resume` is called - :raises SchedulerAlreadyRunningError: if the scheduler is already running - :raises RuntimeError: if running under uWSGI with threads disabled - - """ - if self.state != STATE_STOPPED: - raise SchedulerAlreadyRunningError - - self._check_uwsgi() - - with self._executors_lock: - # Create a default executor if nothing else is configured - if 'default' not in self._executors: - self.add_executor(self._create_default_executor(), 'default') - - # Start all the executors - for alias, executor in six.iteritems(self._executors): - executor.start(self, alias) - - with self._jobstores_lock: - # Create a default job store if nothing else is configured - if 'default' not in self._jobstores: - self.add_jobstore(self._create_default_jobstore(), 'default') - - # Start all the job stores - for alias, store in six.iteritems(self._jobstores): - store.start(self, alias) - - # Schedule all pending jobs - for job, jobstore_alias, replace_existing in self._pending_jobs: - self._real_add_job(job, jobstore_alias, replace_existing) - del self._pending_jobs[:] - - self.state = STATE_PAUSED if paused else STATE_RUNNING - self._logger.info('Scheduler started') - self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_START)) - - if not paused: - self.wakeup() - - @abstractmethod - def shutdown(self, wait=True): - """ - Shuts down the scheduler, along with its executors and job stores. - - Does not interrupt any currently running jobs. - - :param bool wait: ``True`` to wait until all currently executing jobs have finished - :raises SchedulerNotRunningError: if the scheduler has not been started yet - - """ - if self.state == STATE_STOPPED: - raise SchedulerNotRunningError - - self.state = STATE_STOPPED - - # Shut down all executors - with self._executors_lock: - for executor in six.itervalues(self._executors): - executor.shutdown(wait) - - # Shut down all job stores - with self._jobstores_lock: - for jobstore in six.itervalues(self._jobstores): - jobstore.shutdown() - - self._logger.info('Scheduler has been shut down') - self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)) - - def pause(self): - """ - Pause job processing in the scheduler. - - This will prevent the scheduler from waking up to do job processing until :meth:`resume` - is called. It will not however stop any already running job processing. - - """ - if self.state == STATE_STOPPED: - raise SchedulerNotRunningError - elif self.state == STATE_RUNNING: - self.state = STATE_PAUSED - self._logger.info('Paused scheduler job processing') - self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_PAUSED)) - - def resume(self): - """Resume job processing in the scheduler.""" - if self.state == STATE_STOPPED: - raise SchedulerNotRunningError - elif self.state == STATE_PAUSED: - self.state = STATE_RUNNING - self._logger.info('Resumed scheduler job processing') - self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_RESUMED)) - self.wakeup() - - @property - def running(self): - """ - Return ``True`` if the scheduler has been started. - - This is a shortcut for ``scheduler.state != STATE_STOPPED``. - - """ - return self.state != STATE_STOPPED - - def add_executor(self, executor, alias='default', **executor_opts): - """ - Adds an executor to this scheduler. - - Any extra keyword arguments will be passed to the executor plugin's constructor, assuming - that the first argument is the name of an executor plugin. 
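A sketch of ``add_executor`` with a plugin name, as described above; the alias lets individual jobs opt into the extra executor (the job function is hypothetical):

    from apscheduler.schedulers.background import BackgroundScheduler

    def cpu_heavy_task():   # hypothetical; must be picklable for processpool
        sum(i * i for i in range(10 ** 6))

    scheduler = BackgroundScheduler()
    scheduler.add_executor('processpool', alias='processes', max_workers=4)
    scheduler.add_job(cpu_heavy_task, 'interval', minutes=15, executor='processes')
    scheduler.start()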
- - :param str|unicode|apscheduler.executors.base.BaseExecutor executor: either an executor - instance or the name of an executor plugin - :param str|unicode alias: alias for the scheduler - :raises ValueError: if there is already an executor by the given alias - - """ - with self._executors_lock: - if alias in self._executors: - raise ValueError('This scheduler already has an executor by the alias of "%s"' % - alias) - - if isinstance(executor, BaseExecutor): - self._executors[alias] = executor - elif isinstance(executor, six.string_types): - self._executors[alias] = executor = self._create_plugin_instance( - 'executor', executor, executor_opts) - else: - raise TypeError('Expected an executor instance or a string, got %s instead' % - executor.__class__.__name__) - - # Start the executor right away if the scheduler is running - if self.state != STATE_STOPPED: - executor.start(self, alias) - - self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_ADDED, alias)) - - def remove_executor(self, alias, shutdown=True): - """ - Removes the executor by the given alias from this scheduler. - - :param str|unicode alias: alias of the executor - :param bool shutdown: ``True`` to shut down the executor after - removing it - - """ - with self._executors_lock: - executor = self._lookup_executor(alias) - del self._executors[alias] - - if shutdown: - executor.shutdown() - - self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_REMOVED, alias)) - - def add_jobstore(self, jobstore, alias='default', **jobstore_opts): - """ - Adds a job store to this scheduler. - - Any extra keyword arguments will be passed to the job store plugin's constructor, assuming - that the first argument is the name of a job store plugin. - - :param str|unicode|apscheduler.jobstores.base.BaseJobStore jobstore: job store to be added - :param str|unicode alias: alias for the job store - :raises ValueError: if there is already a job store by the given alias - - """ - with self._jobstores_lock: - if alias in self._jobstores: - raise ValueError('This scheduler already has a job store by the alias of "%s"' % - alias) - - if isinstance(jobstore, BaseJobStore): - self._jobstores[alias] = jobstore - elif isinstance(jobstore, six.string_types): - self._jobstores[alias] = jobstore = self._create_plugin_instance( - 'jobstore', jobstore, jobstore_opts) - else: - raise TypeError('Expected a job store instance or a string, got %s instead' % - jobstore.__class__.__name__) - - # Start the job store right away if the scheduler isn't stopped - if self.state != STATE_STOPPED: - jobstore.start(self, alias) - - # Notify listeners that a new job store has been added - self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_ADDED, alias)) - - # Notify the scheduler so it can scan the new job store for jobs - if self.state != STATE_STOPPED: - self.wakeup() - - def remove_jobstore(self, alias, shutdown=True): - """ - Removes the job store by the given alias from this scheduler. - - :param str|unicode alias: alias of the job store - :param bool shutdown: ``True`` to shut down the job store after removing it - - """ - with self._jobstores_lock: - jobstore = self._lookup_jobstore(alias) - del self._jobstores[alias] - - if shutdown: - jobstore.shutdown() - - self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_REMOVED, alias)) - - def add_listener(self, callback, mask=EVENT_ALL): - """ - add_listener(callback, mask=EVENT_ALL) - - Adds a listener for scheduler events. - - When a matching event occurs, ``callback`` is executed with the event object as its - sole argument. 
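A sketch of a filtered listener as documented above, assuming a ``scheduler`` instance already exists; the mask constants live in ``apscheduler.events``:

    from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED

    def on_job_done(event):
        # JobExecutionEvent carries job_id and, on failure, the exception.
        if event.exception:
            print('job %s failed: %s' % (event.job_id, event.exception))
        else:
            print('job %s ran' % event.job_id)

    scheduler.add_listener(on_job_done, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)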
If the ``mask`` parameter is not provided, the callback will receive events - of all types. - - :param callback: any callable that takes one argument - :param int mask: bitmask that indicates which events should be - listened to - - .. seealso:: :mod:`apscheduler.events` - .. seealso:: :ref:`scheduler-events` - - """ - with self._listeners_lock: - self._listeners.append((callback, mask)) - - def remove_listener(self, callback): - """Removes a previously added event listener.""" - - with self._listeners_lock: - for i, (cb, _) in enumerate(self._listeners): - if callback == cb: - del self._listeners[i] - - def add_job(self, func, trigger=None, args=None, kwargs=None, id=None, name=None, - misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined, - next_run_time=undefined, jobstore='default', executor='default', - replace_existing=False, **trigger_args): - """ - add_job(func, trigger=None, args=None, kwargs=None, id=None, \ - name=None, misfire_grace_time=undefined, coalesce=undefined, \ - max_instances=undefined, next_run_time=undefined, \ - jobstore='default', executor='default', \ - replace_existing=False, **trigger_args) - - Adds the given job to the job list and wakes up the scheduler if it's already running. - - Any option that defaults to ``undefined`` will be replaced with the corresponding default - value when the job is scheduled (which happens when the scheduler is started, or - immediately if the scheduler is already running). - - The ``func`` argument can be given either as a callable object or a textual reference in - the ``package.module:some.object`` format, where the first half (separated by ``:``) is an - importable module and the second half is a reference to the callable object, relative to - the module. - - The ``trigger`` argument can either be: - #. the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``), in which case - any extra keyword arguments to this method are passed on to the trigger's constructor - #. 
an instance of a trigger class - - :param func: callable (or a textual reference to one) to run at the given time - :param str|apscheduler.triggers.base.BaseTrigger trigger: trigger that determines when - ``func`` is called - :param list|tuple args: list of positional arguments to call func with - :param dict kwargs: dict of keyword arguments to call func with - :param str|unicode id: explicit identifier for the job (for modifying it later) - :param str|unicode name: textual description of the job - :param int misfire_grace_time: seconds after the designated runtime that the job is still - allowed to be run - :param bool coalesce: run once instead of many times if the scheduler determines that the - job should be run more than once in succession - :param int max_instances: maximum number of concurrently running instances allowed for this - job - :param datetime next_run_time: when to first run the job, regardless of the trigger (pass - ``None`` to add the job as paused) - :param str|unicode jobstore: alias of the job store to store the job in - :param str|unicode executor: alias of the executor to run the job with - :param bool replace_existing: ``True`` to replace an existing job with the same ``id`` - (but retain the number of runs from the existing one) - :rtype: Job - - """ - job_kwargs = { - 'trigger': self._create_trigger(trigger, trigger_args), - 'executor': executor, - 'func': func, - 'args': tuple(args) if args is not None else (), - 'kwargs': dict(kwargs) if kwargs is not None else {}, - 'id': id, - 'name': name, - 'misfire_grace_time': misfire_grace_time, - 'coalesce': coalesce, - 'max_instances': max_instances, - 'next_run_time': next_run_time - } - job_kwargs = dict((key, value) for key, value in six.iteritems(job_kwargs) if - value is not undefined) - job = Job(self, **job_kwargs) - - # Don't really add jobs to job stores before the scheduler is up and running - with self._jobstores_lock: - if self.state == STATE_STOPPED: - self._pending_jobs.append((job, jobstore, replace_existing)) - self._logger.info('Adding job tentatively -- it will be properly scheduled when ' - 'the scheduler starts') - else: - self._real_add_job(job, jobstore, replace_existing) - - return job - - def scheduled_job(self, trigger, args=None, kwargs=None, id=None, name=None, - misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined, - next_run_time=undefined, jobstore='default', executor='default', - **trigger_args): - """ - scheduled_job(trigger, args=None, kwargs=None, id=None, \ - name=None, misfire_grace_time=undefined, \ - coalesce=undefined, max_instances=undefined, \ - next_run_time=undefined, jobstore='default', \ - executor='default',**trigger_args) - - A decorator version of :meth:`add_job`, except that ``replace_existing`` is always - ``True``. - - .. important:: The ``id`` argument must be given if scheduling a job in a persistent job - store. The scheduler cannot, however, enforce this requirement. - - """ - def inner(func): - self.add_job(func, trigger, args, kwargs, id, name, misfire_grace_time, coalesce, - max_instances, next_run_time, jobstore, executor, True, **trigger_args) - return func - return inner - - def modify_job(self, job_id, jobstore=None, **changes): - """ - Modifies the properties of a single job. - - Modifications are passed to this method as extra keyword arguments. 
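add_job(), the scheduled_job() decorator and modify_job() cover most day-to-day use of this API. A short sketch under the same assumptions as the earlier examples (myapp.tasks:send_report is a made-up textual reference):

# Textual reference in package.module:callable form, resolved at run time
scheduler.add_job('myapp.tasks:send_report', 'cron', hour=8, id='report',
                  replace_existing=True)

# Decorator form; replace_existing is implicitly True, so give it a stable id
@scheduler.scheduled_job('interval', minutes=30, id='cleanup')
def cleanup():
    pass

# Later changes are passed as keyword arguments
scheduler.modify_job('report', name='Morning report', max_instances=2)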
- - :param str|unicode job_id: the identifier of the job - :param str|unicode jobstore: alias of the job store that contains the job - :return Job: the relevant job instance - - """ - with self._jobstores_lock: - job, jobstore = self._lookup_job(job_id, jobstore) - job._modify(**changes) - if jobstore: - self._lookup_jobstore(jobstore).update_job(job) - - self._dispatch_event(JobEvent(EVENT_JOB_MODIFIED, job_id, jobstore)) - - # Wake up the scheduler since the job's next run time may have been changed - if self.state == STATE_RUNNING: - self.wakeup() - - return job - - def reschedule_job(self, job_id, jobstore=None, trigger=None, **trigger_args): - """ - Constructs a new trigger for a job and updates its next run time. - - Extra keyword arguments are passed directly to the trigger's constructor. - - :param str|unicode job_id: the identifier of the job - :param str|unicode jobstore: alias of the job store that contains the job - :param trigger: alias of the trigger type or a trigger instance - :return Job: the relevant job instance - - """ - trigger = self._create_trigger(trigger, trigger_args) - now = datetime.now(self.timezone) - next_run_time = trigger.get_next_fire_time(None, now) - return self.modify_job(job_id, jobstore, trigger=trigger, next_run_time=next_run_time) - - def pause_job(self, job_id, jobstore=None): - """ - Causes the given job not to be executed until it is explicitly resumed. - - :param str|unicode job_id: the identifier of the job - :param str|unicode jobstore: alias of the job store that contains the job - :return Job: the relevant job instance - - """ - return self.modify_job(job_id, jobstore, next_run_time=None) - - def resume_job(self, job_id, jobstore=None): - """ - Resumes the schedule of the given job, or removes the job if its schedule is finished. - - :param str|unicode job_id: the identifier of the job - :param str|unicode jobstore: alias of the job store that contains the job - :return Job|None: the relevant job instance if the job was rescheduled, or ``None`` if no - next run time could be calculated and the job was removed - - """ - with self._jobstores_lock: - job, jobstore = self._lookup_job(job_id, jobstore) - now = datetime.now(self.timezone) - next_run_time = job.trigger.get_next_fire_time(None, now) - if next_run_time: - return self.modify_job(job_id, jobstore, next_run_time=next_run_time) - else: - self.remove_job(job.id, jobstore) - - def get_jobs(self, jobstore=None, pending=None): - """ - Returns a list of pending jobs (if the scheduler hasn't been started yet) and scheduled - jobs, either from a specific job store or from all of them. - - If the scheduler has not been started yet, only pending jobs can be returned because the - job stores haven't been started yet either. 
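pause_job(), resume_job() and reschedule_job() above are thin wrappers over modify_job(); for reference, a hedged sketch reusing the 'report' job from the previous example:

scheduler.pause_job('report')     # sets next_run_time=None, as documented above
scheduler.resume_job('report')    # recomputes the next fire time from the trigger

# Build a new trigger in place; extra kwargs go to the trigger's constructor
scheduler.reschedule_job('report', trigger='cron', day_of_week='mon-fri', hour=8)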
- - :param str|unicode jobstore: alias of the job store - :param bool pending: **DEPRECATED** - :rtype: list[Job] - - """ - if pending is not None: - warnings.warn('The "pending" option is deprecated -- get_jobs() always returns ' - 'scheduled jobs if the scheduler has been started and pending jobs ' - 'otherwise', DeprecationWarning) - - with self._jobstores_lock: - jobs = [] - if self.state == STATE_STOPPED: - for job, alias, replace_existing in self._pending_jobs: - if jobstore is None or alias == jobstore: - jobs.append(job) - else: - for alias, store in six.iteritems(self._jobstores): - if jobstore is None or alias == jobstore: - jobs.extend(store.get_all_jobs()) - - return jobs - - def get_job(self, job_id, jobstore=None): - """ - Returns the Job that matches the given ``job_id``. - - :param str|unicode job_id: the identifier of the job - :param str|unicode jobstore: alias of the job store that most likely contains the job - :return: the Job by the given ID, or ``None`` if it wasn't found - :rtype: Job - - """ - with self._jobstores_lock: - try: - return self._lookup_job(job_id, jobstore)[0] - except JobLookupError: - return - - def remove_job(self, job_id, jobstore=None): - """ - Removes a job, preventing it from being run any more. - - :param str|unicode job_id: the identifier of the job - :param str|unicode jobstore: alias of the job store that contains the job - :raises JobLookupError: if the job was not found - - """ - jobstore_alias = None - with self._jobstores_lock: - # Check if the job is among the pending jobs - if self.state == STATE_STOPPED: - for i, (job, alias, replace_existing) in enumerate(self._pending_jobs): - if job.id == job_id and jobstore in (None, alias): - del self._pending_jobs[i] - jobstore_alias = alias - break - else: - # Otherwise, try to remove it from each store until it succeeds or we run out of - # stores to check - for alias, store in six.iteritems(self._jobstores): - if jobstore in (None, alias): - try: - store.remove_job(job_id) - jobstore_alias = alias - break - except JobLookupError: - continue - - if jobstore_alias is None: - raise JobLookupError(job_id) - - # Notify listeners that a job has been removed - event = JobEvent(EVENT_JOB_REMOVED, job_id, jobstore_alias) - self._dispatch_event(event) - - self._logger.info('Removed job %s', job_id) - - def remove_all_jobs(self, jobstore=None): - """ - Removes all jobs from the specified job store, or all job stores if none is given. - - :param str|unicode jobstore: alias of the job store - - """ - with self._jobstores_lock: - if self.state == STATE_STOPPED: - if jobstore: - self._pending_jobs = [pending for pending in self._pending_jobs if - pending[1] != jobstore] - else: - self._pending_jobs = [] - else: - for alias, store in six.iteritems(self._jobstores): - if jobstore in (None, alias): - store.remove_all_jobs() - - self._dispatch_event(SchedulerEvent(EVENT_ALL_JOBS_REMOVED, jobstore)) - - def print_jobs(self, jobstore=None, out=None): - """ - print_jobs(jobstore=None, out=sys.stdout) - - Prints out a textual listing of all jobs currently scheduled on either all job stores or - just a specific one. 
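For inspection and cleanup, the lookup and removal methods above combine as in this sketch (same assumed scheduler and job ids as before):

job = scheduler.get_job('report')             # None if no such job exists
for job in scheduler.get_jobs():              # pending before start(), stored after
    print(job.id, job.next_run_time)

scheduler.remove_job('cleanup')               # raises JobLookupError if missing
scheduler.remove_all_jobs(jobstore='default')
scheduler.print_jobs()                        # human-readable listing on stdout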
- - :param str|unicode jobstore: alias of the job store, ``None`` to list jobs from all stores - :param file out: a file-like object to print to (defaults to **sys.stdout** if nothing is - given) - - """ - out = out or sys.stdout - with self._jobstores_lock: - if self.state == STATE_STOPPED: - print(u'Pending jobs:', file=out) - if self._pending_jobs: - for job, jobstore_alias, replace_existing in self._pending_jobs: - if jobstore in (None, jobstore_alias): - print(u' %s' % job, file=out) - else: - print(u' No pending jobs', file=out) - else: - for alias, store in sorted(six.iteritems(self._jobstores)): - if jobstore in (None, alias): - print(u'Jobstore %s:' % alias, file=out) - jobs = store.get_all_jobs() - if jobs: - for job in jobs: - print(u' %s' % job, file=out) - else: - print(u' No scheduled jobs', file=out) - - @abstractmethod - def wakeup(self): - """ - Notifies the scheduler that there may be jobs due for execution. - Triggers :meth:`_process_jobs` to be run in an implementation specific manner. - """ - - # - # Private API - # - - def _configure(self, config): - # Set general options - self._logger = maybe_ref(config.pop('logger', None)) or getLogger('apscheduler.scheduler') - self.timezone = astimezone(config.pop('timezone', None)) or get_localzone() - self.jobstore_retry_interval = float(config.pop('jobstore_retry_interval', 10)) - - # Set the job defaults - job_defaults = config.get('job_defaults', {}) - self._job_defaults = { - 'misfire_grace_time': asint(job_defaults.get('misfire_grace_time', 1)), - 'coalesce': asbool(job_defaults.get('coalesce', True)), - 'max_instances': asint(job_defaults.get('max_instances', 1)) - } - - # Configure executors - self._executors.clear() - for alias, value in six.iteritems(config.get('executors', {})): - if isinstance(value, BaseExecutor): - self.add_executor(value, alias) - elif isinstance(value, MutableMapping): - executor_class = value.pop('class', None) - plugin = value.pop('type', None) - if plugin: - executor = self._create_plugin_instance('executor', plugin, value) - elif executor_class: - cls = maybe_ref(executor_class) - executor = cls(**value) - else: - raise ValueError( - 'Cannot create executor "%s" -- either "type" or "class" must be defined' % - alias) - - self.add_executor(executor, alias) - else: - raise TypeError( - "Expected executor instance or dict for executors['%s'], got %s instead" % - (alias, value.__class__.__name__)) - - # Configure job stores - self._jobstores.clear() - for alias, value in six.iteritems(config.get('jobstores', {})): - if isinstance(value, BaseJobStore): - self.add_jobstore(value, alias) - elif isinstance(value, MutableMapping): - jobstore_class = value.pop('class', None) - plugin = value.pop('type', None) - if plugin: - jobstore = self._create_plugin_instance('jobstore', plugin, value) - elif jobstore_class: - cls = maybe_ref(jobstore_class) - jobstore = cls(**value) - else: - raise ValueError( - 'Cannot create job store "%s" -- either "type" or "class" must be ' - 'defined' % alias) - - self.add_jobstore(jobstore, alias) - else: - raise TypeError( - "Expected job store instance or dict for jobstores['%s'], got %s instead" % - (alias, value.__class__.__name__)) - - def _create_default_executor(self): - """Creates a default executor store, specific to the particular scheduler type.""" - return ThreadPoolExecutor() - - def _create_default_jobstore(self): - """Creates a default job store, specific to the particular scheduler type.""" - return MemoryJobStore() - - def _lookup_executor(self, alias): - 
""" - Returns the executor instance by the given name from the list of executors that were added - to this scheduler. - - :type alias: str - :raises KeyError: if no executor by the given alias is not found - - """ - try: - return self._executors[alias] - except KeyError: - raise KeyError('No such executor: %s' % alias) - - def _lookup_jobstore(self, alias): - """ - Returns the job store instance by the given name from the list of job stores that were - added to this scheduler. - - :type alias: str - :raises KeyError: if no job store by the given alias is not found - - """ - try: - return self._jobstores[alias] - except KeyError: - raise KeyError('No such job store: %s' % alias) - - def _lookup_job(self, job_id, jobstore_alias): - """ - Finds a job by its ID. - - :type job_id: str - :param str jobstore_alias: alias of a job store to look in - :return tuple[Job, str]: a tuple of job, jobstore alias (jobstore alias is None in case of - a pending job) - :raises JobLookupError: if no job by the given ID is found. - - """ - if self.state == STATE_STOPPED: - # Check if the job is among the pending jobs - for job, alias, replace_existing in self._pending_jobs: - if job.id == job_id: - return job, None - else: - # Look in all job stores - for alias, store in six.iteritems(self._jobstores): - if jobstore_alias in (None, alias): - job = store.lookup_job(job_id) - if job is not None: - return job, alias - - raise JobLookupError(job_id) - - def _dispatch_event(self, event): - """ - Dispatches the given event to interested listeners. - - :param SchedulerEvent event: the event to send - - """ - with self._listeners_lock: - listeners = tuple(self._listeners) - - for cb, mask in listeners: - if event.code & mask: - try: - cb(event) - except BaseException: - self._logger.exception('Error notifying listener') - - def _check_uwsgi(self): - """Check if we're running under uWSGI with threads disabled.""" - uwsgi_module = sys.modules.get('uwsgi') - if not getattr(uwsgi_module, 'has_threads', True): - raise RuntimeError('The scheduler seems to be running under uWSGI, but threads have ' - 'been disabled. 
You must run uWSGI with the --enable-threads ' - 'option for the scheduler to work.') - - def _real_add_job(self, job, jobstore_alias, replace_existing): - """ - :param Job job: the job to add - :param bool replace_existing: ``True`` to use update_job() in case the job already exists - in the store - - """ - # Fill in undefined values with defaults - replacements = {} - for key, value in six.iteritems(self._job_defaults): - if not hasattr(job, key): - replacements[key] = value - - # Calculate the next run time if there is none defined - if not hasattr(job, 'next_run_time'): - now = datetime.now(self.timezone) - replacements['next_run_time'] = job.trigger.get_next_fire_time(None, now) - - # Apply any replacements - job._modify(**replacements) - - # Add the job to the given job store - store = self._lookup_jobstore(jobstore_alias) - try: - store.add_job(job) - except ConflictingIdError: - if replace_existing: - store.update_job(job) - else: - raise - - # Mark the job as no longer pending - job._jobstore_alias = jobstore_alias - - # Notify listeners that a new job has been added - event = JobEvent(EVENT_JOB_ADDED, job.id, jobstore_alias) - self._dispatch_event(event) - - self._logger.info('Added job "%s" to job store "%s"', job.name, jobstore_alias) - - # Notify the scheduler about the new job - if self.state == STATE_RUNNING: - self.wakeup() - - def _create_plugin_instance(self, type_, alias, constructor_kwargs): - """Creates an instance of the given plugin type, loading the plugin first if necessary.""" - plugin_container, class_container, base_class = { - 'trigger': (self._trigger_plugins, self._trigger_classes, BaseTrigger), - 'jobstore': (self._jobstore_plugins, self._jobstore_classes, BaseJobStore), - 'executor': (self._executor_plugins, self._executor_classes, BaseExecutor) - }[type_] - - try: - plugin_cls = class_container[alias] - except KeyError: - if alias in plugin_container: - plugin_cls = class_container[alias] = plugin_container[alias].load() - if not issubclass(plugin_cls, base_class): - raise TypeError('The {0} entry point does not point to a {0} class'. - format(type_)) - else: - raise LookupError('No {0} by the name "{1}" was found'.format(type_, alias)) - - return plugin_cls(**constructor_kwargs) - - def _create_trigger(self, trigger, trigger_args): - if isinstance(trigger, BaseTrigger): - return trigger - elif trigger is None: - trigger = 'date' - elif not isinstance(trigger, six.string_types): - raise TypeError('Expected a trigger instance or string, got %s instead' % - trigger.__class__.__name__) - - # Use the scheduler's time zone if nothing else is specified - trigger_args.setdefault('timezone', self.timezone) - - # Instantiate the trigger class - return self._create_plugin_instance('trigger', trigger, trigger_args) - - def _create_lock(self): - """Creates a reentrant lock object.""" - return RLock() - - def _process_jobs(self): - """ - Iterates through jobs in every jobstore, starts jobs that are due and figures out how long - to wait for the next round. - - If the ``get_due_jobs()`` call raises an exception, a new wakeup is scheduled in at least - ``jobstore_retry_interval`` seconds. 
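The _process_jobs() loop that follows is where coalesce, max_instances, misfire_grace_time and jobstore_retry_interval actually take effect. They are typically set once, at construction time; a sketch (again assuming the package's BackgroundScheduler):

from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler(
    job_defaults={
        'coalesce': True,          # collapse a backlog of due run times into one run
        'max_instances': 1,        # beyond this, submit_job() raises MaxInstancesReachedError
        'misfire_grace_time': 60,  # seconds of lateness a run may still execute with
    },
    jobstore_retry_interval=30,    # retry delay when get_due_jobs() raises
)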
- - """ - if self.state == STATE_PAUSED: - self._logger.debug('Scheduler is paused -- not processing jobs') - return None - - self._logger.debug('Looking for jobs to run') - now = datetime.now(self.timezone) - next_wakeup_time = None - events = [] - - with self._jobstores_lock: - for jobstore_alias, jobstore in six.iteritems(self._jobstores): - try: - due_jobs = jobstore.get_due_jobs(now) - except Exception as e: - # Schedule a wakeup at least in jobstore_retry_interval seconds - self._logger.warning('Error getting due jobs from job store %r: %s', - jobstore_alias, e) - retry_wakeup_time = now + timedelta(seconds=self.jobstore_retry_interval) - if not next_wakeup_time or next_wakeup_time > retry_wakeup_time: - next_wakeup_time = retry_wakeup_time - - continue - - for job in due_jobs: - # Look up the job's executor - try: - executor = self._lookup_executor(job.executor) - except BaseException: - self._logger.error( - 'Executor lookup ("%s") failed for job "%s" -- removing it from the ' - 'job store', job.executor, job) - self.remove_job(job.id, jobstore_alias) - continue - - run_times = job._get_run_times(now) - run_times = run_times[-1:] if run_times and job.coalesce else run_times - if run_times: - try: - executor.submit_job(job, run_times) - except MaxInstancesReachedError: - self._logger.warning( - 'Execution of job "%s" skipped: maximum number of running ' - 'instances reached (%d)', job, job.max_instances) - event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id, - jobstore_alias, run_times) - events.append(event) - except BaseException: - self._logger.exception('Error submitting job "%s" to executor "%s"', - job, job.executor) - else: - event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id, jobstore_alias, - run_times) - events.append(event) - - # Update the job if it has a next execution time. - # Otherwise remove it from the job store. 
- job_next_run = job.trigger.get_next_fire_time(run_times[-1], now) - if job_next_run: - job._modify(next_run_time=job_next_run) - jobstore.update_job(job) - else: - self.remove_job(job.id, jobstore_alias) - - # Set a new next wakeup time if there isn't one yet or - # the jobstore has an even earlier one - jobstore_next_run_time = jobstore.get_next_run_time() - if jobstore_next_run_time and (next_wakeup_time is None or - jobstore_next_run_time < next_wakeup_time): - next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone) - - # Dispatch collected events - for event in events: - self._dispatch_event(event) - - # Determine the delay until this method should be called again - if self.state == STATE_PAUSED: - wait_seconds = None - self._logger.debug('Scheduler is paused; waiting until resume() is called') - elif next_wakeup_time is None: - wait_seconds = None - self._logger.debug('No jobs; waiting until a job is added') - else: - wait_seconds = min(max(timedelta_seconds(next_wakeup_time - now), 0), TIMEOUT_MAX) - self._logger.debug('Next wakeup is due at %s (in %f seconds)', next_wakeup_time, - wait_seconds) - - return wait_seconds diff --git a/lib/apscheduler/schedulers/blocking.py b/lib/apscheduler/schedulers/blocking.py deleted file mode 100644 index e6171575..00000000 --- a/lib/apscheduler/schedulers/blocking.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import absolute_import - -from threading import Event - -from apscheduler.schedulers.base import BaseScheduler, STATE_STOPPED -from apscheduler.util import TIMEOUT_MAX - - -class BlockingScheduler(BaseScheduler): - """ - A scheduler that runs in the foreground - (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will block). - """ - _event = None - - def start(self, *args, **kwargs): - self._event = Event() - super(BlockingScheduler, self).start(*args, **kwargs) - self._main_loop() - - def shutdown(self, wait=True): - super(BlockingScheduler, self).shutdown(wait) - self._event.set() - - def _main_loop(self): - wait_seconds = TIMEOUT_MAX - while self.state != STATE_STOPPED: - self._event.wait(wait_seconds) - self._event.clear() - wait_seconds = self._process_jobs() - - def wakeup(self): - self._event.set() diff --git a/lib/apscheduler/schedulers/gevent.py b/lib/apscheduler/schedulers/gevent.py deleted file mode 100644 index d48ed74a..00000000 --- a/lib/apscheduler/schedulers/gevent.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import absolute_import - -from apscheduler.schedulers.blocking import BlockingScheduler -from apscheduler.schedulers.base import BaseScheduler - -try: - from gevent.event import Event - from gevent.lock import RLock - import gevent -except ImportError: # pragma: nocover - raise ImportError('GeventScheduler requires gevent installed') - - -class GeventScheduler(BlockingScheduler): - """A scheduler that runs as a Gevent greenlet.""" - - _greenlet = None - - def start(self, *args, **kwargs): - self._event = Event() - BaseScheduler.start(self, *args, **kwargs) - self._greenlet = gevent.spawn(self._main_loop) - return self._greenlet - - def shutdown(self, *args, **kwargs): - super(GeventScheduler, self).shutdown(*args, **kwargs) - self._greenlet.join() - del self._greenlet - - def _create_lock(self): - return RLock() - - def _create_default_executor(self): - from apscheduler.executors.gevent import GeventExecutor - return GeventExecutor() diff --git a/lib/apscheduler/schedulers/qt.py b/lib/apscheduler/schedulers/qt.py deleted file mode 100644 index 0329a000..00000000 --- 
a/lib/apscheduler/schedulers/qt.py +++ /dev/null @@ -1,43 +0,0 @@ -from __future__ import absolute_import - -from apscheduler.schedulers.base import BaseScheduler - -try: - from PyQt5.QtCore import QObject, QTimer -except (ImportError, RuntimeError): # pragma: nocover - try: - from PyQt4.QtCore import QObject, QTimer - except ImportError: - try: - from PySide.QtCore import QObject, QTimer # noqa - except ImportError: - raise ImportError('QtScheduler requires either PyQt5, PyQt4 or PySide installed') - - -class QtScheduler(BaseScheduler): - """A scheduler that runs in a Qt event loop.""" - - _timer = None - - def shutdown(self, *args, **kwargs): - super(QtScheduler, self).shutdown(*args, **kwargs) - self._stop_timer() - - def _start_timer(self, wait_seconds): - self._stop_timer() - if wait_seconds is not None: - wait_time = min(wait_seconds * 1000, 2147483647) - self._timer = QTimer.singleShot(wait_time, self._process_jobs) - - def _stop_timer(self): - if self._timer: - if self._timer.isActive(): - self._timer.stop() - del self._timer - - def wakeup(self): - self._start_timer(0) - - def _process_jobs(self): - wait_seconds = super(QtScheduler, self)._process_jobs() - self._start_timer(wait_seconds) diff --git a/lib/apscheduler/schedulers/tornado.py b/lib/apscheduler/schedulers/tornado.py deleted file mode 100644 index 0a9171f2..00000000 --- a/lib/apscheduler/schedulers/tornado.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import absolute_import - -from datetime import timedelta -from functools import wraps - -from apscheduler.schedulers.base import BaseScheduler -from apscheduler.util import maybe_ref - -try: - from tornado.ioloop import IOLoop -except ImportError: # pragma: nocover - raise ImportError('TornadoScheduler requires tornado installed') - - -def run_in_ioloop(func): - @wraps(func) - def wrapper(self, *args, **kwargs): - self._ioloop.add_callback(func, self, *args, **kwargs) - return wrapper - - -class TornadoScheduler(BaseScheduler): - """ - A scheduler that runs on a Tornado IOLoop. - - The default executor can run jobs based on native coroutines (``async def``). 
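A hedged usage sketch for the Tornado variant (assuming Tornado is installed; start() only arms callbacks on the IOLoop, so the loop itself still has to run):

from tornado.ioloop import IOLoop
from apscheduler.schedulers.tornado import TornadoScheduler

scheduler = TornadoScheduler()

async def poll():
    print('polling')

scheduler.add_job(poll, 'interval', seconds=10)
scheduler.start()
IOLoop.current().start()   # jobs fire only while the IOLoop is running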
- - =========== =============================================================== - ``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop) - =========== =============================================================== - """ - - _ioloop = None - _timeout = None - - @run_in_ioloop - def shutdown(self, wait=True): - super(TornadoScheduler, self).shutdown(wait) - self._stop_timer() - - def _configure(self, config): - self._ioloop = maybe_ref(config.pop('io_loop', None)) or IOLoop.current() - super(TornadoScheduler, self)._configure(config) - - def _start_timer(self, wait_seconds): - self._stop_timer() - if wait_seconds is not None: - self._timeout = self._ioloop.add_timeout(timedelta(seconds=wait_seconds), self.wakeup) - - def _stop_timer(self): - if self._timeout: - self._ioloop.remove_timeout(self._timeout) - del self._timeout - - def _create_default_executor(self): - from apscheduler.executors.tornado import TornadoExecutor - return TornadoExecutor() - - @run_in_ioloop - def wakeup(self): - self._stop_timer() - wait_seconds = self._process_jobs() - self._start_timer(wait_seconds) diff --git a/lib/apscheduler/schedulers/twisted.py b/lib/apscheduler/schedulers/twisted.py deleted file mode 100644 index 6b43a84b..00000000 --- a/lib/apscheduler/schedulers/twisted.py +++ /dev/null @@ -1,62 +0,0 @@ -from __future__ import absolute_import - -from functools import wraps - -from apscheduler.schedulers.base import BaseScheduler -from apscheduler.util import maybe_ref - -try: - from twisted.internet import reactor as default_reactor -except ImportError: # pragma: nocover - raise ImportError('TwistedScheduler requires Twisted installed') - - -def run_in_reactor(func): - @wraps(func) - def wrapper(self, *args, **kwargs): - self._reactor.callFromThread(func, self, *args, **kwargs) - return wrapper - - -class TwistedScheduler(BaseScheduler): - """ - A scheduler that runs on a Twisted reactor. 
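Usage mirrors the Tornado case; a sketch assuming Twisted is installed (the reactor option in the table below swaps in a non-global reactor):

from twisted.internet import reactor
from apscheduler.schedulers.twisted import TwistedScheduler

def beat():
    print('beat')

scheduler = TwistedScheduler()   # uses the global reactor unless one is configured
scheduler.add_job(beat, 'interval', seconds=10)
scheduler.start()                # wakeup()/shutdown() are marshalled via callFromThread
reactor.run()                    # jobs fire only while the reactor runs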
- - Extra options: - - =========== ======================================================== - ``reactor`` Reactor instance to use (defaults to the global reactor) - =========== ======================================================== - """ - - _reactor = None - _delayedcall = None - - def _configure(self, config): - self._reactor = maybe_ref(config.pop('reactor', default_reactor)) - super(TwistedScheduler, self)._configure(config) - - @run_in_reactor - def shutdown(self, wait=True): - super(TwistedScheduler, self).shutdown(wait) - self._stop_timer() - - def _start_timer(self, wait_seconds): - self._stop_timer() - if wait_seconds is not None: - self._delayedcall = self._reactor.callLater(wait_seconds, self.wakeup) - - def _stop_timer(self): - if self._delayedcall and self._delayedcall.active(): - self._delayedcall.cancel() - del self._delayedcall - - @run_in_reactor - def wakeup(self): - self._stop_timer() - wait_seconds = self._process_jobs() - self._start_timer(wait_seconds) - - def _create_default_executor(self): - from apscheduler.executors.twisted import TwistedExecutor - return TwistedExecutor() diff --git a/lib/apscheduler/triggers/__init__.py b/lib/apscheduler/triggers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/lib/apscheduler/triggers/base.py b/lib/apscheduler/triggers/base.py deleted file mode 100644 index ce2526a8..00000000 --- a/lib/apscheduler/triggers/base.py +++ /dev/null @@ -1,48 +0,0 @@ -from abc import ABCMeta, abstractmethod -from datetime import timedelta -import random - -import six - - -class BaseTrigger(six.with_metaclass(ABCMeta)): - """Abstract base class that defines the interface that every trigger must implement.""" - - __slots__ = () - - @abstractmethod - def get_next_fire_time(self, previous_fire_time, now): - """ - Returns the next datetime to fire on. If no such datetime can be calculated, returns - ``None``. - - :param datetime.datetime previous_fire_time: the previous time the trigger was fired - :param datetime.datetime now: current datetime - """ - - def _apply_jitter(self, next_fire_time, jitter, now): - """ - Randomize ``next_fire_time`` by adding or subtracting a random value (the jitter). If the - resulting datetime is in the past, returns the initial ``next_fire_time`` without jitter. - - ``next_fire_time - jitter <= result <= next_fire_time + jitter`` - - :param datetime.datetime|None next_fire_time: next fire time without jitter applied. If - ``None``, returns ``None``. - :param int|None jitter: maximum number of seconds to add or subtract to - ``next_fire_time``. If ``None`` or ``0``, returns ``next_fire_time`` - :param datetime.datetime now: current datetime - :return datetime.datetime|None: next fire time with a jitter. - """ - if next_fire_time is None or not jitter: - return next_fire_time - - next_fire_time_with_jitter = next_fire_time + timedelta( - seconds=random.uniform(-jitter, jitter)) - - if next_fire_time_with_jitter < now: - # Next fire time with jitter is in the past. - # Ignore jitter to avoid false misfire.
- return next_fire_time - - return next_fire_time_with_jitter diff --git a/lib/apscheduler/triggers/combining.py b/lib/apscheduler/triggers/combining.py deleted file mode 100644 index 64f83011..00000000 --- a/lib/apscheduler/triggers/combining.py +++ /dev/null @@ -1,95 +0,0 @@ -from apscheduler.triggers.base import BaseTrigger -from apscheduler.util import obj_to_ref, ref_to_obj - - -class BaseCombiningTrigger(BaseTrigger): - __slots__ = ('triggers', 'jitter') - - def __init__(self, triggers, jitter=None): - self.triggers = triggers - self.jitter = jitter - - def __getstate__(self): - return { - 'version': 1, - 'triggers': [(obj_to_ref(trigger.__class__), trigger.__getstate__()) - for trigger in self.triggers], - 'jitter': self.jitter - } - - def __setstate__(self, state): - if state.get('version', 1) > 1: - raise ValueError( - 'Got serialized data for version %s of %s, but only versions up to 1 can be ' - 'handled' % (state['version'], self.__class__.__name__)) - - self.jitter = state['jitter'] - self.triggers = [] - for clsref, state in state['triggers']: - cls = ref_to_obj(clsref) - trigger = cls.__new__(cls) - trigger.__setstate__(state) - self.triggers.append(trigger) - - def __repr__(self): - return '<{}({}{})>'.format(self.__class__.__name__, self.triggers, - ', jitter={}'.format(self.jitter) if self.jitter else '') - - -class AndTrigger(BaseCombiningTrigger): - """ - Always returns the earliest next fire time that all the given triggers can agree on. - The trigger is considered to be finished when any of the given triggers has finished its - schedule. - - Trigger alias: ``and`` - - :param list triggers: triggers to combine - :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most. - """ - - __slots__ = () - - def get_next_fire_time(self, previous_fire_time, now): - while True: - fire_times = [trigger.get_next_fire_time(previous_fire_time, now) - for trigger in self.triggers] - if None in fire_times: - return None - elif min(fire_times) == max(fire_times): - return self._apply_jitter(fire_times[0], self.jitter, now) - else: - now = max(fire_times) - - def __str__(self): - return 'and[{}]'.format(', '.join(str(trigger) for trigger in self.triggers)) - - -class OrTrigger(BaseCombiningTrigger): - """ - Always returns the earliest next fire time produced by any of the given triggers. - The trigger is considered finished when all the given triggers have finished their schedules. - - Trigger alias: ``or`` - - :param list triggers: triggers to combine - :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most. - - .. note:: Triggers that depend on the previous fire time, such as the interval trigger, may - seem to behave strangely since they are always passed the previous fire time produced by - any of the given triggers.
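A sketch of combining triggers as just described (my_job is a placeholder callable; note the caveat above about interval triggers inside a combining trigger):

from apscheduler.triggers.combining import AndTrigger, OrTrigger
from apscheduler.triggers.cron import CronTrigger

# Midnight runs that satisfy both constraints: a weekday falling on day 1-7
first_weekdays = AndTrigger([CronTrigger(day_of_week='mon-fri'),
                             CronTrigger(day='1-7')])

# Whichever schedule produces the earlier next fire time wins
either = OrTrigger([CronTrigger(hour=3),
                    CronTrigger(day_of_week='sat', hour=12)])

scheduler.add_job(my_job, first_weekdays)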
- """ - - __slots__ = () - - def get_next_fire_time(self, previous_fire_time, now): - fire_times = [trigger.get_next_fire_time(previous_fire_time, now) - for trigger in self.triggers] - fire_times = [fire_time for fire_time in fire_times if fire_time is not None] - if fire_times: - return self._apply_jitter(min(fire_times), self.jitter, now) - else: - return None - - def __str__(self): - return 'or[{}]'.format(', '.join(str(trigger) for trigger in self.triggers)) diff --git a/lib/apscheduler/triggers/cron/__init__.py b/lib/apscheduler/triggers/cron/__init__.py deleted file mode 100644 index ce675dd9..00000000 --- a/lib/apscheduler/triggers/cron/__init__.py +++ /dev/null @@ -1,238 +0,0 @@ -from datetime import datetime, timedelta - -from tzlocal import get_localzone -import six - -from apscheduler.triggers.base import BaseTrigger -from apscheduler.triggers.cron.fields import ( - BaseField, MonthField, WeekField, DayOfMonthField, DayOfWeekField, DEFAULT_VALUES) -from apscheduler.util import datetime_ceil, convert_to_datetime, datetime_repr, astimezone - - -class CronTrigger(BaseTrigger): - """ - Triggers when current time matches all specified time constraints, - similarly to how the UNIX cron scheduler works. - - :param int|str year: 4-digit year - :param int|str month: month (1-12) - :param int|str day: day of the (1-31) - :param int|str week: ISO week (1-53) - :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun) - :param int|str hour: hour (0-23) - :param int|str minute: minute (0-59) - :param int|str second: second (0-59) - :param datetime|str start_date: earliest possible date/time to trigger on (inclusive) - :param datetime|str end_date: latest possible date/time to trigger on (inclusive) - :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (defaults - to scheduler timezone) - :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most. - - .. note:: The first weekday is always **monday**. 
- """ - - FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute', 'second') - FIELDS_MAP = { - 'year': BaseField, - 'month': MonthField, - 'week': WeekField, - 'day': DayOfMonthField, - 'day_of_week': DayOfWeekField, - 'hour': BaseField, - 'minute': BaseField, - 'second': BaseField - } - - __slots__ = 'timezone', 'start_date', 'end_date', 'fields', 'jitter' - - def __init__(self, year=None, month=None, day=None, week=None, day_of_week=None, hour=None, - minute=None, second=None, start_date=None, end_date=None, timezone=None, - jitter=None): - if timezone: - self.timezone = astimezone(timezone) - elif isinstance(start_date, datetime) and start_date.tzinfo: - self.timezone = start_date.tzinfo - elif isinstance(end_date, datetime) and end_date.tzinfo: - self.timezone = end_date.tzinfo - else: - self.timezone = get_localzone() - - self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date') - self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date') - - self.jitter = jitter - - values = dict((key, value) for (key, value) in six.iteritems(locals()) - if key in self.FIELD_NAMES and value is not None) - self.fields = [] - assign_defaults = False - for field_name in self.FIELD_NAMES: - if field_name in values: - exprs = values.pop(field_name) - is_default = False - assign_defaults = not values - elif assign_defaults: - exprs = DEFAULT_VALUES[field_name] - is_default = True - else: - exprs = '*' - is_default = True - - field_class = self.FIELDS_MAP[field_name] - field = field_class(field_name, exprs, is_default) - self.fields.append(field) - - @classmethod - def from_crontab(cls, expr, timezone=None): - """ - Create a :class:`~CronTrigger` from a standard crontab expression. - - See https://en.wikipedia.org/wiki/Cron for more information on the format accepted here. - - :param expr: minute, hour, day of month, month, day of week - :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations ( - defaults to scheduler timezone) - :return: a :class:`~CronTrigger` instance - - """ - values = expr.split() - if len(values) != 5: - raise ValueError('Wrong number of fields; got {}, expected 5'.format(len(values))) - - return cls(minute=values[0], hour=values[1], day=values[2], month=values[3], - day_of_week=values[4], timezone=timezone) - - def _increment_field_value(self, dateval, fieldnum): - """ - Increments the designated field and resets all less significant fields to their minimum - values. 
- - :type dateval: datetime - :type fieldnum: int - :return: a tuple containing the new date, and the number of the field that was actually - incremented - :rtype: tuple - """ - - values = {} - i = 0 - while i < len(self.fields): - field = self.fields[i] - if not field.REAL: - if i == fieldnum: - fieldnum -= 1 - i -= 1 - else: - i += 1 - continue - - if i < fieldnum: - values[field.name] = field.get_value(dateval) - i += 1 - elif i > fieldnum: - values[field.name] = field.get_min(dateval) - i += 1 - else: - value = field.get_value(dateval) - maxval = field.get_max(dateval) - if value == maxval: - fieldnum -= 1 - i -= 1 - else: - values[field.name] = value + 1 - i += 1 - - difference = datetime(**values) - dateval.replace(tzinfo=None) - return self.timezone.normalize(dateval + difference), fieldnum - - def _set_field_value(self, dateval, fieldnum, new_value): - values = {} - for i, field in enumerate(self.fields): - if field.REAL: - if i < fieldnum: - values[field.name] = field.get_value(dateval) - elif i > fieldnum: - values[field.name] = field.get_min(dateval) - else: - values[field.name] = new_value - - return self.timezone.localize(datetime(**values)) - - def get_next_fire_time(self, previous_fire_time, now): - if previous_fire_time: - start_date = min(now, previous_fire_time + timedelta(microseconds=1)) - if start_date == previous_fire_time: - start_date += timedelta(microseconds=1) - else: - start_date = max(now, self.start_date) if self.start_date else now - - fieldnum = 0 - next_date = datetime_ceil(start_date).astimezone(self.timezone) - while 0 <= fieldnum < len(self.fields): - field = self.fields[fieldnum] - curr_value = field.get_value(next_date) - next_value = field.get_next_value(next_date) - - if next_value is None: - # No valid value was found - next_date, fieldnum = self._increment_field_value(next_date, fieldnum - 1) - elif next_value > curr_value: - # A valid, but higher than the starting value, was found - if field.REAL: - next_date = self._set_field_value(next_date, fieldnum, next_value) - fieldnum += 1 - else: - next_date, fieldnum = self._increment_field_value(next_date, fieldnum) - else: - # A valid value was found, no changes necessary - fieldnum += 1 - - # Return if the date has rolled past the end date - if self.end_date and next_date > self.end_date: - return None - - if fieldnum >= 0: - next_date = self._apply_jitter(next_date, self.jitter, now) - return min(next_date, self.end_date) if self.end_date else next_date - - def __getstate__(self): - return { - 'version': 2, - 'timezone': self.timezone, - 'start_date': self.start_date, - 'end_date': self.end_date, - 'fields': self.fields, - 'jitter': self.jitter, - } - - def __setstate__(self, state): - # This is for compatibility with APScheduler 3.0.x - if isinstance(state, tuple): - state = state[1] - - if state.get('version', 1) > 2: - raise ValueError( - 'Got serialized data for version %s of %s, but only versions up to 2 can be ' - 'handled' % (state['version'], self.__class__.__name__)) - - self.timezone = state['timezone'] - self.start_date = state['start_date'] - self.end_date = state['end_date'] - self.fields = state['fields'] - self.jitter = state.get('jitter') - - def __str__(self): - options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default] - return 'cron[%s]' % (', '.join(options)) - - def __repr__(self): - options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default] - if self.start_date: - options.append("start_date=%r" % datetime_repr(self.start_date)) - if 
self.end_date: - options.append("end_date=%r" % datetime_repr(self.end_date)) - if self.jitter: - options.append('jitter=%s' % self.jitter) - - return "<%s (%s, timezone='%s')>" % ( - self.__class__.__name__, ', '.join(options), self.timezone) diff --git a/lib/apscheduler/triggers/cron/expressions.py b/lib/apscheduler/triggers/cron/expressions.py deleted file mode 100644 index 55a37167..00000000 --- a/lib/apscheduler/triggers/cron/expressions.py +++ /dev/null @@ -1,251 +0,0 @@ -"""This module contains the expressions applicable for CronTrigger's fields.""" - -from calendar import monthrange -import re - -from apscheduler.util import asint - -__all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression', - 'WeekdayPositionExpression', 'LastDayOfMonthExpression') - - -WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] -MONTHS = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'] - - -class AllExpression(object): - value_re = re.compile(r'\*(?:/(?P<step>\d+))?$') - - def __init__(self, step=None): - self.step = asint(step) - if self.step == 0: - raise ValueError('Increment must be higher than 0') - - def validate_range(self, field_name): - from apscheduler.triggers.cron.fields import MIN_VALUES, MAX_VALUES - - value_range = MAX_VALUES[field_name] - MIN_VALUES[field_name] - if self.step and self.step > value_range: - raise ValueError('the step value ({}) is higher than the total range of the ' - 'expression ({})'.format(self.step, value_range)) - - def get_next_value(self, date, field): - start = field.get_value(date) - minval = field.get_min(date) - maxval = field.get_max(date) - start = max(start, minval) - - if not self.step: - next = start - else: - distance_to_next = (self.step - (start - minval)) % self.step - next = start + distance_to_next - - if next <= maxval: - return next - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.step == other.step - - def __str__(self): - if self.step: - return '*/%d' % self.step - return '*' - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, self.step) - - -class RangeExpression(AllExpression): - value_re = re.compile( - r'(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$') - - def __init__(self, first, last=None, step=None): - super(RangeExpression, self).__init__(step) - first = asint(first) - last = asint(last) - if last is None and step is None: - last = first - if last is not None and first > last: - raise ValueError('The minimum value in a range must not be higher than the maximum') - self.first = first - self.last = last - - def validate_range(self, field_name): - from apscheduler.triggers.cron.fields import MIN_VALUES, MAX_VALUES - - super(RangeExpression, self).validate_range(field_name) - if self.first < MIN_VALUES[field_name]: - raise ValueError('the first value ({}) is lower than the minimum value ({})' - .format(self.first, MIN_VALUES[field_name])) - if self.last is not None and self.last > MAX_VALUES[field_name]: - raise ValueError('the last value ({}) is higher than the maximum value ({})' - .format(self.last, MAX_VALUES[field_name])) - value_range = (self.last or MAX_VALUES[field_name]) - self.first - if self.step and self.step > value_range: - raise ValueError('the step value ({}) is higher than the total range of the ' - 'expression ({})'.format(self.step, value_range)) - - def get_next_value(self, date, field): - startval = field.get_value(date) - minval = field.get_min(date) - maxval = field.get_max(date) - - # Apply range limits - minval = max(minval,
self.first) - maxval = min(maxval, self.last) if self.last is not None else maxval - nextval = max(minval, startval) - - # Apply the step if defined - if self.step: - distance_to_next = (self.step - (nextval - minval)) % self.step - nextval += distance_to_next - - return nextval if nextval <= maxval else None - - def __eq__(self, other): - return (isinstance(other, self.__class__) and self.first == other.first and - self.last == other.last) - - def __str__(self): - if self.last != self.first and self.last is not None: - range = '%d-%d' % (self.first, self.last) - else: - range = str(self.first) - - if self.step: - return '%s/%d' % (range, self.step) - return range - - def __repr__(self): - args = [str(self.first)] - if self.last != self.first and self.last is not None or self.step: - args.append(str(self.last)) - if self.step: - args.append(str(self.step)) - return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) - - -class MonthRangeExpression(RangeExpression): - value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE) - - def __init__(self, first, last=None): - try: - first_num = MONTHS.index(first.lower()) + 1 - except ValueError: - raise ValueError('Invalid month name "%s"' % first) - - if last: - try: - last_num = MONTHS.index(last.lower()) + 1 - except ValueError: - raise ValueError('Invalid month name "%s"' % last) - else: - last_num = None - - super(MonthRangeExpression, self).__init__(first_num, last_num) - - def __str__(self): - if self.last != self.first and self.last is not None: - return '%s-%s' % (MONTHS[self.first - 1], MONTHS[self.last - 1]) - return MONTHS[self.first - 1] - - def __repr__(self): - args = ["'%s'" % MONTHS[self.first - 1]] - if self.last != self.first and self.last is not None: - args.append("'%s'" % MONTHS[self.last - 1]) - return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) - - -class WeekdayRangeExpression(RangeExpression): - value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE) - - def __init__(self, first, last=None): - try: - first_num = WEEKDAYS.index(first.lower()) - except ValueError: - raise ValueError('Invalid weekday name "%s"' % first) - - if last: - try: - last_num = WEEKDAYS.index(last.lower()) - except ValueError: - raise ValueError('Invalid weekday name "%s"' % last) - else: - last_num = None - - super(WeekdayRangeExpression, self).__init__(first_num, last_num) - - def __str__(self): - if self.last != self.first and self.last is not None: - return '%s-%s' % (WEEKDAYS[self.first], WEEKDAYS[self.last]) - return WEEKDAYS[self.first] - - def __repr__(self): - args = ["'%s'" % WEEKDAYS[self.first]] - if self.last != self.first and self.last is not None: - args.append("'%s'" % WEEKDAYS[self.last]) - return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) - - -class WeekdayPositionExpression(AllExpression): - options = ['1st', '2nd', '3rd', '4th', '5th', 'last'] - value_re = re.compile(r'(?P<option_name>%s) +(?P<weekday_name>(?:\d+|\w+))' % - '|'.join(options), re.IGNORECASE) - - def __init__(self, option_name, weekday_name): - super(WeekdayPositionExpression, self).__init__(None) - try: - self.option_num = self.options.index(option_name.lower()) - except ValueError: - raise ValueError('Invalid weekday position "%s"' % option_name) - - try: - self.weekday = WEEKDAYS.index(weekday_name.lower()) - except ValueError: - raise ValueError('Invalid weekday name "%s"' % weekday_name) - - def get_next_value(self, date, field): - # Figure out the weekday of the month's first day and the number of days in that month - first_day_wday, last_day =
monthrange(date.year, date.month) - - # Calculate which day of the month is the first of the target weekdays - first_hit_day = self.weekday - first_day_wday + 1 - if first_hit_day <= 0: - first_hit_day += 7 - - # Calculate what day of the month the target weekday would be - if self.option_num < 5: - target_day = first_hit_day + self.option_num * 7 - else: - target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7 - - if target_day <= last_day and target_day >= date.day: - return target_day - - def __eq__(self, other): - return (super(WeekdayPositionExpression, self).__eq__(other) and - self.option_num == other.option_num and self.weekday == other.weekday) - - def __str__(self): - return '%s %s' % (self.options[self.option_num], WEEKDAYS[self.weekday]) - - def __repr__(self): - return "%s('%s', '%s')" % (self.__class__.__name__, self.options[self.option_num], - WEEKDAYS[self.weekday]) - - -class LastDayOfMonthExpression(AllExpression): - value_re = re.compile(r'last', re.IGNORECASE) - - def __init__(self): - super(LastDayOfMonthExpression, self).__init__(None) - - def get_next_value(self, date, field): - return monthrange(date.year, date.month)[1] - - def __str__(self): - return 'last' - - def __repr__(self): - return "%s()" % self.__class__.__name__ diff --git a/lib/apscheduler/triggers/cron/fields.py b/lib/apscheduler/triggers/cron/fields.py deleted file mode 100644 index 86d620c4..00000000 --- a/lib/apscheduler/triggers/cron/fields.py +++ /dev/null @@ -1,111 +0,0 @@ -"""Fields represent CronTrigger options which map to :class:`~datetime.datetime` fields.""" - -from calendar import monthrange -import re - -import six - -from apscheduler.triggers.cron.expressions import ( - AllExpression, RangeExpression, WeekdayPositionExpression, LastDayOfMonthExpression, - WeekdayRangeExpression, MonthRangeExpression) - - -__all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField', 'WeekField', - 'DayOfMonthField', 'DayOfWeekField') - - -MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1, 'day_of_week': 0, 'hour': 0, - 'minute': 0, 'second': 0} -MAX_VALUES = {'year': 9999, 'month': 12, 'day': 31, 'week': 53, 'day_of_week': 6, 'hour': 23, - 'minute': 59, 'second': 59} -DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*', 'day_of_week': '*', 'hour': 0, - 'minute': 0, 'second': 0} -SEPARATOR = re.compile(' *, *') - - -class BaseField(object): - REAL = True - COMPILERS = [AllExpression, RangeExpression] - - def __init__(self, name, exprs, is_default=False): - self.name = name - self.is_default = is_default - self.compile_expressions(exprs) - - def get_min(self, dateval): - return MIN_VALUES[self.name] - - def get_max(self, dateval): - return MAX_VALUES[self.name] - - def get_value(self, dateval): - return getattr(dateval, self.name) - - def get_next_value(self, dateval): - smallest = None - for expr in self.expressions: - value = expr.get_next_value(dateval, self) - if smallest is None or (value is not None and value < smallest): - smallest = value - - return smallest - - def compile_expressions(self, exprs): - self.expressions = [] - - # Split a comma-separated expression list, if any - for expr in SEPARATOR.split(str(exprs).strip()): - self.compile_expression(expr) - - def compile_expression(self, expr): - for compiler in self.COMPILERS: - match = compiler.value_re.match(expr) - if match: - compiled_expr = compiler(**match.groupdict()) - - try: - compiled_expr.validate_range(self.name) - except ValueError as e: - exc = ValueError('Error validating expression {!r}: 
{}'.format(expr, e)) - six.raise_from(exc, None) - - self.expressions.append(compiled_expr) - return - - raise ValueError('Unrecognized expression "%s" for field "%s"' % (expr, self.name)) - - def __eq__(self, other): - return isinstance(self, self.__class__) and self.expressions == other.expressions - - def __str__(self): - expr_strings = (str(e) for e in self.expressions) - return ','.join(expr_strings) - - def __repr__(self): - return "%s('%s', '%s')" % (self.__class__.__name__, self.name, self) - - -class WeekField(BaseField): - REAL = False - - def get_value(self, dateval): - return dateval.isocalendar()[1] - - -class DayOfMonthField(BaseField): - COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression, LastDayOfMonthExpression] - - def get_max(self, dateval): - return monthrange(dateval.year, dateval.month)[1] - - -class DayOfWeekField(BaseField): - REAL = False - COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression] - - def get_value(self, dateval): - return dateval.weekday() - - -class MonthField(BaseField): - COMPILERS = BaseField.COMPILERS + [MonthRangeExpression] diff --git a/lib/apscheduler/triggers/date.py b/lib/apscheduler/triggers/date.py deleted file mode 100644 index 07681008..00000000 --- a/lib/apscheduler/triggers/date.py +++ /dev/null @@ -1,51 +0,0 @@ -from datetime import datetime - -from tzlocal import get_localzone - -from apscheduler.triggers.base import BaseTrigger -from apscheduler.util import convert_to_datetime, datetime_repr, astimezone - - -class DateTrigger(BaseTrigger): - """ - Triggers once on the given datetime. If ``run_date`` is left empty, current time is used. - - :param datetime|str run_date: the date/time to run the job at - :param datetime.tzinfo|str timezone: time zone for ``run_date`` if it doesn't have one already - """ - - __slots__ = 'run_date' - - def __init__(self, run_date=None, timezone=None): - timezone = astimezone(timezone) or get_localzone() - if run_date is not None: - self.run_date = convert_to_datetime(run_date, timezone, 'run_date') - else: - self.run_date = datetime.now(timezone) - - def get_next_fire_time(self, previous_fire_time, now): - return self.run_date if previous_fire_time is None else None - - def __getstate__(self): - return { - 'version': 1, - 'run_date': self.run_date - } - - def __setstate__(self, state): - # This is for compatibility with APScheduler 3.0.x - if isinstance(state, tuple): - state = state[1] - - if state.get('version', 1) > 1: - raise ValueError( - 'Got serialized data for version %s of %s, but only version 1 can be handled' % - (state['version'], self.__class__.__name__)) - - self.run_date = state['run_date'] - - def __str__(self): - return 'date[%s]' % datetime_repr(self.run_date) - - def __repr__(self): - return "<%s (run_date='%s')>" % (self.__class__.__name__, datetime_repr(self.run_date)) diff --git a/lib/apscheduler/triggers/interval.py b/lib/apscheduler/triggers/interval.py deleted file mode 100644 index 831ba383..00000000 --- a/lib/apscheduler/triggers/interval.py +++ /dev/null @@ -1,106 +0,0 @@ -from datetime import timedelta, datetime -from math import ceil - -from tzlocal import get_localzone - -from apscheduler.triggers.base import BaseTrigger -from apscheduler.util import convert_to_datetime, timedelta_seconds, datetime_repr, astimezone - - -class IntervalTrigger(BaseTrigger): - """ - Triggers on specified intervals, starting on ``start_date`` if specified, ``datetime.now()`` + - interval otherwise. 
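DateTrigger above and IntervalTrigger here are the two simplest triggers in the package; a quick sketch (dates and zone are arbitrary):

from datetime import datetime
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.interval import IntervalTrigger

# One-shot: fires exactly once, then get_next_fire_time() returns None
once = DateTrigger(run_date=datetime(2021, 3, 1, 12, 0), timezone='Europe/Berlin')

# Repeating: every 90 minutes from start_date, nudged by up to 30 s of jitter
every = IntervalTrigger(hours=1, minutes=30, jitter=30)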
- - :param int weeks: number of weeks to wait - :param int days: number of days to wait - :param int hours: number of hours to wait - :param int minutes: number of minutes to wait - :param int seconds: number of seconds to wait - :param datetime|str start_date: starting point for the interval calculation - :param datetime|str end_date: latest possible date/time to trigger on - :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations - :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most. - """ - - __slots__ = 'timezone', 'start_date', 'end_date', 'interval', 'interval_length', 'jitter' - - def __init__(self, weeks=0, days=0, hours=0, minutes=0, seconds=0, start_date=None, - end_date=None, timezone=None, jitter=None): - self.interval = timedelta(weeks=weeks, days=days, hours=hours, minutes=minutes, - seconds=seconds) - self.interval_length = timedelta_seconds(self.interval) - if self.interval_length == 0: - self.interval = timedelta(seconds=1) - self.interval_length = 1 - - if timezone: - self.timezone = astimezone(timezone) - elif isinstance(start_date, datetime) and start_date.tzinfo: - self.timezone = start_date.tzinfo - elif isinstance(end_date, datetime) and end_date.tzinfo: - self.timezone = end_date.tzinfo - else: - self.timezone = get_localzone() - - start_date = start_date or (datetime.now(self.timezone) + self.interval) - self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date') - self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date') - - self.jitter = jitter - - def get_next_fire_time(self, previous_fire_time, now): - if previous_fire_time: - next_fire_time = previous_fire_time + self.interval - elif self.start_date > now: - next_fire_time = self.start_date - else: - timediff_seconds = timedelta_seconds(now - self.start_date) - next_interval_num = int(ceil(timediff_seconds / self.interval_length)) - next_fire_time = self.start_date + self.interval * next_interval_num - - if self.jitter is not None: - next_fire_time = self._apply_jitter(next_fire_time, self.jitter, now) - - if not self.end_date or next_fire_time <= self.end_date: - return self.timezone.normalize(next_fire_time) - - def __getstate__(self): - return { - 'version': 2, - 'timezone': self.timezone, - 'start_date': self.start_date, - 'end_date': self.end_date, - 'interval': self.interval, - 'jitter': self.jitter, - } - - def __setstate__(self, state): - # This is for compatibility with APScheduler 3.0.x - if isinstance(state, tuple): - state = state[1] - - if state.get('version', 1) > 2: - raise ValueError( - 'Got serialized data for version %s of %s, but only versions up to 2 can be ' - 'handled' % (state['version'], self.__class__.__name__)) - - self.timezone = state['timezone'] - self.start_date = state['start_date'] - self.end_date = state['end_date'] - self.interval = state['interval'] - self.interval_length = timedelta_seconds(self.interval) - self.jitter = state.get('jitter') - - def __str__(self): - return 'interval[%s]' % str(self.interval) - - def __repr__(self): - options = ['interval=%r' % self.interval, 'start_date=%r' % datetime_repr(self.start_date)] - if self.end_date: - options.append("end_date=%r" % datetime_repr(self.end_date)) - if self.jitter: - options.append('jitter=%s' % self.jitter) - - return "<%s (%s, timezone='%s')>" % ( - self.__class__.__name__, ', '.join(options), self.timezone) diff --git a/lib/apscheduler/util.py b/lib/apscheduler/util.py deleted file mode 100644 index 
8b7b3f5e..00000000 --- a/lib/apscheduler/util.py +++ /dev/null @@ -1,429 +0,0 @@ -"""This module contains several handy functions primarily meant for internal use.""" - -from __future__ import division - -from datetime import date, datetime, time, timedelta, tzinfo -from calendar import timegm -from functools import partial -from inspect import isclass, ismethod -import re - -from pytz import timezone, utc, FixedOffset -import six - -try: - from inspect import signature -except ImportError: # pragma: nocover - from funcsigs import signature - -try: - from threading import TIMEOUT_MAX -except ImportError: - TIMEOUT_MAX = 4294967 # Maximum value accepted by Event.wait() on Windows - -try: - from asyncio import iscoroutinefunction -except ImportError: - try: - from trollius import iscoroutinefunction - except ImportError: - def iscoroutinefunction(func): - return False - -__all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_utc_timestamp', - 'utc_timestamp_to_datetime', 'timedelta_seconds', 'datetime_ceil', 'get_callable_name', - 'obj_to_ref', 'ref_to_obj', 'maybe_ref', 'repr_escape', 'check_callable_args', - 'TIMEOUT_MAX') - - -class _Undefined(object): - def __nonzero__(self): - return False - - def __bool__(self): - return False - - def __repr__(self): - return '<undefined>' - - -undefined = _Undefined() #: a unique object that only signifies that no value is defined - - -def asint(text): - """ - Safely converts a string to an integer, returning ``None`` if the string is ``None``. - - :type text: str - :rtype: int - - """ - if text is not None: - return int(text) - - -def asbool(obj): - """ - Interprets an object as a boolean value. - - :rtype: bool - - """ - if isinstance(obj, str): - obj = obj.strip().lower() - if obj in ('true', 'yes', 'on', 'y', 't', '1'): - return True - if obj in ('false', 'no', 'off', 'n', 'f', '0'): - return False - raise ValueError('Unable to interpret value "%s" as boolean' % obj) - return bool(obj) - - -def astimezone(obj): - """ - Interprets an object as a timezone. - - :rtype: tzinfo - - """ - if isinstance(obj, six.string_types): - return timezone(obj) - if isinstance(obj, tzinfo): - if not hasattr(obj, 'localize') or not hasattr(obj, 'normalize'): - raise TypeError('Only timezones from the pytz library are supported') - if obj.zone == 'local': - raise ValueError( - 'Unable to determine the name of the local timezone -- you must explicitly ' - 'specify the name of the local timezone. Please refrain from using timezones like ' - 'EST to prevent problems with daylight saving time. Instead, use a locale based ' - 'timezone name (such as Europe/Helsinki).') - return obj - if obj is not None: - raise TypeError('Expected tzinfo, got %s instead' % obj.__class__.__name__) - - -_DATE_REGEX = re.compile( - r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})' - r'(?:[ T](?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})' - r'(?:\.(?P<microsecond>\d{1,6}))?' - r'(?P<timezone>Z|[+-]\d\d:\d\d)?)?$') - - -def convert_to_datetime(input, tz, arg_name): - """ - Converts the given object to a timezone aware datetime object. - - If a timezone aware datetime object is passed, it is returned unmodified. - If a naive datetime object is passed, it is given the specified timezone. - If the input is a string, it is parsed as a datetime with the given timezone. - - Date strings are accepted in three different forms: date only (Y-m-d), date with time - (Y-m-d H:M:S) or with date+time with microseconds (Y-m-d H:M:S.micro). 
Additionally you can - override the time zone by giving a specific offset in the format specified by ISO 8601: - Z (UTC), +HH:MM or -HH:MM. - - :param str|datetime input: the datetime or string to convert to a timezone aware datetime - :param datetime.tzinfo tz: timezone to interpret ``input`` in - :param str arg_name: the name of the argument (used in an error message) - :rtype: datetime - - """ - if input is None: - return - elif isinstance(input, datetime): - datetime_ = input - elif isinstance(input, date): - datetime_ = datetime.combine(input, time()) - elif isinstance(input, six.string_types): - m = _DATE_REGEX.match(input) - if not m: - raise ValueError('Invalid date string') - - values = m.groupdict() - tzname = values.pop('timezone') - if tzname == 'Z': - tz = utc - elif tzname: - hours, minutes = (int(x) for x in tzname[1:].split(':')) - sign = 1 if tzname[0] == '+' else -1 - tz = FixedOffset(sign * (hours * 60 + minutes)) - - values = {k: int(v or 0) for k, v in values.items()} - datetime_ = datetime(**values) - else: - raise TypeError('Unsupported type for %s: %s' % (arg_name, input.__class__.__name__)) - - if datetime_.tzinfo is not None: - return datetime_ - if tz is None: - raise ValueError( - 'The "tz" argument must be specified if %s has no timezone information' % arg_name) - if isinstance(tz, six.string_types): - tz = timezone(tz) - - try: - return tz.localize(datetime_, is_dst=None) - except AttributeError: - raise TypeError( - 'Only pytz timezones are supported (need the localize() and normalize() methods)') - - -def datetime_to_utc_timestamp(timeval): - """ - Converts a datetime instance to a timestamp. - - :type timeval: datetime - :rtype: float - - """ - if timeval is not None: - return timegm(timeval.utctimetuple()) + timeval.microsecond / 1000000 - - -def utc_timestamp_to_datetime(timestamp): - """ - Converts the given timestamp to a datetime instance. - - :type timestamp: float - :rtype: datetime - - """ - if timestamp is not None: - return datetime.fromtimestamp(timestamp, utc) - - -def timedelta_seconds(delta): - """ - Converts the given timedelta to seconds. - - :type delta: timedelta - :rtype: float - - """ - return delta.days * 24 * 60 * 60 + delta.seconds + \ - delta.microseconds / 1000000.0 - - -def datetime_ceil(dateval): - """ - Rounds the given datetime object upwards. - - :type dateval: datetime - - """ - if dateval.microsecond > 0: - return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond) - return dateval - - -def datetime_repr(dateval): - return dateval.strftime('%Y-%m-%d %H:%M:%S %Z') if dateval else 'None' - - -def get_callable_name(func): - """ - Returns the best available display name for the given function/callable. - - :rtype: str - - """ - # the easy case (on Python 3.3+) - if hasattr(func, '__qualname__'): - return func.__qualname__ - - # class methods, bound and unbound methods - f_self = getattr(func, '__self__', None) or getattr(func, 'im_self', None) - if f_self and hasattr(func, '__name__'): - f_class = f_self if isclass(f_self) else f_self.__class__ - else: - f_class = getattr(func, 'im_class', None) - - if f_class and hasattr(func, '__name__'): - return '%s.%s' % (f_class.__name__, func.__name__) - - # class or class instance - if hasattr(func, '__call__'): - # class - if hasattr(func, '__name__'): - return func.__name__ - - # instance of a class with a __call__ method - return func.__class__.__name__ - - raise TypeError('Unable to determine a name for %r -- maybe it is not a callable?' 
% func) - - def obj_to_ref(obj): - """ - Returns the path to the given callable. - - :rtype: str - :raises TypeError: if the given object is not callable - :raises ValueError: if the given object is a :class:`~functools.partial`, lambda or a nested - function - - """ - if isinstance(obj, partial): - raise ValueError('Cannot create a reference to a partial()') - - name = get_callable_name(obj) - if '<lambda>' in name: - raise ValueError('Cannot create a reference to a lambda') - if '<locals>' in name: - raise ValueError('Cannot create a reference to a nested function') - - if ismethod(obj): - if hasattr(obj, 'im_self') and obj.im_self: - # bound method - module = obj.im_self.__module__ - elif hasattr(obj, 'im_class') and obj.im_class: - # unbound method - module = obj.im_class.__module__ - else: - module = obj.__module__ - else: - module = obj.__module__ - return '%s:%s' % (module, name) - - -def ref_to_obj(ref): - """ - Returns the object pointed to by ``ref``. - - :type ref: str - - """ - if not isinstance(ref, six.string_types): - raise TypeError('References must be strings') - if ':' not in ref: - raise ValueError('Invalid reference') - - modulename, rest = ref.split(':', 1) - try: - obj = __import__(modulename, fromlist=[rest]) - except ImportError: - raise LookupError('Error resolving reference %s: could not import module' % ref) - - try: - for name in rest.split('.'): - obj = getattr(obj, name) - return obj - except Exception: - raise LookupError('Error resolving reference %s: error looking up object' % ref) - - -def maybe_ref(ref): - """ - Returns the object that the given reference points to, if it is indeed a reference. - If it is not a reference, the object is returned as-is. - - """ - if not isinstance(ref, str): - return ref - return ref_to_obj(ref) - - -if six.PY2: - def repr_escape(string): - if isinstance(string, six.text_type): - return string.encode('ascii', 'backslashreplace') - return string -else: - def repr_escape(string): - return string - - -def check_callable_args(func, args, kwargs): - """ - Ensures that the given callable can be called with the given arguments. 
- - :type args: tuple - :type kwargs: dict - - """ - pos_kwargs_conflicts = [] # parameters that have a match in both args and kwargs - positional_only_kwargs = [] # positional-only parameters that have a match in kwargs - unsatisfied_args = [] # parameters in signature that don't have a match in args or kwargs - unsatisfied_kwargs = [] # keyword-only arguments that don't have a match in kwargs - unmatched_args = list(args) # args that didn't match any of the parameters in the signature - # kwargs that didn't match any of the parameters in the signature - unmatched_kwargs = list(kwargs) - # indicates if the signature defines *args and **kwargs respectively - has_varargs = has_var_kwargs = False - - try: - sig = signature(func) - except ValueError: - # signature() doesn't work against every kind of callable - return - - for param in six.itervalues(sig.parameters): - if param.kind == param.POSITIONAL_OR_KEYWORD: - if param.name in unmatched_kwargs and unmatched_args: - pos_kwargs_conflicts.append(param.name) - elif unmatched_args: - del unmatched_args[0] - elif param.name in unmatched_kwargs: - unmatched_kwargs.remove(param.name) - elif param.default is param.empty: - unsatisfied_args.append(param.name) - elif param.kind == param.POSITIONAL_ONLY: - if unmatched_args: - del unmatched_args[0] - elif param.name in unmatched_kwargs: - unmatched_kwargs.remove(param.name) - positional_only_kwargs.append(param.name) - elif param.default is param.empty: - unsatisfied_args.append(param.name) - elif param.kind == param.KEYWORD_ONLY: - if param.name in unmatched_kwargs: - unmatched_kwargs.remove(param.name) - elif param.default is param.empty: - unsatisfied_kwargs.append(param.name) - elif param.kind == param.VAR_POSITIONAL: - has_varargs = True - elif param.kind == param.VAR_KEYWORD: - has_var_kwargs = True - - # Make sure there are no conflicts between args and kwargs - if pos_kwargs_conflicts: - raise ValueError('The following arguments are supplied in both args and kwargs: %s' % - ', '.join(pos_kwargs_conflicts)) - - # Check if keyword arguments are being fed to positional-only parameters - if positional_only_kwargs: - raise ValueError('The following arguments cannot be given as keyword arguments: %s' % - ', '.join(positional_only_kwargs)) - - # Check that the number of positional arguments minus the number of matched kwargs matches the - # argspec - if unsatisfied_args: - raise ValueError('The following arguments have not been supplied: %s' % - ', '.join(unsatisfied_args)) - - # Check that all keyword-only arguments have been supplied - if unsatisfied_kwargs: - raise ValueError( - 'The following keyword-only arguments have not been supplied in kwargs: %s' % - ', '.join(unsatisfied_kwargs)) - - # Check that the callable can accept the given number of positional arguments - if not has_varargs and unmatched_args: - raise ValueError( - 'The list of positional arguments is longer than the target callable can handle ' - '(allowed: %d, given in args: %d)' % (len(args) - len(unmatched_args), len(args))) - - # Check that the callable can accept the given keyword arguments - if not has_var_kwargs and unmatched_kwargs: - raise ValueError( - 'The target callable does not accept the following keyword arguments: %s' % - ', '.join(unmatched_kwargs)) - - -def iscoroutinefunction_partial(f): - while isinstance(f, partial): - f = f.func - - # The asyncio version of iscoroutinefunction includes testing for @coroutine - # decorations vs. the inspect version which does not. 
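# For illustration: given partial(partial(coro, 1), 2), the loop above - # peels away both partial layers, so the check below inspects coro itself.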
- return iscoroutinefunction(f) diff --git a/lib/argparse.py b/lib/argparse.py deleted file mode 100644 index 70a77cc0..00000000 --- a/lib/argparse.py +++ /dev/null @@ -1,2392 +0,0 @@ -# Author: Steven J. Bethard <steven.bethard@gmail.com>. -# Maintainer: Thomas Waldmann - -"""Command-line parsing library - -This module is an optparse-inspired command-line parsing library that: - - - handles both optional and positional arguments - - produces highly informative usage messages - - supports parsers that dispatch to sub-parsers - -The following is a simple usage example that sums integers from the -command-line and writes the result to a file:: - - parser = argparse.ArgumentParser( - description='sum the integers at the command line') - parser.add_argument( - 'integers', metavar='int', nargs='+', type=int, - help='an integer to be summed') - parser.add_argument( - '--log', default=sys.stdout, type=argparse.FileType('w'), - help='the file where the sum should be written') - args = parser.parse_args() - args.log.write('%s' % sum(args.integers)) - args.log.close() - -The module contains the following public classes: - - - ArgumentParser -- The main entry point for command-line parsing. As the - example above shows, the add_argument() method is used to populate - the parser with actions for optional and positional arguments. Then - the parse_args() method is invoked to convert the args at the - command-line into an object with attributes. - - - ArgumentError -- The exception raised by ArgumentParser objects when - there are errors with the parser's actions. Errors raised while - parsing the command-line are caught by ArgumentParser and emitted - as command-line messages. - - - FileType -- A factory for defining types of files to be created. As the - example above shows, instances of FileType are typically passed as - the type= argument of add_argument() calls. - - - Action -- The base class for parser actions. Typically actions are - selected by passing strings like 'store_true' or 'append_const' to - the action= argument of add_argument(). However, for greater - customization of ArgumentParser actions, subclasses of Action may - be defined and passed as the action= argument. - - - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, - ArgumentDefaultsHelpFormatter -- Formatter classes which - may be passed as the formatter_class= argument to the - ArgumentParser constructor. HelpFormatter is the default, - RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser - not to change the formatting for help text, and - ArgumentDefaultsHelpFormatter adds information about argument defaults - to the help. - -All other classes in this module are considered implementation details. -(Also note that HelpFormatter and RawDescriptionHelpFormatter are only -considered public as object names -- the API of the formatter objects is -still considered an implementation detail.) -""" - -__version__ = '1.4.0' # we use our own version number independent of the - # one in stdlib and we release this on pypi. 
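The module docstring above says the library "supports parsers that dispatch to sub-parsers" but never demonstrates it; a minimal sketch of that pattern (the 'add' command and its arguments are illustrative, not taken from this codebase)::

    import argparse

    # One top-level parser, with a sub-command registered under it.
    parser = argparse.ArgumentParser(prog='tool')
    subparsers = parser.add_subparsers(dest='command')

    add_cmd = subparsers.add_parser('add', help='add two integers')
    add_cmd.add_argument('x', type=int)
    add_cmd.add_argument('y', type=int)

    # The first positional selects the sub-parser; the rest go to it.
    args = parser.parse_args(['add', '2', '3'])
    print(args.command, args.x + args.y)  # prints: add 5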
- -__external_lib__ = True # to make sure the tests really test THIS lib, - # not the builtin one in Python stdlib - -__all__ = [ - 'ArgumentParser', - 'ArgumentError', - 'ArgumentTypeError', - 'FileType', - 'HelpFormatter', - 'ArgumentDefaultsHelpFormatter', - 'RawDescriptionHelpFormatter', - 'RawTextHelpFormatter', - 'Namespace', - 'Action', - 'ONE_OR_MORE', - 'OPTIONAL', - 'PARSER', - 'REMAINDER', - 'SUPPRESS', - 'ZERO_OR_MORE', -] - - -import copy as _copy -import os as _os -import re as _re -import sys as _sys -import textwrap as _textwrap - -from gettext import gettext as _ - -try: - set -except NameError: - # for python < 2.4 compatibility (sets module is there since 2.3): - from sets import Set as set - -try: - basestring -except NameError: - basestring = str - -try: - sorted -except NameError: - # for python < 2.4 compatibility: - def sorted(iterable, reverse=False): - result = list(iterable) - result.sort() - if reverse: - result.reverse() - return result - - -def _callable(obj): - return hasattr(obj, '__call__') or hasattr(obj, '__bases__') - - -SUPPRESS = '==SUPPRESS==' - -OPTIONAL = '?' -ZERO_OR_MORE = '*' -ONE_OR_MORE = '+' -PARSER = 'A...' -REMAINDER = '...' -_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' - -# ============================= -# Utility functions and classes -# ============================= - -class _AttributeHolder(object): - """Abstract base class that provides __repr__. - - The __repr__ method returns a string in the format:: - ClassName(attr=name, attr=name, ...) - The attributes are determined either by a class-level attribute, - '_kwarg_names', or by inspecting the instance __dict__. - """ - - def __repr__(self): - type_name = type(self).__name__ - arg_strings = [] - for arg in self._get_args(): - arg_strings.append(repr(arg)) - for name, value in self._get_kwargs(): - arg_strings.append('%s=%r' % (name, value)) - return '%s(%s)' % (type_name, ', '.join(arg_strings)) - - def _get_kwargs(self): - return sorted(self.__dict__.items()) - - def _get_args(self): - return [] - - -def _ensure_value(namespace, name, value): - if getattr(namespace, name, None) is None: - setattr(namespace, name, value) - return getattr(namespace, name) - - -# =============== -# Formatting Help -# =============== - -class HelpFormatter(object): - """Formatter for generating usage messages and argument help strings. - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. - """ - - def __init__(self, - prog, - indent_increment=2, - max_help_position=24, - width=None): - - # default setting for width - if width is None: - try: - width = int(_os.environ['COLUMNS']) - except (KeyError, ValueError): - width = 80 - width -= 2 - - self._prog = prog - self._indent_increment = indent_increment - self._max_help_position = max_help_position - self._width = width - - self._current_indent = 0 - self._level = 0 - self._action_max_length = 0 - - self._root_section = self._Section(self, None) - self._current_section = self._root_section - - self._whitespace_matcher = _re.compile(r'\s+') - self._long_break_matcher = _re.compile(r'\n\n\n+') - - # =============================== - # Section and indentation methods - # =============================== - def _indent(self): - self._current_indent += self._indent_increment - self._level += 1 - - def _dedent(self): - self._current_indent -= self._indent_increment - assert self._current_indent >= 0, 'Indent decreased below 0.' 
- self._level -= 1 - - class _Section(object): - - def __init__(self, formatter, parent, heading=None): - self.formatter = formatter - self.parent = parent - self.heading = heading - self.items = [] - - def format_help(self): - # format the indented section - if self.parent is not None: - self.formatter._indent() - join = self.formatter._join_parts - for func, args in self.items: - func(*args) - item_help = join([func(*args) for func, args in self.items]) - if self.parent is not None: - self.formatter._dedent() - - # return nothing if the section was empty - if not item_help: - return '' - - # add the heading if the section was non-empty - if self.heading is not SUPPRESS and self.heading is not None: - current_indent = self.formatter._current_indent - heading = '%*s%s:\n' % (current_indent, '', self.heading) - else: - heading = '' - - # join the section-initial newline, the heading and the help - return join(['\n', heading, item_help, '\n']) - - def _add_item(self, func, args): - self._current_section.items.append((func, args)) - - # ======================== - # Message building methods - # ======================== - def start_section(self, heading): - self._indent() - section = self._Section(self, self._current_section, heading) - self._add_item(section.format_help, []) - self._current_section = section - - def end_section(self): - self._current_section = self._current_section.parent - self._dedent() - - def add_text(self, text): - if text is not SUPPRESS and text is not None: - self._add_item(self._format_text, [text]) - - def add_usage(self, usage, actions, groups, prefix=None): - if usage is not SUPPRESS: - args = usage, actions, groups, prefix - self._add_item(self._format_usage, args) - - def add_argument(self, action): - if action.help is not SUPPRESS: - - # find all invocations - get_invocation = self._format_action_invocation - invocations = [get_invocation(action)] - for subaction in self._iter_indented_subactions(action): - invocations.append(get_invocation(subaction)) - - # update the maximum item length - invocation_length = max([len(s) for s in invocations]) - action_length = invocation_length + self._current_indent - self._action_max_length = max(self._action_max_length, - action_length) - - # add the item to the list - self._add_item(self._format_action, [action]) - - def add_arguments(self, actions): - for action in actions: - self.add_argument(action) - - # ======================= - # Help-formatting methods - # ======================= - def format_help(self): - help = self._root_section.format_help() - if help: - help = self._long_break_matcher.sub('\n\n', help) - help = help.strip('\n') + '\n' - return help - - def _join_parts(self, part_strings): - return ''.join([part - for part in part_strings - if part and part is not SUPPRESS]) - - def _format_usage(self, usage, actions, groups, prefix): - if prefix is None: - prefix = _('usage: ') - - # if usage is specified, use that - if usage is not None: - usage = usage % dict(prog=self._prog) - - # if no optionals or positionals are available, usage is just prog - elif usage is None and not actions: - usage = '%(prog)s' % dict(prog=self._prog) - - # if optionals and positionals are available, calculate usage - elif usage is None: - prog = '%(prog)s' % dict(prog=self._prog) - - # split optionals from positionals - optionals = [] - positionals = [] - for action in actions: - if action.option_strings: - optionals.append(action) - else: - positionals.append(action) - - # build full usage string - format = 
self._format_actions_usage - action_usage = format(optionals + positionals, groups) - usage = ' '.join([s for s in [prog, action_usage] if s]) - - # wrap the usage parts if it's too long - text_width = self._width - self._current_indent - if len(prefix) + len(usage) > text_width: - - # break usage into wrappable parts - part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' - opt_usage = format(optionals, groups) - pos_usage = format(positionals, groups) - opt_parts = _re.findall(part_regexp, opt_usage) - pos_parts = _re.findall(part_regexp, pos_usage) - assert ' '.join(opt_parts) == opt_usage - assert ' '.join(pos_parts) == pos_usage - - # helper for wrapping lines - def get_lines(parts, indent, prefix=None): - lines = [] - line = [] - if prefix is not None: - line_len = len(prefix) - 1 - else: - line_len = len(indent) - 1 - for part in parts: - if line_len + 1 + len(part) > text_width: - lines.append(indent + ' '.join(line)) - line = [] - line_len = len(indent) - 1 - line.append(part) - line_len += len(part) + 1 - if line: - lines.append(indent + ' '.join(line)) - if prefix is not None: - lines[0] = lines[0][len(indent):] - return lines - - # if prog is short, follow it with optionals or positionals - if len(prefix) + len(prog) <= 0.75 * text_width: - indent = ' ' * (len(prefix) + len(prog) + 1) - if opt_parts: - lines = get_lines([prog] + opt_parts, indent, prefix) - lines.extend(get_lines(pos_parts, indent)) - elif pos_parts: - lines = get_lines([prog] + pos_parts, indent, prefix) - else: - lines = [prog] - - # if prog is long, put it on its own line - else: - indent = ' ' * len(prefix) - parts = opt_parts + pos_parts - lines = get_lines(parts, indent) - if len(lines) > 1: - lines = [] - lines.extend(get_lines(opt_parts, indent)) - lines.extend(get_lines(pos_parts, indent)) - lines = [prog] + lines - - # join lines into usage - usage = '\n'.join(lines) - - # prefix with 'usage:' - return '%s%s\n\n' % (prefix, usage) - - def _format_actions_usage(self, actions, groups): - # find group indices and identify actions in groups - group_actions = set() - inserts = {} - for group in groups: - try: - start = actions.index(group._group_actions[0]) - except ValueError: - continue - else: - end = start + len(group._group_actions) - if actions[start:end] == group._group_actions: - for action in group._group_actions: - group_actions.add(action) - if not group.required: - if start in inserts: - inserts[start] += ' [' - else: - inserts[start] = '[' - inserts[end] = ']' - else: - if start in inserts: - inserts[start] += ' (' - else: - inserts[start] = '(' - inserts[end] = ')' - for i in range(start + 1, end): - inserts[i] = '|' - - # collect all actions format strings - parts = [] - for i, action in enumerate(actions): - - # suppressed arguments are marked with None - # remove | separators for suppressed arguments - if action.help is SUPPRESS: - parts.append(None) - if inserts.get(i) == '|': - inserts.pop(i) - elif inserts.get(i + 1) == '|': - inserts.pop(i + 1) - - # produce all arg strings - elif not action.option_strings: - part = self._format_args(action, action.dest) - - # if it's in a group, strip the outer [] - if action in group_actions: - if part[0] == '[' and part[-1] == ']': - part = part[1:-1] - - # add the action string to the list - parts.append(part) - - # produce the first way to invoke the option in brackets - else: - option_string = action.option_strings[0] - - # if the Optional doesn't take a value, format is: - # -s or --long - if action.nargs == 0: - part = '%s' % option_string - - # if the 
Optional takes a value, format is: - # -s ARGS or --long ARGS - else: - default = action.dest.upper() - args_string = self._format_args(action, default) - part = '%s %s' % (option_string, args_string) - - # make it look optional if it's not required or in a group - if not action.required and action not in group_actions: - part = '[%s]' % part - - # add the action string to the list - parts.append(part) - - # insert things at the necessary indices - for i in sorted(inserts, reverse=True): - parts[i:i] = [inserts[i]] - - # join all the action items with spaces - text = ' '.join([item for item in parts if item is not None]) - - # clean up separators for mutually exclusive groups - open = r'[\[(]' - close = r'[\])]' - text = _re.sub(r'(%s) ' % open, r'\1', text) - text = _re.sub(r' (%s)' % close, r'\1', text) - text = _re.sub(r'%s *%s' % (open, close), r'', text) - text = _re.sub(r'\(([^|]*)\)', r'\1', text) - text = text.strip() - - # return the text - return text - - def _format_text(self, text): - if '%(prog)' in text: - text = text % dict(prog=self._prog) - text_width = self._width - self._current_indent - indent = ' ' * self._current_indent - return self._fill_text(text, text_width, indent) + '\n\n' - - def _format_action(self, action): - # determine the required width and the entry label - help_position = min(self._action_max_length + 2, - self._max_help_position) - help_width = self._width - help_position - action_width = help_position - self._current_indent - 2 - action_header = self._format_action_invocation(action) - - # no help; start on same line and add a final newline - if not action.help: - tup = self._current_indent, '', action_header - action_header = '%*s%s\n' % tup - - # short action name; start on the same line and pad two spaces - elif len(action_header) <= action_width: - tup = self._current_indent, '', action_width, action_header - action_header = '%*s%-*s ' % tup - indent_first = 0 - - # long action name; start on the next line - else: - tup = self._current_indent, '', action_header - action_header = '%*s%s\n' % tup - indent_first = help_position - - # collect the pieces of the action help - parts = [action_header] - - # if there was help for the action, add lines of help text - if action.help: - help_text = self._expand_help(action) - help_lines = self._split_lines(help_text, help_width) - parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) - for line in help_lines[1:]: - parts.append('%*s%s\n' % (help_position, '', line)) - - # or add a newline if the description doesn't end with one - elif not action_header.endswith('\n'): - parts.append('\n') - - # if there are any sub-actions, add their help as well - for subaction in self._iter_indented_subactions(action): - parts.append(self._format_action(subaction)) - - # return a single string - return self._join_parts(parts) - - def _format_action_invocation(self, action): - if not action.option_strings: - metavar, = self._metavar_formatter(action, action.dest)(1) - return metavar - - else: - parts = [] - - # if the Optional doesn't take a value, format is: - # -s, --long - if action.nargs == 0: - parts.extend(action.option_strings) - - # if the Optional takes a value, format is: - # -s ARGS, --long ARGS - else: - default = action.dest.upper() - args_string = self._format_args(action, default) - for option_string in action.option_strings: - parts.append('%s %s' % (option_string, args_string)) - - return ', '.join(parts) - - def _metavar_formatter(self, action, default_metavar): - if action.metavar is not None: - 
result = action.metavar - elif action.choices is not None: - choice_strs = [str(choice) for choice in action.choices] - result = '{%s}' % ','.join(choice_strs) - else: - result = default_metavar - - def format(tuple_size): - if isinstance(result, tuple): - return result - else: - return (result, ) * tuple_size - return format - - def _format_args(self, action, default_metavar): - get_metavar = self._metavar_formatter(action, default_metavar) - if action.nargs is None: - result = '%s' % get_metavar(1) - elif action.nargs == OPTIONAL: - result = '[%s]' % get_metavar(1) - elif action.nargs == ZERO_OR_MORE: - result = '[%s [%s ...]]' % get_metavar(2) - elif action.nargs == ONE_OR_MORE: - result = '%s [%s ...]' % get_metavar(2) - elif action.nargs == REMAINDER: - result = '...' - elif action.nargs == PARSER: - result = '%s ...' % get_metavar(1) - else: - formats = ['%s' for _ in range(action.nargs)] - result = ' '.join(formats) % get_metavar(action.nargs) - return result - - def _expand_help(self, action): - params = dict(vars(action), prog=self._prog) - for name in list(params): - if params[name] is SUPPRESS: - del params[name] - for name in list(params): - if hasattr(params[name], '__name__'): - params[name] = params[name].__name__ - if params.get('choices') is not None: - choices_str = ', '.join([str(c) for c in params['choices']]) - params['choices'] = choices_str - return self._get_help_string(action) % params - - def _iter_indented_subactions(self, action): - try: - get_subactions = action._get_subactions - except AttributeError: - pass - else: - self._indent() - for subaction in get_subactions(): - yield subaction - self._dedent() - - def _split_lines(self, text, width): - text = self._whitespace_matcher.sub(' ', text).strip() - return _textwrap.wrap(text, width) - - def _fill_text(self, text, width, indent): - text = self._whitespace_matcher.sub(' ', text).strip() - return _textwrap.fill(text, width, initial_indent=indent, - subsequent_indent=indent) - - def _get_help_string(self, action): - return action.help - - -class RawDescriptionHelpFormatter(HelpFormatter): - """Help message formatter which retains any formatting in descriptions. - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. - """ - - def _fill_text(self, text, width, indent): - return ''.join([indent + line for line in text.splitlines(True)]) - - -class RawTextHelpFormatter(RawDescriptionHelpFormatter): - """Help message formatter which retains formatting of all help text. - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. - """ - - def _split_lines(self, text, width): - return text.splitlines() - - -class ArgumentDefaultsHelpFormatter(HelpFormatter): - """Help message formatter which adds default values to argument help. - - Only the name of this class is considered a public API. All the methods - provided by the class are considered an implementation detail. 
- """ - - def _get_help_string(self, action): - help = action.help - if '%(default)' not in action.help: - if action.default is not SUPPRESS: - defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] - if action.option_strings or action.nargs in defaulting_nargs: - help += ' (default: %(default)s)' - return help - - -# ===================== -# Options and Arguments -# ===================== - -def _get_action_name(argument): - if argument is None: - return None - elif argument.option_strings: - return '/'.join(argument.option_strings) - elif argument.metavar not in (None, SUPPRESS): - return argument.metavar - elif argument.dest not in (None, SUPPRESS): - return argument.dest - else: - return None - - -class ArgumentError(Exception): - """An error from creating or using an argument (optional or positional). - - The string value of this exception is the message, augmented with - information about the argument that caused it. - """ - - def __init__(self, argument, message): - self.argument_name = _get_action_name(argument) - self.message = message - - def __str__(self): - if self.argument_name is None: - format = '%(message)s' - else: - format = 'argument %(argument_name)s: %(message)s' - return format % dict(message=self.message, - argument_name=self.argument_name) - - -class ArgumentTypeError(Exception): - """An error from trying to convert a command line string to a type.""" - pass - - -# ============== -# Action classes -# ============== - -class Action(_AttributeHolder): - """Information about how to convert command line strings to Python objects. - - Action objects are used by an ArgumentParser to represent the information - needed to parse a single argument from one or more strings from the - command line. The keyword arguments to the Action constructor are also - all attributes of Action instances. - - Keyword Arguments: - - - option_strings -- A list of command-line option strings which - should be associated with this action. - - - dest -- The name of the attribute to hold the created object(s) - - - nargs -- The number of command-line arguments that should be - consumed. By default, one argument will be consumed and a single - value will be produced. Other values include: - - N (an integer) consumes N arguments (and produces a list) - - '?' consumes zero or one arguments - - '*' consumes zero or more arguments (and produces a list) - - '+' consumes one or more arguments (and produces a list) - Note that the difference between the default and nargs=1 is that - with the default, a single value will be produced, while with - nargs=1, a list containing a single value will be produced. - - - const -- The value to be produced if the option is specified and the - option uses an action that takes no values. - - - default -- The value to be produced if the option is not specified. - - - type -- The type which the command-line arguments should be converted - to, should be one of 'string', 'int', 'float', 'complex' or a - callable object that accepts a single string argument. If None, - 'string' is assumed. - - - choices -- A container of values that should be allowed. If not None, - after a command-line argument has been converted to the appropriate - type, an exception will be raised if it is not a member of this - collection. - - - required -- True if the action must always be specified at the - command line. This is only meaningful for optional command-line - arguments. - - - help -- The help string describing the argument. 
- - - metavar -- The name to be used for the option's argument with the - help string. If None, the 'dest' value will be used as the name. - """ - - def __init__(self, - option_strings, - dest, - nargs=None, - const=None, - default=None, - type=None, - choices=None, - required=False, - help=None, - metavar=None): - self.option_strings = option_strings - self.dest = dest - self.nargs = nargs - self.const = const - self.default = default - self.type = type - self.choices = choices - self.required = required - self.help = help - self.metavar = metavar - - def _get_kwargs(self): - names = [ - 'option_strings', - 'dest', - 'nargs', - 'const', - 'default', - 'type', - 'choices', - 'help', - 'metavar', - ] - return [(name, getattr(self, name)) for name in names] - - def __call__(self, parser, namespace, values, option_string=None): - raise NotImplementedError(_('.__call__() not defined')) - - -class _StoreAction(Action): - - def __init__(self, - option_strings, - dest, - nargs=None, - const=None, - default=None, - type=None, - choices=None, - required=False, - help=None, - metavar=None): - if nargs == 0: - raise ValueError('nargs for store actions must be > 0; if you ' - 'have nothing to store, actions such as store ' - 'true or store const may be more appropriate') - if const is not None and nargs != OPTIONAL: - raise ValueError('nargs must be %r to supply const' % OPTIONAL) - super(_StoreAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=nargs, - const=const, - default=default, - type=type, - choices=choices, - required=required, - help=help, - metavar=metavar) - - def __call__(self, parser, namespace, values, option_string=None): - setattr(namespace, self.dest, values) - - -class _StoreConstAction(Action): - - def __init__(self, - option_strings, - dest, - const, - default=None, - required=False, - help=None, - metavar=None): - super(_StoreConstAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=0, - const=const, - default=default, - required=required, - help=help) - - def __call__(self, parser, namespace, values, option_string=None): - setattr(namespace, self.dest, self.const) - - -class _StoreTrueAction(_StoreConstAction): - - def __init__(self, - option_strings, - dest, - default=False, - required=False, - help=None): - super(_StoreTrueAction, self).__init__( - option_strings=option_strings, - dest=dest, - const=True, - default=default, - required=required, - help=help) - - -class _StoreFalseAction(_StoreConstAction): - - def __init__(self, - option_strings, - dest, - default=True, - required=False, - help=None): - super(_StoreFalseAction, self).__init__( - option_strings=option_strings, - dest=dest, - const=False, - default=default, - required=required, - help=help) - - -class _AppendAction(Action): - - def __init__(self, - option_strings, - dest, - nargs=None, - const=None, - default=None, - type=None, - choices=None, - required=False, - help=None, - metavar=None): - if nargs == 0: - raise ValueError('nargs for append actions must be > 0; if arg ' - 'strings are not supplying the value to append, ' - 'the append const action may be more appropriate') - if const is not None and nargs != OPTIONAL: - raise ValueError('nargs must be %r to supply const' % OPTIONAL) - super(_AppendAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=nargs, - const=const, - default=default, - type=type, - choices=choices, - required=required, - help=help, - metavar=metavar) - - def __call__(self, parser, namespace, values, 
option_string=None): - items = _copy.copy(_ensure_value(namespace, self.dest, [])) - items.append(values) - setattr(namespace, self.dest, items) - - -class _AppendConstAction(Action): - - def __init__(self, - option_strings, - dest, - const, - default=None, - required=False, - help=None, - metavar=None): - super(_AppendConstAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=0, - const=const, - default=default, - required=required, - help=help, - metavar=metavar) - - def __call__(self, parser, namespace, values, option_string=None): - items = _copy.copy(_ensure_value(namespace, self.dest, [])) - items.append(self.const) - setattr(namespace, self.dest, items) - - -class _CountAction(Action): - - def __init__(self, - option_strings, - dest, - default=None, - required=False, - help=None): - super(_CountAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=0, - default=default, - required=required, - help=help) - - def __call__(self, parser, namespace, values, option_string=None): - new_count = _ensure_value(namespace, self.dest, 0) + 1 - setattr(namespace, self.dest, new_count) - - -class _HelpAction(Action): - - def __init__(self, - option_strings, - dest=SUPPRESS, - default=SUPPRESS, - help=None): - super(_HelpAction, self).__init__( - option_strings=option_strings, - dest=dest, - default=default, - nargs=0, - help=help) - - def __call__(self, parser, namespace, values, option_string=None): - parser.print_help() - parser.exit() - - -class _VersionAction(Action): - - def __init__(self, - option_strings, - version=None, - dest=SUPPRESS, - default=SUPPRESS, - help="show program's version number and exit"): - super(_VersionAction, self).__init__( - option_strings=option_strings, - dest=dest, - default=default, - nargs=0, - help=help) - self.version = version - - def __call__(self, parser, namespace, values, option_string=None): - version = self.version - if version is None: - version = parser.version - formatter = parser._get_formatter() - formatter.add_text(version) - parser.exit(message=formatter.format_help()) - - -class _SubParsersAction(Action): - - class _ChoicesPseudoAction(Action): - - def __init__(self, name, aliases, help): - metavar = dest = name - if aliases: - metavar += ' (%s)' % ', '.join(aliases) - sup = super(_SubParsersAction._ChoicesPseudoAction, self) - sup.__init__(option_strings=[], dest=dest, help=help, - metavar=metavar) - - def __init__(self, - option_strings, - prog, - parser_class, - dest=SUPPRESS, - help=None, - metavar=None): - - self._prog_prefix = prog - self._parser_class = parser_class - self._name_parser_map = {} - self._choices_actions = [] - - super(_SubParsersAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=PARSER, - choices=self._name_parser_map, - help=help, - metavar=metavar) - - def add_parser(self, name, **kwargs): - # set prog from the existing prefix - if kwargs.get('prog') is None: - kwargs['prog'] = '%s %s' % (self._prog_prefix, name) - - aliases = kwargs.pop('aliases', ()) - - # create a pseudo-action to hold the choice help - if 'help' in kwargs: - help = kwargs.pop('help') - choice_action = self._ChoicesPseudoAction(name, aliases, help) - self._choices_actions.append(choice_action) - - # create the parser and add it to the map - parser = self._parser_class(**kwargs) - self._name_parser_map[name] = parser - - # make parser available under aliases also - for alias in aliases: - self._name_parser_map[alias] = parser - - return parser - - def _get_subactions(self): - 
return self._choices_actions - - def __call__(self, parser, namespace, values, option_string=None): - parser_name = values[0] - arg_strings = values[1:] - - # set the parser name if requested - if self.dest is not SUPPRESS: - setattr(namespace, self.dest, parser_name) - - # select the parser - try: - parser = self._name_parser_map[parser_name] - except KeyError: - tup = parser_name, ', '.join(self._name_parser_map) - msg = _('unknown parser %r (choices: %s)' % tup) - raise ArgumentError(self, msg) - - # parse all the remaining options into the namespace - # store any unrecognized options on the object, so that the top - # level parser can decide what to do with them - namespace, arg_strings = parser.parse_known_args(arg_strings, namespace) - if arg_strings: - vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, []) - getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings) - - -# ============== -# Type classes -# ============== - -class FileType(object): - """Factory for creating file object types - - Instances of FileType are typically passed as type= arguments to the - ArgumentParser add_argument() method. - - Keyword Arguments: - - mode -- A string indicating how the file is to be opened. Accepts the - same values as the builtin open() function. - - bufsize -- The file's desired buffer size. Accepts the same values as - the builtin open() function. - """ - - def __init__(self, mode='r', bufsize=None): - self._mode = mode - self._bufsize = bufsize - - def __call__(self, string): - # the special argument "-" means sys.std{in,out} - if string == '-': - if 'r' in self._mode: - return _sys.stdin - elif 'w' in self._mode: - return _sys.stdout - else: - msg = _('argument "-" with mode %r' % self._mode) - raise ValueError(msg) - - try: - # all other arguments are used as file names - if self._bufsize: - return open(string, self._mode, self._bufsize) - else: - return open(string, self._mode) - except IOError: - err = _sys.exc_info()[1] - message = _("can't open '%s': %s") - raise ArgumentTypeError(message % (string, err)) - - def __repr__(self): - args = [self._mode, self._bufsize] - args_str = ', '.join([repr(arg) for arg in args if arg is not None]) - return '%s(%s)' % (type(self).__name__, args_str) - -# =========================== -# Optional and Positional Parsing -# =========================== - -class Namespace(_AttributeHolder): - """Simple object for storing attributes. - - Implements equality by attribute names and values, and provides a simple - string representation. 
- """ - - def __init__(self, **kwargs): - for name in kwargs: - setattr(self, name, kwargs[name]) - - __hash__ = None - - def __eq__(self, other): - return vars(self) == vars(other) - - def __ne__(self, other): - return not (self == other) - - def __contains__(self, key): - return key in self.__dict__ - - -class _ActionsContainer(object): - - def __init__(self, - description, - prefix_chars, - argument_default, - conflict_handler): - super(_ActionsContainer, self).__init__() - - self.description = description - self.argument_default = argument_default - self.prefix_chars = prefix_chars - self.conflict_handler = conflict_handler - - # set up registries - self._registries = {} - - # register actions - self.register('action', None, _StoreAction) - self.register('action', 'store', _StoreAction) - self.register('action', 'store_const', _StoreConstAction) - self.register('action', 'store_true', _StoreTrueAction) - self.register('action', 'store_false', _StoreFalseAction) - self.register('action', 'append', _AppendAction) - self.register('action', 'append_const', _AppendConstAction) - self.register('action', 'count', _CountAction) - self.register('action', 'help', _HelpAction) - self.register('action', 'version', _VersionAction) - self.register('action', 'parsers', _SubParsersAction) - - # raise an exception if the conflict handler is invalid - self._get_handler() - - # action storage - self._actions = [] - self._option_string_actions = {} - - # groups - self._action_groups = [] - self._mutually_exclusive_groups = [] - - # defaults storage - self._defaults = {} - - # determines whether an "option" looks like a negative number - self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$') - - # whether or not there are any optionals that look like negative - # numbers -- uses a list so it can be shared and edited - self._has_negative_number_optionals = [] - - # ==================== - # Registration methods - # ==================== - def register(self, registry_name, value, object): - registry = self._registries.setdefault(registry_name, {}) - registry[value] = object - - def _registry_get(self, registry_name, value, default=None): - return self._registries[registry_name].get(value, default) - - # ================================== - # Namespace default accessor methods - # ================================== - def set_defaults(self, **kwargs): - self._defaults.update(kwargs) - - # if these defaults match any existing arguments, replace - # the previous default on the object with the new one - for action in self._actions: - if action.dest in kwargs: - action.default = kwargs[action.dest] - - def get_default(self, dest): - for action in self._actions: - if action.dest == dest and action.default is not None: - return action.default - return self._defaults.get(dest, None) - - - # ======================= - # Adding argument actions - # ======================= - def add_argument(self, *args, **kwargs): - """ - add_argument(dest, ..., name=value, ...) - add_argument(option_string, option_string, ..., name=value, ...) 
- """ - - # if no positional args are supplied or only one is supplied and - # it doesn't look like an option string, parse a positional - # argument - chars = self.prefix_chars - if not args or len(args) == 1 and args[0][0] not in chars: - if args and 'dest' in kwargs: - raise ValueError('dest supplied twice for positional argument') - kwargs = self._get_positional_kwargs(*args, **kwargs) - - # otherwise, we're adding an optional argument - else: - kwargs = self._get_optional_kwargs(*args, **kwargs) - - # if no default was supplied, use the parser-level default - if 'default' not in kwargs: - dest = kwargs['dest'] - if dest in self._defaults: - kwargs['default'] = self._defaults[dest] - elif self.argument_default is not None: - kwargs['default'] = self.argument_default - - # create the action object, and add it to the parser - action_class = self._pop_action_class(kwargs) - if not _callable(action_class): - raise ValueError('unknown action "%s"' % action_class) - action = action_class(**kwargs) - - # raise an error if the action type is not callable - type_func = self._registry_get('type', action.type, action.type) - if not _callable(type_func): - raise ValueError('%r is not callable' % type_func) - - return self._add_action(action) - - def add_argument_group(self, *args, **kwargs): - group = _ArgumentGroup(self, *args, **kwargs) - self._action_groups.append(group) - return group - - def add_mutually_exclusive_group(self, **kwargs): - group = _MutuallyExclusiveGroup(self, **kwargs) - self._mutually_exclusive_groups.append(group) - return group - - def _add_action(self, action): - # resolve any conflicts - self._check_conflict(action) - - # add to actions list - self._actions.append(action) - action.container = self - - # index the action by any option strings it has - for option_string in action.option_strings: - self._option_string_actions[option_string] = action - - # set the flag if any option strings look like negative numbers - for option_string in action.option_strings: - if self._negative_number_matcher.match(option_string): - if not self._has_negative_number_optionals: - self._has_negative_number_optionals.append(True) - - # return the created action - return action - - def _remove_action(self, action): - self._actions.remove(action) - - def _add_container_actions(self, container): - # collect groups by titles - title_group_map = {} - for group in self._action_groups: - if group.title in title_group_map: - msg = _('cannot merge actions - two groups are named %r') - raise ValueError(msg % (group.title)) - title_group_map[group.title] = group - - # map each action to its group - group_map = {} - for group in container._action_groups: - - # if a group with the title exists, use that, otherwise - # create a new group matching the container's group - if group.title not in title_group_map: - title_group_map[group.title] = self.add_argument_group( - title=group.title, - description=group.description, - conflict_handler=group.conflict_handler) - - # map the actions to their new group - for action in group._group_actions: - group_map[action] = title_group_map[group.title] - - # add container's mutually exclusive groups - # NOTE: if add_mutually_exclusive_group ever gains title= and - # description= then this code will need to be expanded as above - for group in container._mutually_exclusive_groups: - mutex_group = self.add_mutually_exclusive_group( - required=group.required) - - # map the actions to their new mutex group - for action in group._group_actions: - group_map[action] = 
mutex_group - - # add all actions to this container or their group - for action in container._actions: - group_map.get(action, self)._add_action(action) - - def _get_positional_kwargs(self, dest, **kwargs): - # make sure required is not specified - if 'required' in kwargs: - msg = _("'required' is an invalid argument for positionals") - raise TypeError(msg) - - # mark positional arguments as required if at least one is - # always required - if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: - kwargs['required'] = True - if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: - kwargs['required'] = True - - # return the keyword arguments with no option strings - return dict(kwargs, dest=dest, option_strings=[]) - - def _get_optional_kwargs(self, *args, **kwargs): - # determine short and long option strings - option_strings = [] - long_option_strings = [] - for option_string in args: - # error on strings that don't start with an appropriate prefix - if not option_string[0] in self.prefix_chars: - msg = _('invalid option string %r: ' - 'must start with a character %r') - tup = option_string, self.prefix_chars - raise ValueError(msg % tup) - - # strings starting with two prefix characters are long options - option_strings.append(option_string) - if option_string[0] in self.prefix_chars: - if len(option_string) > 1: - if option_string[1] in self.prefix_chars: - long_option_strings.append(option_string) - - # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' - dest = kwargs.pop('dest', None) - if dest is None: - if long_option_strings: - dest_option_string = long_option_strings[0] - else: - dest_option_string = option_strings[0] - dest = dest_option_string.lstrip(self.prefix_chars) - if not dest: - msg = _('dest= is required for options like %r') - raise ValueError(msg % option_string) - dest = dest.replace('-', '_') - - # return the updated keyword arguments - return dict(kwargs, dest=dest, option_strings=option_strings) - - def _pop_action_class(self, kwargs, default=None): - action = kwargs.pop('action', default) - return self._registry_get('action', action, action) - - def _get_handler(self): - # determine function from conflict handler string - handler_func_name = '_handle_conflict_%s' % self.conflict_handler - try: - return getattr(self, handler_func_name) - except AttributeError: - msg = _('invalid conflict_resolution value: %r') - raise ValueError(msg % self.conflict_handler) - - def _check_conflict(self, action): - - # find all options that conflict with this option - confl_optionals = [] - for option_string in action.option_strings: - if option_string in self._option_string_actions: - confl_optional = self._option_string_actions[option_string] - confl_optionals.append((option_string, confl_optional)) - - # resolve any conflicts - if confl_optionals: - conflict_handler = self._get_handler() - conflict_handler(action, confl_optionals) - - def _handle_conflict_error(self, action, conflicting_actions): - message = _('conflicting option string(s): %s') - conflict_string = ', '.join([option_string - for option_string, action - in conflicting_actions]) - raise ArgumentError(action, message % conflict_string) - - def _handle_conflict_resolve(self, action, conflicting_actions): - - # remove all conflicting options - for option_string, action in conflicting_actions: - - # remove the conflicting option - action.option_strings.remove(option_string) - self._option_string_actions.pop(option_string, None) - - # if the option now has no option string, remove it from the - # 
container holding it - if not action.option_strings: - action.container._remove_action(action) - - -class _ArgumentGroup(_ActionsContainer): - - def __init__(self, container, title=None, description=None, **kwargs): - # add any missing keyword arguments by checking the container - update = kwargs.setdefault - update('conflict_handler', container.conflict_handler) - update('prefix_chars', container.prefix_chars) - update('argument_default', container.argument_default) - super_init = super(_ArgumentGroup, self).__init__ - super_init(description=description, **kwargs) - - # group attributes - self.title = title - self._group_actions = [] - - # share most attributes with the container - self._registries = container._registries - self._actions = container._actions - self._option_string_actions = container._option_string_actions - self._defaults = container._defaults - self._has_negative_number_optionals = \ - container._has_negative_number_optionals - - def _add_action(self, action): - action = super(_ArgumentGroup, self)._add_action(action) - self._group_actions.append(action) - return action - - def _remove_action(self, action): - super(_ArgumentGroup, self)._remove_action(action) - self._group_actions.remove(action) - - -class _MutuallyExclusiveGroup(_ArgumentGroup): - - def __init__(self, container, required=False): - super(_MutuallyExclusiveGroup, self).__init__(container) - self.required = required - self._container = container - - def _add_action(self, action): - if action.required: - msg = _('mutually exclusive arguments must be optional') - raise ValueError(msg) - action = self._container._add_action(action) - self._group_actions.append(action) - return action - - def _remove_action(self, action): - self._container._remove_action(action) - self._group_actions.remove(action) - - -class ArgumentParser(_AttributeHolder, _ActionsContainer): - """Object for parsing command line strings into Python objects. - - Keyword Arguments: - - prog -- The name of the program (default: sys.argv[0]) - - usage -- A usage message (default: auto-generated from arguments) - - description -- A description of what the program does - - epilog -- Text following the argument descriptions - - parents -- Parsers whose arguments should be copied into this one - - formatter_class -- HelpFormatter class for printing help messages - - prefix_chars -- Characters that prefix optional arguments - - fromfile_prefix_chars -- Characters that prefix files containing - additional arguments - - argument_default -- The default value for all arguments - - conflict_handler -- String indicating how to handle conflicts - - add_help -- Add a -h/-help option - """ - - def __init__(self, - prog=None, - usage=None, - description=None, - epilog=None, - version=None, - parents=[], - formatter_class=HelpFormatter, - prefix_chars='-', - fromfile_prefix_chars=None, - argument_default=None, - conflict_handler='error', - add_help=True): - - if version is not None: - import warnings - warnings.warn( - """The "version" argument to ArgumentParser is deprecated. 
""" - """Please use """ - """"add_argument(..., action='version', version="N", ...)" """ - """instead""", DeprecationWarning) - - superinit = super(ArgumentParser, self).__init__ - superinit(description=description, - prefix_chars=prefix_chars, - argument_default=argument_default, - conflict_handler=conflict_handler) - - # default setting for prog - if prog is None: - prog = _os.path.basename(_sys.argv[0]) - - self.prog = prog - self.usage = usage - self.epilog = epilog - self.version = version - self.formatter_class = formatter_class - self.fromfile_prefix_chars = fromfile_prefix_chars - self.add_help = add_help - - add_group = self.add_argument_group - self._positionals = add_group(_('positional arguments')) - self._optionals = add_group(_('optional arguments')) - self._subparsers = None - - # register types - def identity(string): - return string - self.register('type', None, identity) - - # add help and version arguments if necessary - # (using explicit default to override global argument_default) - if '-' in prefix_chars: - default_prefix = '-' - else: - default_prefix = prefix_chars[0] - if self.add_help: - self.add_argument( - default_prefix+'h', default_prefix*2+'help', - action='help', default=SUPPRESS, - help=_('show this help message and exit')) - if self.version: - self.add_argument( - default_prefix+'v', default_prefix*2+'version', - action='version', default=SUPPRESS, - version=self.version, - help=_("show program's version number and exit")) - - # add parent arguments and defaults - for parent in parents: - self._add_container_actions(parent) - try: - defaults = parent._defaults - except AttributeError: - pass - else: - self._defaults.update(defaults) - - # ======================= - # Pretty __repr__ methods - # ======================= - def _get_kwargs(self): - names = [ - 'prog', - 'usage', - 'description', - 'version', - 'formatter_class', - 'conflict_handler', - 'add_help', - ] - return [(name, getattr(self, name)) for name in names] - - # ================================== - # Optional/Positional adding methods - # ================================== - def add_subparsers(self, **kwargs): - if self._subparsers is not None: - self.error(_('cannot have multiple subparser arguments')) - - # add the parser class to the arguments if it's not present - kwargs.setdefault('parser_class', type(self)) - - if 'title' in kwargs or 'description' in kwargs: - title = _(kwargs.pop('title', 'subcommands')) - description = _(kwargs.pop('description', None)) - self._subparsers = self.add_argument_group(title, description) - else: - self._subparsers = self._positionals - - # prog defaults to the usage message of this parser, skipping - # optional arguments and with no "usage:" prefix - if kwargs.get('prog') is None: - formatter = self._get_formatter() - positionals = self._get_positional_actions() - groups = self._mutually_exclusive_groups - formatter.add_usage(self.usage, positionals, groups, '') - kwargs['prog'] = formatter.format_help().strip() - - # create the parsers action and add it to the positionals list - parsers_class = self._pop_action_class(kwargs, 'parsers') - action = parsers_class(option_strings=[], **kwargs) - self._subparsers._add_action(action) - - # return the created parsers action - return action - - def _add_action(self, action): - if action.option_strings: - self._optionals._add_action(action) - else: - self._positionals._add_action(action) - return action - - def _get_optional_actions(self): - return [action - for action in self._actions - if action.option_strings] 
- - def _get_positional_actions(self): - return [action - for action in self._actions - if not action.option_strings] - - # ===================================== - # Command line argument parsing methods - # ===================================== - def parse_args(self, args=None, namespace=None): - args, argv = self.parse_known_args(args, namespace) - if argv: - msg = _('unrecognized arguments: %s') - self.error(msg % ' '.join(argv)) - return args - - def parse_known_args(self, args=None, namespace=None): - # args default to the system args - if args is None: - args = _sys.argv[1:] - - # default Namespace built from parser defaults - if namespace is None: - namespace = Namespace() - - # add any action defaults that aren't present - for action in self._actions: - if action.dest is not SUPPRESS: - if not hasattr(namespace, action.dest): - if action.default is not SUPPRESS: - setattr(namespace, action.dest, action.default) - - # add any parser defaults that aren't present - for dest in self._defaults: - if not hasattr(namespace, dest): - setattr(namespace, dest, self._defaults[dest]) - - # parse the arguments and exit if there are any errors - try: - namespace, args = self._parse_known_args(args, namespace) - if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR): - args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR)) - delattr(namespace, _UNRECOGNIZED_ARGS_ATTR) - return namespace, args - except ArgumentError: - err = _sys.exc_info()[1] - self.error(str(err)) - - def _parse_known_args(self, arg_strings, namespace): - # replace arg strings that are file references - if self.fromfile_prefix_chars is not None: - arg_strings = self._read_args_from_files(arg_strings) - - # map all mutually exclusive arguments to the other arguments - # they can't occur with - action_conflicts = {} - for mutex_group in self._mutually_exclusive_groups: - group_actions = mutex_group._group_actions - for i, mutex_action in enumerate(mutex_group._group_actions): - conflicts = action_conflicts.setdefault(mutex_action, []) - conflicts.extend(group_actions[:i]) - conflicts.extend(group_actions[i + 1:]) - - # find all option indices, and determine the arg_string_pattern - # which has an 'O' if there is an option at an index, - # an 'A' if there is an argument, or a '-' if there is a '--' - option_string_indices = {} - arg_string_pattern_parts = [] - arg_strings_iter = iter(arg_strings) - for i, arg_string in enumerate(arg_strings_iter): - - # all args after -- are non-options - if arg_string == '--': - arg_string_pattern_parts.append('-') - for arg_string in arg_strings_iter: - arg_string_pattern_parts.append('A') - - # otherwise, add the arg to the arg strings - # and note the index if it was an option - else: - option_tuple = self._parse_optional(arg_string) - if option_tuple is None: - pattern = 'A' - else: - option_string_indices[i] = option_tuple - pattern = 'O' - arg_string_pattern_parts.append(pattern) - - # join the pieces together to form the pattern - arg_strings_pattern = ''.join(arg_string_pattern_parts) - - # converts arg strings to the appropriate and then takes the action - seen_actions = set() - seen_non_default_actions = set() - - def take_action(action, argument_strings, option_string=None): - seen_actions.add(action) - argument_values = self._get_values(action, argument_strings) - - # error if this argument is not allowed with other previously - # seen arguments, assuming that actions that use the default - # value don't really count as "present" - if argument_values is not action.default: - 
seen_non_default_actions.add(action) - for conflict_action in action_conflicts.get(action, []): - if conflict_action in seen_non_default_actions: - msg = _('not allowed with argument %s') - action_name = _get_action_name(conflict_action) - raise ArgumentError(action, msg % action_name) - - # take the action if we didn't receive a SUPPRESS value - # (e.g. from a default) - if argument_values is not SUPPRESS: - action(self, namespace, argument_values, option_string) - - # function to convert arg_strings into an optional action - def consume_optional(start_index): - - # get the optional identified at this index - option_tuple = option_string_indices[start_index] - action, option_string, explicit_arg = option_tuple - - # identify additional optionals in the same arg string - # (e.g. -xyz is the same as -x -y -z if no args are required) - match_argument = self._match_argument - action_tuples = [] - while True: - - # if we found no optional action, skip it - if action is None: - extras.append(arg_strings[start_index]) - return start_index + 1 - - # if there is an explicit argument, try to match the - # optional's string arguments to only this - if explicit_arg is not None: - arg_count = match_argument(action, 'A') - - # if the action is a single-dash option and takes no - # arguments, try to parse more single-dash options out - # of the tail of the option string - chars = self.prefix_chars - if arg_count == 0 and option_string[1] not in chars: - action_tuples.append((action, [], option_string)) - char = option_string[0] - option_string = char + explicit_arg[0] - new_explicit_arg = explicit_arg[1:] or None - optionals_map = self._option_string_actions - if option_string in optionals_map: - action = optionals_map[option_string] - explicit_arg = new_explicit_arg - else: - msg = _('ignored explicit argument %r') - raise ArgumentError(action, msg % explicit_arg) - - # if the action expect exactly one argument, we've - # successfully matched the option; exit the loop - elif arg_count == 1: - stop = start_index + 1 - args = [explicit_arg] - action_tuples.append((action, args, option_string)) - break - - # error if a double-dash option did not use the - # explicit argument - else: - msg = _('ignored explicit argument %r') - raise ArgumentError(action, msg % explicit_arg) - - # if there is no explicit argument, try to match the - # optional's string arguments with the following strings - # if successful, exit the loop - else: - start = start_index + 1 - selected_patterns = arg_strings_pattern[start:] - arg_count = match_argument(action, selected_patterns) - stop = start + arg_count - args = arg_strings[start:stop] - action_tuples.append((action, args, option_string)) - break - - # add the Optional to the list and return the index at which - # the Optional's string args stopped - assert action_tuples - for action, args, option_string in action_tuples: - take_action(action, args, option_string) - return stop - - # the list of Positionals left to be parsed; this is modified - # by consume_positionals() - positionals = self._get_positional_actions() - - # function to convert arg_strings into positional actions - def consume_positionals(start_index): - # match as many Positionals as possible - match_partial = self._match_arguments_partial - selected_pattern = arg_strings_pattern[start_index:] - arg_counts = match_partial(positionals, selected_pattern) - - # slice off the appropriate arg strings for each Positional - # and add the Positional and its args to the list - for action, arg_count in zip(positionals, 
arg_counts): - args = arg_strings[start_index: start_index + arg_count] - start_index += arg_count - take_action(action, args) - - # slice off the Positionals that we just parsed and return the - # index at which the Positionals' string args stopped - positionals[:] = positionals[len(arg_counts):] - return start_index - - # consume Positionals and Optionals alternately, until we have - # passed the last option string - extras = [] - start_index = 0 - if option_string_indices: - max_option_string_index = max(option_string_indices) - else: - max_option_string_index = -1 - while start_index <= max_option_string_index: - - # consume any Positionals preceding the next option - next_option_string_index = min([ - index - for index in option_string_indices - if index >= start_index]) - if start_index != next_option_string_index: - positionals_end_index = consume_positionals(start_index) - - # only try to parse the next optional if we didn't consume - # the option string during the positionals parsing - if positionals_end_index > start_index: - start_index = positionals_end_index - continue - else: - start_index = positionals_end_index - - # if we consumed all the positionals we could and we're not - # at the index of an option string, there were extra arguments - if start_index not in option_string_indices: - strings = arg_strings[start_index:next_option_string_index] - extras.extend(strings) - start_index = next_option_string_index - - # consume the next optional and any arguments for it - start_index = consume_optional(start_index) - - # consume any positionals following the last Optional - stop_index = consume_positionals(start_index) - - # if we didn't consume all the argument strings, there were extras - extras.extend(arg_strings[stop_index:]) - - # if we didn't use all the Positional objects, there were too few - # arg strings supplied. - if positionals: - self.error(_('too few arguments')) - - # make sure all required actions were present, and convert defaults. 
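# Editorial sketch, not part of the removed file: the loop above alternates
# between consuming positionals and optionals and funnels anything unmatched
# into `extras`, which parse_known_args() returns as its second value.
import argparse

p = argparse.ArgumentParser(prog='demo')
p.add_argument('-x')
p.add_argument('name')
ns, extras = p.parse_known_args(['-x', '1', 'alice', '--unknown', '2'])
assert ns.x == '1' and ns.name == 'alice'
assert extras == ['--unknown', '2']  # unmatched option and trailing positional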
- for action in self._actions: - if action not in seen_actions: - if action.required: - name = _get_action_name(action) - self.error(_('argument %s is required') % name) - else: - # Convert action default now instead of doing it before - # parsing arguments to avoid calling convert functions - # twice (which may fail) if the argument was given, but - # only if it was defined already in the namespace - if (action.default is not None and - isinstance(action.default, basestring) and - hasattr(namespace, action.dest) and - action.default is getattr(namespace, action.dest)): - setattr(namespace, action.dest, - self._get_value(action, action.default)) - - # make sure all required groups had one option present - for group in self._mutually_exclusive_groups: - if group.required: - for action in group._group_actions: - if action in seen_non_default_actions: - break - - # if no actions were used, report the error - else: - names = [_get_action_name(action) - for action in group._group_actions - if action.help is not SUPPRESS] - msg = _('one of the arguments %s is required') - self.error(msg % ' '.join(names)) - - # return the updated namespace and the extra arguments - return namespace, extras - - def _read_args_from_files(self, arg_strings): - # expand arguments referencing files - new_arg_strings = [] - for arg_string in arg_strings: - - # for regular arguments, just add them back into the list - if arg_string[0] not in self.fromfile_prefix_chars: - new_arg_strings.append(arg_string) - - # replace arguments referencing files with the file content - else: - try: - args_file = open(arg_string[1:]) - try: - arg_strings = [] - for arg_line in args_file.read().splitlines(): - for arg in self.convert_arg_line_to_args(arg_line): - arg_strings.append(arg) - arg_strings = self._read_args_from_files(arg_strings) - new_arg_strings.extend(arg_strings) - finally: - args_file.close() - except IOError: - err = _sys.exc_info()[1] - self.error(str(err)) - - # return the modified argument list - return new_arg_strings - - def convert_arg_line_to_args(self, arg_line): - return [arg_line] - - def _match_argument(self, action, arg_strings_pattern): - # match the pattern for this action to the arg strings - nargs_pattern = self._get_nargs_pattern(action) - match = _re.match(nargs_pattern, arg_strings_pattern) - - # raise an exception if we weren't able to find a match - if match is None: - nargs_errors = { - None: _('expected one argument'), - OPTIONAL: _('expected at most one argument'), - ONE_OR_MORE: _('expected at least one argument'), - } - default = _('expected %s argument(s)') % action.nargs - msg = nargs_errors.get(action.nargs, default) - raise ArgumentError(action, msg) - - # return the number of arguments matched - return len(match.group(1)) - - def _match_arguments_partial(self, actions, arg_strings_pattern): - # progressively shorten the actions list by slicing off the - # final actions until we find a match - result = [] - for i in range(len(actions), 0, -1): - actions_slice = actions[:i] - pattern = ''.join([self._get_nargs_pattern(action) - for action in actions_slice]) - match = _re.match(pattern, arg_strings_pattern) - if match is not None: - result.extend([len(string) for string in match.groups()]) - break - - # return the list of arg string counts - return result - - def _parse_optional(self, arg_string): - # if it's an empty string, it was meant to be a positional - if not arg_string: - return None - - # if it doesn't start with a prefix, it was meant to be positional - if not arg_string[0] in 
self.prefix_chars: - return None - - # if the option string is present in the parser, return the action - if arg_string in self._option_string_actions: - action = self._option_string_actions[arg_string] - return action, arg_string, None - - # if it's just a single character, it was meant to be positional - if len(arg_string) == 1: - return None - - # if the option string before the "=" is present, return the action - if '=' in arg_string: - option_string, explicit_arg = arg_string.split('=', 1) - if option_string in self._option_string_actions: - action = self._option_string_actions[option_string] - return action, option_string, explicit_arg - - # search through all possible prefixes of the option string - # and all actions in the parser for possible interpretations - option_tuples = self._get_option_tuples(arg_string) - - # if multiple actions match, the option string was ambiguous - if len(option_tuples) > 1: - options = ', '.join([option_string - for action, option_string, explicit_arg in option_tuples]) - tup = arg_string, options - self.error(_('ambiguous option: %s could match %s') % tup) - - # if exactly one action matched, this segmentation is good, - # so return the parsed action - elif len(option_tuples) == 1: - option_tuple, = option_tuples - return option_tuple - - # if it was not found as an option, but it looks like a negative - # number, it was meant to be positional - # unless there are negative-number-like options - if self._negative_number_matcher.match(arg_string): - if not self._has_negative_number_optionals: - return None - - # if it contains a space, it was meant to be a positional - if ' ' in arg_string: - return None - - # it was meant to be an optional but there is no such option - # in this parser (though it might be a valid option in a subparser) - return None, arg_string, None - - def _get_option_tuples(self, option_string): - result = [] - - # option strings starting with two prefix characters are only - # split at the '=' - chars = self.prefix_chars - if option_string[0] in chars and option_string[1] in chars: - if '=' in option_string: - option_prefix, explicit_arg = option_string.split('=', 1) - else: - option_prefix = option_string - explicit_arg = None - for option_string in self._option_string_actions: - if option_string.startswith(option_prefix): - action = self._option_string_actions[option_string] - tup = action, option_string, explicit_arg - result.append(tup) - - # single character options can be concatenated with their arguments - # but multiple character options always have to have their argument - # separate - elif option_string[0] in chars and option_string[1] not in chars: - option_prefix = option_string - explicit_arg = None - short_option_prefix = option_string[:2] - short_explicit_arg = option_string[2:] - - for option_string in self._option_string_actions: - if option_string == short_option_prefix: - action = self._option_string_actions[option_string] - tup = action, option_string, short_explicit_arg - result.append(tup) - elif option_string.startswith(option_prefix): - action = self._option_string_actions[option_string] - tup = action, option_string, explicit_arg - result.append(tup) - - # shouldn't ever get here - else: - self.error(_('unexpected option string: %s') % option_string) - - # return the collected option tuples - return result - - def _get_nargs_pattern(self, action): - # in all examples below, we have to allow for '--' args - # which are represented as '-' in the pattern - nargs = action.nargs - - # the default (None) is 
assumed to be a single argument - if nargs is None: - nargs_pattern = '(-*A-*)' - - # allow zero or one arguments - elif nargs == OPTIONAL: - nargs_pattern = '(-*A?-*)' - - # allow zero or more arguments - elif nargs == ZERO_OR_MORE: - nargs_pattern = '(-*[A-]*)' - - # allow one or more arguments - elif nargs == ONE_OR_MORE: - nargs_pattern = '(-*A[A-]*)' - - # allow any number of options or arguments - elif nargs == REMAINDER: - nargs_pattern = '([-AO]*)' - - # allow one argument followed by any number of options or arguments - elif nargs == PARSER: - nargs_pattern = '(-*A[-AO]*)' - - # all others should be integers - else: - nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) - - # if this is an optional action, -- is not allowed - if action.option_strings: - nargs_pattern = nargs_pattern.replace('-*', '') - nargs_pattern = nargs_pattern.replace('-', '') - - # return the pattern - return nargs_pattern - - # ======================== - # Value conversion methods - # ======================== - def _get_values(self, action, arg_strings): - # for everything but PARSER args, strip out '--' - if action.nargs not in [PARSER, REMAINDER]: - arg_strings = [s for s in arg_strings if s != '--'] - - # optional argument produces a default when not present - if not arg_strings and action.nargs == OPTIONAL: - if action.option_strings: - value = action.const - else: - value = action.default - if isinstance(value, basestring): - value = self._get_value(action, value) - self._check_value(action, value) - - # when nargs='*' on a positional, if there were no command-line - # args, use the default if it is anything other than None - elif (not arg_strings and action.nargs == ZERO_OR_MORE and - not action.option_strings): - if action.default is not None: - value = action.default - else: - value = arg_strings - self._check_value(action, value) - - # single argument or optional argument produces a single value - elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: - arg_string, = arg_strings - value = self._get_value(action, arg_string) - self._check_value(action, value) - - # REMAINDER arguments convert all values, checking none - elif action.nargs == REMAINDER: - value = [self._get_value(action, v) for v in arg_strings] - - # PARSER arguments convert all values, but check only the first - elif action.nargs == PARSER: - value = [self._get_value(action, v) for v in arg_strings] - self._check_value(action, value[0]) - - # all other types of nargs produce a list - else: - value = [self._get_value(action, v) for v in arg_strings] - for v in value: - self._check_value(action, v) - - # return the converted value - return value - - def _get_value(self, action, arg_string): - type_func = self._registry_get('type', action.type, action.type) - if not _callable(type_func): - msg = _('%r is not callable') - raise ArgumentError(action, msg % type_func) - - # convert the value to the appropriate type - try: - result = type_func(arg_string) - - # ArgumentTypeErrors indicate errors - except ArgumentTypeError: - name = getattr(action.type, '__name__', repr(action.type)) - msg = str(_sys.exc_info()[1]) - raise ArgumentError(action, msg) - - # TypeErrors or ValueErrors also indicate errors - except (TypeError, ValueError): - name = getattr(action.type, '__name__', repr(action.type)) - msg = _('invalid %s value: %r') - raise ArgumentError(action, msg % (name, arg_string)) - - # return the converted value - return result - - def _check_value(self, action, value): - # converted value must be one of the choices (if 
specified) - if action.choices is not None and value not in action.choices: - tup = value, ', '.join(map(repr, action.choices)) - msg = _('invalid choice: %r (choose from %s)') % tup - raise ArgumentError(action, msg) - - # ======================= - # Help-formatting methods - # ======================= - def format_usage(self): - formatter = self._get_formatter() - formatter.add_usage(self.usage, self._actions, - self._mutually_exclusive_groups) - return formatter.format_help() - - def format_help(self): - formatter = self._get_formatter() - - # usage - formatter.add_usage(self.usage, self._actions, - self._mutually_exclusive_groups) - - # description - formatter.add_text(self.description) - - # positionals, optionals and user-defined groups - for action_group in self._action_groups: - formatter.start_section(action_group.title) - formatter.add_text(action_group.description) - formatter.add_arguments(action_group._group_actions) - formatter.end_section() - - # epilog - formatter.add_text(self.epilog) - - # determine help from format above - return formatter.format_help() - - def format_version(self): - import warnings - warnings.warn( - 'The format_version method is deprecated -- the "version" ' - 'argument to ArgumentParser is no longer supported.', - DeprecationWarning) - formatter = self._get_formatter() - formatter.add_text(self.version) - return formatter.format_help() - - def _get_formatter(self): - return self.formatter_class(prog=self.prog) - - # ===================== - # Help-printing methods - # ===================== - def print_usage(self, file=None): - if file is None: - file = _sys.stdout - self._print_message(self.format_usage(), file) - - def print_help(self, file=None): - if file is None: - file = _sys.stdout - self._print_message(self.format_help(), file) - - def print_version(self, file=None): - import warnings - warnings.warn( - 'The print_version method is deprecated -- the "version" ' - 'argument to ArgumentParser is no longer supported.', - DeprecationWarning) - self._print_message(self.format_version(), file) - - def _print_message(self, message, file=None): - if message: - if file is None: - file = _sys.stderr - file.write(message) - - # =============== - # Exiting methods - # =============== - def exit(self, status=0, message=None): - if message: - self._print_message(message, _sys.stderr) - _sys.exit(status) - - def error(self, message): - """error(message: string) - - Prints a usage message incorporating the message to stderr and - exits. - - If you override this in a subclass, it should not return -- it - should either exit or raise an exception. - """ - self.print_usage(_sys.stderr) - self.exit(2, _('%s: error: %s\n') % (self.prog, message)) diff --git a/lib/arrow/__init__.py b/lib/arrow/__init__.py deleted file mode 100644 index 63dd6be9..00000000 --- a/lib/arrow/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# -*- coding: utf-8 -*- - -from .arrow import Arrow -from .factory import ArrowFactory -from .api import get, now, utcnow - -__version__ = '0.10.0' -VERSION = __version__ diff --git a/lib/arrow/api.py b/lib/arrow/api.py deleted file mode 100644 index 16de39fe..00000000 --- a/lib/arrow/api.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -''' -Provides the default implementation of :class:`ArrowFactory ` -methods for use as a module API. - -''' - -from __future__ import absolute_import - -from arrow.factory import ArrowFactory - - -# internal default factory. 
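# Editorial sketch, not part of the removed file: lib/arrow/api.py, deleted
# here, only forwards module-level calls to a single shared ArrowFactory, so
# the public surface reduces to calls like these.
import arrow

now_utc = arrow.utcnow()                         # -> _factory.utcnow()
berlin = arrow.now('Europe/Berlin')              # -> _factory.now(tz)
parsed = arrow.get('2013-05-05T12:30:45+00:00')  # -> _factory.get(*args)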
-_factory = ArrowFactory() - - -def get(*args, **kwargs): - ''' Implements the default :class:`ArrowFactory ` - ``get`` method. - - ''' - - return _factory.get(*args, **kwargs) - -def utcnow(): - ''' Implements the default :class:`ArrowFactory ` - ``utcnow`` method. - - ''' - - return _factory.utcnow() - - -def now(tz=None): - ''' Implements the default :class:`ArrowFactory ` - ``now`` method. - - ''' - - return _factory.now(tz) - - -def factory(type): - ''' Returns an :class:`.ArrowFactory` for the specified :class:`Arrow ` - or derived type. - - :param type: the type, :class:`Arrow ` or derived. - - ''' - - return ArrowFactory(type) - - -__all__ = ['get', 'utcnow', 'now', 'factory'] - diff --git a/lib/arrow/arrow.py b/lib/arrow/arrow.py deleted file mode 100644 index 131eec07..00000000 --- a/lib/arrow/arrow.py +++ /dev/null @@ -1,948 +0,0 @@ -# -*- coding: utf-8 -*- -''' -Provides the :class:`Arrow ` class, an enhanced ``datetime`` -replacement. - -''' - -from __future__ import absolute_import - -from datetime import datetime, timedelta, tzinfo -from dateutil import tz as dateutil_tz -from dateutil.relativedelta import relativedelta -import calendar -import sys -import warnings - - -from arrow import util, locales, parser, formatter - - -class Arrow(object): - '''An :class:`Arrow ` object. - - Implements the ``datetime`` interface, behaving as an aware ``datetime`` while implementing - additional functionality. - - :param year: the calendar year. - :param month: the calendar month. - :param day: the calendar day. - :param hour: (optional) the hour. Defaults to 0. - :param minute: (optional) the minute, Defaults to 0. - :param second: (optional) the second, Defaults to 0. - :param microsecond: (optional) the microsecond. Defaults 0. - :param tzinfo: (optional) the ``tzinfo`` object. Defaults to ``None``. - - If tzinfo is None, it is assumed to be UTC on creation. - - Usage:: - - >>> import arrow - >>> arrow.Arrow(2013, 5, 5, 12, 30, 45) - - - ''' - - resolution = datetime.resolution - - _ATTRS = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond'] - _ATTRS_PLURAL = ['{0}s'.format(a) for a in _ATTRS] - _MONTHS_PER_QUARTER = 3 - - def __init__(self, year, month, day, hour=0, minute=0, second=0, microsecond=0, - tzinfo=None): - - if util.isstr(tzinfo): - tzinfo = parser.TzinfoParser.parse(tzinfo) - tzinfo = tzinfo or dateutil_tz.tzutc() - - self._datetime = datetime(year, month, day, hour, minute, second, - microsecond, tzinfo) - - - # factories: single object, both original and from datetime. - - @classmethod - def now(cls, tzinfo=None): - '''Constructs an :class:`Arrow ` object, representing "now". - - :param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time. - - ''' - - utc = datetime.utcnow().replace(tzinfo=dateutil_tz.tzutc()) - dt = utc.astimezone(dateutil_tz.tzlocal() if tzinfo is None else tzinfo) - - return cls(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, - dt.microsecond, dt.tzinfo) - - @classmethod - def utcnow(cls): - ''' Constructs an :class:`Arrow ` object, representing "now" in UTC - time. - - ''' - - dt = datetime.utcnow() - - return cls(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, - dt.microsecond, dateutil_tz.tzutc()) - - @classmethod - def fromtimestamp(cls, timestamp, tzinfo=None): - ''' Constructs an :class:`Arrow ` object from a timestamp. - - :param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either. - :param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time. 
- - ''' - - tzinfo = tzinfo or dateutil_tz.tzlocal() - timestamp = cls._get_timestamp_from_input(timestamp) - dt = datetime.fromtimestamp(timestamp, tzinfo) - - return cls(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, - dt.microsecond, tzinfo) - - @classmethod - def utcfromtimestamp(cls, timestamp): - '''Constructs an :class:`Arrow ` object from a timestamp, in UTC time. - - :param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either. - - ''' - - timestamp = cls._get_timestamp_from_input(timestamp) - dt = datetime.utcfromtimestamp(timestamp) - - return cls(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, - dt.microsecond, dateutil_tz.tzutc()) - - @classmethod - def fromdatetime(cls, dt, tzinfo=None): - ''' Constructs an :class:`Arrow ` object from a ``datetime`` and optional - ``tzinfo`` object. - - :param dt: the ``datetime`` - :param tzinfo: (optional) a ``tzinfo`` object. Defaults to UTC. - - ''' - - tzinfo = tzinfo or dt.tzinfo or dateutil_tz.tzutc() - - return cls(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, - dt.microsecond, tzinfo) - - @classmethod - def fromdate(cls, date, tzinfo=None): - ''' Constructs an :class:`Arrow ` object from a ``date`` and optional - ``tzinfo`` object. Time values are set to 0. - - :param date: the ``date`` - :param tzinfo: (optional) a ``tzinfo`` object. Defaults to UTC. - ''' - - tzinfo = tzinfo or dateutil_tz.tzutc() - - return cls(date.year, date.month, date.day, tzinfo=tzinfo) - - @classmethod - def strptime(cls, date_str, fmt, tzinfo=None): - ''' Constructs an :class:`Arrow ` object from a date string and format, - in the style of ``datetime.strptime``. - - :param date_str: the date string. - :param fmt: the format string. - :param tzinfo: (optional) an optional ``tzinfo`` - ''' - - dt = datetime.strptime(date_str, fmt) - tzinfo = tzinfo or dt.tzinfo - - return cls(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, - dt.microsecond, tzinfo) - - - # factories: ranges and spans - - @classmethod - def range(cls, frame, start, end=None, tz=None, limit=None): - ''' Returns an array of :class:`Arrow ` objects, representing - an iteration of time between two inputs. - - :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...). - :param start: A datetime expression, the start of the range. - :param end: (optional) A datetime expression, the end of the range. - :param tz: (optional) A timezone expression. Defaults to UTC. - :param limit: (optional) A maximum number of tuples to return. - - **NOTE**: the **end** or **limit** must be provided. Call with **end** alone to - return the entire range, with **limit** alone to return a maximum # of results from the - start, and with both to cap a range at a maximum # of results. - - Supported frame values: year, quarter, month, week, day, hour, minute, second - - Recognized datetime expressions: - - - An :class:`Arrow ` object. - - A ``datetime`` object. - - Recognized timezone expressions: - - - A ``tzinfo`` object. - - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'. - - A ``str`` in ISO-8601 style, as in '+07:00'. - - A ``str``, one of the following: 'local', 'utc', 'UTC'. - - Usage: - - >>> start = datetime(2013, 5, 5, 12, 30) - >>> end = datetime(2013, 5, 5, 17, 15) - >>> for r in arrow.Arrow.range('hour', start, end): - ... print repr(r) - ... 
- - - - - - - ''' - - _, frame_relative, relative_steps = cls._get_frames(frame) - - tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz) - - start = cls._get_datetime(start).replace(tzinfo=tzinfo) - end, limit = cls._get_iteration_params(end, limit) - end = cls._get_datetime(end).replace(tzinfo=tzinfo) - - current = cls.fromdatetime(start) - results = [] - - while current <= end and len(results) < limit: - results.append(current) - - values = [getattr(current, f) for f in cls._ATTRS] - current = cls(*values, tzinfo=tzinfo) + relativedelta(**{frame_relative: relative_steps}) - - return results - - - @classmethod - def span_range(cls, frame, start, end, tz=None, limit=None): - ''' Returns an array of tuples, each :class:`Arrow ` objects, - representing a series of timespans between two inputs. - - :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...). - :param start: A datetime expression, the start of the range. - :param end: (optional) A datetime expression, the end of the range. - :param tz: (optional) A timezone expression. Defaults to UTC. - :param limit: (optional) A maximum number of tuples to return. - - **NOTE**: the **end** or **limit** must be provided. Call with **end** alone to - return the entire range, with **limit** alone to return a maximum # of results from the - start, and with both to cap a range at a maximum # of results. - - Supported frame values: year, quarter, month, week, day, hour, minute, second - - Recognized datetime expressions: - - - An :class:`Arrow ` object. - - A ``datetime`` object. - - Recognized timezone expressions: - - - A ``tzinfo`` object. - - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'. - - A ``str`` in ISO-8601 style, as in '+07:00'. - - A ``str``, one of the following: 'local', 'utc', 'UTC'. - - Usage: - - >>> start = datetime(2013, 5, 5, 12, 30) - >>> end = datetime(2013, 5, 5, 17, 15) - >>> for r in arrow.Arrow.span_range('hour', start, end): - ... print r - ... - (, ) - (, ) - (, ) - (, ) - (, ) - - ''' - tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz) - start = cls.fromdatetime(start, tzinfo).span(frame)[0] - _range = cls.range(frame, start, end, tz, limit) - return [r.span(frame) for r in _range] - - - # representations - - def __repr__(self): - - dt = self._datetime - attrs = ', '.join([str(i) for i in [dt.year, dt.month, dt.day, dt.hour, dt.minute, - dt.second, dt.microsecond]]) - - return '<{0} [{1}]>'.format(self.__class__.__name__, self.__str__()) - - def __str__(self): - return self._datetime.isoformat() - - def __format__(self, formatstr): - - if len(formatstr) > 0: - return self.format(formatstr) - - return str(self) - - def __hash__(self): - return self._datetime.__hash__() - - - # attributes & properties - - def __getattr__(self, name): - - if name == 'week': - return self.isocalendar()[1] - - if name == 'quarter': - return int((self.month-1)/self._MONTHS_PER_QUARTER) + 1 - - if not name.startswith('_'): - value = getattr(self._datetime, name, None) - - if value is not None: - return value - - return object.__getattribute__(self, name) - - @property - def tzinfo(self): - ''' Gets the ``tzinfo`` of the :class:`Arrow ` object. ''' - - return self._datetime.tzinfo - - @tzinfo.setter - def tzinfo(self, tzinfo): - ''' Sets the ``tzinfo`` of the :class:`Arrow ` object. ''' - - self._datetime = self._datetime.replace(tzinfo=tzinfo) - - @property - def datetime(self): - ''' Returns a datetime representation of the :class:`Arrow ` object. 
''' - - return self._datetime - - @property - def naive(self): - ''' Returns a naive datetime representation of the :class:`Arrow ` object. ''' - - return self._datetime.replace(tzinfo=None) - - @property - def timestamp(self): - ''' Returns a timestamp representation of the :class:`Arrow ` object. ''' - - return calendar.timegm(self._datetime.utctimetuple()) - - @property - def float_timestamp(self): - ''' Returns a floating-point representation of the :class:`Arrow ` object. ''' - - return self.timestamp + float(self.microsecond) / 1000000 - - - # mutation and duplication. - - def clone(self): - ''' Returns a new :class:`Arrow ` object, cloned from the current one. - - Usage: - - >>> arw = arrow.utcnow() - >>> cloned = arw.clone() - - ''' - - return self.fromdatetime(self._datetime) - - def replace(self, **kwargs): - ''' Returns a new :class:`Arrow ` object with attributes updated - according to inputs. - - Use single property names to set their value absolutely: - - >>> import arrow - >>> arw = arrow.utcnow() - >>> arw - - >>> arw.replace(year=2014, month=6) - - - You can also provide a timezone expression can also be replaced: - - >>> arw.replace(tzinfo=tz.tzlocal()) - - - Use plural property names to shift their current value relatively (**deprecated**): - - >>> arw.replace(years=1, months=-1) - - - Recognized timezone expressions: - - - A ``tzinfo`` object. - - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'. - - A ``str`` in ISO-8601 style, as in '+07:00'. - - A ``str``, one of the following: 'local', 'utc', 'UTC'. - - ''' - - absolute_kwargs = {} - relative_kwargs = {} # TODO: DEPRECATED; remove in next release - - for key, value in kwargs.items(): - - if key in self._ATTRS: - absolute_kwargs[key] = value - elif key in self._ATTRS_PLURAL or key in ['weeks', 'quarters']: - # TODO: DEPRECATED - warnings.warn("replace() with plural property to shift value" - "is deprecated, use shift() instead", - DeprecationWarning) - relative_kwargs[key] = value - elif key in ['week', 'quarter']: - raise AttributeError('setting absolute {0} is not supported'.format(key)) - elif key !='tzinfo': - raise AttributeError('unknown attribute: "{0}"'.format(key)) - - # core datetime does not support quarters, translate to months. - relative_kwargs.setdefault('months', 0) - relative_kwargs['months'] += relative_kwargs.pop('quarters', 0) * self._MONTHS_PER_QUARTER - - current = self._datetime.replace(**absolute_kwargs) - current += relativedelta(**relative_kwargs) # TODO: DEPRECATED - - tzinfo = kwargs.get('tzinfo') - - if tzinfo is not None: - tzinfo = self._get_tzinfo(tzinfo) - current = current.replace(tzinfo=tzinfo) - - return self.fromdatetime(current) - - def shift(self, **kwargs): - ''' Returns a new :class:`Arrow ` object with attributes updated - according to inputs. - - Use plural property names to shift their current value relatively: - - >>> import arrow - >>> arw = arrow.utcnow() - >>> arw - - >>> arw.shift(years=1, months=-1) - - - ''' - - relative_kwargs = {} - - for key, value in kwargs.items(): - - if key in self._ATTRS_PLURAL or key in ['weeks', 'quarters']: - relative_kwargs[key] = value - else: - raise AttributeError() - - # core datetime does not support quarters, translate to months. 
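# Editorial sketch, not part of the removed file: per the docstrings above,
# singular names set a field absolutely through replace(), while plural names
# shift it relatively through shift() (plurals in replace() are the
# deprecated path).
import arrow

arw = arrow.Arrow(2013, 5, 5, 12, 30, 45)
assert arw.replace(hour=0).hour == 0     # absolute: set the hour
assert arw.shift(hours=-2).hour == 10    # relative: move two hours back
assert arw.shift(quarters=1).month == 8  # a quarter is translated to 3 months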
- relative_kwargs.setdefault('months', 0) - relative_kwargs['months'] += relative_kwargs.pop('quarters', 0) * self._MONTHS_PER_QUARTER - - current = self._datetime + relativedelta(**relative_kwargs) - - return self.fromdatetime(current) - - def to(self, tz): - ''' Returns a new :class:`Arrow ` object, converted - to the target timezone. - - :param tz: an expression representing a timezone. - - Recognized timezone expressions: - - - A ``tzinfo`` object. - - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'. - - A ``str`` in ISO-8601 style, as in '+07:00'. - - A ``str``, one of the following: 'local', 'utc', 'UTC'. - - Usage:: - - >>> utc = arrow.utcnow() - >>> utc - - - >>> utc.to('US/Pacific') - - - >>> utc.to(tz.tzlocal()) - - - >>> utc.to('-07:00') - - - >>> utc.to('local') - - - >>> utc.to('local').to('utc') - - - ''' - - if not isinstance(tz, tzinfo): - tz = parser.TzinfoParser.parse(tz) - - dt = self._datetime.astimezone(tz) - - return self.__class__(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, - dt.microsecond, dt.tzinfo) - - def span(self, frame, count=1): - ''' Returns two new :class:`Arrow ` objects, representing the timespan - of the :class:`Arrow ` object in a given timeframe. - - :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...). - :param count: (optional) the number of frames to span. - - Supported frame values: year, quarter, month, week, day, hour, minute, second - - Usage:: - - >>> arrow.utcnow() - - - >>> arrow.utcnow().span('hour') - (, ) - - >>> arrow.utcnow().span('day') - (, ) - - >>> arrow.utcnow().span('day', count=2) - (, ) - - ''' - - frame_absolute, frame_relative, relative_steps = self._get_frames(frame) - - if frame_absolute == 'week': - attr = 'day' - elif frame_absolute == 'quarter': - attr = 'month' - else: - attr = frame_absolute - - index = self._ATTRS.index(attr) - frames = self._ATTRS[:index + 1] - - values = [getattr(self, f) for f in frames] - - for i in range(3 - len(values)): - values.append(1) - - floor = self.__class__(*values, tzinfo=self.tzinfo) - - if frame_absolute == 'week': - floor = floor + relativedelta(days=-(self.isoweekday() - 1)) - elif frame_absolute == 'quarter': - floor = floor + relativedelta(months=-((self.month - 1) % 3)) - - ceil = floor + relativedelta( - **{frame_relative: count * relative_steps}) + relativedelta(microseconds=-1) - - return floor, ceil - - def floor(self, frame): - ''' Returns a new :class:`Arrow ` object, representing the "floor" - of the timespan of the :class:`Arrow ` object in a given timeframe. - Equivalent to the first element in the 2-tuple returned by - :func:`span `. - - :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...). - - Usage:: - - >>> arrow.utcnow().floor('hour') - - ''' - - return self.span(frame)[0] - - def ceil(self, frame): - ''' Returns a new :class:`Arrow ` object, representing the "ceiling" - of the timespan of the :class:`Arrow ` object in a given timeframe. - Equivalent to the second element in the 2-tuple returned by - :func:`span `. - - :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...). - - Usage:: - - >>> arrow.utcnow().ceil('hour') - - ''' - - return self.span(frame)[1] - - - # string output and formatting. - - def format(self, fmt='YYYY-MM-DD HH:mm:ssZZ', locale='en_us'): - ''' Returns a string representation of the :class:`Arrow ` object, - formatted according to a format string. - - :param fmt: the format string. 
- - Usage:: - - >>> arrow.utcnow().format('YYYY-MM-DD HH:mm:ss ZZ') - '2013-05-09 03:56:47 -00:00' - - >>> arrow.utcnow().format('X') - '1368071882' - - >>> arrow.utcnow().format('MMMM DD, YYYY') - 'May 09, 2013' - - >>> arrow.utcnow().format() - '2013-05-09 03:56:47 -00:00' - - ''' - - return formatter.DateTimeFormatter(locale).format(self._datetime, fmt) - - - def humanize(self, other=None, locale='en_us', only_distance=False): - ''' Returns a localized, humanized representation of a relative difference in time. - - :param other: (optional) an :class:`Arrow ` or ``datetime`` object. - Defaults to now in the current :class:`Arrow ` object's timezone. - :param locale: (optional) a ``str`` specifying a locale. Defaults to 'en_us'. - :param only_distance: (optional) returns only time difference eg: "11 seconds" without "in" or "ago" part. - - Usage:: - - >>> earlier = arrow.utcnow().replace(hours=-2) - >>> earlier.humanize() - '2 hours ago' - - >>> later = later = earlier.replace(hours=4) - >>> later.humanize(earlier) - 'in 4 hours' - - ''' - - locale = locales.get_locale(locale) - - if other is None: - utc = datetime.utcnow().replace(tzinfo=dateutil_tz.tzutc()) - dt = utc.astimezone(self._datetime.tzinfo) - - elif isinstance(other, Arrow): - dt = other._datetime - - elif isinstance(other, datetime): - if other.tzinfo is None: - dt = other.replace(tzinfo=self._datetime.tzinfo) - else: - dt = other.astimezone(self._datetime.tzinfo) - - else: - raise TypeError() - - delta = int(util.total_seconds(self._datetime - dt)) - sign = -1 if delta < 0 else 1 - diff = abs(delta) - delta = diff - - if diff < 10: - return locale.describe('now', only_distance=only_distance) - - if diff < 45: - return locale.describe('seconds', sign, only_distance=only_distance) - - elif diff < 90: - return locale.describe('minute', sign, only_distance=only_distance) - elif diff < 2700: - minutes = sign * int(max(delta / 60, 2)) - return locale.describe('minutes', minutes, only_distance=only_distance) - - elif diff < 5400: - return locale.describe('hour', sign, only_distance=only_distance) - elif diff < 79200: - hours = sign * int(max(delta / 3600, 2)) - return locale.describe('hours', hours, only_distance=only_distance) - - elif diff < 129600: - return locale.describe('day', sign, only_distance=only_distance) - elif diff < 2160000: - days = sign * int(max(delta / 86400, 2)) - return locale.describe('days', days, only_distance=only_distance) - - elif diff < 3888000: - return locale.describe('month', sign, only_distance=only_distance) - elif diff < 29808000: - self_months = self._datetime.year * 12 + self._datetime.month - other_months = dt.year * 12 + dt.month - - months = sign * int(max(abs(other_months - self_months), 2)) - - return locale.describe('months', months, only_distance=only_distance) - - elif diff < 47260800: - return locale.describe('year', sign, only_distance=only_distance) - else: - years = sign * int(max(delta / 31536000, 2)) - return locale.describe('years', years, only_distance=only_distance) - - - # math - - def __add__(self, other): - - if isinstance(other, (timedelta, relativedelta)): - return self.fromdatetime(self._datetime + other, self._datetime.tzinfo) - - raise TypeError() - - def __radd__(self, other): - return self.__add__(other) - - def __sub__(self, other): - - if isinstance(other, (timedelta, relativedelta)): - return self.fromdatetime(self._datetime - other, self._datetime.tzinfo) - - elif isinstance(other, datetime): - return self._datetime - other - - elif isinstance(other, Arrow): - 
return self._datetime - other._datetime - - raise TypeError() - - def __rsub__(self, other): - - if isinstance(other, datetime): - return other - self._datetime - - raise TypeError() - - - # comparisons - - def _cmperror(self, other): - raise TypeError('can\'t compare \'{0}\' to \'{1}\''.format( - type(self), type(other))) - - def __eq__(self, other): - - if not isinstance(other, (Arrow, datetime)): - return False - - return self._datetime == self._get_datetime(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __gt__(self, other): - - if not isinstance(other, (Arrow, datetime)): - self._cmperror(other) - - return self._datetime > self._get_datetime(other) - - def __ge__(self, other): - - if not isinstance(other, (Arrow, datetime)): - self._cmperror(other) - - return self._datetime >= self._get_datetime(other) - - def __lt__(self, other): - - if not isinstance(other, (Arrow, datetime)): - self._cmperror(other) - - return self._datetime < self._get_datetime(other) - - def __le__(self, other): - - if not isinstance(other, (Arrow, datetime)): - self._cmperror(other) - - return self._datetime <= self._get_datetime(other) - - - # datetime methods - - def date(self): - ''' Returns a ``date`` object with the same year, month and day. ''' - - return self._datetime.date() - - def time(self): - ''' Returns a ``time`` object with the same hour, minute, second, microsecond. ''' - - return self._datetime.time() - - def timetz(self): - ''' Returns a ``time`` object with the same hour, minute, second, microsecond and tzinfo. ''' - - return self._datetime.timetz() - - def astimezone(self, tz): - ''' Returns a ``datetime`` object, adjusted to the specified tzinfo. - - :param tz: a ``tzinfo`` object. - - ''' - - return self._datetime.astimezone(tz) - - def utcoffset(self): - ''' Returns a ``timedelta`` object representing the whole number of minutes difference from UTC time. ''' - - return self._datetime.utcoffset() - - def dst(self): - ''' Returns the daylight savings time adjustment. ''' - return self._datetime.dst() - - def timetuple(self): - ''' Returns a ``time.struct_time``, in the current timezone. ''' - - return self._datetime.timetuple() - - def utctimetuple(self): - ''' Returns a ``time.struct_time``, in UTC time. ''' - - return self._datetime.utctimetuple() - - def toordinal(self): - ''' Returns the proleptic Gregorian ordinal of the date. ''' - - return self._datetime.toordinal() - - def weekday(self): - ''' Returns the day of the week as an integer (0-6). ''' - - return self._datetime.weekday() - - def isoweekday(self): - ''' Returns the ISO day of the week as an integer (1-7). ''' - - return self._datetime.isoweekday() - - def isocalendar(self): - ''' Returns a 3-tuple, (ISO year, ISO week number, ISO weekday). ''' - - return self._datetime.isocalendar() - - def isoformat(self, sep='T'): - '''Returns an ISO 8601 formatted representation of the date and time. ''' - - return self._datetime.isoformat(sep) - - def ctime(self): - ''' Returns a ctime formatted representation of the date and time. ''' - - return self._datetime.ctime() - - def strftime(self, format): - ''' Formats in the style of ``datetime.strptime``. - - :param format: the format string. - - ''' - - return self._datetime.strftime(format) - - def for_json(self): - '''Serializes for the ``for_json`` protocol of simplejson.''' - return self.isoformat() - - # internal tools. 
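# Editorial sketch, not part of the removed file: because Arrow proxies the
# datetime interface above, instances compare and subtract like aware
# datetimes.
from datetime import timedelta
import arrow

a = arrow.Arrow(2013, 5, 5, 12, 30)
b = arrow.Arrow(2013, 5, 5, 14, 30)
assert b > a                        # comparisons use the wrapped datetime
assert b - a == timedelta(hours=2)  # __sub__ yields a plain timedelta
assert a.isoformat() == '2013-05-05T12:30:00+00:00'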
- - @staticmethod - def _get_tzinfo(tz_expr): - - if tz_expr is None: - return dateutil_tz.tzutc() - if isinstance(tz_expr, tzinfo): - return tz_expr - else: - try: - return parser.TzinfoParser.parse(tz_expr) - except parser.ParserError: - raise ValueError('\'{0}\' not recognized as a timezone'.format( - tz_expr)) - - @classmethod - def _get_datetime(cls, expr): - - if isinstance(expr, Arrow): - return expr.datetime - - if isinstance(expr, datetime): - return expr - - try: - expr = float(expr) - return cls.utcfromtimestamp(expr).datetime - except: - raise ValueError( - '\'{0}\' not recognized as a timestamp or datetime'.format(expr)) - - @classmethod - def _get_frames(cls, name): - - if name in cls._ATTRS: - return name, '{0}s'.format(name), 1 - - elif name in ['week', 'weeks']: - return 'week', 'weeks', 1 - elif name in ['quarter', 'quarters']: - return 'quarter', 'months', 3 - - raise AttributeError() - - @classmethod - def _get_iteration_params(cls, end, limit): - - if end is None: - - if limit is None: - raise Exception('one of \'end\' or \'limit\' is required') - - return cls.max, limit - - else: - if limit is None: - return end, sys.maxsize - return end, limit - - @staticmethod - def _get_timestamp_from_input(timestamp): - - try: - return float(timestamp) - except: - raise ValueError('cannot parse \'{0}\' as a timestamp'.format(timestamp)) - -Arrow.min = Arrow.fromdatetime(datetime.min) -Arrow.max = Arrow.fromdatetime(datetime.max) diff --git a/lib/arrow/factory.py b/lib/arrow/factory.py deleted file mode 100644 index a5d690b2..00000000 --- a/lib/arrow/factory.py +++ /dev/null @@ -1,254 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Implements the :class:`ArrowFactory ` class, -providing factory methods for common :class:`Arrow ` -construction scenarios. - -""" - -from __future__ import absolute_import - -from arrow.arrow import Arrow -from arrow import parser -from arrow.util import is_timestamp, isstr - -from datetime import datetime, tzinfo, date -from dateutil import tz as dateutil_tz -from time import struct_time -import calendar - - -class ArrowFactory(object): - ''' A factory for generating :class:`Arrow ` objects. - - :param type: (optional) the :class:`Arrow `-based class to construct from. - Defaults to :class:`Arrow `. - - ''' - - def __init__(self, type=Arrow): - self.type = type - - def get(self, *args, **kwargs): - ''' Returns an :class:`Arrow ` object based on flexible inputs. - - Usage:: - - >>> import arrow - - **No inputs** to get current UTC time:: - - >>> arrow.get() - - - **None** to also get current UTC time:: - - >>> arrow.get(None) - - - **One** :class:`Arrow ` object, to get a copy. 
- - >>> arw = arrow.utcnow() - >>> arrow.get(arw) - - - **One** ``str``, ``float``, or ``int``, convertible to a floating-point timestamp, to get that timestamp in UTC:: - - >>> arrow.get(1367992474.293378) - - - >>> arrow.get(1367992474) - - - >>> arrow.get('1367992474.293378') - - - >>> arrow.get('1367992474') - - - **One** ISO-8601-formatted ``str``, to parse it:: - - >>> arrow.get('2013-09-29T01:26:43.830580') - - - **One** ``tzinfo``, to get the current time in that timezone:: - - >>> arrow.get(tz.tzlocal()) - - - **One** naive ``datetime``, to get that datetime in UTC:: - - >>> arrow.get(datetime(2013, 5, 5)) - - - **One** aware ``datetime``, to get that datetime:: - - >>> arrow.get(datetime(2013, 5, 5, tzinfo=tz.tzlocal())) - - - **One** naive ``date``, to get that date in UTC:: - - >>> arrow.get(date(2013, 5, 5)) - - - **Two** arguments, a naive or aware ``datetime``, and a timezone expression (as above):: - - >>> arrow.get(datetime(2013, 5, 5), 'US/Pacific') - - - **Two** arguments, a naive ``date``, and a timezone expression (as above):: - - >>> arrow.get(date(2013, 5, 5), 'US/Pacific') - - - **Two** arguments, both ``str``, to parse the first according to the format of the second:: - - >>> arrow.get('2013-05-05 12:30:45', 'YYYY-MM-DD HH:mm:ss') - - - **Two** arguments, first a ``str`` to parse and second a ``list`` of formats to try:: - - >>> arrow.get('2013-05-05 12:30:45', ['MM/DD/YYYY', 'YYYY-MM-DD HH:mm:ss']) - - - **Three or more** arguments, as for the constructor of a ``datetime``:: - - >>> arrow.get(2013, 5, 5, 12, 30, 45) - - - **One** time.struct time:: - >>> arrow.get(gmtime(0)) - - - ''' - - arg_count = len(args) - locale = kwargs.get('locale', 'en_us') - tz = kwargs.get('tzinfo', None) - - # () -> now, @ utc. - if arg_count == 0: - if isinstance(tz, tzinfo): - return self.type.now(tz) - return self.type.utcnow() - - if arg_count == 1: - arg = args[0] - - # (None) -> now, @ utc. - if arg is None: - return self.type.utcnow() - - # try (int, float, str(int), str(float)) -> utc, from timestamp. - if is_timestamp(arg): - return self.type.utcfromtimestamp(arg) - - # (Arrow) -> from the object's datetime. - if isinstance(arg, Arrow): - return self.type.fromdatetime(arg.datetime) - - # (datetime) -> from datetime. - if isinstance(arg, datetime): - return self.type.fromdatetime(arg) - - # (date) -> from date. - if isinstance(arg, date): - return self.type.fromdate(arg) - - # (tzinfo) -> now, @ tzinfo. - elif isinstance(arg, tzinfo): - return self.type.now(arg) - - # (str) -> now, @ tzinfo. - elif isstr(arg): - dt = parser.DateTimeParser(locale).parse_iso(arg) - return self.type.fromdatetime(dt) - - # (struct_time) -> from struct_time - elif isinstance(arg, struct_time): - return self.type.utcfromtimestamp(calendar.timegm(arg)) - - else: - raise TypeError('Can\'t parse single argument type of \'{0}\''.format(type(arg))) - - elif arg_count == 2: - - arg_1, arg_2 = args[0], args[1] - - if isinstance(arg_1, datetime): - - # (datetime, tzinfo) -> fromdatetime @ tzinfo/string. - if isinstance(arg_2, tzinfo) or isstr(arg_2): - return self.type.fromdatetime(arg_1, arg_2) - else: - raise TypeError('Can\'t parse two arguments of types \'datetime\', \'{0}\''.format( - type(arg_2))) - - # (date, tzinfo/str) -> fromdate @ tzinfo/string. 
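# Editorial sketch, not part of the removed file: the dispatch above lets
# get() accept most date-like inputs; a few of the documented forms, made
# concrete.
from datetime import datetime
import arrow

assert arrow.get(1367992474).timestamp == 1367992474                   # timestamp
assert arrow.get(datetime(2013, 5, 5)).naive == datetime(2013, 5, 5)   # naive dt
assert arrow.get('2013-05-05 12:30:45', 'YYYY-MM-DD HH:mm:ss').hour == 12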
- elif isinstance(arg_1, date): - - if isinstance(arg_2, tzinfo) or isstr(arg_2): - return self.type.fromdate(arg_1, tzinfo=arg_2) - else: - raise TypeError('Can\'t parse two arguments of types \'date\', \'{0}\''.format( - type(arg_2))) - - # (str, format) -> parse. - elif isstr(arg_1) and (isstr(arg_2) or isinstance(arg_2, list)): - dt = parser.DateTimeParser(locale).parse(args[0], args[1]) - return self.type.fromdatetime(dt, tzinfo=tz) - - else: - raise TypeError('Can\'t parse two arguments of types \'{0}\', \'{1}\''.format( - type(arg_1), type(arg_2))) - - # 3+ args -> datetime-like via constructor. - else: - return self.type(*args, **kwargs) - - def utcnow(self): - '''Returns an :class:`Arrow ` object, representing "now" in UTC time. - - Usage:: - - >>> import arrow - >>> arrow.utcnow() - - ''' - - return self.type.utcnow() - - def now(self, tz=None): - '''Returns an :class:`Arrow ` object, representing "now". - - :param tz: (optional) An expression representing a timezone. Defaults to local time. - - Recognized timezone expressions: - - - A ``tzinfo`` object. - - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'. - - A ``str`` in ISO-8601 style, as in '+07:00'. - - A ``str``, one of the following: 'local', 'utc', 'UTC'. - - Usage:: - - >>> import arrow - >>> arrow.now() - - - >>> arrow.now('US/Pacific') - - - >>> arrow.now('+02:00') - - - >>> arrow.now('local') - - ''' - - if tz is None: - tz = dateutil_tz.tzlocal() - elif not isinstance(tz, tzinfo): - tz = parser.TzinfoParser.parse(tz) - - return self.type.now(tz) diff --git a/lib/arrow/formatter.py b/lib/arrow/formatter.py deleted file mode 100644 index 50fd3a17..00000000 --- a/lib/arrow/formatter.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -import calendar -import re -from dateutil import tz as dateutil_tz -from arrow import util, locales - - -class DateTimeFormatter(object): - - _FORMAT_RE = re.compile('(YYY?Y?|MM?M?M?|Do|DD?D?D?|d?dd?d?|HH?|hh?|mm?|ss?|SS?S?S?S?S?|ZZ?|a|A|X)') - - def __init__(self, locale='en_us'): - - self.locale = locales.get_locale(locale) - - def format(cls, dt, fmt): - - return cls._FORMAT_RE.sub(lambda m: cls._format_token(dt, m.group(0)), fmt) - - def _format_token(self, dt, token): - - if token == 'YYYY': - return self.locale.year_full(dt.year) - if token == 'YY': - return self.locale.year_abbreviation(dt.year) - - if token == 'MMMM': - return self.locale.month_name(dt.month) - if token == 'MMM': - return self.locale.month_abbreviation(dt.month) - if token == 'MM': - return '{0:02d}'.format(dt.month) - if token == 'M': - return str(dt.month) - - if token == 'DDDD': - return '{0:03d}'.format(dt.timetuple().tm_yday) - if token == 'DDD': - return str(dt.timetuple().tm_yday) - if token == 'DD': - return '{0:02d}'.format(dt.day) - if token == 'D': - return str(dt.day) - - if token == 'Do': - return self.locale.ordinal_number(dt.day) - - if token == 'dddd': - return self.locale.day_name(dt.isoweekday()) - if token == 'ddd': - return self.locale.day_abbreviation(dt.isoweekday()) - if token == 'd': - return str(dt.isoweekday()) - - if token == 'HH': - return '{0:02d}'.format(dt.hour) - if token == 'H': - return str(dt.hour) - if token == 'hh': - return '{0:02d}'.format(dt.hour if 0 < dt.hour < 13 else abs(dt.hour - 12)) - if token == 'h': - return str(dt.hour if 0 < dt.hour < 13 else abs(dt.hour - 12)) - - if token == 'mm': - return '{0:02d}'.format(dt.minute) - if token == 'm': - return str(dt.minute) - - if token == 'ss': - 
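The `now()` docstring above lists every accepted timezone expression; in practice (again assuming the vendored package):

    import arrow

    arrow.now()              # local time
    arrow.now('US/Pacific')  # IANA-style name
    arrow.now('+02:00')      # ISO-8601-style offset
    arrow.now('local')       # explicit 'local'/'utc'/'UTC'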
return '{0:02d}'.format(dt.second) - if token == 's': - return str(dt.second) - - if token == 'SSSSSS': - return str('{0:06d}'.format(int(dt.microsecond))) - if token == 'SSSSS': - return str('{0:05d}'.format(int(dt.microsecond / 10))) - if token == 'SSSS': - return str('{0:04d}'.format(int(dt.microsecond / 100))) - if token == 'SSS': - return str('{0:03d}'.format(int(dt.microsecond / 1000))) - if token == 'SS': - return str('{0:02d}'.format(int(dt.microsecond / 10000))) - if token == 'S': - return str(int(dt.microsecond / 100000)) - - if token == 'X': - return str(calendar.timegm(dt.utctimetuple())) - - if token in ['ZZ', 'Z']: - separator = ':' if token == 'ZZ' else '' - tz = dateutil_tz.tzutc() if dt.tzinfo is None else dt.tzinfo - total_minutes = int(util.total_seconds(tz.utcoffset(dt)) / 60) - - sign = '+' if total_minutes >= 0 else '-' - total_minutes = abs(total_minutes) - hour, minute = divmod(total_minutes, 60) - - return '{0}{1:02d}{2}{3:02d}'.format(sign, hour, separator, minute) - - if token in ('a', 'A'): - return self.locale.meridian(dt.hour, token) - diff --git a/lib/arrow/locales.py b/lib/arrow/locales.py deleted file mode 100644 index 7cf7f4c3..00000000 --- a/lib/arrow/locales.py +++ /dev/null @@ -1,2011 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from __future__ import unicode_literals - -import inspect -import sys - - -def get_locale(name): - '''Returns an appropriate :class:`Locale ` - corresponding to an inpute locale name. - - :param name: the name of the locale. - - ''' - - locale_cls = _locales.get(name.lower()) - - if locale_cls is None: - raise ValueError('Unsupported locale \'{0}\''.format(name)) - - return locale_cls() - - -# base locale type. - -class Locale(object): - ''' Represents locale-specific data and functionality. ''' - - names = [] - - timeframes = { - 'now': '', - 'seconds': '', - 'minute': '', - 'minutes': '', - 'hour': '', - 'hours': '', - 'day': '', - 'days': '', - 'month': '', - 'months': '', - 'year': '', - 'years': '', - } - - meridians = { - 'am': '', - 'pm': '', - 'AM': '', - 'PM': '', - } - - past = None - future = None - - month_names = [] - month_abbreviations = [] - - day_names = [] - day_abbreviations = [] - - ordinal_day_re = r'(\d+)' - - def __init__(self): - - self._month_name_to_ordinal = None - - def describe(self, timeframe, delta=0, only_distance=False): - ''' Describes a delta within a timeframe in plain language. - - :param timeframe: a string representing a timeframe. - :param delta: a quantity representing a delta in a timeframe. - :param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords - ''' - - humanized = self._format_timeframe(timeframe, delta) - if not only_distance: - humanized = self._format_relative(humanized, timeframe, delta) - - return humanized - - def day_name(self, day): - ''' Returns the day name for a specified day of the week. - - :param day: the ``int`` day of the week (1-7). - - ''' - - return self.day_names[day] - - def day_abbreviation(self, day): - ''' Returns the day abbreviation for a specified day of the week. - - :param day: the ``int`` day of the week (1-7). - - ''' - - return self.day_abbreviations[day] - - def month_name(self, month): - ''' Returns the month name for a specified month of the year. - - :param month: the ``int`` month of the year (1-12). - - ''' - - return self.month_names[month] - - def month_abbreviation(self, month): - ''' Returns the month abbreviation for a specified month of the year. 
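`DateTimeFormatter.format()` above is one regex substitution: every token that `_FORMAT_RE` finds is swapped for its rendered value. A stripped-down sketch of the same technique, with an illustrative `render` helper covering only six tokens:

    import re
    from datetime import datetime

    _TOKEN_RE = re.compile(r'YYYY|MM|DD|HH|mm|ss')

    def render(dt, fmt):
        # replace each recognized token with its zero-padded value
        values = {'YYYY': '{0:04d}'.format(dt.year), 'MM': '{0:02d}'.format(dt.month),
                  'DD': '{0:02d}'.format(dt.day), 'HH': '{0:02d}'.format(dt.hour),
                  'mm': '{0:02d}'.format(dt.minute), 'ss': '{0:02d}'.format(dt.second)}
        return _TOKEN_RE.sub(lambda m: values[m.group(0)], fmt)

    render(datetime(2013, 5, 5, 12, 30, 45), 'YYYY-MM-DD HH:mm:ss')
    # -> '2013-05-05 12:30:45'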
- - :param month: the ``int`` month of the year (1-12). - - ''' - - return self.month_abbreviations[month] - - def month_number(self, name): - ''' Returns the month number for a month specified by name or abbreviation. - - :param name: the month name or abbreviation. - - ''' - - if self._month_name_to_ordinal is None: - self._month_name_to_ordinal = self._name_to_ordinal(self.month_names) - self._month_name_to_ordinal.update(self._name_to_ordinal(self.month_abbreviations)) - - return self._month_name_to_ordinal.get(name) - - def year_full(self, year): - ''' Returns the year for specific locale if available - - :param name: the ``int`` year (4-digit) - ''' - return '{0:04d}'.format(year) - - def year_abbreviation(self, year): - ''' Returns the year for specific locale if available - - :param name: the ``int`` year (4-digit) - ''' - return '{0:04d}'.format(year)[2:] - - def meridian(self, hour, token): - ''' Returns the meridian indicator for a specified hour and format token. - - :param hour: the ``int`` hour of the day. - :param token: the format token. - ''' - - if token == 'a': - return self.meridians['am'] if hour < 12 else self.meridians['pm'] - if token == 'A': - return self.meridians['AM'] if hour < 12 else self.meridians['PM'] - - def ordinal_number(self, n): - ''' Returns the ordinal format of a given integer - - :param n: an integer - ''' - return self._ordinal_number(n) - - def _ordinal_number(self, n): - return '{0}'.format(n) - - def _name_to_ordinal(self, lst): - return dict(map(lambda i: (i[1].lower(), i[0] + 1), enumerate(lst[1:]))) - - def _format_timeframe(self, timeframe, delta): - - return self.timeframes[timeframe].format(abs(delta)) - - def _format_relative(self, humanized, timeframe, delta): - - if timeframe == 'now': - return humanized - - direction = self.past if delta < 0 else self.future - - return direction.format(humanized) - - -# base locale type implementations. 
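With a concrete locale, such as the `EnglishLocale` defined next, the base-class plumbing above composes a timeframe template with the `past`/`future` wrapper (assuming the vendored module):

    from arrow import locales

    en = locales.get_locale('en_us')
    en.describe('hours', -2)                        # -> '2 hours ago'
    en.describe('minutes', 5)                       # -> 'in 5 minutes'
    en.describe('seconds', 30, only_distance=True)  # -> 'seconds'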
- -class EnglishLocale(Locale): - - names = ['en', 'en_us', 'en_gb', 'en_au', 'en_be', 'en_jp', 'en_za', 'en_ca'] - - past = '{0} ago' - future = 'in {0}' - - timeframes = { - 'now': 'just now', - 'seconds': 'seconds', - 'minute': 'a minute', - 'minutes': '{0} minutes', - 'hour': 'an hour', - 'hours': '{0} hours', - 'day': 'a day', - 'days': '{0} days', - 'month': 'a month', - 'months': '{0} months', - 'year': 'a year', - 'years': '{0} years', - } - - meridians = { - 'am': 'am', - 'pm': 'pm', - 'AM': 'AM', - 'PM': 'PM', - } - - month_names = ['', 'January', 'February', 'March', 'April', 'May', 'June', 'July', - 'August', 'September', 'October', 'November', 'December'] - month_abbreviations = ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', - 'Sep', 'Oct', 'Nov', 'Dec'] - - day_names = ['', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] - day_abbreviations = ['', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - - ordinal_day_re = r'((?P[2-3]?1(?=st)|[2-3]?2(?=nd)|[2-3]?3(?=rd)|[1-3]?[04-9](?=th)|1[1-3](?=th))(st|nd|rd|th))' - - def _ordinal_number(self, n): - if n % 100 not in (11, 12, 13): - remainder = abs(n) % 10 - if remainder == 1: - return '{0}st'.format(n) - elif remainder == 2: - return '{0}nd'.format(n) - elif remainder == 3: - return '{0}rd'.format(n) - return '{0}th'.format(n) - - -class ItalianLocale(Locale): - names = ['it', 'it_it'] - past = '{0} fa' - future = 'tra {0}' - - timeframes = { - 'now': 'adesso', - 'seconds': 'qualche secondo', - 'minute': 'un minuto', - 'minutes': '{0} minuti', - 'hour': 'un\'ora', - 'hours': '{0} ore', - 'day': 'un giorno', - 'days': '{0} giorni', - 'month': 'un mese', - 'months': '{0} mesi', - 'year': 'un anno', - 'years': '{0} anni', - } - - month_names = ['', 'gennaio', 'febbraio', 'marzo', 'aprile', 'maggio', 'giugno', 'luglio', - 'agosto', 'settembre', 'ottobre', 'novembre', 'dicembre'] - month_abbreviations = ['', 'gen', 'feb', 'mar', 'apr', 'mag', 'giu', 'lug', 'ago', - 'set', 'ott', 'nov', 'dic'] - - day_names = ['', 'lunedì', 'martedì', 'mercoledì', 'giovedì', 'venerdì', 'sabato', 'domenica'] - day_abbreviations = ['', 'lun', 'mar', 'mer', 'gio', 'ven', 'sab', 'dom'] - - ordinal_day_re = r'((?P[1-3]?[0-9](?=[ºª]))[ºª])' - - def _ordinal_number(self, n): - return '{0}º'.format(n) - - -class SpanishLocale(Locale): - names = ['es', 'es_es'] - past = 'hace {0}' - future = 'en {0}' - - timeframes = { - 'now': 'ahora', - 'seconds': 'segundos', - 'minute': 'un minuto', - 'minutes': '{0} minutos', - 'hour': 'una hora', - 'hours': '{0} horas', - 'day': 'un día', - 'days': '{0} días', - 'month': 'un mes', - 'months': '{0} meses', - 'year': 'un año', - 'years': '{0} años', - } - - month_names = ['', 'enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio', 'julio', - 'agosto', 'septiembre', 'octubre', 'noviembre', 'diciembre'] - month_abbreviations = ['', 'ene', 'feb', 'mar', 'abr', 'may', 'jun', 'jul', 'ago', - 'sep', 'oct', 'nov', 'dic'] - - day_names = ['', 'lunes', 'martes', 'miércoles', 'jueves', 'viernes', 'sábado', 'domingo'] - day_abbreviations = ['', 'lun', 'mar', 'mie', 'jue', 'vie', 'sab', 'dom'] - - ordinal_day_re = r'((?P[1-3]?[0-9](?=[ºª]))[ºª])' - - def _ordinal_number(self, n): - return '{0}º'.format(n) - - -class FrenchLocale(Locale): - names = ['fr', 'fr_fr'] - past = 'il y a {0}' - future = 'dans {0}' - - timeframes = { - 'now': 'maintenant', - 'seconds': 'quelques secondes', - 'minute': 'une minute', - 'minutes': '{0} minutes', - 'hour': 'une heure', - 'hours': '{0} heures', - 'day': 'un 
jour', - 'days': '{0} jours', - 'month': 'un mois', - 'months': '{0} mois', - 'year': 'un an', - 'years': '{0} ans', - } - - month_names = ['', 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', - 'août', 'septembre', 'octobre', 'novembre', 'décembre'] - month_abbreviations = ['', 'janv', 'févr', 'mars', 'avr', 'mai', 'juin', 'juil', 'août', - 'sept', 'oct', 'nov', 'déc'] - - day_names = ['', 'lundi', 'mardi', 'mercredi', 'jeudi', 'vendredi', 'samedi', 'dimanche'] - day_abbreviations = ['', 'lun', 'mar', 'mer', 'jeu', 'ven', 'sam', 'dim'] - - ordinal_day_re = r'((?P\b1(?=er\b)|[1-3]?[02-9](?=e\b)|[1-3]1(?=e\b))(er|e)\b)' - - def _ordinal_number(self, n): - if abs(n) == 1: - return '{0}er'.format(n) - return '{0}e'.format(n) - - -class GreekLocale(Locale): - - names = ['el', 'el_gr'] - - past = '{0} πριν' - future = 'σε {0}' - - timeframes = { - 'now': 'τώρα', - 'seconds': 'δευτερόλεπτα', - 'minute': 'ένα λεπτό', - 'minutes': '{0} λεπτά', - 'hour': 'μια ώρα', - 'hours': '{0} ώρες', - 'day': 'μια μέρα', - 'days': '{0} μέρες', - 'month': 'ένα μήνα', - 'months': '{0} μήνες', - 'year': 'ένα χρόνο', - 'years': '{0} χρόνια', - } - - month_names = ['', 'Ιανουαρίου', 'Φεβρουαρίου', 'Μαρτίου', 'Απριλίου', 'Μαΐου', 'Ιουνίου', - 'Ιουλίου', 'Αυγούστου', 'Σεπτεμβρίου', 'Οκτωβρίου', 'Νοεμβρίου', 'Δεκεμβρίου'] - month_abbreviations = ['', 'Ιαν', 'Φεβ', 'Μαρ', 'Απρ', 'Μαϊ', 'Ιον', 'Ιολ', 'Αυγ', - 'Σεπ', 'Οκτ', 'Νοε', 'Δεκ'] - - day_names = ['', 'Δευτέρα', 'Τρίτη', 'Τετάρτη', 'Πέμπτη', 'Παρασκευή', 'Σάββατο', 'Κυριακή'] - day_abbreviations = ['', 'Δευ', 'Τρι', 'Τετ', 'Πεμ', 'Παρ', 'Σαβ', 'Κυρ'] - - -class JapaneseLocale(Locale): - - names = ['ja', 'ja_jp'] - - past = '{0}前' - future = '{0}後' - - timeframes = { - 'now': '現在', - 'seconds': '数秒', - 'minute': '1分', - 'minutes': '{0}分', - 'hour': '1時間', - 'hours': '{0}時間', - 'day': '1日', - 'days': '{0}日', - 'month': '1ヶ月', - 'months': '{0}ヶ月', - 'year': '1年', - 'years': '{0}年', - } - - month_names = ['', '1月', '2月', '3月', '4月', '5月', '6月', '7月', '8月', - '9月', '10月', '11月', '12月'] - month_abbreviations = ['', ' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', - ' 9', '10', '11', '12'] - - day_names = ['', '月曜日', '火曜日', '水曜日', '木曜日', '金曜日', '土曜日', '日曜日'] - day_abbreviations = ['', '月', '火', '水', '木', '金', '土', '日'] - - -class SwedishLocale(Locale): - - names = ['sv', 'sv_se'] - - past = 'för {0} sen' - future = 'om {0}' - - timeframes = { - 'now': 'just nu', - 'seconds': 'några sekunder', - 'minute': 'en minut', - 'minutes': '{0} minuter', - 'hour': 'en timme', - 'hours': '{0} timmar', - 'day': 'en dag', - 'days': '{0} dagar', - 'month': 'en månad', - 'months': '{0} månader', - 'year': 'ett år', - 'years': '{0} år', - } - - month_names = ['', 'januari', 'februari', 'mars', 'april', 'maj', 'juni', 'juli', - 'augusti', 'september', 'oktober', 'november', 'december'] - month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'maj', 'jun', 'jul', - 'aug', 'sep', 'okt', 'nov', 'dec'] - - day_names = ['', 'måndag', 'tisdag', 'onsdag', 'torsdag', 'fredag', 'lördag', 'söndag'] - day_abbreviations = ['', 'mån', 'tis', 'ons', 'tor', 'fre', 'lör', 'sön'] - - -class FinnishLocale(Locale): - - names = ['fi', 'fi_fi'] - - # The finnish grammar is very complex, and its hard to convert - # 1-to-1 to something like English. 
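As the comment just above notes, Finnish inflects the noun differently for past and future, so `FinnishLocale` (continued below) stores each timeframe as a `[past_form, future_form]` pair; its `_format_timeframe` renders both and `_format_relative` picks one by the sign of the delta. `IcelandicLocale` later in this file uses the same two-form device. Illustratively, for five minutes:

    past, future = '{0} minuuttia'.format(5), '{0} minuutin'.format(5)
    '{0} sitten'.format(past)      # -> '5 minuuttia sitten'   (5 minutes ago)
    '{0} kuluttua'.format(future)  # -> '5 minuutin kuluttua'  (in 5 minutes)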
- - past = '{0} sitten' - future = '{0} kuluttua' - - timeframes = { - 'now': ['juuri nyt', 'juuri nyt'], - 'seconds': ['muutama sekunti', 'muutaman sekunnin'], - 'minute': ['minuutti', 'minuutin'], - 'minutes': ['{0} minuuttia', '{0} minuutin'], - 'hour': ['tunti', 'tunnin'], - 'hours': ['{0} tuntia', '{0} tunnin'], - 'day': ['päivä', 'päivä'], - 'days': ['{0} päivää', '{0} päivän'], - 'month': ['kuukausi', 'kuukauden'], - 'months': ['{0} kuukautta', '{0} kuukauden'], - 'year': ['vuosi', 'vuoden'], - 'years': ['{0} vuotta', '{0} vuoden'], - } - - # Months and days are lowercase in Finnish - month_names = ['', 'tammikuu', 'helmikuu', 'maaliskuu', 'huhtikuu', - 'toukokuu', 'kesäkuu', 'heinäkuu', 'elokuu', - 'syyskuu', 'lokakuu', 'marraskuu', 'joulukuu'] - - month_abbreviations = ['', 'tammi', 'helmi', 'maalis', 'huhti', - 'touko', 'kesä', 'heinä', 'elo', - 'syys', 'loka', 'marras', 'joulu'] - - day_names = ['', 'maanantai', 'tiistai', 'keskiviikko', 'torstai', - 'perjantai', 'lauantai', 'sunnuntai'] - - day_abbreviations = ['', 'ma', 'ti', 'ke', 'to', 'pe', 'la', 'su'] - - def _format_timeframe(self, timeframe, delta): - return (self.timeframes[timeframe][0].format(abs(delta)), - self.timeframes[timeframe][1].format(abs(delta))) - - def _format_relative(self, humanized, timeframe, delta): - if timeframe == 'now': - return humanized[0] - - direction = self.past if delta < 0 else self.future - which = 0 if delta < 0 else 1 - - return direction.format(humanized[which]) - - def _ordinal_number(self, n): - return '{0}.'.format(n) - - -class ChineseCNLocale(Locale): - - names = ['zh', 'zh_cn'] - - past = '{0}前' - future = '{0}后' - - timeframes = { - 'now': '刚才', - 'seconds': '几秒', - 'minute': '1分钟', - 'minutes': '{0}分钟', - 'hour': '1小时', - 'hours': '{0}小时', - 'day': '1天', - 'days': '{0}天', - 'month': '1个月', - 'months': '{0}个月', - 'year': '1年', - 'years': '{0}年', - } - - month_names = ['', '一月', '二月', '三月', '四月', '五月', '六月', '七月', - '八月', '九月', '十月', '十一月', '十二月'] - month_abbreviations = ['', ' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', - ' 9', '10', '11', '12'] - - day_names = ['', '星期一', '星期二', '星期三', '星期四', '星期五', '星期六', '星期日'] - day_abbreviations = ['', '一', '二', '三', '四', '五', '六', '日'] - - -class ChineseTWLocale(Locale): - - names = ['zh_tw'] - - past = '{0}前' - future = '{0}後' - - timeframes = { - 'now': '剛才', - 'seconds': '幾秒', - 'minute': '1分鐘', - 'minutes': '{0}分鐘', - 'hour': '1小時', - 'hours': '{0}小時', - 'day': '1天', - 'days': '{0}天', - 'month': '1個月', - 'months': '{0}個月', - 'year': '1年', - 'years': '{0}年', - } - - month_names = ['', '1月', '2月', '3月', '4月', '5月', '6月', '7月', '8月', - '9月', '10月', '11月', '12月'] - month_abbreviations = ['', ' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', - ' 9', '10', '11', '12'] - - day_names = ['', '周一', '周二', '周三', '周四', '周五', '周六', '周日'] - day_abbreviations = ['', '一', '二', '三', '四', '五', '六', '日'] - - -class KoreanLocale(Locale): - - names = ['ko', 'ko_kr'] - - past = '{0} 전' - future = '{0} 후' - - timeframes = { - 'now': '지금', - 'seconds': '몇 초', - 'minute': '1분', - 'minutes': '{0}분', - 'hour': '1시간', - 'hours': '{0}시간', - 'day': '1일', - 'days': '{0}일', - 'month': '1개월', - 'months': '{0}개월', - 'year': '1년', - 'years': '{0}년', - } - - month_names = ['', '1월', '2월', '3월', '4월', '5월', '6월', '7월', '8월', - '9월', '10월', '11월', '12월'] - month_abbreviations = ['', ' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', - ' 9', '10', '11', '12'] - - day_names = ['', '월요일', '화요일', '수요일', '목요일', '금요일', '토요일', '일요일'] - day_abbreviations = ['', '월', '화', '수', '목', '금', 
'토', '일'] - - -# derived locale types & implementations. -class DutchLocale(Locale): - - names = ['nl', 'nl_nl'] - - past = '{0} geleden' - future = 'over {0}' - - timeframes = { - 'now': 'nu', - 'seconds': 'seconden', - 'minute': 'een minuut', - 'minutes': '{0} minuten', - 'hour': 'een uur', - 'hours': '{0} uur', - 'day': 'een dag', - 'days': '{0} dagen', - 'month': 'een maand', - 'months': '{0} maanden', - 'year': 'een jaar', - 'years': '{0} jaar', - } - - # In Dutch names of months and days are not starting with a capital letter - # like in the English language. - month_names = ['', 'januari', 'februari', 'maart', 'april', 'mei', 'juni', 'juli', - 'augustus', 'september', 'oktober', 'november', 'december'] - month_abbreviations = ['', 'jan', 'feb', 'mrt', 'apr', 'mei', 'jun', 'jul', 'aug', - 'sep', 'okt', 'nov', 'dec'] - - day_names = ['', 'maandag', 'dinsdag', 'woensdag', 'donderdag', 'vrijdag', 'zaterdag', 'zondag'] - day_abbreviations = ['', 'ma', 'di', 'wo', 'do', 'vr', 'za', 'zo'] - - -class SlavicBaseLocale(Locale): - - def _format_timeframe(self, timeframe, delta): - - form = self.timeframes[timeframe] - delta = abs(delta) - - if isinstance(form, list): - - if delta % 10 == 1 and delta % 100 != 11: - form = form[0] - elif 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20): - form = form[1] - else: - form = form[2] - - return form.format(delta) - -class BelarusianLocale(SlavicBaseLocale): - - names = ['be', 'be_by'] - - past = '{0} таму' - future = 'праз {0}' - - timeframes = { - 'now': 'зараз', - 'seconds': 'некалькі секунд', - 'minute': 'хвіліну', - 'minutes': ['{0} хвіліну', '{0} хвіліны', '{0} хвілін'], - 'hour': 'гадзіну', - 'hours': ['{0} гадзіну', '{0} гадзіны', '{0} гадзін'], - 'day': 'дзень', - 'days': ['{0} дзень', '{0} дні', '{0} дзён'], - 'month': 'месяц', - 'months': ['{0} месяц', '{0} месяцы', '{0} месяцаў'], - 'year': 'год', - 'years': ['{0} год', '{0} гады', '{0} гадоў'], - } - - month_names = ['', 'студзеня', 'лютага', 'сакавіка', 'красавіка', 'траўня', 'чэрвеня', - 'ліпеня', 'жніўня', 'верасня', 'кастрычніка', 'лістапада', 'снежня'] - month_abbreviations = ['', 'студ', 'лют', 'сак', 'крас', 'трав', 'чэрв', 'ліп', 'жнів', - 'вер', 'каст', 'ліст', 'снеж'] - - day_names = ['', 'панядзелак', 'аўторак', 'серада', 'чацвер', 'пятніца', 'субота', 'нядзеля'] - day_abbreviations = ['', 'пн', 'ат', 'ср', 'чц', 'пт', 'сб', 'нд'] - - -class PolishLocale(SlavicBaseLocale): - - names = ['pl', 'pl_pl'] - - past = '{0} temu' - future = 'za {0}' - - timeframes = { - 'now': 'teraz', - 'seconds': 'kilka sekund', - 'minute': 'minutę', - 'minutes': ['{0} minut', '{0} minuty', '{0} minut'], - 'hour': 'godzina', - 'hours': ['{0} godzin', '{0} godziny', '{0} godzin'], - 'day': 'dzień', - 'days': ['{0} dzień', '{0} dni', '{0} dni'], - 'month': 'miesiąc', - 'months': ['{0} miesiąc', '{0} miesiące', '{0} miesięcy'], - 'year': 'rok', - 'years': ['{0} rok', '{0} lata', '{0} lat'], - } - - month_names = ['', 'styczeń', 'luty', 'marzec', 'kwiecień', 'maj', - 'czerwiec', 'lipiec', 'sierpień', 'wrzesień', 'październik', - 'listopad', 'grudzień'] - month_abbreviations = ['', 'sty', 'lut', 'mar', 'kwi', 'maj', 'cze', 'lip', - 'sie', 'wrz', 'paź', 'lis', 'gru'] - - day_names = ['', 'poniedziałek', 'wtorek', 'środa', 'czwartek', 'piątek', - 'sobota', 'niedziela'] - day_abbreviations = ['', 'Pn', 'Wt', 'Śr', 'Czw', 'Pt', 'So', 'Nd'] - - -class RussianLocale(SlavicBaseLocale): - - names = ['ru', 'ru_ru'] - - past = '{0} назад' - future = 'через {0}' - - timeframes = { - 'now': 'сейчас', - 
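`SlavicBaseLocale._format_timeframe` above picks one of three plural forms from the delta; the selection rule in isolation (illustrative helper name):

    def slavic_form_index(delta):
        # 0: 1, 21, 31, ... (but not 11); 1: 2-4, 22-24, ...; 2: everything else
        delta = abs(delta)
        if delta % 10 == 1 and delta % 100 != 11:
            return 0
        if 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
            return 1
        return 2

    assert [slavic_form_index(n) for n in (1, 2, 5, 11, 21, 24, 111)] == [0, 1, 2, 2, 0, 1, 2]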
'seconds': 'несколько секунд', - 'minute': 'минуту', - 'minutes': ['{0} минуту', '{0} минуты', '{0} минут'], - 'hour': 'час', - 'hours': ['{0} час', '{0} часа', '{0} часов'], - 'day': 'день', - 'days': ['{0} день', '{0} дня', '{0} дней'], - 'month': 'месяц', - 'months': ['{0} месяц', '{0} месяца', '{0} месяцев'], - 'year': 'год', - 'years': ['{0} год', '{0} года', '{0} лет'], - } - - month_names = ['', 'января', 'февраля', 'марта', 'апреля', 'мая', 'июня', - 'июля', 'августа', 'сентября', 'октября', 'ноября', 'декабря'] - month_abbreviations = ['', 'янв', 'фев', 'мар', 'апр', 'май', 'июн', 'июл', - 'авг', 'сен', 'окт', 'ноя', 'дек'] - - day_names = ['', 'понедельник', 'вторник', 'среда', 'четверг', 'пятница', - 'суббота', 'воскресенье'] - day_abbreviations = ['', 'пн', 'вт', 'ср', 'чт', 'пт', 'сб', 'вс'] - - -class BulgarianLocale(SlavicBaseLocale): - - names = ['bg', 'bg_BG'] - - past = '{0} назад' - future = 'напред {0}' - - timeframes = { - 'now': 'сега', - 'seconds': 'няколко секунди', - 'minute': 'минута', - 'minutes': ['{0} минута', '{0} минути', '{0} минути'], - 'hour': 'час', - 'hours': ['{0} час', '{0} часа', '{0} часа'], - 'day': 'ден', - 'days': ['{0} ден', '{0} дни', '{0} дни'], - 'month': 'месец', - 'months': ['{0} месец', '{0} месеца', '{0} месеца'], - 'year': 'година', - 'years': ['{0} година', '{0} години', '{0} години'], - } - - month_names = ['', 'януари', 'февруари', 'март', 'април', 'май', 'юни', - 'юли', 'август', 'септември', 'октомври', 'ноември', 'декември'] - month_abbreviations = ['', 'ян', 'февр', 'март', 'апр', 'май', 'юни', 'юли', - 'авг', 'септ', 'окт', 'ноем', 'дек'] - - day_names = ['', 'понеделник', 'вторник', 'сряда', 'четвъртък', 'петък', - 'събота', 'неделя'] - day_abbreviations = ['', 'пон', 'вт', 'ср', 'четв', 'пет', 'съб', 'нед'] - - -class UkrainianLocale(SlavicBaseLocale): - - names = ['ua', 'uk_ua'] - - past = '{0} тому' - future = 'за {0}' - - timeframes = { - 'now': 'зараз', - 'seconds': 'кілька секунд', - 'minute': 'хвилину', - 'minutes': ['{0} хвилину', '{0} хвилини', '{0} хвилин'], - 'hour': 'годину', - 'hours': ['{0} годину', '{0} години', '{0} годин'], - 'day': 'день', - 'days': ['{0} день', '{0} дні', '{0} днів'], - 'month': 'місяць', - 'months': ['{0} місяць', '{0} місяці', '{0} місяців'], - 'year': 'рік', - 'years': ['{0} рік', '{0} роки', '{0} років'], - } - - month_names = ['', 'січня', 'лютого', 'березня', 'квітня', 'травня', 'червня', - 'липня', 'серпня', 'вересня', 'жовтня', 'листопада', 'грудня'] - month_abbreviations = ['', 'січ', 'лют', 'бер', 'квіт', 'трав', 'черв', 'лип', 'серп', - 'вер', 'жовт', 'лист', 'груд'] - - day_names = ['', 'понеділок', 'вівторок', 'середа', 'четвер', 'п’ятниця', 'субота', 'неділя'] - day_abbreviations = ['', 'пн', 'вт', 'ср', 'чт', 'пт', 'сб', 'нд'] - - -class _DeutschLocaleCommonMixin(object): - - past = 'vor {0}' - future = 'in {0}' - - timeframes = { - 'now': 'gerade eben', - 'seconds': 'Sekunden', - 'minute': 'einer Minute', - 'minutes': '{0} Minuten', - 'hour': 'einer Stunde', - 'hours': '{0} Stunden', - 'day': 'einem Tag', - 'days': '{0} Tagen', - 'month': 'einem Monat', - 'months': '{0} Monaten', - 'year': 'einem Jahr', - 'years': '{0} Jahren', - } - - month_names = [ - '', 'Januar', 'Februar', 'März', 'April', 'Mai', 'Juni', 'Juli', - 'August', 'September', 'Oktober', 'November', 'Dezember' - ] - - month_abbreviations = [ - '', 'Jan', 'Feb', 'Mär', 'Apr', 'Mai', 'Jun', 'Jul', 'Aug', 'Sep', - 'Okt', 'Nov', 'Dez' - ] - - day_names = [ - '', 'Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 
'Freitag', - 'Samstag', 'Sonntag' - ] - - day_abbreviations = [ - '', 'Mo', 'Di', 'Mi', 'Do', 'Fr', 'Sa', 'So' - ] - - def _ordinal_number(self, n): - return '{0}.'.format(n) - - -class GermanLocale(_DeutschLocaleCommonMixin, Locale): - - names = ['de', 'de_de'] - - timeframes = _DeutschLocaleCommonMixin.timeframes.copy() - timeframes['days'] = '{0} Tagen' - - -class AustriaLocale(_DeutschLocaleCommonMixin, Locale): - - names = ['de', 'de_at'] - - timeframes = _DeutschLocaleCommonMixin.timeframes.copy() - timeframes['days'] = '{0} Tage' - - -class NorwegianLocale(Locale): - - names = ['nb', 'nb_no'] - - past = 'for {0} siden' - future = 'om {0}' - - timeframes = { - 'now': 'nå nettopp', - 'seconds': 'noen sekunder', - 'minute': 'ett minutt', - 'minutes': '{0} minutter', - 'hour': 'en time', - 'hours': '{0} timer', - 'day': 'en dag', - 'days': '{0} dager', - 'month': 'en måned', - 'months': '{0} måneder', - 'year': 'ett år', - 'years': '{0} år', - } - - month_names = ['', 'januar', 'februar', 'mars', 'april', 'mai', 'juni', - 'juli', 'august', 'september', 'oktober', 'november', - 'desember'] - month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'mai', 'jun', 'jul', - 'aug', 'sep', 'okt', 'nov', 'des'] - - day_names = ['', 'mandag', 'tirsdag', 'onsdag', 'torsdag', 'fredag', - 'lørdag', 'søndag'] - day_abbreviations = ['', 'ma', 'ti', 'on', 'to', 'fr', 'lø', 'sø'] - - -class NewNorwegianLocale(Locale): - - names = ['nn', 'nn_no'] - - past = 'for {0} sidan' - future = 'om {0}' - - timeframes = { - 'now': 'no nettopp', - 'seconds': 'nokre sekund', - 'minute': 'ett minutt', - 'minutes': '{0} minutt', - 'hour': 'ein time', - 'hours': '{0} timar', - 'day': 'ein dag', - 'days': '{0} dagar', - 'month': 'en månad', - 'months': '{0} månader', - 'year': 'eit år', - 'years': '{0} år', - } - - month_names = ['', 'januar', 'februar', 'mars', 'april', 'mai', 'juni', - 'juli', 'august', 'september', 'oktober', 'november', - 'desember'] - month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'mai', 'jun', 'jul', - 'aug', 'sep', 'okt', 'nov', 'des'] - - day_names = ['', 'måndag', 'tysdag', 'onsdag', 'torsdag', 'fredag', - 'laurdag', 'sundag'] - day_abbreviations = ['', 'må', 'ty', 'on', 'to', 'fr', 'la', 'su'] - - -class PortugueseLocale(Locale): - names = ['pt', 'pt_pt'] - - past = 'há {0}' - future = 'em {0}' - - timeframes = { - 'now': 'agora', - 'seconds': 'segundos', - 'minute': 'um minuto', - 'minutes': '{0} minutos', - 'hour': 'uma hora', - 'hours': '{0} horas', - 'day': 'um dia', - 'days': '{0} dias', - 'month': 'um mês', - 'months': '{0} meses', - 'year': 'um ano', - 'years': '{0} anos', - } - - month_names = ['', 'janeiro', 'fevereiro', 'março', 'abril', 'maio', 'junho', 'julho', - 'agosto', 'setembro', 'outubro', 'novembro', 'dezembro'] - month_abbreviations = ['', 'jan', 'fev', 'mar', 'abr', 'maio', 'jun', 'jul', 'ago', - 'set', 'out', 'nov', 'dez'] - - day_names = ['', 'segunda-feira', 'terça-feira', 'quarta-feira', 'quinta-feira', 'sexta-feira', - 'sábado', 'domingo'] - day_abbreviations = ['', 'seg', 'ter', 'qua', 'qui', 'sex', 'sab', 'dom'] - - -class BrazilianPortugueseLocale(PortugueseLocale): - names = ['pt_br'] - - past = 'fazem {0}' - - -class TagalogLocale(Locale): - - names = ['tl'] - - past = 'nakaraang {0}' - future = '{0} mula ngayon' - - timeframes = { - 'now': 'ngayon lang', - 'seconds': 'segundo', - 'minute': 'isang minuto', - 'minutes': '{0} minuto', - 'hour': 'isang oras', - 'hours': '{0} oras', - 'day': 'isang araw', - 'days': '{0} araw', - 'month': 'isang buwan', - 
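`GermanLocale` and `AustriaLocale` earlier in this hunk share everything through `_DeutschLocaleCommonMixin` and then copy-and-patch a single plural form. The `copy()` matters: mutating the mixin's dict in place would leak the override into every sibling. Reduced to its essentials:

    class _Mixin(object):
        timeframes = {'days': '{0} Tagen', 'hours': '{0} Stunden'}

    class Austria(_Mixin):
        timeframes = _Mixin.timeframes.copy()  # own dict before the override
        timeframes['days'] = '{0} Tage'

    _Mixin.timeframes['days']   # -> '{0} Tagen' (unchanged)
    Austria.timeframes['days']  # -> '{0} Tage'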
'months': '{0} buwan', - 'year': 'isang taon', - 'years': '{0} taon', - } - - month_names = ['', 'Enero', 'Pebrero', 'Marso', 'Abril', 'Mayo', 'Hunyo', 'Hulyo', - 'Agosto', 'Setyembre', 'Oktubre', 'Nobyembre', 'Disyembre'] - month_abbreviations = ['', 'Ene', 'Peb', 'Mar', 'Abr', 'May', 'Hun', 'Hul', 'Ago', - 'Set', 'Okt', 'Nob', 'Dis'] - - day_names = ['', 'Lunes', 'Martes', 'Miyerkules', 'Huwebes', 'Biyernes', 'Sabado', 'Linggo'] - day_abbreviations = ['', 'Lun', 'Mar', 'Miy', 'Huw', 'Biy', 'Sab', 'Lin'] - - -class VietnameseLocale(Locale): - - names = ['vi', 'vi_vn'] - - past = '{0} trước' - future = '{0} nữa' - - timeframes = { - 'now': 'hiện tại', - 'seconds': 'giây', - 'minute': 'một phút', - 'minutes': '{0} phút', - 'hour': 'một giờ', - 'hours': '{0} giờ', - 'day': 'một ngày', - 'days': '{0} ngày', - 'month': 'một tháng', - 'months': '{0} tháng', - 'year': 'một năm', - 'years': '{0} năm', - } - - month_names = ['', 'Tháng Một', 'Tháng Hai', 'Tháng Ba', 'Tháng Tư', 'Tháng Năm', 'Tháng Sáu', 'Tháng Bảy', - 'Tháng Tám', 'Tháng Chín', 'Tháng Mười', 'Tháng Mười Một', 'Tháng Mười Hai'] - month_abbreviations = ['', 'Tháng 1', 'Tháng 2', 'Tháng 3', 'Tháng 4', 'Tháng 5', 'Tháng 6', 'Tháng 7', 'Tháng 8', - 'Tháng 9', 'Tháng 10', 'Tháng 11', 'Tháng 12'] - - day_names = ['', 'Thứ Hai', 'Thứ Ba', 'Thứ Tư', 'Thứ Năm', 'Thứ Sáu', 'Thứ Bảy', 'Chủ Nhật'] - day_abbreviations = ['', 'Thứ 2', 'Thứ 3', 'Thứ 4', 'Thứ 5', 'Thứ 6', 'Thứ 7', 'CN'] - - -class TurkishLocale(Locale): - - names = ['tr', 'tr_tr'] - - past = '{0} önce' - future = '{0} sonra' - - timeframes = { - 'now': 'şimdi', - 'seconds': 'saniye', - 'minute': 'bir dakika', - 'minutes': '{0} dakika', - 'hour': 'bir saat', - 'hours': '{0} saat', - 'day': 'bir gün', - 'days': '{0} gün', - 'month': 'bir ay', - 'months': '{0} ay', - 'year': 'yıl', - 'years': '{0} yıl', - } - - month_names = ['', 'Ocak', 'Şubat', 'Mart', 'Nisan', 'Mayıs', 'Haziran', 'Temmuz', - 'Ağustos', 'Eylül', 'Ekim', 'Kasım', 'Aralık'] - month_abbreviations = ['', 'Oca', 'Şub', 'Mar', 'Nis', 'May', 'Haz', 'Tem', 'Ağu', - 'Eyl', 'Eki', 'Kas', 'Ara'] - - day_names = ['', 'Pazartesi', 'Salı', 'Çarşamba', 'Perşembe', 'Cuma', 'Cumartesi', 'Pazar'] - day_abbreviations = ['', 'Pzt', 'Sal', 'Çar', 'Per', 'Cum', 'Cmt', 'Paz'] - - -class AzerbaijaniLocale(Locale): - - names = ['az', 'az_az'] - - past = '{0} əvvəl' - future = '{0} sonra' - - timeframes = { - 'now': 'indi', - 'seconds': 'saniyə', - 'minute': 'bir dəqiqə', - 'minutes': '{0} dəqiqə', - 'hour': 'bir saat', - 'hours': '{0} saat', - 'day': 'bir gün', - 'days': '{0} gün', - 'month': 'bir ay', - 'months': '{0} ay', - 'year': 'il', - 'years': '{0} il', - } - - month_names = ['', 'Yanvar', 'Fevral', 'Mart', 'Aprel', 'May', 'İyun', 'İyul', - 'Avqust', 'Sentyabr', 'Oktyabr', 'Noyabr', 'Dekabr'] - month_abbreviations = ['', 'Yan', 'Fev', 'Mar', 'Apr', 'May', 'İyn', 'İyl', 'Avq', - 'Sen', 'Okt', 'Noy', 'Dek'] - - day_names = ['', 'Bazar ertəsi', 'Çərşənbə axşamı', 'Çərşənbə', 'Cümə axşamı', 'Cümə', 'Şənbə', 'Bazar'] - day_abbreviations = ['', 'Ber', 'Çax', 'Çər', 'Cax', 'Cüm', 'Şnb', 'Bzr'] - - -class ArabicLocale(Locale): - - names = ['ar', 'ar_eg'] - - past = 'منذ {0}' - future = 'خلال {0}' - - timeframes = { - 'now': 'الآن', - 'seconds': 'ثوان', - 'minute': 'دقيقة', - 'minutes': '{0} دقائق', - 'hour': 'ساعة', - 'hours': '{0} ساعات', - 'day': 'يوم', - 'days': '{0} أيام', - 'month': 'شهر', - 'months': '{0} شهور', - 'year': 'سنة', - 'years': '{0} سنوات', - } - - month_names = ['', 'يناير', 'فبراير', 'مارس', 'أبريل', 'مايو', 'يونيو', 
'يوليو', - 'أغسطس', 'سبتمبر', 'أكتوبر', 'نوفمبر', 'ديسمبر'] - month_abbreviations = ['', 'يناير', 'فبراير', 'مارس', 'أبريل', 'مايو', 'يونيو', 'يوليو', - 'أغسطس', 'سبتمبر', 'أكتوبر', 'نوفمبر', 'ديسمبر'] - - day_names = ['', 'الاثنين', 'الثلاثاء', 'الأربعاء', 'الخميس', 'الجمعة', 'السبت', 'الأحد'] - day_abbreviations = ['', 'اثنين', 'ثلاثاء', 'أربعاء', 'خميس', 'جمعة', 'سبت', 'أحد'] - - -class IcelandicLocale(Locale): - - def _format_timeframe(self, timeframe, delta): - - timeframe = self.timeframes[timeframe] - if delta < 0: - timeframe = timeframe[0] - elif delta > 0: - timeframe = timeframe[1] - - return timeframe.format(abs(delta)) - - names = ['is', 'is_is'] - - past = 'fyrir {0} síðan' - future = 'eftir {0}' - - timeframes = { - 'now': 'rétt í þessu', - 'seconds': ('nokkrum sekúndum', 'nokkrar sekúndur'), - 'minute': ('einni mínútu', 'eina mínútu'), - 'minutes': ('{0} mínútum', '{0} mínútur'), - 'hour': ('einum tíma', 'einn tíma'), - 'hours': ('{0} tímum', '{0} tíma'), - 'day': ('einum degi', 'einn dag'), - 'days': ('{0} dögum', '{0} daga'), - 'month': ('einum mánuði', 'einn mánuð'), - 'months': ('{0} mánuðum', '{0} mánuði'), - 'year': ('einu ári', 'eitt ár'), - 'years': ('{0} árum', '{0} ár'), - } - - meridians = { - 'am': 'f.h.', - 'pm': 'e.h.', - 'AM': 'f.h.', - 'PM': 'e.h.', - } - - month_names = ['', 'janúar', 'febrúar', 'mars', 'apríl', 'maí', 'júní', - 'júlí', 'ágúst', 'september', 'október', 'nóvember', 'desember'] - month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'maí', 'jún', - 'júl', 'ágú', 'sep', 'okt', 'nóv', 'des'] - - day_names = ['', 'mánudagur', 'þriðjudagur', 'miðvikudagur', 'fimmtudagur', - 'föstudagur', 'laugardagur', 'sunnudagur'] - day_abbreviations = ['', 'mán', 'þri', 'mið', 'fim', 'fös', 'lau', 'sun'] - - -class DanishLocale(Locale): - - names = ['da', 'da_dk'] - - past = 'for {0} siden' - future = 'efter {0}' - - timeframes = { - 'now': 'lige nu', - 'seconds': 'et par sekunder', - 'minute': 'et minut', - 'minutes': '{0} minutter', - 'hour': 'en time', - 'hours': '{0} timer', - 'day': 'en dag', - 'days': '{0} dage', - 'month': 'en måned', - 'months': '{0} måneder', - 'year': 'et år', - 'years': '{0} år', - } - - month_names = ['', 'januar', 'februar', 'marts', 'april', 'maj', 'juni', - 'juli', 'august', 'september', 'oktober', 'november', 'december'] - month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'maj', 'jun', - 'jul', 'aug', 'sep', 'okt', 'nov', 'dec'] - - day_names = ['', 'mandag', 'tirsdag', 'onsdag', 'torsdag', 'fredag', - 'lørdag', 'søndag'] - day_abbreviations = ['', 'man', 'tir', 'ons', 'tor', 'fre', 'lør', 'søn'] - - -class MalayalamLocale(Locale): - - names = ['ml'] - - past = '{0} മുമ്പ്' - future = '{0} ശേഷം' - - timeframes = { - 'now': 'ഇപ്പോൾ', - 'seconds': 'സെക്കന്റ്‌', - 'minute': 'ഒരു മിനിറ്റ്', - 'minutes': '{0} മിനിറ്റ്', - 'hour': 'ഒരു മണിക്കൂർ', - 'hours': '{0} മണിക്കൂർ', - 'day': 'ഒരു ദിവസം ', - 'days': '{0} ദിവസം ', - 'month': 'ഒരു മാസം ', - 'months': '{0} മാസം ', - 'year': 'ഒരു വർഷം ', - 'years': '{0} വർഷം ', - } - - meridians = { - 'am': 'രാവിലെ', - 'pm': 'ഉച്ചക്ക് ശേഷം', - 'AM': 'രാവിലെ', - 'PM': 'ഉച്ചക്ക് ശേഷം', - } - - month_names = ['', 'ജനുവരി', 'ഫെബ്രുവരി', 'മാർച്ച്‌', 'ഏപ്രിൽ ', 'മെയ്‌ ', 'ജൂണ്‍', 'ജൂലൈ', - 'ഓഗസ്റ്റ്‌', 'സെപ്റ്റംബർ', 'ഒക്ടോബർ', 'നവംബർ', 'ഡിസംബർ'] - month_abbreviations = ['', 'ജനു', 'ഫെബ് ', 'മാർ', 'ഏപ്രിൽ', 'മേയ്', 'ജൂണ്‍', 'ജൂലൈ', 'ഓഗസ്റ', - 'സെപ്റ്റ', 'ഒക്ടോ', 'നവം', 'ഡിസം'] - - day_names = ['', 'തിങ്കള്‍', 'ചൊവ്വ', 'ബുധന്‍', 'വ്യാഴം', 'വെള്ളി', 'ശനി', 'ഞായര്‍'] - day_abbreviations = ['', 
'തിങ്കള്‍', 'ചൊവ്വ', 'ബുധന്‍', 'വ്യാഴം', 'വെള്ളി', 'ശനി', 'ഞായര്‍'] - - -class HindiLocale(Locale): - - names = ['hi'] - - past = '{0} पहले' - future = '{0} बाद' - - timeframes = { - 'now': 'अभी', - 'seconds': 'सेकंड्', - 'minute': 'एक मिनट ', - 'minutes': '{0} मिनट ', - 'hour': 'एक घंटा', - 'hours': '{0} घंटे', - 'day': 'एक दिन', - 'days': '{0} दिन', - 'month': 'एक माह ', - 'months': '{0} महीने ', - 'year': 'एक वर्ष ', - 'years': '{0} साल ', - } - - meridians = { - 'am': 'सुबह', - 'pm': 'शाम', - 'AM': 'सुबह', - 'PM': 'शाम', - } - - month_names = ['', 'जनवरी', 'फरवरी', 'मार्च', 'अप्रैल ', 'मई', 'जून', 'जुलाई', - 'अगस्त', 'सितंबर', 'अक्टूबर', 'नवंबर', 'दिसंबर'] - month_abbreviations = ['', 'जन', 'फ़र', 'मार्च', 'अप्रै', 'मई', 'जून', 'जुलाई', 'आग', - 'सित', 'अकत', 'नवे', 'दिस'] - - day_names = ['', 'सोमवार', 'मंगलवार', 'बुधवार', 'गुरुवार', 'शुक्रवार', 'शनिवार', 'रविवार'] - day_abbreviations = ['', 'सोम', 'मंगल', 'बुध', 'गुरुवार', 'शुक्र', 'शनि', 'रवि'] - -class CzechLocale(Locale): - names = ['cs', 'cs_cz'] - - timeframes = { - 'now': 'Teď', - 'seconds': { - 'past': '{0} sekundami', - 'future': ['{0} sekundy', '{0} sekund'] - }, - 'minute': {'past': 'minutou', 'future': 'minutu', 'zero': '{0} minut'}, - 'minutes': { - 'past': '{0} minutami', - 'future': ['{0} minuty', '{0} minut'] - }, - 'hour': {'past': 'hodinou', 'future': 'hodinu', 'zero': '{0} hodin'}, - 'hours': { - 'past': '{0} hodinami', - 'future': ['{0} hodiny', '{0} hodin'] - }, - 'day': {'past': 'dnem', 'future': 'den', 'zero': '{0} dnů'}, - 'days': { - 'past': '{0} dny', - 'future': ['{0} dny', '{0} dnů'] - }, - 'month': {'past': 'měsícem', 'future': 'měsíc', 'zero': '{0} měsíců'}, - 'months': { - 'past': '{0} měsíci', - 'future': ['{0} měsíce', '{0} měsíců'] - }, - 'year': {'past': 'rokem', 'future': 'rok', 'zero': '{0} let'}, - 'years': { - 'past': '{0} lety', - 'future': ['{0} roky', '{0} let'] - } - } - - past = 'Před {0}' - future = 'Za {0}' - - month_names = ['', 'leden', 'únor', 'březen', 'duben', 'květen', 'červen', - 'červenec', 'srpen', 'září', 'říjen', 'listopad', 'prosinec'] - month_abbreviations = ['', 'led', 'úno', 'bře', 'dub', 'kvě', 'čvn', 'čvc', - 'srp', 'zář', 'říj', 'lis', 'pro'] - - day_names = ['', 'pondělí', 'úterý', 'středa', 'čtvrtek', 'pátek', - 'sobota', 'neděle'] - day_abbreviations = ['', 'po', 'út', 'st', 'čt', 'pá', 'so', 'ne'] - - - def _format_timeframe(self, timeframe, delta): - '''Czech aware time frame format function, takes into account - the differences between past and future forms.''' - form = self.timeframes[timeframe] - if isinstance(form, dict): - if delta == 0: - form = form['zero'] # And *never* use 0 in the singular! 
- elif delta > 0: - form = form['future'] - else: - form = form['past'] - delta = abs(delta) - - if isinstance(form, list): - if 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20): - form = form[0] - else: - form = form[1] - - return form.format(delta) - - -class SlovakLocale(Locale): - names = ['sk', 'sk_sk'] - - timeframes = { - 'now': 'Teraz', - 'seconds': { - 'past': 'pár sekundami', - 'future': ['{0} sekundy', '{0} sekúnd'] - }, - 'minute': {'past': 'minútou', 'future': 'minútu', 'zero': '{0} minút'}, - 'minutes': { - 'past': '{0} minútami', - 'future': ['{0} minúty', '{0} minút'] - }, - 'hour': {'past': 'hodinou', 'future': 'hodinu', 'zero': '{0} hodín'}, - 'hours': { - 'past': '{0} hodinami', - 'future': ['{0} hodiny', '{0} hodín'] - }, - 'day': {'past': 'dňom', 'future': 'deň', 'zero': '{0} dní'}, - 'days': { - 'past': '{0} dňami', - 'future': ['{0} dni', '{0} dní'] - }, - 'month': {'past': 'mesiacom', 'future': 'mesiac', 'zero': '{0} mesiacov'}, - 'months': { - 'past': '{0} mesiacmi', - 'future': ['{0} mesiace', '{0} mesiacov'] - }, - 'year': {'past': 'rokom', 'future': 'rok', 'zero': '{0} rokov'}, - 'years': { - 'past': '{0} rokmi', - 'future': ['{0} roky', '{0} rokov'] - } - } - - past = 'Pred {0}' - future = 'O {0}' - - month_names = ['', 'január', 'február', 'marec', 'apríl', 'máj', 'jún', - 'júl', 'august', 'september', 'október', 'november', 'december'] - month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'máj', 'jún', 'júl', - 'aug', 'sep', 'okt', 'nov', 'dec'] - - day_names = ['', 'pondelok', 'utorok', 'streda', 'štvrtok', 'piatok', - 'sobota', 'nedeľa'] - day_abbreviations = ['', 'po', 'ut', 'st', 'št', 'pi', 'so', 'ne'] - - - def _format_timeframe(self, timeframe, delta): - '''Slovak aware time frame format function, takes into account - the differences between past and future forms.''' - form = self.timeframes[timeframe] - if isinstance(form, dict): - if delta == 0: - form = form['zero'] # And *never* use 0 in the singular! 
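The Czech and Slovak `_format_timeframe` implementations here layer a past/future/zero split on top of the list-based plural choice. Tracing `CzechLocale` through the code above (assuming the vendored module):

    from arrow import locales

    cs = locales.get_locale('cs')
    cs.describe('minutes', 5)   # -> 'Za 5 minut'       (future list, plural form)
    cs.describe('minutes', -2)  # -> 'Před 2 minutami'  (single past form)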
- elif delta > 0: - form = form['future'] - else: - form = form['past'] - delta = abs(delta) - - if isinstance(form, list): - if 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20): - form = form[0] - else: - form = form[1] - - return form.format(delta) - - -class FarsiLocale(Locale): - - names = ['fa', 'fa_ir'] - - past = '{0} قبل' - future = 'در {0}' - - timeframes = { - 'now': 'اکنون', - 'seconds': 'ثانیه', - 'minute': 'یک دقیقه', - 'minutes': '{0} دقیقه', - 'hour': 'یک ساعت', - 'hours': '{0} ساعت', - 'day': 'یک روز', - 'days': '{0} روز', - 'month': 'یک ماه', - 'months': '{0} ماه', - 'year': 'یک سال', - 'years': '{0} سال', - } - - meridians = { - 'am': 'قبل از ظهر', - 'pm': 'بعد از ظهر', - 'AM': 'قبل از ظهر', - 'PM': 'بعد از ظهر', - } - - month_names = ['', 'January', 'February', 'March', 'April', 'May', 'June', 'July', - 'August', 'September', 'October', 'November', 'December'] - month_abbreviations = ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', - 'Sep', 'Oct', 'Nov', 'Dec'] - - day_names = ['', 'دو شنبه', 'سه شنبه', 'چهارشنبه', 'پنجشنبه', 'جمعه', 'شنبه', 'یکشنبه'] - day_abbreviations = ['', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - - -class MacedonianLocale(Locale): - names = ['mk', 'mk_mk'] - - past = 'пред {0}' - future = 'за {0}' - - timeframes = { - 'now': 'сега', - 'seconds': 'секунди', - 'minute': 'една минута', - 'minutes': '{0} минути', - 'hour': 'еден саат', - 'hours': '{0} саати', - 'day': 'еден ден', - 'days': '{0} дена', - 'month': 'еден месец', - 'months': '{0} месеци', - 'year': 'една година', - 'years': '{0} години', - } - - meridians = { - 'am': 'дп', - 'pm': 'пп', - 'AM': 'претпладне', - 'PM': 'попладне', - } - - month_names = ['', 'Јануари', 'Февруари', 'Март', 'Април', 'Мај', 'Јуни', 'Јули', 'Август', 'Септември', 'Октомври', - 'Ноември', 'Декември'] - month_abbreviations = ['', 'Јан.', ' Фев.', ' Мар.', ' Апр.', ' Мај', ' Јун.', ' Јул.', ' Авг.', ' Септ.', ' Окт.', - ' Ноем.', ' Декем.'] - - day_names = ['', 'Понеделник', ' Вторник', ' Среда', ' Четврток', ' Петок', ' Сабота', ' Недела'] - day_abbreviations = ['', 'Пон.', ' Вт.', ' Сре.', ' Чет.', ' Пет.', ' Саб.', ' Нед.'] - - -class HebrewLocale(Locale): - - names = ['he', 'he_IL'] - - past = 'לפני {0}' - future = 'בעוד {0}' - - timeframes = { - 'now': 'הרגע', - 'seconds': 'שניות', - 'minute': 'דקה', - 'minutes': '{0} דקות', - 'hour': 'שעה', - 'hours': '{0} שעות', - '2-hours': 'שעתיים', - 'day': 'יום', - 'days': '{0} ימים', - '2-days': 'יומיים', - 'month': 'חודש', - 'months': '{0} חודשים', - '2-months': 'חודשיים', - 'year': 'שנה', - 'years': '{0} שנים', - '2-years': 'שנתיים', - } - - meridians = { - 'am': 'לפנ"צ', - 'pm': 'אחר"צ', - 'AM': 'לפני הצהריים', - 'PM': 'אחרי הצהריים', - } - - month_names = ['', 'ינואר', 'פברואר', 'מרץ', 'אפריל', 'מאי', 'יוני', 'יולי', - 'אוגוסט', 'ספטמבר', 'אוקטובר', 'נובמבר', 'דצמבר'] - month_abbreviations = ['', 'ינו׳', 'פבר׳', 'מרץ', 'אפר׳', 'מאי', 'יוני', 'יולי', 'אוג׳', - 'ספט׳', 'אוק׳', 'נוב׳', 'דצמ׳'] - - day_names = ['', 'שני', 'שלישי', 'רביעי', 'חמישי', 'שישי', 'שבת', 'ראשון'] - day_abbreviations = ['', 'ב׳', 'ג׳', 'ד׳', 'ה׳', 'ו׳', 'ש׳', 'א׳'] - - def _format_timeframe(self, timeframe, delta): - '''Hebrew couple of aware''' - couple = '2-{0}'.format(timeframe) - if abs(delta) == 2 and couple in self.timeframes: - return self.timeframes[couple].format(abs(delta)) - else: - return self.timeframes[timeframe].format(abs(delta)) - -class MarathiLocale(Locale): - - names = ['mr'] - - past = '{0} आधी' - future = '{0} नंतर' - - timeframes = { - 
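`HebrewLocale._format_timeframe` above special-cases a delta of exactly 2, preferring a dedicated dual form (`'2-hours'`, `'2-days'`, ...) over the regular plural:

    from arrow import locales

    he = locales.get_locale('he')
    he.describe('hours', 2)  # -> 'בעוד שעתיים'  (dual form)
    he.describe('hours', 5)  # -> 'בעוד 5 שעות'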
'now': 'सद्य', - 'seconds': 'सेकंद', - 'minute': 'एक मिनिट ', - 'minutes': '{0} मिनिट ', - 'hour': 'एक तास', - 'hours': '{0} तास', - 'day': 'एक दिवस', - 'days': '{0} दिवस', - 'month': 'एक महिना ', - 'months': '{0} महिने ', - 'year': 'एक वर्ष ', - 'years': '{0} वर्ष ', - } - - meridians = { - 'am': 'सकाळ', - 'pm': 'संध्याकाळ', - 'AM': 'सकाळ', - 'PM': 'संध्याकाळ', - } - - month_names = ['', 'जानेवारी', 'फेब्रुवारी', 'मार्च', 'एप्रिल', 'मे', 'जून', 'जुलै', - 'अॉगस्ट', 'सप्टेंबर', 'अॉक्टोबर', 'नोव्हेंबर', 'डिसेंबर'] - month_abbreviations = ['', 'जान', 'फेब्रु', 'मार्च', 'एप्रि', 'मे', 'जून', 'जुलै', 'अॉग', - 'सप्टें', 'अॉक्टो', 'नोव्हें', 'डिसें'] - - day_names = ['', 'सोमवार', 'मंगळवार', 'बुधवार', 'गुरुवार', 'शुक्रवार', 'शनिवार', 'रविवार'] - day_abbreviations = ['', 'सोम', 'मंगळ', 'बुध', 'गुरु', 'शुक्र', 'शनि', 'रवि'] - -def _map_locales(): - - locales = {} - - for cls_name, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass): - if issubclass(cls, Locale): - for name in cls.names: - locales[name.lower()] = cls - - return locales - -class CatalanLocale(Locale): - names = ['ca', 'ca_es', 'ca_ad', 'ca_fr', 'ca_it'] - past = 'Fa {0}' - future = 'En {0}' - - timeframes = { - 'now': 'Ara mateix', - 'seconds': 'segons', - 'minute': '1 minut', - 'minutes': '{0} minuts', - 'hour': 'una hora', - 'hours': '{0} hores', - 'day': 'un dia', - 'days': '{0} dies', - 'month': 'un mes', - 'months': '{0} mesos', - 'year': 'un any', - 'years': '{0} anys', - } - - month_names = ['', 'Gener', 'Febrer', 'Març', 'Abril', 'Maig', 'Juny', 'Juliol', 'Agost', 'Setembre', 'Octubre', 'Novembre', 'Desembre'] - month_abbreviations = ['', 'Gener', 'Febrer', 'Març', 'Abril', 'Maig', 'Juny', 'Juliol', 'Agost', 'Setembre', 'Octubre', 'Novembre', 'Desembre'] - day_names = ['', 'Dilluns', 'Dimarts', 'Dimecres', 'Dijous', 'Divendres', 'Dissabte', 'Diumenge'] - day_abbreviations = ['', 'Dilluns', 'Dimarts', 'Dimecres', 'Dijous', 'Divendres', 'Dissabte', 'Diumenge'] - -class BasqueLocale(Locale): - names = ['eu', 'eu_eu'] - past = 'duela {0}' - future = '{0}' # I don't know what's the right phrase in Basque for the future. 
- - timeframes = { - 'now': 'Orain', - 'seconds': 'segundu', - 'minute': 'minutu bat', - 'minutes': '{0} minutu', - 'hour': 'ordu bat', - 'hours': '{0} ordu', - 'day': 'egun bat', - 'days': '{0} egun', - 'month': 'hilabete bat', - 'months': '{0} hilabet', - 'year': 'urte bat', - 'years': '{0} urte', - } - - month_names = ['', 'urtarrilak', 'otsailak', 'martxoak', 'apirilak', 'maiatzak', 'ekainak', 'uztailak', 'abuztuak', 'irailak', 'urriak', 'azaroak', 'abenduak'] - month_abbreviations = ['', 'urt', 'ots', 'mar', 'api', 'mai', 'eka', 'uzt', 'abu', 'ira', 'urr', 'aza', 'abe'] - day_names = ['', 'asteleehna', 'asteartea', 'asteazkena', 'osteguna', 'ostirala', 'larunbata', 'igandea'] - day_abbreviations = ['', 'al', 'ar', 'az', 'og', 'ol', 'lr', 'ig'] - - -class HungarianLocale(Locale): - - names = ['hu', 'hu_hu'] - - past = '{0} ezelőtt' - future = '{0} múlva' - - timeframes = { - 'now': 'éppen most', - 'seconds': { - 'past': 'másodpercekkel', - 'future': 'pár másodperc' - }, - 'minute': {'past': 'egy perccel', 'future': 'egy perc'}, - 'minutes': {'past': '{0} perccel', 'future': '{0} perc'}, - 'hour': {'past': 'egy órával', 'future': 'egy óra'}, - 'hours': {'past': '{0} órával', 'future': '{0} óra'}, - 'day': { - 'past': 'egy nappal', - 'future': 'egy nap' - }, - 'days': { - 'past': '{0} nappal', - 'future': '{0} nap' - }, - 'month': {'past': 'egy hónappal', 'future': 'egy hónap'}, - 'months': {'past': '{0} hónappal', 'future': '{0} hónap'}, - 'year': {'past': 'egy évvel', 'future': 'egy év'}, - 'years': {'past': '{0} évvel', 'future': '{0} év'}, - } - - month_names = ['', 'január', 'február', 'március', 'április', 'május', - 'június', 'július', 'augusztus', 'szeptember', - 'október', 'november', 'december'] - month_abbreviations = ['', 'jan', 'febr', 'márc', 'ápr', 'máj', 'jún', - 'júl', 'aug', 'szept', 'okt', 'nov', 'dec'] - - day_names = ['', 'hétfő', 'kedd', 'szerda', 'csütörtök', 'péntek', - 'szombat', 'vasárnap'] - day_abbreviations = ['', 'hét', 'kedd', 'szer', 'csüt', 'pént', - 'szom', 'vas'] - - meridians = { - 'am': 'de', - 'pm': 'du', - 'AM': 'DE', - 'PM': 'DU', - } - - def _format_timeframe(self, timeframe, delta): - form = self.timeframes[timeframe] - - if isinstance(form, dict): - if delta > 0: - form = form['future'] - else: - form = form['past'] - - return form.format(abs(delta)) - - -class EsperantoLocale(Locale): - names = ['eo', 'eo_xx'] - past = 'antaŭ {0}' - future = 'post {0}' - - timeframes = { - 'now': 'nun', - 'seconds': 'kelkaj sekundoj', - 'minute': 'unu minuto', - 'minutes': '{0} minutoj', - 'hour': 'un horo', - 'hours': '{0} horoj', - 'day': 'unu tago', - 'days': '{0} tagoj', - 'month': 'unu monato', - 'months': '{0} monatoj', - 'year': 'unu jaro', - 'years': '{0} jaroj', - } - - month_names = ['', 'januaro', 'februaro', 'marto', 'aprilo', 'majo', - 'junio', 'julio', 'aŭgusto', 'septembro', 'oktobro', - 'novembro', 'decembro'] - month_abbreviations = ['', 'jan', 'feb', 'mar', 'apr', 'maj', 'jun', - 'jul', 'aŭg', 'sep', 'okt', 'nov', 'dec'] - - day_names = ['', 'lundo', 'mardo', 'merkredo', 'ĵaŭdo', 'vendredo', - 'sabato', 'dimanĉo'] - day_abbreviations = ['', 'lun', 'mar', 'mer', 'ĵaŭ', 'ven', - 'sab', 'dim'] - - meridians = { - 'am': 'atm', - 'pm': 'ptm', - 'AM': 'ATM', - 'PM': 'PTM', - } - - ordinal_day_re = r'((?P[1-3]?[0-9](?=a))a)' - - def _ordinal_number(self, n): - return '{0}a'.format(n) - - -class ThaiLocale(Locale): - - names = ['th', 'th_th'] - - past = '{0}{1}ที่ผ่านมา' - future = 'ในอีก{1}{0}' - - timeframes = { - 'now': 'ขณะนี้', - 'seconds': 
'ไม่กี่วินาที', - 'minute': '1 นาที', - 'minutes': '{0} นาที', - 'hour': '1 ชั่วโมง', - 'hours': '{0} ชั่วโมง', - 'day': '1 วัน', - 'days': '{0} วัน', - 'month': '1 เดือน', - 'months': '{0} เดือน', - 'year': '1 ปี', - 'years': '{0} ปี', - } - - month_names = ['', 'มกราคม', 'กุมภาพันธ์', 'มีนาคม', 'เมษายน', - 'พฤษภาคม', 'มิถุนายน', 'กรกฏาคม', 'สิงหาคม', - 'กันยายน', 'ตุลาคม', 'พฤศจิกายน', 'ธันวาคม'] - month_abbreviations = ['', 'ม.ค.', 'ก.พ.', 'มี.ค.', 'เม.ย.', 'พ.ค.', - 'มิ.ย.', 'ก.ค.', 'ส.ค.', 'ก.ย.', 'ต.ค.', - 'พ.ย.', 'ธ.ค.'] - - day_names = ['', 'จันทร์', 'อังคาร', 'พุธ', 'พฤหัสบดี', 'ศุกร์', - 'เสาร์', 'อาทิตย์'] - day_abbreviations = ['', 'จ', 'อ', 'พ', 'พฤ', 'ศ', 'ส', 'อา'] - - meridians = { - 'am': 'am', - 'pm': 'pm', - 'AM': 'AM', - 'PM': 'PM', - } - - BE_OFFSET = 543 - - def year_full(self, year): - '''Thai always use Buddhist Era (BE) which is CE + 543''' - year += self.BE_OFFSET - return '{0:04d}'.format(year) - - def year_abbreviation(self, year): - '''Thai always use Buddhist Era (BE) which is CE + 543''' - year += self.BE_OFFSET - return '{0:04d}'.format(year)[2:] - - def _format_relative(self, humanized, timeframe, delta): - '''Thai normally doesn't have any space between words''' - if timeframe == 'now': - return humanized - space = '' if timeframe == 'seconds' else ' ' - direction = self.past if delta < 0 else self.future - - return direction.format(humanized, space) - - - -class BengaliLocale(Locale): - - names = ['bn', 'bn_bd', 'bn_in'] - - past = '{0} আগে' - future = '{0} পরে' - - timeframes = { - 'now': 'এখন', - 'seconds': 'সেকেন্ড', - 'minute': 'এক মিনিট', - 'minutes': '{0} মিনিট', - 'hour': 'এক ঘণ্টা', - 'hours': '{0} ঘণ্টা', - 'day': 'এক দিন', - 'days': '{0} দিন', - 'month': 'এক মাস', - 'months': '{0} মাস ', - 'year': 'এক বছর', - 'years': '{0} বছর', - } - - meridians = { - 'am': 'সকাল', - 'pm': 'বিকাল', - 'AM': 'সকাল', - 'PM': 'বিকাল', - } - - month_names = ['', 'জানুয়ারি', 'ফেব্রুয়ারি', 'মার্চ', 'এপ্রিল', 'মে', 'জুন', 'জুলাই', - 'আগস্ট', 'সেপ্টেম্বর', 'অক্টোবর', 'নভেম্বর', 'ডিসেম্বর'] - month_abbreviations = ['', 'জানু', 'ফেব', 'মার্চ', 'এপ্রি', 'মে', 'জুন', 'জুল', - 'অগা','সেপ্ট', 'অক্টো', 'নভে', 'ডিসে'] - - day_names = ['', 'সোমবার', 'মঙ্গলবার', 'বুধবার', 'বৃহস্পতিবার', 'শুক্রবার', 'শনিবার', 'রবিবার'] - day_abbreviations = ['', 'সোম', 'মঙ্গল', 'বুধ', 'বৃহঃ', 'শুক্র', 'শনি', 'রবি'] - - def _ordinal_number(self, n): - if n > 10 or n == 0: - return '{0}তম'.format(n) - if n in [1, 5, 7, 8, 9, 10]: - return '{0}ম'.format(n) - if n in [2, 3]: - return '{0}য়'.format(n) - if n == 4: - return '{0}র্থ'.format(n) - if n == 6: - return '{0}ষ্ঠ'.format(n) - - -class RomanshLocale(Locale): - - names = ['rm', 'rm_ch'] - - past = 'avant {0}' - future = 'en {0}' - - timeframes = { - 'now': 'en quest mument', - 'seconds': 'secundas', - 'minute': 'ina minuta', - 'minutes': '{0} minutas', - 'hour': 'in\'ura', - 'hours': '{0} ura', - 'day': 'in di', - 'days': '{0} dis', - 'month': 'in mais', - 'months': '{0} mais', - 'year': 'in onn', - 'years': '{0} onns', - } - - month_names = [ - '', 'schaner', 'favrer', 'mars', 'avrigl', 'matg', 'zercladur', - 'fanadur', 'avust', 'settember', 'october', 'november', 'december' - ] - - month_abbreviations = [ - '', 'schan', 'fav', 'mars', 'avr', 'matg', 'zer', 'fan', 'avu', - 'set', 'oct', 'nov', 'dec' - ] - - day_names = [ - '', 'glindesdi', 'mardi', 'mesemna', 'gievgia', 'venderdi', - 'sonda', 'dumengia' - ] - - day_abbreviations = [ - '', 'gli', 'ma', 'me', 'gie', 've', 'so', 'du' - ] - - -class SwissLocale(Locale): - - names = ['de', 
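The `ThaiLocale` earlier in this hunk converts years to the Buddhist Era (CE + 543) and, in `_format_relative`, joins relative phrases without the space most locales insert:

    from arrow import locales

    th = locales.get_locale('th')
    th.year_full(2013)          # -> '2556'  (2013 + 543)
    th.year_abbreviation(2013)  # -> '56'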
'de_ch'] - - past = 'vor {0}' - future = 'in {0}' - - timeframes = { - 'now': 'gerade eben', - 'seconds': 'Sekunden', - 'minute': 'einer Minute', - 'minutes': '{0} Minuten', - 'hour': 'einer Stunde', - 'hours': '{0} Stunden', - 'day': 'einem Tag', - 'days': '{0} Tage', - 'month': 'einem Monat', - 'months': '{0} Monaten', - 'year': 'einem Jahr', - 'years': '{0} Jahren', - } - - month_names = [ - '', 'Januar', 'Februar', 'März', 'April', 'Mai', 'Juni', 'Juli', - 'August', 'September', 'Oktober', 'November', 'Dezember' - ] - - month_abbreviations = [ - '', 'Jan', 'Feb', 'Mär', 'Apr', 'Mai', 'Jun', 'Jul', 'Aug', 'Sep', - 'Okt', 'Nov', 'Dez' - ] - - day_names = [ - '', 'Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', - 'Samstag', 'Sonntag' - ] - - day_abbreviations = [ - '', 'Mo', 'Di', 'Mi', 'Do', 'Fr', 'Sa', 'So' - ] - - -class RomanianLocale(Locale): - names = ['ro', 'ro_ro'] - - past = '{0} în urmă' - future = 'peste {0}' - - timeframes = { - 'now': 'acum', - 'seconds': 'câteva secunde', - 'minute': 'un minut', - 'minutes': '{0} minute', - 'hour': 'o oră', - 'hours': '{0} ore', - 'day': 'o zi', - 'days': '{0} zile', - 'month': 'o lună', - 'months': '{0} luni', - 'year': 'un an', - 'years': '{0} ani', - } - - month_names = ['', 'ianuarie', 'februarie', 'martie', 'aprilie', 'mai', 'iunie', 'iulie', - 'august', 'septembrie', 'octombrie', 'noiembrie', 'decembrie'] - month_abbreviations = ['', 'ian', 'febr', 'mart', 'apr', 'mai', 'iun', 'iul', 'aug', 'sept', 'oct', 'nov', 'dec'] - - day_names = ['', 'luni', 'marți', 'miercuri', 'joi', 'vineri', 'sâmbătă', 'duminică'] - day_abbreviations = ['', 'Lun', 'Mar', 'Mie', 'Joi', 'Vin', 'Sâm', 'Dum'] - - -class SlovenianLocale(Locale): - names = ['sl', 'sl_si'] - - past = 'pred {0}' - future = 'čez {0}' - - timeframes = { - 'now': 'zdaj', - 'seconds': 'sekund', - 'minute': 'minuta', - 'minutes': '{0} minutami', - 'hour': 'uro', - 'hours': '{0} ur', - 'day': 'dan', - 'days': '{0} dni', - 'month': 'mesec', - 'months': '{0} mesecev', - 'year': 'leto', - 'years': '{0} let', - } - - meridians = { - 'am': '', - 'pm': '', - 'AM': '', - 'PM': '', - } - - month_names = [ - '', 'Januar', 'Februar', 'Marec', 'April', 'Maj', 'Junij', 'Julij', - 'Avgust', 'September', 'Oktober', 'November', 'December' - ] - - month_abbreviations = [ - '', 'Jan', 'Feb', 'Mar', 'Apr', 'Maj', 'Jun', 'Jul', 'Avg', - 'Sep', 'Okt', 'Nov', 'Dec' - ] - - day_names = [ - '', 'Ponedeljek', 'Torek', 'Sreda', 'Četrtek', 'Petek', 'Sobota', 'Nedelja' - ] - - day_abbreviations = [ - '', 'Pon', 'Tor', 'Sre', 'Čet', 'Pet', 'Sob', 'Ned' - ] - - -_locales = _map_locales() diff --git a/lib/arrow/parser.py b/lib/arrow/parser.py deleted file mode 100644 index f3ed56cf..00000000 --- a/lib/arrow/parser.py +++ /dev/null @@ -1,328 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from __future__ import unicode_literals - -from datetime import datetime -from dateutil import tz -import re -from arrow import locales - - -class ParserError(RuntimeError): - pass - - -class DateTimeParser(object): - - _FORMAT_RE = re.compile('(YYY?Y?|MM?M?M?|Do|DD?D?D?|d?d?d?d|HH?|hh?|mm?|ss?|S+|ZZ?Z?|a|A|X)') - _ESCAPE_RE = re.compile('\[[^\[\]]*\]') - - _ONE_OR_MORE_DIGIT_RE = re.compile('\d+') - _ONE_OR_TWO_DIGIT_RE = re.compile('\d{1,2}') - _FOUR_DIGIT_RE = re.compile('\d{4}') - _TWO_DIGIT_RE = re.compile('\d{2}') - _TZ_RE = re.compile('[+\-]?\d{2}:?(\d{2})?') - _TZ_NAME_RE = re.compile('\w[\w+\-/]+') - - - _BASE_INPUT_RE_MAP = { - 'YYYY': _FOUR_DIGIT_RE, - 'YY': _TWO_DIGIT_RE, - 'MM': 
_TWO_DIGIT_RE, - 'M': _ONE_OR_TWO_DIGIT_RE, - 'DD': _TWO_DIGIT_RE, - 'D': _ONE_OR_TWO_DIGIT_RE, - 'HH': _TWO_DIGIT_RE, - 'H': _ONE_OR_TWO_DIGIT_RE, - 'hh': _TWO_DIGIT_RE, - 'h': _ONE_OR_TWO_DIGIT_RE, - 'mm': _TWO_DIGIT_RE, - 'm': _ONE_OR_TWO_DIGIT_RE, - 'ss': _TWO_DIGIT_RE, - 's': _ONE_OR_TWO_DIGIT_RE, - 'X': re.compile('\d+'), - 'ZZZ': _TZ_NAME_RE, - 'ZZ': _TZ_RE, - 'Z': _TZ_RE, - 'S': _ONE_OR_MORE_DIGIT_RE, - } - - MARKERS = ['YYYY', 'MM', 'DD'] - SEPARATORS = ['-', '/', '.'] - - def __init__(self, locale='en_us'): - - self.locale = locales.get_locale(locale) - self._input_re_map = self._BASE_INPUT_RE_MAP.copy() - self._input_re_map.update({ - 'MMMM': self._choice_re(self.locale.month_names[1:], re.IGNORECASE), - 'MMM': self._choice_re(self.locale.month_abbreviations[1:], - re.IGNORECASE), - 'Do': re.compile(self.locale.ordinal_day_re), - 'dddd': self._choice_re(self.locale.day_names[1:], re.IGNORECASE), - 'ddd': self._choice_re(self.locale.day_abbreviations[1:], - re.IGNORECASE), - 'd' : re.compile("[1-7]"), - 'a': self._choice_re( - (self.locale.meridians['am'], self.locale.meridians['pm']) - ), - # note: 'A' token accepts both 'am/pm' and 'AM/PM' formats to - # ensure backwards compatibility of this token - 'A': self._choice_re(self.locale.meridians.values()) - }) - - def parse_iso(self, string): - - has_time = 'T' in string or ' ' in string.strip() - space_divider = ' ' in string.strip() - - if has_time: - if space_divider: - date_string, time_string = string.split(' ', 1) - else: - date_string, time_string = string.split('T', 1) - time_parts = re.split('[+-]', time_string, 1) - has_tz = len(time_parts) > 1 - has_seconds = time_parts[0].count(':') > 1 - has_subseconds = re.search('[.,]', time_parts[0]) - - if has_subseconds: - formats = ['YYYY-MM-DDTHH:mm:ss%sS' % has_subseconds.group()] - elif has_seconds: - formats = ['YYYY-MM-DDTHH:mm:ss'] - else: - formats = ['YYYY-MM-DDTHH:mm'] - else: - has_tz = False - # generate required formats: YYYY-MM-DD, YYYY-MM-DD, YYYY - # using various separators: -, /, . - l = len(self.MARKERS) - formats = [separator.join(self.MARKERS[:l-i]) - for i in range(l) - for separator in self.SEPARATORS] - - if has_time and has_tz: - formats = [f + 'Z' for f in formats] - - if space_divider: - formats = [item.replace('T', ' ', 1) for item in formats] - - return self._parse_multiformat(string, formats) - - def parse(self, string, fmt): - - if isinstance(fmt, list): - return self._parse_multiformat(string, fmt) - - # fmt is a string of tokens like 'YYYY-MM-DD' - # we construct a new string by replacing each - # token by its pattern: - # 'YYYY-MM-DD' -> '(?P\d{4})-(?P\d{2})-(?P
\d{2})' - tokens = [] - offset = 0 - - # Extract the bracketed expressions to be reinserted later. - escaped_fmt = re.sub(self._ESCAPE_RE, "#" , fmt) - # Any number of S is the same as one. - escaped_fmt = re.sub('S+', 'S', escaped_fmt) - escaped_data = re.findall(self._ESCAPE_RE, fmt) - - fmt_pattern = escaped_fmt - - for m in self._FORMAT_RE.finditer(escaped_fmt): - token = m.group(0) - try: - input_re = self._input_re_map[token] - except KeyError: - raise ParserError('Unrecognized token \'{0}\''.format(token)) - input_pattern = '(?P<{0}>{1})'.format(token, input_re.pattern) - tokens.append(token) - # a pattern doesn't have the same length as the token - # it replaces! We keep the difference in the offset variable. - # This works because the string is scanned left-to-right and matches - # are returned in the order found by finditer. - fmt_pattern = fmt_pattern[:m.start() + offset] + input_pattern + fmt_pattern[m.end() + offset:] - offset += len(input_pattern) - (m.end() - m.start()) - - final_fmt_pattern = "" - a = fmt_pattern.split("#") - b = escaped_data - - # Due to the way Python splits, 'a' will always be longer - for i in range(len(a)): - final_fmt_pattern += a[i] - if i < len(b): - final_fmt_pattern += b[i][1:-1] - - match = re.search(final_fmt_pattern, string, flags=re.IGNORECASE) - if match is None: - raise ParserError('Failed to match \'{0}\' when parsing \'{1}\''.format(final_fmt_pattern, string)) - parts = {} - for token in tokens: - if token == 'Do': - value = match.group('value') - else: - value = match.group(token) - self._parse_token(token, value, parts) - return self._build_datetime(parts) - - def _parse_token(self, token, value, parts): - - if token == 'YYYY': - parts['year'] = int(value) - elif token == 'YY': - value = int(value) - parts['year'] = 1900 + value if value > 68 else 2000 + value - - elif token in ['MMMM', 'MMM']: - parts['month'] = self.locale.month_number(value.lower()) - - elif token in ['MM', 'M']: - parts['month'] = int(value) - - elif token in ['DD', 'D']: - parts['day'] = int(value) - - elif token in ['Do']: - parts['day'] = int(value) - - elif token.upper() in ['HH', 'H']: - parts['hour'] = int(value) - - elif token in ['mm', 'm']: - parts['minute'] = int(value) - - elif token in ['ss', 's']: - parts['second'] = int(value) - - elif token == 'S': - # We have the *most significant* digits of an arbitrary-precision integer. - # We want the six most significant digits as an integer, rounded. - # FIXME: add nanosecond support somehow? 
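# A standalone sketch (not the module's own code) of the token-to-regex
# substitution parse() performs above, trimmed to three tokens. The offset
# bookkeeping is the same: each named group is longer than the token it
# replaces, so later match positions must be shifted by the accumulated growth.
import re

FORMAT_RE = re.compile(r'YYYY|MM|DD')
PATTERNS = {'YYYY': r'\d{4}', 'MM': r'\d{2}', 'DD': r'\d{2}'}

fmt = 'YYYY-MM-DD'
pattern, offset = fmt, 0
for m in FORMAT_RE.finditer(fmt):
    token = m.group(0)
    repl = '(?P<{0}>{1})'.format(token, PATTERNS[token])
    # splice the named group in at the shifted position
    pattern = pattern[:m.start() + offset] + repl + pattern[m.end() + offset:]
    offset += len(repl) - (m.end() - m.start())

assert re.match(pattern, '2021-02-05') is not None
# The real parser drives this machinery via, e.g.:
#   DateTimeParser(locale='en_us').parse('2021-02-05', 'YYYY-MM-DD')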
- value = value.ljust(7, str('0')) - - # floating-point (IEEE-754) defaults to half-to-even rounding - seventh_digit = int(value[6]) - if seventh_digit == 5: - rounding = int(value[5]) % 2 - elif seventh_digit > 5: - rounding = 1 - else: - rounding = 0 - - parts['microsecond'] = int(value[:6]) + rounding - - elif token == 'X': - parts['timestamp'] = int(value) - - elif token in ['ZZZ', 'ZZ', 'Z']: - parts['tzinfo'] = TzinfoParser.parse(value) - - elif token in ['a', 'A']: - if value in ( - self.locale.meridians['am'], - self.locale.meridians['AM'] - ): - parts['am_pm'] = 'am' - elif value in ( - self.locale.meridians['pm'], - self.locale.meridians['PM'] - ): - parts['am_pm'] = 'pm' - - @staticmethod - def _build_datetime(parts): - - timestamp = parts.get('timestamp') - - if timestamp: - tz_utc = tz.tzutc() - return datetime.fromtimestamp(timestamp, tz=tz_utc) - - am_pm = parts.get('am_pm') - hour = parts.get('hour', 0) - - if am_pm == 'pm' and hour < 12: - hour += 12 - elif am_pm == 'am' and hour == 12: - hour = 0 - - return datetime(year=parts.get('year', 1), month=parts.get('month', 1), - day=parts.get('day', 1), hour=hour, minute=parts.get('minute', 0), - second=parts.get('second', 0), microsecond=parts.get('microsecond', 0), - tzinfo=parts.get('tzinfo')) - - def _parse_multiformat(self, string, formats): - - _datetime = None - - for fmt in formats: - try: - _datetime = self.parse(string, fmt) - break - except ParserError: - pass - - if _datetime is None: - raise ParserError('Could not match input to any of {0} on \'{1}\''.format(formats, string)) - - return _datetime - - @staticmethod - def _map_lookup(input_map, key): - - try: - return input_map[key] - except KeyError: - raise ParserError('Could not match "{0}" to {1}'.format(key, input_map)) - - @staticmethod - def _try_timestamp(string): - - try: - return float(string) - except: - return None - - @staticmethod - def _choice_re(choices, flags=0): - return re.compile('({0})'.format('|'.join(choices)), flags=flags) - - -class TzinfoParser(object): - - _TZINFO_RE = re.compile('([+\-])?(\d\d):?(\d\d)?') - - @classmethod - def parse(cls, string): - - tzinfo = None - - if string == 'local': - tzinfo = tz.tzlocal() - - elif string in ['utc', 'UTC']: - tzinfo = tz.tzutc() - - else: - - iso_match = cls._TZINFO_RE.match(string) - - if iso_match: - sign, hours, minutes = iso_match.groups() - if minutes is None: - minutes = 0 - seconds = int(hours) * 3600 + int(minutes) * 60 - - if sign == '-': - seconds *= -1 - - tzinfo = tz.tzoffset(None, seconds) - - else: - tzinfo = tz.gettz(string) - - if tzinfo is None: - raise ParserError('Could not parse timezone expression "{0}"'.format(string)) - - return tzinfo diff --git a/lib/arrow/util.py b/lib/arrow/util.py deleted file mode 100644 index 3eed4faa..00000000 --- a/lib/arrow/util.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -import sys - -# python 2.6 / 2.7 definitions for total_seconds function. - -def _total_seconds_27(td): # pragma: no cover - return td.total_seconds() - -def _total_seconds_26(td): - return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6 - - -# get version info and assign correct total_seconds function. 
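# A self-contained sketch (not the module's own code) of the 'S' (subsecond)
# token handling above: pad the captured digits to seven places, then use the
# seventh digit to round half-to-even down to six digits of microseconds.
def subseconds_to_microseconds(value):
    value = value.ljust(7, '0')
    seventh_digit = int(value[6])
    if seventh_digit == 5:
        rounding = int(value[5]) % 2   # tie: round toward the even neighbour
    elif seventh_digit > 5:
        rounding = 1
    else:
        rounding = 0
    return int(value[:6]) + rounding

assert subseconds_to_microseconds('1234567') == 123457   # .1234567s rounds up
assert subseconds_to_microseconds('12') == 120000        # .12s pads to .120000
assert subseconds_to_microseconds('1234565') == 123456   # tie -> even digit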
- -version = '{0}.{1}.{2}'.format(*sys.version_info[:3]) - -if version < '2.7': # pragma: no cover - total_seconds = _total_seconds_26 -else: # pragma: no cover - total_seconds = _total_seconds_27 - -def is_timestamp(value): - if type(value) == bool: - return False - try: - float(value) - return True - except: - return False - -# python 2.7 / 3.0+ definitions for isstr function. - -try: # pragma: no cover - basestring - - def isstr(s): - return isinstance(s, basestring) - -except NameError: #pragma: no cover - - def isstr(s): - return isinstance(s, str) - - -__all__ = ['total_seconds', 'is_timestamp', 'isstr'] diff --git a/lib/backports/__init__.py b/lib/backports/__init__.py deleted file mode 100644 index de40ea7c..00000000 --- a/lib/backports/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__import__('pkg_resources').declare_namespace(__name__) diff --git a/lib/backports/csv.py b/lib/backports/csv.py deleted file mode 100644 index 4694a28e..00000000 --- a/lib/backports/csv.py +++ /dev/null @@ -1,979 +0,0 @@ -# -*- coding: utf-8 -*- -"""A port of Python 3's csv module to Python 2. - -The API of the csv module in Python 2 is drastically different from -the csv module in Python 3. This is due, for the most part, to the -difference between str in Python 2 and Python 3. - -The semantics of Python 3's version are more useful because they support -unicode natively, while Python 2's csv does not. -""" -from __future__ import unicode_literals, absolute_import - -__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE", - "Error", "Dialect", "__doc__", "excel", "excel_tab", - "field_size_limit", "reader", "writer", - "register_dialect", "get_dialect", "list_dialects", "Sniffer", - "unregister_dialect", "__version__", "DictReader", "DictWriter" ] - -import re -import numbers -from io import StringIO -from csv import ( - QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, - __version__, __doc__, Error, field_size_limit, -) - -# Stuff needed from six -import sys -PY3 = sys.version_info[0] == 3 -if PY3: - string_types = str - text_type = str - binary_type = bytes - unichr = chr -else: - string_types = basestring - text_type = unicode - binary_type = str - - -class QuoteStrategy(object): - quoting = None - - def __init__(self, dialect): - if self.quoting is not None: - assert dialect.quoting == self.quoting - self.dialect = dialect - self.setup() - - escape_pattern_quoted = r'({quotechar})'.format( - quotechar=re.escape(self.dialect.quotechar or '"')) - escape_pattern_unquoted = r'([{specialchars}])'.format( - specialchars=re.escape(self.specialchars)) - - self.escape_re_quoted = re.compile(escape_pattern_quoted) - self.escape_re_unquoted = re.compile(escape_pattern_unquoted) - - def setup(self): - """Optional method for strategy-wide optimizations.""" - - def quoted(self, field=None, raw_field=None, only=None): - """Determine whether this field should be quoted.""" - raise NotImplementedError( - 'quoted must be implemented by a subclass') - - @property - def specialchars(self): - """The special characters that need to be escaped.""" - raise NotImplementedError( - 'specialchars must be implemented by a subclass') - - def escape_re(self, quoted=None): - if quoted: - return self.escape_re_quoted - return self.escape_re_unquoted - - def escapechar(self, quoted=None): - if quoted and self.dialect.doublequote: - return self.dialect.quotechar - return self.dialect.escapechar - - def prepare(self, raw_field, only=None): - field = text_type(raw_field if raw_field is not None else '') - quoted = 
self.quoted(field=field, raw_field=raw_field, only=only) - - escape_re = self.escape_re(quoted=quoted) - escapechar = self.escapechar(quoted=quoted) - - if escape_re.search(field): - escapechar = '\\\\' if escapechar == '\\' else escapechar - if not escapechar: - raise Error('No escapechar is set') - escape_replace = r'{escapechar}\1'.format(escapechar=escapechar) - field = escape_re.sub(escape_replace, field) - - if quoted: - field = '{quotechar}{field}{quotechar}'.format( - quotechar=self.dialect.quotechar, field=field) - - return field - - -class QuoteMinimalStrategy(QuoteStrategy): - quoting = QUOTE_MINIMAL - - def setup(self): - self.quoted_re = re.compile(r'[{specialchars}]'.format( - specialchars=re.escape(self.specialchars))) - - @property - def specialchars(self): - return ( - self.dialect.lineterminator + - self.dialect.quotechar + - self.dialect.delimiter + - (self.dialect.escapechar or '') - ) - - def quoted(self, field, only, **kwargs): - if field == self.dialect.quotechar and not self.dialect.doublequote: - # If the only character in the field is the quotechar, and - # doublequote is false, then just escape without outer quotes. - return False - return field == '' and only or bool(self.quoted_re.search(field)) - - -class QuoteAllStrategy(QuoteStrategy): - quoting = QUOTE_ALL - - @property - def specialchars(self): - return self.dialect.quotechar - - def quoted(self, **kwargs): - return True - - -class QuoteNonnumericStrategy(QuoteStrategy): - quoting = QUOTE_NONNUMERIC - - @property - def specialchars(self): - return ( - self.dialect.lineterminator + - self.dialect.quotechar + - self.dialect.delimiter + - (self.dialect.escapechar or '') - ) - - def quoted(self, raw_field, **kwargs): - return not isinstance(raw_field, numbers.Number) - - -class QuoteNoneStrategy(QuoteStrategy): - quoting = QUOTE_NONE - - @property - def specialchars(self): - return ( - self.dialect.lineterminator + - (self.dialect.quotechar or '') + - self.dialect.delimiter + - (self.dialect.escapechar or '') - ) - - def quoted(self, field, only, **kwargs): - if field == '' and only: - raise Error('single empty field record must be quoted') - return False - - -class writer(object): - def __init__(self, fileobj, dialect='excel', **fmtparams): - if fileobj is None: - raise TypeError('fileobj must be file-like, not None') - - self.fileobj = fileobj - - if isinstance(dialect, text_type): - dialect = get_dialect(dialect) - - try: - self.dialect = Dialect.combine(dialect, fmtparams) - except Error as e: - raise TypeError(*e.args) - - strategies = { - QUOTE_MINIMAL: QuoteMinimalStrategy, - QUOTE_ALL: QuoteAllStrategy, - QUOTE_NONNUMERIC: QuoteNonnumericStrategy, - QUOTE_NONE: QuoteNoneStrategy, - } - self.strategy = strategies[self.dialect.quoting](self.dialect) - - def writerow(self, row): - if row is None: - raise Error('row must be an iterable') - - row = list(row) - only = len(row) == 1 - row = [self.strategy.prepare(field, only=only) for field in row] - - line = self.dialect.delimiter.join(row) + self.dialect.lineterminator - return self.fileobj.write(line) - - def writerows(self, rows): - for row in rows: - self.writerow(row) - - -START_RECORD = 0 -START_FIELD = 1 -ESCAPED_CHAR = 2 -IN_FIELD = 3 -IN_QUOTED_FIELD = 4 -ESCAPE_IN_QUOTED_FIELD = 5 -QUOTE_IN_QUOTED_FIELD = 6 -EAT_CRNL = 7 -AFTER_ESCAPED_CRNL = 8 - - -class reader(object): - def __init__(self, fileobj, dialect='excel', **fmtparams): - self.input_iter = iter(fileobj) - - if isinstance(dialect, text_type): - dialect = get_dialect(dialect) - - try: - 
self.dialect = Dialect.combine(dialect, fmtparams) - except Error as e: - raise TypeError(*e.args) - - self.fields = None - self.field = None - self.line_num = 0 - - def parse_reset(self): - self.fields = [] - self.field = [] - self.state = START_RECORD - self.numeric_field = False - - def parse_save_field(self): - field = ''.join(self.field) - self.field = [] - if self.numeric_field: - field = float(field) - self.numeric_field = False - self.fields.append(field) - - def parse_add_char(self, c): - if len(self.field) >= field_size_limit(): - raise Error('field size limit exceeded') - self.field.append(c) - - def parse_process_char(self, c): - switch = { - START_RECORD: self._parse_start_record, - START_FIELD: self._parse_start_field, - ESCAPED_CHAR: self._parse_escaped_char, - AFTER_ESCAPED_CRNL: self._parse_after_escaped_crnl, - IN_FIELD: self._parse_in_field, - IN_QUOTED_FIELD: self._parse_in_quoted_field, - ESCAPE_IN_QUOTED_FIELD: self._parse_escape_in_quoted_field, - QUOTE_IN_QUOTED_FIELD: self._parse_quote_in_quoted_field, - EAT_CRNL: self._parse_eat_crnl, - } - return switch[self.state](c) - - def _parse_start_record(self, c): - if c == '\0': - return - elif c == '\n' or c == '\r': - self.state = EAT_CRNL - return - - self.state = START_FIELD - return self._parse_start_field(c) - - def _parse_start_field(self, c): - if c == '\n' or c == '\r' or c == '\0': - self.parse_save_field() - self.state = START_RECORD if c == '\0' else EAT_CRNL - elif (c == self.dialect.quotechar and - self.dialect.quoting != QUOTE_NONE): - self.state = IN_QUOTED_FIELD - elif c == self.dialect.escapechar: - self.state = ESCAPED_CHAR - elif c == ' ' and self.dialect.skipinitialspace: - pass # Ignore space at start of field - elif c == self.dialect.delimiter: - # Save empty field - self.parse_save_field() - else: - # Begin new unquoted field - if self.dialect.quoting == QUOTE_NONNUMERIC: - self.numeric_field = True - self.parse_add_char(c) - self.state = IN_FIELD - - def _parse_escaped_char(self, c): - if c == '\n' or c == '\r': - self.parse_add_char(c) - self.state = AFTER_ESCAPED_CRNL - return - if c == '\0': - c = '\n' - self.parse_add_char(c) - self.state = IN_FIELD - - def _parse_after_escaped_crnl(self, c): - if c == '\0': - return - return self._parse_in_field(c) - - def _parse_in_field(self, c): - # In unquoted field - if c == '\n' or c == '\r' or c == '\0': - # End of line - return [fields] - self.parse_save_field() - self.state = START_RECORD if c == '\0' else EAT_CRNL - elif c == self.dialect.escapechar: - self.state = ESCAPED_CHAR - elif c == self.dialect.delimiter: - self.parse_save_field() - self.state = START_FIELD - else: - # Normal character - save in field - self.parse_add_char(c) - - def _parse_in_quoted_field(self, c): - if c == '\0': - pass - elif c == self.dialect.escapechar: - self.state = ESCAPE_IN_QUOTED_FIELD - elif (c == self.dialect.quotechar and - self.dialect.quoting != QUOTE_NONE): - if self.dialect.doublequote: - self.state = QUOTE_IN_QUOTED_FIELD - else: - self.state = IN_FIELD - else: - self.parse_add_char(c) - - def _parse_escape_in_quoted_field(self, c): - if c == '\0': - c = '\n' - - self.parse_add_char(c) - self.state = IN_QUOTED_FIELD - - def _parse_quote_in_quoted_field(self, c): - if (self.dialect.quoting != QUOTE_NONE and - c == self.dialect.quotechar): - # save "" as " - self.parse_add_char(c) - self.state = IN_QUOTED_FIELD - elif c == self.dialect.delimiter: - self.parse_save_field() - self.state = START_FIELD - elif c == '\n' or c == '\r' or c == '\0': - # End of line 
= return [fields] - self.parse_save_field() - self.state = START_RECORD if c == '\0' else EAT_CRNL - elif not self.dialect.strict: - self.parse_add_char(c) - self.state = IN_FIELD - else: - # illegal - raise Error("{delimiter}' expected after '{quotechar}".format( - delimiter=self.dialect.delimiter, - quotechar=self.dialect.quotechar, - )) - - def _parse_eat_crnl(self, c): - if c == '\n' or c == '\r': - pass - elif c == '\0': - self.state = START_RECORD - else: - raise Error('new-line character seen in unquoted field - do you ' - 'need to open the file in universal-newline mode?') - - - def __iter__(self): - return self - - def __next__(self): - self.parse_reset() - - while True: - try: - lineobj = next(self.input_iter) - except StopIteration: - if len(self.field) != 0 or self.state == IN_QUOTED_FIELD: - if self.dialect.strict: - raise Error('unexpected end of data') - self.parse_save_field() - if self.fields: - break - raise - - if not isinstance(lineobj, text_type): - typ = type(lineobj) - typ_name = 'bytes' if typ == bytes else typ.__name__ - err_str = ('iterator should return strings, not {0}' - ' (did you open the file in text mode?)') - raise Error(err_str.format(typ_name)) - - self.line_num += 1 - for c in lineobj: - if c == '\0': - raise Error('line contains NULL byte') - self.parse_process_char(c) - - self.parse_process_char('\0') - - if self.state == START_RECORD: - break - - fields = self.fields - self.fields = None - return fields - - next = __next__ - - -_dialect_registry = {} -def register_dialect(name, dialect='excel', **fmtparams): - if not isinstance(name, text_type): - raise TypeError('"name" must be a string') - - dialect = Dialect.extend(dialect, fmtparams) - - try: - Dialect.validate(dialect) - except: - raise TypeError('dialect is invalid') - - assert name not in _dialect_registry - _dialect_registry[name] = dialect - -def unregister_dialect(name): - try: - _dialect_registry.pop(name) - except KeyError: - raise Error('"{name}" not a registered dialect'.format(name=name)) - -def get_dialect(name): - try: - return _dialect_registry[name] - except KeyError: - raise Error('Could not find dialect {0}'.format(name)) - -def list_dialects(): - return list(_dialect_registry) - - -class Dialect(object): - """Describe a CSV dialect. - This must be subclassed (see csv.excel). Valid attributes are: - delimiter, quotechar, escapechar, doublequote, skipinitialspace, - lineterminator, quoting, strict. 
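# A hedged usage sketch for the writer/reader pair above; the import path
# assumes the same unicode-first API now comes from the backports.csv
# package on PyPI rather than this vendored copy.
from io import StringIO
from backports.csv import reader, writer

buf = StringIO()
w = writer(buf)   # default excel dialect: ',' delimiter, minimal quoting
w.writerow([u'plain', u'has,comma', u'has"quote'])
# QuoteMinimalStrategy quotes only fields containing special characters,
# and doublequote=True escapes an embedded quotechar by doubling it.
assert buf.getvalue() == u'plain,"has,comma","has""quote"\r\n'

# the character-level state machine above reverses this, undoing the
# doubled quotes and the field quoting
rows = list(reader(StringIO(buf.getvalue())))
assert rows == [[u'plain', u'has,comma', u'has"quote']]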
- """ - _name = "" - _valid = False - # placeholders - delimiter = None - quotechar = None - escapechar = None - doublequote = None - skipinitialspace = None - lineterminator = None - quoting = None - strict = None - - def __init__(self): - self.validate(self) - if self.__class__ != Dialect: - self._valid = True - - @classmethod - def validate(cls, dialect): - dialect = cls.extend(dialect) - - if not isinstance(dialect.quoting, int): - raise Error('"quoting" must be an integer') - - if dialect.delimiter is None: - raise Error('delimiter must be set') - cls.validate_text(dialect, 'delimiter') - - if dialect.lineterminator is None: - raise Error('lineterminator must be set') - if not isinstance(dialect.lineterminator, text_type): - raise Error('"lineterminator" must be a string') - - if dialect.quoting not in [ - QUOTE_NONE, QUOTE_MINIMAL, QUOTE_NONNUMERIC, QUOTE_ALL]: - raise Error('Invalid quoting specified') - - if dialect.quoting != QUOTE_NONE: - if dialect.quotechar is None and dialect.escapechar is None: - raise Error('quotechar must be set if quoting enabled') - if dialect.quotechar is not None: - cls.validate_text(dialect, 'quotechar') - - @staticmethod - def validate_text(dialect, attr): - val = getattr(dialect, attr) - if not isinstance(val, text_type): - if type(val) == bytes: - raise Error('"{0}" must be string, not bytes'.format(attr)) - raise Error('"{0}" must be string, not {1}'.format( - attr, type(val).__name__)) - - if len(val) != 1: - raise Error('"{0}" must be a 1-character string'.format(attr)) - - @staticmethod - def defaults(): - return { - 'delimiter': ',', - 'doublequote': True, - 'escapechar': None, - 'lineterminator': '\r\n', - 'quotechar': '"', - 'quoting': QUOTE_MINIMAL, - 'skipinitialspace': False, - 'strict': False, - } - - @classmethod - def extend(cls, dialect, fmtparams=None): - if isinstance(dialect, string_types): - dialect = get_dialect(dialect) - - if fmtparams is None: - return dialect - - defaults = cls.defaults() - - if any(param not in defaults for param in fmtparams): - raise TypeError('Invalid fmtparam') - - specified = dict( - (attr, getattr(dialect, attr, None)) - for attr in cls.defaults() - ) - - specified.update(fmtparams) - return type(str('ExtendedDialect'), (cls,), specified) - - @classmethod - def combine(cls, dialect, fmtparams): - """Create a new dialect with defaults and added parameters.""" - dialect = cls.extend(dialect, fmtparams) - defaults = cls.defaults() - specified = dict( - (attr, getattr(dialect, attr, None)) - for attr in defaults - if getattr(dialect, attr, None) is not None or - attr in ['quotechar', 'delimiter', 'lineterminator', 'quoting'] - ) - - defaults.update(specified) - dialect = type(str('CombinedDialect'), (cls,), defaults) - cls.validate(dialect) - return dialect() - - def __delattr__(self, attr): - if self._valid: - raise AttributeError('dialect is immutable.') - super(Dialect, self).__delattr__(attr) - - def __setattr__(self, attr, value): - if self._valid: - raise AttributeError('dialect is immutable.') - super(Dialect, self).__setattr__(attr, value) - - -class excel(Dialect): - """Describe the usual properties of Excel-generated CSV files.""" - delimiter = ',' - quotechar = '"' - doublequote = True - skipinitialspace = False - lineterminator = '\r\n' - quoting = QUOTE_MINIMAL -register_dialect("excel", excel) - -class excel_tab(excel): - """Describe the usual properties of Excel-generated TAB-delimited files.""" - delimiter = '\t' -register_dialect("excel-tab", excel_tab) - -class unix_dialect(Dialect): - 
"""Describe the usual properties of Unix-generated CSV files.""" - delimiter = ',' - quotechar = '"' - doublequote = True - skipinitialspace = False - lineterminator = '\n' - quoting = QUOTE_ALL -register_dialect("unix", unix_dialect) - - -class DictReader(object): - def __init__(self, f, fieldnames=None, restkey=None, restval=None, - dialect="excel", *args, **kwds): - self._fieldnames = fieldnames # list of keys for the dict - self.restkey = restkey # key to catch long rows - self.restval = restval # default value for short rows - self.reader = reader(f, dialect, *args, **kwds) - self.dialect = dialect - self.line_num = 0 - - def __iter__(self): - return self - - @property - def fieldnames(self): - if self._fieldnames is None: - try: - self._fieldnames = next(self.reader) - except StopIteration: - pass - self.line_num = self.reader.line_num - return self._fieldnames - - @fieldnames.setter - def fieldnames(self, value): - self._fieldnames = value - - def __next__(self): - if self.line_num == 0: - # Used only for its side effect. - self.fieldnames - row = next(self.reader) - self.line_num = self.reader.line_num - - # unlike the basic reader, we prefer not to return blanks, - # because we will typically wind up with a dict full of None - # values - while row == []: - row = next(self.reader) - d = dict(zip(self.fieldnames, row)) - lf = len(self.fieldnames) - lr = len(row) - if lf < lr: - d[self.restkey] = row[lf:] - elif lf > lr: - for key in self.fieldnames[lr:]: - d[key] = self.restval - return d - - next = __next__ - - -class DictWriter(object): - def __init__(self, f, fieldnames, restval="", extrasaction="raise", - dialect="excel", *args, **kwds): - self.fieldnames = fieldnames # list of keys for the dict - self.restval = restval # for writing short dicts - if extrasaction.lower() not in ("raise", "ignore"): - raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'" - % extrasaction) - self.extrasaction = extrasaction - self.writer = writer(f, dialect, *args, **kwds) - - def writeheader(self): - header = dict(zip(self.fieldnames, self.fieldnames)) - self.writerow(header) - - def _dict_to_list(self, rowdict): - if self.extrasaction == "raise": - wrong_fields = [k for k in rowdict if k not in self.fieldnames] - if wrong_fields: - raise ValueError("dict contains fields not in fieldnames: " - + ", ".join([repr(x) for x in wrong_fields])) - return (rowdict.get(key, self.restval) for key in self.fieldnames) - - def writerow(self, rowdict): - return self.writer.writerow(self._dict_to_list(rowdict)) - - def writerows(self, rowdicts): - return self.writer.writerows(map(self._dict_to_list, rowdicts)) - -# Guard Sniffer's type checking against builds that exclude complex() -try: - complex -except NameError: - complex = float - -class Sniffer(object): - ''' - "Sniffs" the format of a CSV file (i.e. delimiter, quotechar) - Returns a Dialect object. 
- ''' - def __init__(self): - # in case there is more than one possible delimiter - self.preferred = [',', '\t', ';', ' ', ':'] - - - def sniff(self, sample, delimiters=None): - """ - Returns a dialect (or None) corresponding to the sample - """ - - quotechar, doublequote, delimiter, skipinitialspace = \ - self._guess_quote_and_delimiter(sample, delimiters) - if not delimiter: - delimiter, skipinitialspace = self._guess_delimiter(sample, - delimiters) - - if not delimiter: - raise Error("Could not determine delimiter") - - class dialect(Dialect): - _name = "sniffed" - lineterminator = '\r\n' - quoting = QUOTE_MINIMAL - # escapechar = '' - - dialect.doublequote = doublequote - dialect.delimiter = delimiter - # _csv.reader won't accept a quotechar of '' - dialect.quotechar = quotechar or '"' - dialect.skipinitialspace = skipinitialspace - - return dialect - - - def _guess_quote_and_delimiter(self, data, delimiters): - """ - Looks for text enclosed between two identical quotes - (the probable quotechar) which are preceded and followed - by the same character (the probable delimiter). - For example: - ,'some text', - The quote with the most wins, same with the delimiter. - If there is no quotechar the delimiter can't be determined - this way. - """ - - matches = [] - for restr in ('(?P[^\w\n"\'])(?P ?)(?P["\']).*?(?P=quote)(?P=delim)', # ,".*?", - '(?:^|\n)(?P["\']).*?(?P=quote)(?P[^\w\n"\'])(?P ?)', # ".*?", - '(?P>[^\w\n"\'])(?P ?)(?P["\']).*?(?P=quote)(?:$|\n)', # ,".*?" - '(?:^|\n)(?P["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space) - regexp = re.compile(restr, re.DOTALL | re.MULTILINE) - matches = regexp.findall(data) - if matches: - break - - if not matches: - # (quotechar, doublequote, delimiter, skipinitialspace) - return ('', False, None, 0) - quotes = {} - delims = {} - spaces = 0 - groupindex = regexp.groupindex - for m in matches: - n = groupindex['quote'] - 1 - key = m[n] - if key: - quotes[key] = quotes.get(key, 0) + 1 - try: - n = groupindex['delim'] - 1 - key = m[n] - except KeyError: - continue - if key and (delimiters is None or key in delimiters): - delims[key] = delims.get(key, 0) + 1 - try: - n = groupindex['space'] - 1 - except KeyError: - continue - if m[n]: - spaces += 1 - - quotechar = max(quotes, key=quotes.get) - - if delims: - delim = max(delims, key=delims.get) - skipinitialspace = delims[delim] == spaces - if delim == '\n': # most likely a file with a single column - delim = '' - else: - # there is *no* delimiter, it's a single column of quoted data - delim = '' - skipinitialspace = 0 - - # if we see an extra quote between delimiters, we've got a - # double quoted format - dq_regexp = re.compile( - r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \ - {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE) - - - - if dq_regexp.search(data): - doublequote = True - else: - doublequote = False - - return (quotechar, doublequote, delim, skipinitialspace) - - - def _guess_delimiter(self, data, delimiters): - """ - The delimiter /should/ occur the same number of times on - each row. However, due to malformed data, it may not. We don't want - an all or nothing approach, so we allow for small variations in this - number. - 1) build a table of the frequency of each character on every line. - 2) build a table of frequencies of this frequency (meta-frequency?), - e.g. 
'x occurred 5 times in 10 rows, 6 times in 1000 rows, - 7 times in 2 rows' - 3) use the mode of the meta-frequency to determine the /expected/ - frequency for that character - 4) find out how often the character actually meets that goal - 5) the character that best meets its goal is the delimiter - For performance reasons, the data is evaluated in chunks, so it can - try and evaluate the smallest portion of the data possible, evaluating - additional chunks as necessary. - """ - - data = list(filter(None, data.split('\n'))) - - ascii = [unichr(c) for c in range(127)] # 7-bit ASCII - - # build frequency tables - chunkLength = min(10, len(data)) - iteration = 0 - charFrequency = {} - modes = {} - delims = {} - start, end = 0, min(chunkLength, len(data)) - while start < len(data): - iteration += 1 - for line in data[start:end]: - for char in ascii: - metaFrequency = charFrequency.get(char, {}) - # must count even if frequency is 0 - freq = line.count(char) - # value is the mode - metaFrequency[freq] = metaFrequency.get(freq, 0) + 1 - charFrequency[char] = metaFrequency - - for char in charFrequency.keys(): - items = list(charFrequency[char].items()) - if len(items) == 1 and items[0][0] == 0: - continue - # get the mode of the frequencies - if len(items) > 1: - modes[char] = max(items, key=lambda x: x[1]) - # adjust the mode - subtract the sum of all - # other frequencies - items.remove(modes[char]) - modes[char] = (modes[char][0], modes[char][1] - - sum(item[1] for item in items)) - else: - modes[char] = items[0] - - # build a list of possible delimiters - modeList = modes.items() - total = float(chunkLength * iteration) - # (rows of consistent data) / (number of rows) = 100% - consistency = 1.0 - # minimum consistency threshold - threshold = 0.9 - while len(delims) == 0 and consistency >= threshold: - for k, v in modeList: - if v[0] > 0 and v[1] > 0: - if ((v[1]/total) >= consistency and - (delimiters is None or k in delimiters)): - delims[k] = v - consistency -= 0.01 - - if len(delims) == 1: - delim = list(delims.keys())[0] - skipinitialspace = (data[0].count(delim) == - data[0].count("%c " % delim)) - return (delim, skipinitialspace) - - # analyze another chunkLength lines - start = end - end += chunkLength - - if not delims: - return ('', 0) - - # if there's more than one, fall back to a 'preferred' list - if len(delims) > 1: - for d in self.preferred: - if d in delims.keys(): - skipinitialspace = (data[0].count(d) == - data[0].count("%c " % d)) - return (d, skipinitialspace) - - # nothing else indicates a preference, pick the character that - # dominates(?) - items = [(v,k) for (k,v) in delims.items()] - items.sort() - delim = items[-1][1] - - skipinitialspace = (data[0].count(delim) == - data[0].count("%c " % delim)) - return (delim, skipinitialspace) - - - def has_header(self, sample): - # Creates a dictionary of types of data in each column. If any - # column is of a single type (say, integers), *except* for the first - # row, then the first row is presumed to be labels. If the type - # can't be determined, it is assumed to be a string in which case - # the length of the string is the determining factor: if all of the - # rows except for the first are the same length, it's a header. - # Finally, a 'vote' is taken at the end for each column, adding or - # subtracting from the likelihood of the first row being a header. 
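# A hedged usage sketch for Sniffer above (same API as Python 3's
# csv.Sniffer; import path assumes the PyPI backports.csv package):
# sniff() guesses the dialect from a sample, and has_header() applies the
# column-type/length voting described in the comment above.
from backports.csv import Sniffer

sample = u'name;year\r\nJellyPy;2021\r\nTautulli;2015\r\n'
sniffer = Sniffer()
# ';' is the only character whose per-line count is perfectly consistent
assert sniffer.sniff(sample).delimiter == u';'
# 'year' fails the int() cast that every other row's second column passes,
# so the first row is voted to be a header
assert sniffer.has_header(sample) is True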
- - rdr = reader(StringIO(sample), self.sniff(sample)) - - header = next(rdr) # assume first row is header - - columns = len(header) - columnTypes = {} - for i in range(columns): columnTypes[i] = None - - checked = 0 - for row in rdr: - # arbitrary number of rows to check, to keep it sane - if checked > 20: - break - checked += 1 - - if len(row) != columns: - continue # skip rows that have irregular number of columns - - for col in list(columnTypes.keys()): - - for thisType in [int, float, complex]: - try: - thisType(row[col]) - break - except (ValueError, OverflowError): - pass - else: - # fallback to length of string - thisType = len(row[col]) - - if thisType != columnTypes[col]: - if columnTypes[col] is None: # add new column type - columnTypes[col] = thisType - else: - # type is inconsistent, remove column from - # consideration - del columnTypes[col] - - # finally, compare results against first row and "vote" - # on whether it's a header - hasHeader = 0 - for col, colType in columnTypes.items(): - if type(colType) == type(0): # it's a length - if len(header[col]) != colType: - hasHeader += 1 - else: - hasHeader -= 1 - else: # attempt typecast - try: - colType(header[col]) - except (ValueError, TypeError): - hasHeader += 1 - else: - hasHeader -= 1 - - return hasHeader > 0 diff --git a/lib/backports/functools_lru_cache.py b/lib/backports/functools_lru_cache.py deleted file mode 100644 index e0b19d95..00000000 --- a/lib/backports/functools_lru_cache.py +++ /dev/null @@ -1,196 +0,0 @@ -from __future__ import absolute_import - -import functools -from collections import namedtuple -from threading import RLock - -_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) - - -@functools.wraps(functools.update_wrapper) -def update_wrapper( - wrapper, - wrapped, - assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES, -): - """ - Patch two bugs in functools.update_wrapper. - """ - # workaround for http://bugs.python.org/issue3445 - assigned = tuple(attr for attr in assigned if hasattr(wrapped, attr)) - wrapper = functools.update_wrapper(wrapper, wrapped, assigned, updated) - # workaround for https://bugs.python.org/issue17482 - wrapper.__wrapped__ = wrapped - return wrapper - - -class _HashedSeq(list): - __slots__ = 'hashvalue' - - def __init__(self, tup, hash=hash): - self[:] = tup - self.hashvalue = hash(tup) - - def __hash__(self): - return self.hashvalue - - -def _make_key( - args, - kwds, - typed, - kwd_mark=(object(),), - fasttypes=set([int, str, frozenset, type(None)]), - sorted=sorted, - tuple=tuple, - type=type, - len=len, -): - 'Make a cache key from optionally typed positional and keyword arguments' - key = args - if kwds: - sorted_items = sorted(kwds.items()) - key += kwd_mark - for item in sorted_items: - key += item - if typed: - key += tuple(type(v) for v in args) - if kwds: - key += tuple(type(v) for k, v in sorted_items) - elif len(key) == 1 and type(key[0]) in fasttypes: - return key[0] - return _HashedSeq(key) - - -def lru_cache(maxsize=100, typed=False): - """Least-recently-used cache decorator. - - If *maxsize* is set to None, the LRU features are disabled and the cache - can grow without bound. - - If *typed* is True, arguments of different types will be cached separately. - For example, f(3.0) and f(3) will be treated as distinct calls with - distinct results. - - Arguments to the cached function must be hashable. - - View the cache statistics named tuple (hits, misses, maxsize, currsize) with - f.cache_info(). 
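# A sketch of the cache keys produced by the module's own private _make_key
# helper above, shown only to illustrate the key layout.
assert _make_key((1,), {}, False) == 1        # lone fast-type arg: the arg itself
typed_key = _make_key((1, 2), {}, True)
assert list(typed_key) == [1, 2, int, int]    # typed=True appends argument types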
Clear the cache and statistics with f.cache_clear(). - Access the underlying function with f.__wrapped__. - - See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used - - """ - - # Users should only access the lru_cache through its public API: - # cache_info, cache_clear, and f.__wrapped__ - # The internals of the lru_cache are encapsulated for thread safety and - # to allow the implementation to change (including a possible C version). - - def decorating_function(user_function): - - cache = dict() - stats = [0, 0] # make statistics updateable non-locally - HITS, MISSES = 0, 1 # names for the stats fields - make_key = _make_key - cache_get = cache.get # bound method to lookup key or return None - _len = len # localize the global len() function - lock = RLock() # because linkedlist updates aren't threadsafe - root = [] # root of the circular doubly linked list - root[:] = [root, root, None, None] # initialize by pointing to self - nonlocal_root = [root] # make updateable non-locally - PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields - - if maxsize == 0: - - def wrapper(*args, **kwds): - # no caching, just do a statistics update after a successful call - result = user_function(*args, **kwds) - stats[MISSES] += 1 - return result - - elif maxsize is None: - - def wrapper(*args, **kwds): - # simple caching without ordering or size limit - key = make_key(args, kwds, typed) - result = cache_get( - key, root - ) # root used here as a unique not-found sentinel - if result is not root: - stats[HITS] += 1 - return result - result = user_function(*args, **kwds) - cache[key] = result - stats[MISSES] += 1 - return result - - else: - - def wrapper(*args, **kwds): - # size limited caching that tracks accesses by recency - key = make_key(args, kwds, typed) if kwds or typed else args - with lock: - link = cache_get(key) - if link is not None: - # record recent use of the key by moving it - # to the front of the list - root, = nonlocal_root - link_prev, link_next, key, result = link - link_prev[NEXT] = link_next - link_next[PREV] = link_prev - last = root[PREV] - last[NEXT] = root[PREV] = link - link[PREV] = last - link[NEXT] = root - stats[HITS] += 1 - return result - result = user_function(*args, **kwds) - with lock: - root, = nonlocal_root - if key in cache: - # getting here means that this same key was added to the - # cache while the lock was released. since the link - # update is already done, we need only return the - # computed result and update the count of misses. 
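# A hedged usage sketch of this backported lru_cache; its public API mirrors
# Python 3's functools.lru_cache.
@lru_cache(maxsize=32)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(10)
hits, misses, maxsize, currsize = fib.cache_info()
assert hits > 0 and currsize <= 32   # repeated subproblems hit the cache
fib.cache_clear()
assert fib.cache_info().currsize == 0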
- pass - elif _len(cache) >= maxsize: - # use the old root to store the new key and result - oldroot = root - oldroot[KEY] = key - oldroot[RESULT] = result - # empty the oldest link and make it the new root - root = nonlocal_root[0] = oldroot[NEXT] - oldkey = root[KEY] - root[KEY] = root[RESULT] = None - # now update the cache dictionary for the new links - del cache[oldkey] - cache[key] = oldroot - else: - # put result in a new link at the front of the list - last = root[PREV] - link = [last, root, key, result] - last[NEXT] = root[PREV] = cache[key] = link - stats[MISSES] += 1 - return result - - def cache_info(): - """Report cache statistics""" - with lock: - return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache)) - - def cache_clear(): - """Clear the cache and cache statistics""" - with lock: - cache.clear() - root = nonlocal_root[0] - root[:] = [root, root, None, None] - stats[:] = [0, 0] - - wrapper.__wrapped__ = user_function - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear - return update_wrapper(wrapper, user_function) - - return decorating_function diff --git a/lib/bleach/__init__.py b/lib/bleach/__init__.py deleted file mode 100644 index aec2d340..00000000 --- a/lib/bleach/__init__.py +++ /dev/null @@ -1,401 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import unicode_literals -import logging -import re - -import html5lib -from html5lib.sanitizer import HTMLSanitizer -from html5lib.serializer.htmlserializer import HTMLSerializer - -from . import callbacks as linkify_callbacks -from .encoding import force_unicode -from .sanitizer import BleachSanitizer - - -VERSION = (1, 4, 2) -__version__ = '.'.join([str(n) for n in VERSION]) - -__all__ = ['clean', 'linkify'] - -log = logging.getLogger('bleach') - -ALLOWED_TAGS = [ - 'a', - 'abbr', - 'acronym', - 'b', - 'blockquote', - 'code', - 'em', - 'i', - 'li', - 'ol', - 'strong', - 'ul', -] - -ALLOWED_ATTRIBUTES = { - 'a': ['href', 'title'], - 'abbr': ['title'], - 'acronym': ['title'], -} - -ALLOWED_STYLES = [] - -ALLOWED_PROTOCOLS = ['http', 'https', 'mailto'] - -TLDS = """ac ad ae aero af ag ai al am an ao aq ar arpa as asia at au aw ax az - ba bb bd be bf bg bh bi biz bj bm bn bo br bs bt bv bw by bz ca cat - cc cd cf cg ch ci ck cl cm cn co com coop cr cu cv cx cy cz de dj dk - dm do dz ec edu ee eg er es et eu fi fj fk fm fo fr ga gb gd ge gf gg - gh gi gl gm gn gov gp gq gr gs gt gu gw gy hk hm hn hr ht hu id ie il - im in info int io iq ir is it je jm jo jobs jp ke kg kh ki km kn kp - kr kw ky kz la lb lc li lk lr ls lt lu lv ly ma mc md me mg mh mil mk - ml mm mn mo mobi mp mq mr ms mt mu museum mv mw mx my mz na name nc ne - net nf ng ni nl no np nr nu nz om org pa pe pf pg ph pk pl pm pn post - pr pro ps pt pw py qa re ro rs ru rw sa sb sc sd se sg sh si sj sk sl - sm sn so sr ss st su sv sx sy sz tc td tel tf tg th tj tk tl tm tn to - tp tr travel tt tv tw tz ua ug uk us uy uz va vc ve vg vi vn vu wf ws - xn xxx ye yt yu za zm zw""".split() - -# Make sure that .com doesn't get matched by .co first -TLDS.reverse() - -PROTOCOLS = HTMLSanitizer.acceptable_protocols - -url_re = re.compile( - r"""\(* # Match any opening parentheses. - \b(?"]*)? - # /path/zz (excluding "unsafe" chars from RFC 1738, - # except for # and ~, which happen in practice) - """.format('|'.join(PROTOCOLS), '|'.join(TLDS)), - re.IGNORECASE | re.VERBOSE | re.UNICODE) - -proto_re = re.compile(r'^[\w-]+:/{0,3}', re.IGNORECASE) - -punct_re = re.compile(r'([\.,]+)$') - -email_re = re.compile( - r"""(? 
tag replaced by the text within it - adj = replace_nodes(tree, _text, node, - current_child) - current_child -= 1 - # pull back current_child by 1 to scan the - # new nodes again. - else: - text = force_unicode(attrs.pop('_text')) - for attr_key, attr_val in attrs.items(): - node.set(attr_key, attr_val) - - for n in reversed(list(node)): - node.remove(n) - text = parser.parseFragment(text) - node.text = text.text - for n in text: - node.append(n) - _seen.add(node) - - elif current_child >= 0: - if node.tag == ETREE_TAG('pre') and skip_pre: - linkify_nodes(node, False) - elif not (node in _seen): - linkify_nodes(node, True) - - current_child += 1 - - def email_repl(match): - addr = match.group(0).replace('"', '"') - link = { - '_text': addr, - 'href': 'mailto:{0!s}'.format(addr), - } - link = apply_callbacks(link, True) - - if link is None: - return addr - - _href = link.pop('href') - _text = link.pop('_text') - - repl = '{2!s}' - attr = '{0!s}="{1!s}"' - attribs = ' '.join(attr.format(k, v) for k, v in link.items()) - return repl.format(_href, attribs, _text) - - def link_repl(match): - url = match.group(0) - open_brackets = close_brackets = 0 - if url.startswith('('): - _wrapping = strip_wrapping_parentheses(url) - url, open_brackets, close_brackets = _wrapping - end = '' - m = re.search(punct_re, url) - if m: - end = m.group(0) - url = url[0:m.start()] - if re.search(proto_re, url): - href = url - else: - href = ''.join(['http://', url]) - - link = { - '_text': url, - 'href': href, - } - - link = apply_callbacks(link, True) - - if link is None: - return '(' * open_brackets + url + ')' * close_brackets - - _text = link.pop('_text') - _href = link.pop('href') - - repl = '{0!s}{3!s}{4!s}{5!s}' - attr = '{0!s}="{1!s}"' - attribs = ' '.join(attr.format(k, v) for k, v in link.items()) - - return repl.format('(' * open_brackets, - _href, attribs, _text, end, - ')' * close_brackets) - - try: - linkify_nodes(forest) - except RuntimeError as e: - # If we hit the max recursion depth, just return what we've got. - log.exception('Probable recursion error: {0!r}'.format(e)) - - return _render(forest) - - -def _render(tree): - """Try rendering as HTML, then XML, then give up.""" - return force_unicode(_serialize(tree)) - - -def _serialize(domtree): - walker = html5lib.treewalkers.getTreeWalker('etree') - stream = walker(domtree) - serializer = HTMLSerializer(quote_attr_values=True, - alphabetical_attributes=True, - omit_optional_tags=False) - return serializer.render(stream) diff --git a/lib/bleach/callbacks.py b/lib/bleach/callbacks.py deleted file mode 100644 index 3cb82c25..00000000 --- a/lib/bleach/callbacks.py +++ /dev/null @@ -1,20 +0,0 @@ -"""A set of basic callbacks for bleach.linkify.""" -from __future__ import unicode_literals - - -def nofollow(attrs, new=False): - if attrs['href'].startswith('mailto:'): - return attrs - rel = [x for x in attrs.get('rel', '').split(' ') if x] - if 'nofollow' not in [x.lower() for x in rel]: - rel.append('nofollow') - attrs['rel'] = ' '.join(rel) - - return attrs - - -def target_blank(attrs, new=False): - if attrs['href'].startswith('mailto:'): - return attrs - attrs['target'] = '_blank' - return attrs diff --git a/lib/bleach/encoding.py b/lib/bleach/encoding.py deleted file mode 100644 index 707adaa2..00000000 --- a/lib/bleach/encoding.py +++ /dev/null @@ -1,62 +0,0 @@ -import datetime -from decimal import Decimal -import types -import six - - -def is_protected_type(obj): - """Determine if the object instance is of a protected type. 
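# A hedged usage sketch of linkify() and the two stock callbacks above
# (this is the vendored bleach 1.4.2; the PyPI bleach package provides the
# same entry points). Every discovered link's attrs dict is passed through
# each callback, which may edit the attrs or return None to veto the link.
import bleach
from bleach.callbacks import nofollow, target_blank

out = bleach.linkify(u'see example.com', callbacks=[nofollow, target_blank])
assert u'rel="nofollow"' in out and u'target="_blank"' in out
assert u'href="http://example.com"' in out   # bare domains get http:// prefixed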
- - Objects of protected types are preserved as-is when passed to - force_unicode(strings_only=True). - """ - return isinstance(obj, ( - six.integer_types + - (types.NoneType, - datetime.datetime, datetime.date, datetime.time, - float, Decimal)) - ) - - -def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): - """ - Similar to smart_text, except that lazy instances are resolved to - strings, rather than kept as lazy objects. - - If strings_only is True, don't convert (some) non-string-like objects. - """ - # Handle the common case first, saves 30-40% when s is an instance of - # six.text_type. This function gets called often in that setting. - if isinstance(s, six.text_type): - return s - if strings_only and is_protected_type(s): - return s - try: - if not isinstance(s, six.string_types): - if hasattr(s, '__unicode__'): - s = s.__unicode__() - else: - if six.PY3: - if isinstance(s, bytes): - s = six.text_type(s, encoding, errors) - else: - s = six.text_type(s) - else: - s = six.text_type(bytes(s), encoding, errors) - else: - # Note: We use .decode() here, instead of six.text_type(s, - # encoding, errors), so that if s is a SafeBytes, it ends up being - # a SafeText at the end. - s = s.decode(encoding, errors) - except UnicodeDecodeError as e: - if not isinstance(s, Exception): - raise UnicodeDecodeError(*e.args) - else: - # If we get to here, the caller has passed in an Exception - # subclass populated with non-ASCII bytestring data without a - # working unicode method. Try to handle this without raising a - # further exception by individually forcing the exception args - # to unicode. - s = ' '.join([force_unicode(arg, encoding, strings_only, - errors) for arg in s]) - return s diff --git a/lib/bleach/sanitizer.py b/lib/bleach/sanitizer.py deleted file mode 100644 index 3a1f953c..00000000 --- a/lib/bleach/sanitizer.py +++ /dev/null @@ -1,148 +0,0 @@ -from __future__ import unicode_literals -import re -from xml.sax.saxutils import escape, unescape - -from html5lib.constants import tokenTypes -from html5lib.sanitizer import HTMLSanitizerMixin -from html5lib.tokenizer import HTMLTokenizer - - -PROTOS = HTMLSanitizerMixin.acceptable_protocols -PROTOS.remove('feed') - - -class BleachSanitizerMixin(HTMLSanitizerMixin): - """Mixin to replace sanitize_token() and sanitize_css().""" - - allowed_svg_properties = [] - - def sanitize_token(self, token): - """Sanitize a token either by HTML-encoding or dropping. - - Unlike HTMLSanitizerMixin.sanitize_token, allowed_attributes can be - a dict of {'tag': ['attribute', 'pairs'], 'tag': callable}. - - Here callable is a function with two arguments of attribute name - and value. It should return true of false. - - Also gives the option to strip tags instead of encoding. 
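# A sketch of force_unicode() above: text passes through untouched, and
# bytes are decoded with the given encoding (UTF-8 by default).
from bleach.encoding import force_unicode

assert force_unicode(u'd\xe9j\xe0 vu') == u'd\xe9j\xe0 vu'
assert force_unicode(b'caf\xc3\xa9') == u'caf\xe9'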
- - """ - if (getattr(self, 'wildcard_attributes', None) is None and - isinstance(self.allowed_attributes, dict)): - self.wildcard_attributes = self.allowed_attributes.get('*', []) - - if token['type'] in (tokenTypes['StartTag'], tokenTypes['EndTag'], - tokenTypes['EmptyTag']): - if token['name'] in self.allowed_elements: - if 'data' in token: - if isinstance(self.allowed_attributes, dict): - allowed_attributes = self.allowed_attributes.get( - token['name'], []) - #print callable(allowed_attributes) - if not callable(allowed_attributes): - allowed_attributes += self.wildcard_attributes - else: - allowed_attributes = self.allowed_attributes - attrs = dict([(name, val) for name, val in - token['data'][::-1] - if (allowed_attributes(name, val) - if callable(allowed_attributes) - else name in allowed_attributes)]) - for attr in self.attr_val_is_uri: - if attr not in attrs: - continue - val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '', - unescape(attrs[attr])).lower() - # Remove replacement characters from unescaped - # characters. - val_unescaped = val_unescaped.replace("\ufffd", "") - if (re.match(r'^[a-z0-9][-+.a-z0-9]*:', val_unescaped) - and (val_unescaped.split(':')[0] not in - self.allowed_protocols)): - del attrs[attr] - for attr in self.svg_attr_val_allows_ref: - if attr in attrs: - attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)', - ' ', - unescape(attrs[attr])) - if (token['name'] in self.svg_allow_local_href and - 'xlink:href' in attrs and - re.search(r'^\s*[^#\s].*', attrs['xlink:href'])): - del attrs['xlink:href'] - if 'style' in attrs: - attrs['style'] = self.sanitize_css(attrs['style']) - token['data'] = [(name, val) for name, val in - attrs.items()] - return token - elif self.strip_disallowed_elements: - pass - else: - if token['type'] == tokenTypes['EndTag']: - token['data'] = ''.format(token['name']) - elif token['data']: - attr = ' {0!s}="{1!s}"' - attrs = ''.join([attr.format(k, escape(v)) for k, v in - token['data']]) - token['data'] = '<{0!s}{1!s}>'.format(token['name'], attrs) - else: - token['data'] = '<{0!s}>'.format(token['name']) - if token['selfClosing']: - token['data'] = token['data'][:-1] + '/>' - token['type'] = tokenTypes['Characters'] - del token["name"] - return token - elif token['type'] == tokenTypes['Comment']: - if not self.strip_html_comments: - return token - else: - return token - - def sanitize_css(self, style): - """HTMLSanitizerMixin.sanitize_css replacement. - - HTMLSanitizerMixin.sanitize_css always whitelists background-*, - border-*, margin-*, and padding-*. We only whitelist what's in - the whitelist. - - """ - # disallow urls - style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) - - # gauntlet - # TODO: Make sure this does what it's meant to - I *think* it wants to - # validate style attribute contents. 
- parts = style.split(';') - gauntlet = re.compile("""^([-/:,#%.'"\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'""" - """\s*|"[\s\w]+"|\([\d,%\.\s]+\))*$""") - for part in parts: - if not gauntlet.match(part): - return '' - - if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): - return '' - - clean = [] - for prop, value in re.findall('([-\w]+)\s*:\s*([^:;]*)', style): - if not value: - continue - if prop.lower() in self.allowed_css_properties: - clean.append(prop + ': ' + value + ';') - elif prop.lower() in self.allowed_svg_properties: - clean.append(prop + ': ' + value + ';') - - return ' '.join(clean) - - -class BleachSanitizer(HTMLTokenizer, BleachSanitizerMixin): - def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True, - lowercaseElementName=True, lowercaseAttrName=True, **kwargs): - HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet, - lowercaseElementName, lowercaseAttrName, - **kwargs) - - def __iter__(self): - for token in HTMLTokenizer.__iter__(self): - token = self.sanitize_token(token) - if token: - yield token diff --git a/lib/bs4/__init__.py b/lib/bs4/__init__.py deleted file mode 100644 index fcc27457..00000000 --- a/lib/bs4/__init__.py +++ /dev/null @@ -1,655 +0,0 @@ -"""Beautiful Soup -Elixir and Tonic -"The Screen-Scraper's Friend" -http://www.crummy.com/software/BeautifulSoup/ - -Beautiful Soup uses a pluggable XML or HTML parser to parse a -(possibly invalid) document into a tree representation. Beautiful Soup -provides methods and Pythonic idioms that make it easy to navigate, -search, and modify the parse tree. - -Beautiful Soup works with Python 2.7 and up. It works better if lxml -and/or html5lib is installed. - -For more than you ever wanted to know about Beautiful Soup, see the -documentation: -http://www.crummy.com/software/BeautifulSoup/bs4/doc/ - -""" - -__author__ = "Leonard Richardson (leonardr@segfault.org)" -__version__ = "4.8.1" -__copyright__ = "Copyright (c) 2004-2019 Leonard Richardson" -# Use of this source code is governed by the MIT license. -__license__ = "MIT" - -__all__ = ['BeautifulSoup'] - -import os -import re -import sys -import traceback -import warnings - -from .builder import builder_registry, ParserRejectedMarkup -from .dammit import UnicodeDammit -from .element import ( - CData, - Comment, - DEFAULT_OUTPUT_ENCODING, - Declaration, - Doctype, - NavigableString, - PageElement, - ProcessingInstruction, - ResultSet, - SoupStrainer, - Tag, - ) - -# The very first thing we do is give a useful error if someone is -# running this code under Python 3 without converting it. -'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'!='You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).' - -class BeautifulSoup(Tag): - """ - This class defines the basic interface called by the tree builders. - - These methods will be called by the parser: - reset() - feed(markup) - - The tree builder may call these methods from its feed() implementation: - handle_starttag(name, attrs) # See note about return value - handle_endtag(name) - handle_data(data) # Appends to the current data node - endData(containerClass) # Ends the current data node - - No matter how complicated the underlying parser is, you should be - able to build a tree using 'start tag' events, 'end tag' events, - 'data' events, and "done with data" events. - - If you encounter an empty-element tag (aka a self-closing tag, - like HTML's
tag), call handle_starttag and then - handle_endtag. - """ - ROOT_TAG_NAME = '[document]' - - # If the end-user gives no indication which tree builder they - # want, look for one with these features. - DEFAULT_BUILDER_FEATURES = ['html', 'fast'] - - ASCII_SPACES = '\x20\x0a\x09\x0c\x0d' - - NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n" - - def __init__(self, markup="", features=None, builder=None, - parse_only=None, from_encoding=None, exclude_encodings=None, - element_classes=None, **kwargs): - """Constructor. - - :param markup: A string or a file-like object representing - markup to be parsed. - - :param features: Desirable features of the parser to be used. This - may be the name of a specific parser ("lxml", "lxml-xml", - "html.parser", or "html5lib") or it may be the type of markup - to be used ("html", "html5", "xml"). It's recommended that you - name a specific parser, so that Beautiful Soup gives you the - same results across platforms and virtual environments. - - :param builder: A TreeBuilder subclass to instantiate (or - instance to use) instead of looking one up based on - `features`. You only need to use this if you've implemented a - custom TreeBuilder. - - :param parse_only: A SoupStrainer. Only parts of the document - matching the SoupStrainer will be considered. This is useful - when parsing part of a document that would otherwise be too - large to fit into memory. - - :param from_encoding: A string indicating the encoding of the - document to be parsed. Pass this in if Beautiful Soup is - guessing wrongly about the document's encoding. - - :param exclude_encodings: A list of strings indicating - encodings known to be wrong. Pass this in if you don't know - the document's encoding but you know Beautiful Soup's guess is - wrong. - - :param element_classes: A dictionary mapping BeautifulSoup - classes like Tag and NavigableString to other classes you'd - like to be instantiated instead as the parse tree is - built. This is useful for using subclasses to modify the - default behavior of Tag or NavigableString. - - :param kwargs: For backwards compatibility purposes, the - constructor accepts certain keyword arguments used in - Beautiful Soup 3. None of these arguments do anything in - Beautiful Soup 4; they will result in a warning and then be ignored. - - Apart from this, any keyword arguments passed into the BeautifulSoup - constructor are propagated to the TreeBuilder constructor. This - makes it possible to configure a TreeBuilder beyond saying - which one to use. - - """ - - if 'convertEntities' in kwargs: - del kwargs['convertEntities'] - warnings.warn( - "BS4 does not respect the convertEntities argument to the " - "BeautifulSoup constructor. Entities are always converted " - "to Unicode characters.") - - if 'markupMassage' in kwargs: - del kwargs['markupMassage'] - warnings.warn( - "BS4 does not respect the markupMassage argument to the " - "BeautifulSoup constructor. 
The tree builder is responsible " - "for any necessary markup massage.") - - if 'smartQuotesTo' in kwargs: - del kwargs['smartQuotesTo'] - warnings.warn( - "BS4 does not respect the smartQuotesTo argument to the " - "BeautifulSoup constructor. Smart quotes are always converted " - "to Unicode characters.") - - if 'selfClosingTags' in kwargs: - del kwargs['selfClosingTags'] - warnings.warn( - "BS4 does not respect the selfClosingTags argument to the " - "BeautifulSoup constructor. The tree builder is responsible " - "for understanding self-closing tags.") - - if 'isHTML' in kwargs: - del kwargs['isHTML'] - warnings.warn( - "BS4 does not respect the isHTML argument to the " - "BeautifulSoup constructor. Suggest you use " - "features='lxml' for HTML and features='lxml-xml' for " - "XML.") - - def deprecated_argument(old_name, new_name): - if old_name in kwargs: - warnings.warn( - 'The "%s" argument to the BeautifulSoup constructor ' - 'has been renamed to "%s."' % (old_name, new_name)) - value = kwargs[old_name] - del kwargs[old_name] - return value - return None - - parse_only = parse_only or deprecated_argument( - "parseOnlyThese", "parse_only") - - from_encoding = from_encoding or deprecated_argument( - "fromEncoding", "from_encoding") - - if from_encoding and isinstance(markup, str): - warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.") - from_encoding = None - - self.element_classes = element_classes or dict() - - # We need this information to track whether or not the builder - # was specified well enough that we can omit the 'you need to - # specify a parser' warning. - original_builder = builder - original_features = features - - if isinstance(builder, type): - # A builder class was passed in; it needs to be instantiated. - builder_class = builder - builder = None - elif builder is None: - if isinstance(features, str): - features = [features] - if features is None or len(features) == 0: - features = self.DEFAULT_BUILDER_FEATURES - builder_class = builder_registry.lookup(*features) - if builder_class is None: - raise FeatureNotFound( - "Couldn't find a tree builder with the features you " - "requested: %s. Do you need to install a parser library?" - % ",".join(features)) - - # At this point either we have a TreeBuilder instance in - # builder, or we have a builder_class that we can instantiate - # with the remaining **kwargs. - if builder is None: - builder = builder_class(**kwargs) - if not original_builder and not ( - original_features == builder.NAME or - original_features in builder.ALTERNATE_NAMES - ): - if builder.is_xml: - markup_type = "XML" - else: - markup_type = "HTML" - - # This code adapted from warnings.py so that we get the same line - # of code as our warnings.warn() call gets, even if the answer is wrong - # (as it may be in a multithreading situation). - caller = None - try: - caller = sys._getframe(1) - except ValueError: - pass - if caller: - globals = caller.f_globals - line_number = caller.f_lineno - else: - globals = sys.__dict__ - line_number= 1 - filename = globals.get('__file__') - if filename: - fnl = filename.lower() - if fnl.endswith((".pyc", ".pyo")): - filename = filename[:-1] - if filename: - # If there is no filename at all, the user is most likely in a REPL, - # and the warning is not necessary. 
- values = dict( - filename=filename, - line_number=line_number, - parser=builder.NAME, - markup_type=markup_type - ) - warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % values, stacklevel=2) - else: - if kwargs: - warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.") - - self.builder = builder - self.is_xml = builder.is_xml - self.known_xml = self.is_xml - self._namespaces = dict() - self.parse_only = parse_only - - self.builder.initialize_soup(self) - - if hasattr(markup, 'read'): # It's a file-type object. - markup = markup.read() - elif len(markup) <= 256 and ( - (isinstance(markup, bytes) and not b'<' in markup) - or (isinstance(markup, str) and not '<' in markup) - ): - # Print out warnings for a couple beginner problems - # involving passing non-markup to Beautiful Soup. - # Beautiful Soup will still parse the input as markup, - # just in case that's what the user really wants. - if (isinstance(markup, str) - and not os.path.supports_unicode_filenames): - possible_filename = markup.encode("utf8") - else: - possible_filename = markup - is_file = False - try: - is_file = os.path.exists(possible_filename) - except Exception as e: - # This is almost certainly a problem involving - # characters not valid in filenames on this - # system. Just let it go. - pass - if is_file: - if isinstance(markup, str): - markup = markup.encode("utf8") - warnings.warn( - '"%s" looks like a filename, not markup. You should' - ' probably open this file and pass the filehandle into' - ' Beautiful Soup.' % markup) - self._check_markup_is_url(markup) - - rejections = [] - success = False - for (self.markup, self.original_encoding, self.declared_html_encoding, - self.contains_replacement_characters) in ( - self.builder.prepare_markup( - markup, from_encoding, exclude_encodings=exclude_encodings)): - self.reset() - try: - self._feed() - success = True - break - except ParserRejectedMarkup as e: - rejections.append(e) - pass - - if not success: - other_exceptions = [str(e) for e in rejections] - raise ParserRejectedMarkup( - "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions) - ) - - # Clear out the markup and remove the builder's circular - # reference to this object. - self.markup = None - self.builder.soup = None - - def __copy__(self): - copy = type(self)( - self.encode('utf-8'), builder=self.builder, from_encoding='utf-8' - ) - - # Although we encoded the tree to UTF-8, that may not have - # been the encoding of the original markup. Set the copy's - # .original_encoding to reflect the original object's - # .original_encoding. - copy.original_encoding = self.original_encoding - return copy - - def __getstate__(self): - # Frequently a tree builder can't be pickled. - d = dict(self.__dict__) - if 'builder' in d and not self.builder.picklable: - d['builder'] = None - return d - - @staticmethod - def _check_markup_is_url(markup): - """ - Check if markup looks like it's actually a url and raise a warning - if so. Markup can be unicode or str (py2) / bytes (py3). 
- """ - if isinstance(markup, bytes): - space = b' ' - cant_start_with = (b"http:", b"https:") - elif isinstance(markup, str): - space = ' ' - cant_start_with = ("http:", "https:") - else: - return - - if any(markup.startswith(prefix) for prefix in cant_start_with): - if not space in markup: - if isinstance(markup, bytes): - decoded_markup = markup.decode('utf-8', 'replace') - else: - decoded_markup = markup - warnings.warn( - '"%s" looks like a URL. Beautiful Soup is not an' - ' HTTP client. You should probably use an HTTP client like' - ' requests to get the document behind the URL, and feed' - ' that document to Beautiful Soup.' % decoded_markup - ) - - def _feed(self): - # Convert the document to Unicode. - self.builder.reset() - - self.builder.feed(self.markup) - # Close out any unfinished strings and close all the open tags. - self.endData() - while self.currentTag.name != self.ROOT_TAG_NAME: - self.popTag() - - def reset(self): - Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) - self.hidden = 1 - self.builder.reset() - self.current_data = [] - self.currentTag = None - self.tagStack = [] - self.preserve_whitespace_tag_stack = [] - self.pushTag(self) - - def new_tag(self, name, namespace=None, nsprefix=None, attrs={}, - sourceline=None, sourcepos=None, **kwattrs): - """Create a new tag associated with this soup.""" - kwattrs.update(attrs) - return self.element_classes.get(Tag, Tag)( - None, self.builder, name, namespace, nsprefix, kwattrs, - sourceline=sourceline, sourcepos=sourcepos - ) - - def new_string(self, s, subclass=None): - """Create a new NavigableString associated with this soup.""" - subclass = subclass or self.element_classes.get( - NavigableString, NavigableString - ) - return subclass(s) - - def insert_before(self, successor): - raise NotImplementedError("BeautifulSoup objects don't support insert_before().") - - def insert_after(self, successor): - raise NotImplementedError("BeautifulSoup objects don't support insert_after().") - - def popTag(self): - tag = self.tagStack.pop() - if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]: - self.preserve_whitespace_tag_stack.pop() - #print "Pop", tag.name - if self.tagStack: - self.currentTag = self.tagStack[-1] - return self.currentTag - - def pushTag(self, tag): - #print "Push", tag.name - if self.currentTag is not None: - self.currentTag.contents.append(tag) - self.tagStack.append(tag) - self.currentTag = self.tagStack[-1] - if tag.name in self.builder.preserve_whitespace_tags: - self.preserve_whitespace_tag_stack.append(tag) - - def endData(self, containerClass=None): - - # Default container is NavigableString. - containerClass = containerClass or NavigableString - - # The user may want us to instantiate some alias for the - # container class. - containerClass = self.element_classes.get( - containerClass, containerClass - ) - - if self.current_data: - current_data = ''.join(self.current_data) - # If whitespace is not preserved, and this string contains - # nothing but ASCII spaces, replace it with a single space - # or newline. - if not self.preserve_whitespace_tag_stack: - strippable = True - for i in current_data: - if i not in self.ASCII_SPACES: - strippable = False - break - if strippable: - if '\n' in current_data: - current_data = '\n' - else: - current_data = ' ' - - # Reset the data collector. - self.current_data = [] - - # Should we add this string to the tree at all? 
- if self.parse_only and len(self.tagStack) <= 1 and \ - (not self.parse_only.text or \ - not self.parse_only.search(current_data)): - return - - o = containerClass(current_data) - self.object_was_parsed(o) - - def object_was_parsed(self, o, parent=None, most_recent_element=None): - """Add an object to the parse tree.""" - if parent is None: - parent = self.currentTag - if most_recent_element is not None: - previous_element = most_recent_element - else: - previous_element = self._most_recent_element - - next_element = previous_sibling = next_sibling = None - if isinstance(o, Tag): - next_element = o.next_element - next_sibling = o.next_sibling - previous_sibling = o.previous_sibling - if previous_element is None: - previous_element = o.previous_element - - fix = parent.next_element is not None - - o.setup(parent, previous_element, next_element, previous_sibling, next_sibling) - - self._most_recent_element = o - parent.contents.append(o) - - # Check if we are inserting into an already parsed node. - if fix: - self._linkage_fixer(parent) - - def _linkage_fixer(self, el): - """Make sure linkage of this fragment is sound.""" - - first = el.contents[0] - child = el.contents[-1] - descendant = child - - if child is first and el.parent is not None: - # Parent should be linked to first child - el.next_element = child - # We are no longer linked to whatever this element is - prev_el = child.previous_element - if prev_el is not None and prev_el is not el: - prev_el.next_element = None - # First child should be linked to the parent, and no previous siblings. - child.previous_element = el - child.previous_sibling = None - - # We have no sibling as we've been appended as the last. - child.next_sibling = None - - # This index is a tag, dig deeper for a "last descendant" - if isinstance(child, Tag) and child.contents: - descendant = child._last_descendant(False) - - # As the final step, link last descendant. It should be linked - # to the parent's next sibling (if found), else walk up the chain - # and find a parent with a sibling. It should have no next sibling. - descendant.next_element = None - descendant.next_sibling = None - target = el - while True: - if target is None: - break - elif target.next_sibling is not None: - descendant.next_element = target.next_sibling - target.next_sibling.previous_element = child - break - target = target.parent - - def _popToTag(self, name, nsprefix=None, inclusivePop=True): - """Pops the tag stack up to and including the most recent - instance of the given tag. If inclusivePop is false, pops the tag - stack up to but *not* including the most recent instqance of - the given tag.""" - #print "Popping to %s" % name - if name == self.ROOT_TAG_NAME: - # The BeautifulSoup object itself can never be popped. - return - - most_recently_popped = None - - stack_size = len(self.tagStack) - for i in range(stack_size - 1, 0, -1): - t = self.tagStack[i] - if (name == t.name and nsprefix == t.prefix): - if inclusivePop: - most_recently_popped = self.popTag() - break - most_recently_popped = self.popTag() - - return most_recently_popped - - def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None, - sourcepos=None): - """Push a start tag on to the stack. - - If this method returns None, the tag was rejected by the - SoupStrainer. You should proceed as if the tag had not occurred - in the document. For instance, if this was a self-closing tag, - don't call handle_endtag. 
- """ - - # print "Start tag %s: %s" % (name, attrs) - self.endData() - - if (self.parse_only and len(self.tagStack) <= 1 - and (self.parse_only.text - or not self.parse_only.search_tag(name, attrs))): - return None - - tag = self.element_classes.get(Tag, Tag)( - self, self.builder, name, namespace, nsprefix, attrs, - self.currentTag, self._most_recent_element, - sourceline=sourceline, sourcepos=sourcepos - ) - if tag is None: - return tag - if self._most_recent_element is not None: - self._most_recent_element.next_element = tag - self._most_recent_element = tag - self.pushTag(tag) - return tag - - def handle_endtag(self, name, nsprefix=None): - #print "End tag: " + name - self.endData() - self._popToTag(name, nsprefix) - - def handle_data(self, data): - self.current_data.append(data) - - def decode(self, pretty_print=False, - eventual_encoding=DEFAULT_OUTPUT_ENCODING, - formatter="minimal"): - """Returns a string or Unicode representation of this document. - To get Unicode, pass None for encoding.""" - - if self.is_xml: - # Print the XML declaration - encoding_part = '' - if eventual_encoding != None: - encoding_part = ' encoding="%s"' % eventual_encoding - prefix = '\n' % encoding_part - else: - prefix = '' - if not pretty_print: - indent_level = None - else: - indent_level = 0 - return prefix + super(BeautifulSoup, self).decode( - indent_level, eventual_encoding, formatter) - -# Alias to make it easier to type import: 'from bs4 import _soup' -_s = BeautifulSoup -_soup = BeautifulSoup - -class BeautifulStoneSoup(BeautifulSoup): - """Deprecated interface to an XML parser.""" - - def __init__(self, *args, **kwargs): - kwargs['features'] = 'xml' - warnings.warn( - 'The BeautifulStoneSoup class is deprecated. Instead of using ' - 'it, pass features="xml" into the BeautifulSoup constructor.') - super(BeautifulStoneSoup, self).__init__(*args, **kwargs) - - -class StopParsing(Exception): - pass - -class FeatureNotFound(ValueError): - pass - - -#By default, act as an HTML pretty-printer. -if __name__ == '__main__': - import sys - soup = BeautifulSoup(sys.stdin) - print(soup.prettify()) diff --git a/lib/bs4/builder/__init__.py b/lib/bs4/builder/__init__.py deleted file mode 100644 index 03a4c1e0..00000000 --- a/lib/bs4/builder/__init__.py +++ /dev/null @@ -1,392 +0,0 @@ -# Use of this source code is governed by the MIT license. -__license__ = "MIT" - -from collections import defaultdict -import itertools -import sys -from bs4.element import ( - CharsetMetaAttributeValue, - ContentMetaAttributeValue, - nonwhitespace_re - ) - -__all__ = [ - 'HTMLTreeBuilder', - 'SAXTreeBuilder', - 'TreeBuilder', - 'TreeBuilderRegistry', - ] - -# Some useful features for a TreeBuilder to have. -FAST = 'fast' -PERMISSIVE = 'permissive' -STRICT = 'strict' -XML = 'xml' -HTML = 'html' -HTML_5 = 'html5' - - -class TreeBuilderRegistry(object): - - def __init__(self): - self.builders_for_feature = defaultdict(list) - self.builders = [] - - def register(self, treebuilder_class): - """Register a treebuilder based on its advertised features.""" - for feature in treebuilder_class.features: - self.builders_for_feature[feature].insert(0, treebuilder_class) - self.builders.insert(0, treebuilder_class) - - def lookup(self, *features): - if len(self.builders) == 0: - # There are no builders at all. - return None - - if len(features) == 0: - # They didn't ask for any features. Give them the most - # recently registered builder. 
- return self.builders[0] - - # Go down the list of features in order, and eliminate any builders - # that don't match every feature. - features = list(features) - features.reverse() - candidates = None - candidate_set = None - while len(features) > 0: - feature = features.pop() - we_have_the_feature = self.builders_for_feature.get(feature, []) - if len(we_have_the_feature) > 0: - if candidates is None: - candidates = we_have_the_feature - candidate_set = set(candidates) - else: - # Eliminate any candidates that don't have this feature. - candidate_set = candidate_set.intersection( - set(we_have_the_feature)) - - # The only valid candidates are the ones in candidate_set. - # Go through the original list of candidates and pick the first one - # that's in candidate_set. - if candidate_set is None: - return None - for candidate in candidates: - if candidate in candidate_set: - return candidate - return None - -# The BeautifulSoup class will take feature lists from developers and use them -# to look up builders in this registry. -builder_registry = TreeBuilderRegistry() - -class TreeBuilder(object): - """Turn a document into a Beautiful Soup object tree.""" - - NAME = "[Unknown tree builder]" - ALTERNATE_NAMES = [] - features = [] - - is_xml = False - picklable = False - empty_element_tags = None # A tag will be considered an empty-element - # tag when and only when it has no contents. - - # A value for these tag/attribute combinations is a space- or - # comma-separated list of CDATA, rather than a single CDATA. - DEFAULT_CDATA_LIST_ATTRIBUTES = {} - - DEFAULT_PRESERVE_WHITESPACE_TAGS = set() - - USE_DEFAULT = object() - - # Most parsers don't keep track of line numbers. - TRACKS_LINE_NUMBERS = False - - def __init__(self, multi_valued_attributes=USE_DEFAULT, - preserve_whitespace_tags=USE_DEFAULT, - store_line_numbers=USE_DEFAULT): - """Constructor. - - :param multi_valued_attributes: If this is set to None, the - TreeBuilder will not turn any values for attributes like - 'class' into lists. Setting this do a dictionary will - customize this behavior; look at DEFAULT_CDATA_LIST_ATTRIBUTES - for an example. - - Internally, these are called "CDATA list attributes", but that - probably doesn't make sense to an end-user, so the argument name - is `multi_valued_attributes`. - - :param preserve_whitespace_tags: A list of tags to treat - the way
<pre> tags are treated in HTML. Tags in this list
-        will have their whitespace preserved rather than collapsed.
-
-        :param store_line_numbers: If the parser keeps track of the
-        line numbers and positions of the original markup, that
-        information will, by default, be stored in each corresponding
-        `Tag` object. You can turn this off by passing
-        store_line_numbers=False. If the parser you're using doesn't 
-        keep track of this information, then setting store_line_numbers=True
-        will do nothing.
-        """
-        self.soup = None
-        if multi_valued_attributes is self.USE_DEFAULT:
-            multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES
-        self.cdata_list_attributes = multi_valued_attributes
-        if preserve_whitespace_tags is self.USE_DEFAULT:
-            preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS
-        self.preserve_whitespace_tags = preserve_whitespace_tags
-        if store_line_numbers is self.USE_DEFAULT:
-            store_line_numbers = self.TRACKS_LINE_NUMBERS
-        self.store_line_numbers = store_line_numbers
-        
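For readers unfamiliar with the `USE_DEFAULT = object()` idiom used by this constructor: a unique sentinel lets the code tell "argument omitted" apart from a deliberate falsy value such as None. A minimal, self-contained sketch of the same pattern (the names here are illustrative, not part of bs4):

    _USE_DEFAULT = object()  # unique sentinel; identical only to itself

    class Config:
        DEFAULTS = {'*': ['class']}

        def __init__(self, multi_valued=_USE_DEFAULT):
            # An identity check keeps an explicit None (meaning
            # "disable the feature") distinct from "argument omitted",
            # mirroring TreeBuilder.__init__ above.
            if multi_valued is _USE_DEFAULT:
                multi_valued = self.DEFAULTS
            self.multi_valued = multi_valued

    print(Config().multi_valued)      # -> {'*': ['class']}
    print(Config(None).multi_valued)  # -> None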
-    def initialize_soup(self, soup):
-        """The BeautifulSoup object has been initialized and is now
-        being associated with the TreeBuilder.
-        """
-        self.soup = soup
-        
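The feature lookup implemented by TreeBuilderRegistry earlier in this file can be exercised directly. A hedged example, assuming this bundled copy of bs4 is importable; lookup() returns the most recently registered builder class that advertises every requested feature, or None:

    from bs4.builder import builder_registry

    # Same features BeautifulSoup falls back on when no parser is
    # named (DEFAULT_BUILDER_FEATURES = ['html', 'fast']).
    builder_class = builder_registry.lookup('html', 'fast')
    print(builder_class.NAME if builder_class else 'no matching builder')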
-    def reset(self):
-        pass
-
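The can_be_empty_element method defined next controls whether an empty tag may be serialized in self-closing form. A quick illustration of the resulting behavior, assuming the stock html.parser builder: 'br' is listed in HTMLTreeBuilder.empty_element_tags while 'p' is not, so only the former collapses:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<p></p><br>', 'html.parser')
    print(soup.decode())  # -> <p></p><br/>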
-    def can_be_empty_element(self, tag_name):
-        """Might a tag with this name be an empty-element tag?
-
-        The final markup may or may not actually present this tag as
-        self-closing.
-
-        For instance: an HTMLBuilder does not consider a <p> tag to be
-        an empty-element tag (it's not in
-        HTMLBuilder.empty_element_tags). This means an empty <p> tag
-        will be presented as "<p></p>", not "<p/>
". - - The default implementation has no opinion about which tags are - empty-element tags, so a tag will be presented as an - empty-element tag if and only if it has no contents. - "" will become "", and "bar" will - be left alone. - """ - if self.empty_element_tags is None: - return True - return tag_name in self.empty_element_tags - - def feed(self, markup): - raise NotImplementedError() - - def prepare_markup(self, markup, user_specified_encoding=None, - document_declared_encoding=None, exclude_encodings=None): - yield markup, None, None, False - - def test_fragment_to_document(self, fragment): - """Wrap an HTML fragment to make it look like a document. - - Different parsers do this differently. For instance, lxml - introduces an empty tag, and html5lib - doesn't. Abstracting this away lets us write simple tests - which run HTML fragments through the parser and compare the - results against other HTML fragments. - - This method should not be used outside of tests. - """ - return fragment - - def set_up_substitutions(self, tag): - return False - - def _replace_cdata_list_attribute_values(self, tag_name, attrs): - """Replaces class="foo bar" with class=["foo", "bar"] - - Modifies its input in place. - """ - if not attrs: - return attrs - if self.cdata_list_attributes: - universal = self.cdata_list_attributes.get('*', []) - tag_specific = self.cdata_list_attributes.get( - tag_name.lower(), None) - for attr in list(attrs.keys()): - if attr in universal or (tag_specific and attr in tag_specific): - # We have a "class"-type attribute whose string - # value is a whitespace-separated list of - # values. Split it into a list. - value = attrs[attr] - if isinstance(value, str): - values = nonwhitespace_re.findall(value) - else: - # html5lib sometimes calls setAttributes twice - # for the same tag when rearranging the parse - # tree. On the second call the attribute value - # here is already a list. If this happens, - # leave the value alone rather than trying to - # split it again. - values = value - attrs[attr] = values - return attrs - -class SAXTreeBuilder(TreeBuilder): - """A Beautiful Soup treebuilder that listens for SAX events.""" - - def feed(self, markup): - raise NotImplementedError() - - def close(self): - pass - - def startElement(self, name, attrs): - attrs = dict((key[1], value) for key, value in list(attrs.items())) - #print "Start %s, %r" % (name, attrs) - self.soup.handle_starttag(name, attrs) - - def endElement(self, name): - #print "End %s" % name - self.soup.handle_endtag(name) - - def startElementNS(self, nsTuple, nodeName, attrs): - # Throw away (ns, nodeName) for now. - self.startElement(nodeName, attrs) - - def endElementNS(self, nsTuple, nodeName): - # Throw away (ns, nodeName) for now. - self.endElement(nodeName) - #handler.endElementNS((ns, node.nodeName), node.nodeName) - - def startPrefixMapping(self, prefix, nodeValue): - # Ignore the prefix for now. - pass - - def endPrefixMapping(self, prefix): - # Ignore the prefix for now. - # handler.endPrefixMapping(prefix) - pass - - def characters(self, content): - self.soup.handle_data(content) - - def startDocument(self): - pass - - def endDocument(self): - pass - - -class HTMLTreeBuilder(TreeBuilder): - """This TreeBuilder knows facts about HTML. - - Such as which tags are empty-element tags. - """ - - empty_element_tags = set([ - # These are from HTML5. 
- 'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr', - - # These are from earlier versions of HTML and are removed in HTML5. - 'basefont', 'bgsound', 'command', 'frame', 'image', 'isindex', 'nextid', 'spacer' - ]) - - # The HTML standard defines these as block-level elements. Beautiful - # Soup does not treat these elements differently from other elements, - # but it may do so eventually, and this information is available if - # you need to use it. - block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"]) - - # The HTML standard defines these attributes as containing a - # space-separated list of values, not a single value. That is, - # class="foo bar" means that the 'class' attribute has two values, - # 'foo' and 'bar', not the single value 'foo bar'. When we - # encounter one of these attributes, we will parse its value into - # a list of values if possible. Upon output, the list will be - # converted back into a string. - DEFAULT_CDATA_LIST_ATTRIBUTES = { - "*" : ['class', 'accesskey', 'dropzone'], - "a" : ['rel', 'rev'], - "link" : ['rel', 'rev'], - "td" : ["headers"], - "th" : ["headers"], - "td" : ["headers"], - "form" : ["accept-charset"], - "object" : ["archive"], - - # These are HTML5 specific, as are *.accesskey and *.dropzone above. - "area" : ["rel"], - "icon" : ["sizes"], - "iframe" : ["sandbox"], - "output" : ["for"], - } - - DEFAULT_PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea']) - - def set_up_substitutions(self, tag): - # We are only interested in tags - if tag.name != 'meta': - return False - - http_equiv = tag.get('http-equiv') - content = tag.get('content') - charset = tag.get('charset') - - # We are interested in tags that say what encoding the - # document was originally in. This means HTML 5-style - # tags that provide the "charset" attribute. It also means - # HTML 4-style tags that provide the "content" - # attribute and have "http-equiv" set to "content-type". - # - # In both cases we will replace the value of the appropriate - # attribute with a standin object that can take on any - # encoding. - meta_encoding = None - if charset is not None: - # HTML 5 style: - # - meta_encoding = charset - tag['charset'] = CharsetMetaAttributeValue(charset) - - elif (content is not None and http_equiv is not None - and http_equiv.lower() == 'content-type'): - # HTML 4 style: - # - tag['content'] = ContentMetaAttributeValue(content) - - return (meta_encoding is not None) - -def register_treebuilders_from(module): - """Copy TreeBuilders from the given module into this module.""" - # I'm fairly sure this is not the best way to do this. - this_module = sys.modules['bs4.builder'] - for name in module.__all__: - obj = getattr(module, name) - - if issubclass(obj, TreeBuilder): - setattr(this_module, name, obj) - this_module.__all__.append(name) - # Register the builder while we're at it. - this_module.builder_registry.register(obj) - -class ParserRejectedMarkup(Exception): - def __init__(self, message_or_exception): - """Explain why the parser rejected the given markup, either - with a textual explanation or another exception. 
- """ - if isinstance(message_or_exception, Exception): - e = message_or_exception - message_or_exception = "%s: %s" % (e.__class__.__name__, str(e)) - super(ParserRejectedMarkup, self).__init__(message_or_exception) - -# Builders are registered in reverse order of priority, so that custom -# builder registrations will take precedence. In general, we want lxml -# to take precedence over html5lib, because it's faster. And we only -# want to use HTMLParser as a last result. -from . import _htmlparser -register_treebuilders_from(_htmlparser) -try: - from . import _html5lib - register_treebuilders_from(_html5lib) -except ImportError: - # They don't have html5lib installed. - pass -try: - from . import _lxml - register_treebuilders_from(_lxml) -except ImportError: - # They don't have lxml installed. - pass diff --git a/lib/bs4/builder/_html5lib.py b/lib/bs4/builder/_html5lib.py deleted file mode 100644 index 43199189..00000000 --- a/lib/bs4/builder/_html5lib.py +++ /dev/null @@ -1,455 +0,0 @@ -# Use of this source code is governed by the MIT license. -__license__ = "MIT" - -__all__ = [ - 'HTML5TreeBuilder', - ] - -import warnings -import re -from bs4.builder import ( - PERMISSIVE, - HTML, - HTML_5, - HTMLTreeBuilder, - ) -from bs4.element import ( - NamespacedAttribute, - nonwhitespace_re, -) -import html5lib -from html5lib.constants import ( - namespaces, - prefixes, - ) -from bs4.element import ( - Comment, - Doctype, - NavigableString, - Tag, - ) - -try: - # Pre-0.99999999 - from html5lib.treebuilders import _base as treebuilder_base - new_html5lib = False -except ImportError as e: - # 0.99999999 and up - from html5lib.treebuilders import base as treebuilder_base - new_html5lib = True - -class HTML5TreeBuilder(HTMLTreeBuilder): - """Use html5lib to build a tree.""" - - NAME = "html5lib" - - features = [NAME, PERMISSIVE, HTML_5, HTML] - - # html5lib can tell us which line number and position in the - # original file is the source of an element. - TRACKS_LINE_NUMBERS = True - - def prepare_markup(self, markup, user_specified_encoding, - document_declared_encoding=None, exclude_encodings=None): - # Store the user-specified encoding for use later on. - self.user_specified_encoding = user_specified_encoding - - # document_declared_encoding and exclude_encodings aren't used - # ATM because the html5lib TreeBuilder doesn't use - # UnicodeDammit. - if exclude_encodings: - warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.") - yield (markup, None, None, False) - - # These methods are defined by Beautiful Soup. - def feed(self, markup): - if self.soup.parse_only is not None: - warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.") - parser = html5lib.HTMLParser(tree=self.create_treebuilder) - self.underlying_builder.parser = parser - extra_kwargs = dict() - if not isinstance(markup, str): - if new_html5lib: - extra_kwargs['override_encoding'] = self.user_specified_encoding - else: - extra_kwargs['encoding'] = self.user_specified_encoding - doc = parser.parse(markup, **extra_kwargs) - - # Set the character encoding detected by the tokenizer. - if isinstance(markup, str): - # We need to special-case this because html5lib sets - # charEncoding to UTF-8 if it gets Unicode input. 
- doc.original_encoding = None - else: - original_encoding = parser.tokenizer.stream.charEncoding[0] - if not isinstance(original_encoding, str): - # In 0.99999999 and up, the encoding is an html5lib - # Encoding object. We want to use a string for compatibility - # with other tree builders. - original_encoding = original_encoding.name - doc.original_encoding = original_encoding - self.underlying_builder.parser = None - - def create_treebuilder(self, namespaceHTMLElements): - self.underlying_builder = TreeBuilderForHtml5lib( - namespaceHTMLElements, self.soup, - store_line_numbers=self.store_line_numbers - ) - return self.underlying_builder - - def test_fragment_to_document(self, fragment): - """See `TreeBuilder`.""" - return '%s' % fragment - - -class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder): - - def __init__(self, namespaceHTMLElements, soup=None, - store_line_numbers=True, **kwargs): - if soup: - self.soup = soup - else: - from bs4 import BeautifulSoup - # TODO: Why is the parser 'html.parser' here? To avoid an - # infinite loop? - self.soup = BeautifulSoup( - "", "html.parser", store_line_numbers=store_line_numbers, - **kwargs - ) - super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements) - - # This will be set later to an html5lib.html5parser.HTMLParser - # object, which we can use to track the current line number. - self.parser = None - self.store_line_numbers = store_line_numbers - - def documentClass(self): - self.soup.reset() - return Element(self.soup, self.soup, None) - - def insertDoctype(self, token): - name = token["name"] - publicId = token["publicId"] - systemId = token["systemId"] - - doctype = Doctype.for_name_and_ids(name, publicId, systemId) - self.soup.object_was_parsed(doctype) - - def elementClass(self, name, namespace): - kwargs = {} - if self.parser and self.store_line_numbers: - # This represents the point immediately after the end of the - # tag. We don't know when the tag started, but we do know - # where it ended -- the character just before this one. - sourceline, sourcepos = self.parser.tokenizer.stream.position() - kwargs['sourceline'] = sourceline - kwargs['sourcepos'] = sourcepos-1 - tag = self.soup.new_tag(name, namespace, **kwargs) - - return Element(tag, self.soup, namespace) - - def commentClass(self, data): - return TextNode(Comment(data), self.soup) - - def fragmentClass(self): - from bs4 import BeautifulSoup - # TODO: Why is the parser 'html.parser' here? To avoid an - # infinite loop? - self.soup = BeautifulSoup("", "html.parser") - self.soup.name = "[document_fragment]" - return Element(self.soup, self.soup, None) - - def appendChild(self, node): - # XXX This code is not covered by the BS4 tests. 
- self.soup.append(node.element) - - def getDocument(self): - return self.soup - - def getFragment(self): - return treebuilder_base.TreeBuilder.getFragment(self).element - - def testSerializer(self, element): - from bs4 import BeautifulSoup - rv = [] - doctype_re = re.compile(r'^(.*?)(?: PUBLIC "(.*?)"(?: "(.*?)")?| SYSTEM "(.*?)")?$') - - def serializeElement(element, indent=0): - if isinstance(element, BeautifulSoup): - pass - if isinstance(element, Doctype): - m = doctype_re.match(element) - if m: - name = m.group(1) - if m.lastindex > 1: - publicId = m.group(2) or "" - systemId = m.group(3) or m.group(4) or "" - rv.append("""|%s""" % - (' ' * indent, name, publicId, systemId)) - else: - rv.append("|%s" % (' ' * indent, name)) - else: - rv.append("|%s" % (' ' * indent,)) - elif isinstance(element, Comment): - rv.append("|%s" % (' ' * indent, element)) - elif isinstance(element, NavigableString): - rv.append("|%s\"%s\"" % (' ' * indent, element)) - else: - if element.namespace: - name = "%s %s" % (prefixes[element.namespace], - element.name) - else: - name = element.name - rv.append("|%s<%s>" % (' ' * indent, name)) - if element.attrs: - attributes = [] - for name, value in list(element.attrs.items()): - if isinstance(name, NamespacedAttribute): - name = "%s %s" % (prefixes[name.namespace], name.name) - if isinstance(value, list): - value = " ".join(value) - attributes.append((name, value)) - - for name, value in sorted(attributes): - rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) - indent += 2 - for child in element.children: - serializeElement(child, indent) - serializeElement(element, 0) - - return "\n".join(rv) - -class AttrList(object): - def __init__(self, element): - self.element = element - self.attrs = dict(self.element.attrs) - def __iter__(self): - return list(self.attrs.items()).__iter__() - def __setitem__(self, name, value): - # If this attribute is a multi-valued attribute for this element, - # turn its value into a list. - list_attr = self.element.cdata_list_attributes - if (name in list_attr['*'] - or (self.element.name in list_attr - and name in list_attr[self.element.name])): - # A node that is being cloned may have already undergone - # this procedure. - if not isinstance(value, list): - value = nonwhitespace_re.findall(value) - self.element[name] = value - def items(self): - return list(self.attrs.items()) - def keys(self): - return list(self.attrs.keys()) - def __len__(self): - return len(self.attrs) - def __getitem__(self, name): - return self.attrs[name] - def __contains__(self, name): - return name in list(self.attrs.keys()) - - -class Element(treebuilder_base.Node): - def __init__(self, element, soup, namespace): - treebuilder_base.Node.__init__(self, element.name) - self.element = element - self.soup = soup - self.namespace = namespace - - def appendChild(self, node): - string_child = child = None - if isinstance(node, str): - # Some other piece of code decided to pass in a string - # instead of creating a TextElement object to contain the - # string. - string_child = child = node - elif isinstance(node, Tag): - # Some other piece of code decided to pass in a Tag - # instead of creating an Element object to contain the - # Tag. 
- child = node - elif node.element.__class__ == NavigableString: - string_child = child = node.element - node.parent = self - else: - child = node.element - node.parent = self - - if not isinstance(child, str) and child.parent is not None: - node.element.extract() - - if (string_child is not None and self.element.contents - and self.element.contents[-1].__class__ == NavigableString): - # We are appending a string onto another string. - # TODO This has O(n^2) performance, for input like - # "aaa..." - old_element = self.element.contents[-1] - new_element = self.soup.new_string(old_element + string_child) - old_element.replace_with(new_element) - self.soup._most_recent_element = new_element - else: - if isinstance(node, str): - # Create a brand new NavigableString from this string. - child = self.soup.new_string(node) - - # Tell Beautiful Soup to act as if it parsed this element - # immediately after the parent's last descendant. (Or - # immediately after the parent, if it has no children.) - if self.element.contents: - most_recent_element = self.element._last_descendant(False) - elif self.element.next_element is not None: - # Something from further ahead in the parse tree is - # being inserted into this earlier element. This is - # very annoying because it means an expensive search - # for the last element in the tree. - most_recent_element = self.soup._last_descendant() - else: - most_recent_element = self.element - - self.soup.object_was_parsed( - child, parent=self.element, - most_recent_element=most_recent_element) - - def getAttributes(self): - if isinstance(self.element, Comment): - return {} - return AttrList(self.element) - - def setAttributes(self, attributes): - - if attributes is not None and len(attributes) > 0: - - converted_attributes = [] - for name, value in list(attributes.items()): - if isinstance(name, tuple): - new_name = NamespacedAttribute(*name) - del attributes[name] - attributes[new_name] = value - - self.soup.builder._replace_cdata_list_attribute_values( - self.name, attributes) - for name, value in list(attributes.items()): - self.element[name] = value - - # The attributes may contain variables that need substitution. - # Call set_up_substitutions manually. - # - # The Tag constructor called this method when the Tag was created, - # but we just set/changed the attributes, so call it again. 
- self.soup.builder.set_up_substitutions(self.element) - attributes = property(getAttributes, setAttributes) - - def insertText(self, data, insertBefore=None): - text = TextNode(self.soup.new_string(data), self.soup) - if insertBefore: - self.insertBefore(text, insertBefore) - else: - self.appendChild(text) - - def insertBefore(self, node, refNode): - index = self.element.index(refNode.element) - if (node.element.__class__ == NavigableString and self.element.contents - and self.element.contents[index-1].__class__ == NavigableString): - # (See comments in appendChild) - old_node = self.element.contents[index-1] - new_str = self.soup.new_string(old_node + node.element) - old_node.replace_with(new_str) - else: - self.element.insert(index, node.element) - node.parent = self - - def removeChild(self, node): - node.element.extract() - - def reparentChildren(self, new_parent): - """Move all of this tag's children into another tag.""" - # print "MOVE", self.element.contents - # print "FROM", self.element - # print "TO", new_parent.element - - element = self.element - new_parent_element = new_parent.element - # Determine what this tag's next_element will be once all the children - # are removed. - final_next_element = element.next_sibling - - new_parents_last_descendant = new_parent_element._last_descendant(False, False) - if len(new_parent_element.contents) > 0: - # The new parent already contains children. We will be - # appending this tag's children to the end. - new_parents_last_child = new_parent_element.contents[-1] - new_parents_last_descendant_next_element = new_parents_last_descendant.next_element - else: - # The new parent contains no children. - new_parents_last_child = None - new_parents_last_descendant_next_element = new_parent_element.next_element - - to_append = element.contents - if len(to_append) > 0: - # Set the first child's previous_element and previous_sibling - # to elements within the new parent - first_child = to_append[0] - if new_parents_last_descendant is not None: - first_child.previous_element = new_parents_last_descendant - else: - first_child.previous_element = new_parent_element - first_child.previous_sibling = new_parents_last_child - if new_parents_last_descendant is not None: - new_parents_last_descendant.next_element = first_child - else: - new_parent_element.next_element = first_child - if new_parents_last_child is not None: - new_parents_last_child.next_sibling = first_child - - # Find the very last element being moved. It is now the - # parent's last descendant. It has no .next_sibling and - # its .next_element is whatever the previous last - # descendant had. - last_childs_last_descendant = to_append[-1]._last_descendant(False, True) - - last_childs_last_descendant.next_element = new_parents_last_descendant_next_element - if new_parents_last_descendant_next_element is not None: - # TODO: This code has no test coverage and I'm not sure - # how to get html5lib to go through this path, but it's - # just the other side of the previous line. - new_parents_last_descendant_next_element.previous_element = last_childs_last_descendant - last_childs_last_descendant.next_sibling = None - - for child in to_append: - child.parent = new_parent_element - new_parent_element.contents.append(child) - - # Now that this element has no children, change its .next_element. 
- element.contents = [] - element.next_element = final_next_element - - # print "DONE WITH MOVE" - # print "FROM", self.element - # print "TO", new_parent_element - - def cloneNode(self): - tag = self.soup.new_tag(self.element.name, self.namespace) - node = Element(tag, self.soup, self.namespace) - for key,value in self.attributes: - node.attributes[key] = value - return node - - def hasContent(self): - return self.element.contents - - def getNameTuple(self): - if self.namespace == None: - return namespaces["html"], self.name - else: - return self.namespace, self.name - - nameTuple = property(getNameTuple) - -class TextNode(Element): - def __init__(self, element, soup): - treebuilder_base.Node.__init__(self, None) - self.element = element - self.soup = soup - - def cloneNode(self): - raise NotImplementedError diff --git a/lib/bs4/builder/_htmlparser.py b/lib/bs4/builder/_htmlparser.py deleted file mode 100644 index f1b473fe..00000000 --- a/lib/bs4/builder/_htmlparser.py +++ /dev/null @@ -1,358 +0,0 @@ -# encoding: utf-8 -"""Use the HTMLParser library to parse HTML files that aren't too bad.""" - -# Use of this source code is governed by the MIT license. -__license__ = "MIT" - -__all__ = [ - 'HTMLParserTreeBuilder', - ] - -from future.moves.html.parser import HTMLParser - -try: - from html.parser import HTMLParseError -except ImportError as e: - # HTMLParseError is removed in Python 3.5. Since it can never be - # thrown in 3.5, we can just define our own class as a placeholder. - class HTMLParseError(Exception): - pass - -import sys -import warnings - -# Starting in Python 3.2, the HTMLParser constructor takes a 'strict' -# argument, which we'd like to set to False. Unfortunately, -# http://bugs.python.org/issue13273 makes strict=True a better bet -# before Python 3.2.3. -# -# At the end of this file, we monkeypatch HTMLParser so that -# strict=True works well on Python 3.2.2. -major, minor, release = sys.version_info[:3] -CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3 -CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3 -CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4 - - -from bs4.element import ( - CData, - Comment, - Declaration, - Doctype, - ProcessingInstruction, - ) -from bs4.dammit import EntitySubstitution, UnicodeDammit - -from bs4.builder import ( - HTML, - HTMLTreeBuilder, - STRICT, - ) - - -HTMLPARSER = 'html.parser' - -class BeautifulSoupHTMLParser(HTMLParser): - - def __init__(self, *args, **kwargs): - HTMLParser.__init__(self, *args, **kwargs) - - # Keep a list of empty-element tags that were encountered - # without an explicit closing tag. If we encounter a closing tag - # of this type, we'll associate it with one of those entries. - # - # This isn't a stack because we don't care about the - # order. It's a list of closing tags we've already handled and - # will ignore, assuming they ever show up. - self.already_closed_empty_element = [] - - def error(self, msg): - """In Python 3, HTMLParser subclasses must implement error(), although this - requirement doesn't appear to be documented. - - In Python 2, HTMLParser implements error() as raising an exception. - - In any event, this method is called only on very strange markup and our best strategy - is to pretend it didn't happen and keep going. - """ - warnings.warn(msg) - - def handle_startendtag(self, name, attrs): - # This is only called when the markup looks like - # . 
- - # is_startend() tells handle_starttag not to close the tag - # just because its name matches a known empty-element tag. We - # know that this is an empty-element tag and we want to call - # handle_endtag ourselves. - tag = self.handle_starttag(name, attrs, handle_empty_element=False) - self.handle_endtag(name) - - def handle_starttag(self, name, attrs, handle_empty_element=True): - # XXX namespace - attr_dict = {} - for key, value in attrs: - # Change None attribute values to the empty string - # for consistency with the other tree builders. - if value is None: - value = '' - attr_dict[key] = value - attrvalue = '""' - #print "START", name - sourceline, sourcepos = self.getpos() - tag = self.soup.handle_starttag( - name, None, None, attr_dict, sourceline=sourceline, - sourcepos=sourcepos - ) - if tag and tag.is_empty_element and handle_empty_element: - # Unlike other parsers, html.parser doesn't send separate end tag - # events for empty-element tags. (It's handled in - # handle_startendtag, but only if the original markup looked like - # .) - # - # So we need to call handle_endtag() ourselves. Since we - # know the start event is identical to the end event, we - # don't want handle_endtag() to cross off any previous end - # events for tags of this name. - self.handle_endtag(name, check_already_closed=False) - - # But we might encounter an explicit closing tag for this tag - # later on. If so, we want to ignore it. - self.already_closed_empty_element.append(name) - - def handle_endtag(self, name, check_already_closed=True): - #print "END", name - if check_already_closed and name in self.already_closed_empty_element: - # This is a redundant end tag for an empty-element tag. - # We've already called handle_endtag() for it, so just - # check it off the list. - # print "ALREADY CLOSED", name - self.already_closed_empty_element.remove(name) - else: - self.soup.handle_endtag(name) - - def handle_data(self, data): - self.soup.handle_data(data) - - def handle_charref(self, name): - # XXX workaround for a bug in HTMLParser. Remove this once - # it's fixed in all supported versions. - # http://bugs.python.org/issue13633 - if name.startswith('x'): - real_name = int(name.lstrip('x'), 16) - elif name.startswith('X'): - real_name = int(name.lstrip('X'), 16) - else: - real_name = int(name) - - data = None - if real_name < 256: - # HTML numeric entities are supposed to reference Unicode - # code points, but sometimes they reference code points in - # some other encoding (ahem, Windows-1252). E.g. “ - # instead of É for LEFT DOUBLE QUOTATION MARK. This - # code tries to detect this situation and compensate. - for encoding in (self.soup.original_encoding, 'windows-1252'): - if not encoding: - continue - try: - data = bytearray([real_name]).decode(encoding) - except UnicodeDecodeError as e: - pass - if not data: - try: - data = chr(real_name) - except (ValueError, OverflowError) as e: - pass - data = data or "\N{REPLACEMENT CHARACTER}" - self.handle_data(data) - - def handle_entityref(self, name): - character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name) - if character is not None: - data = character - else: - # If this were XML, it would be ambiguous whether "&foo" - # was an character entity reference with a missing - # semicolon or the literal string "&foo". Since this is - # HTML, we have a complete list of all character entity references, - # and this one wasn't found, so assume it's the literal string "&foo". 
- data = "&%s" % name - self.handle_data(data) - - def handle_comment(self, data): - self.soup.endData() - self.soup.handle_data(data) - self.soup.endData(Comment) - - def handle_decl(self, data): - self.soup.endData() - if data.startswith("DOCTYPE "): - data = data[len("DOCTYPE "):] - elif data == 'DOCTYPE': - # i.e. "" - data = '' - self.soup.handle_data(data) - self.soup.endData(Doctype) - - def unknown_decl(self, data): - if data.upper().startswith('CDATA['): - cls = CData - data = data[len('CDATA['):] - else: - cls = Declaration - self.soup.endData() - self.soup.handle_data(data) - self.soup.endData(cls) - - def handle_pi(self, data): - self.soup.endData() - self.soup.handle_data(data) - self.soup.endData(ProcessingInstruction) - - -class HTMLParserTreeBuilder(HTMLTreeBuilder): - - is_xml = False - picklable = True - NAME = HTMLPARSER - features = [NAME, HTML, STRICT] - - # The html.parser knows which line number and position in the - # original file is the source of an element. - TRACKS_LINE_NUMBERS = True - - def __init__(self, parser_args=None, parser_kwargs=None, **kwargs): - super(HTMLParserTreeBuilder, self).__init__(**kwargs) - parser_args = parser_args or [] - parser_kwargs = parser_kwargs or {} - if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED: - parser_kwargs['strict'] = False - if CONSTRUCTOR_TAKES_CONVERT_CHARREFS: - parser_kwargs['convert_charrefs'] = False - self.parser_args = (parser_args, parser_kwargs) - - def prepare_markup(self, markup, user_specified_encoding=None, - document_declared_encoding=None, exclude_encodings=None): - """ - :return: A 4-tuple (markup, original encoding, encoding - declared within markup, whether any characters had to be - replaced with REPLACEMENT CHARACTER). - """ - if isinstance(markup, str): - yield (markup, None, None, False) - return - - try_encodings = [user_specified_encoding, document_declared_encoding] - dammit = UnicodeDammit(markup, try_encodings, is_html=True, - exclude_encodings=exclude_encodings) - yield (dammit.markup, dammit.original_encoding, - dammit.declared_html_encoding, - dammit.contains_replacement_characters) - - def feed(self, markup): - args, kwargs = self.parser_args - parser = BeautifulSoupHTMLParser(*args, **kwargs) - parser.soup = self.soup - try: - parser.feed(markup) - parser.close() - except HTMLParseError as e: - warnings.warn(RuntimeWarning( - "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help.")) - raise e - parser.already_closed_empty_element = [] - -# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some -# 3.2.3 code. This ensures they don't treat markup like
<div class="">
as a -# string. -# -# XXX This code can be removed once most Python 3 users are on 3.2.3. -if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT: - import re - attrfind_tolerant = re.compile( - r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*' - r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?') - HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant - - locatestarttagend = re.compile(r""" - <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name - (?:\s+ # whitespace before attribute name - (?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name - (?:\s*=\s* # value indicator - (?:'[^']*' # LITA-enclosed value - |\"[^\"]*\" # LIT-enclosed value - |[^'\">\s]+ # bare value - ) - )? - ) - )* - \s* # trailing whitespace -""", re.VERBOSE) - BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend - - from html.parser import tagfind, attrfind - - def parse_starttag(self, i): - self.__starttag_text = None - endpos = self.check_for_whole_start_tag(i) - if endpos < 0: - return endpos - rawdata = self.rawdata - self.__starttag_text = rawdata[i:endpos] - - # Now parse the data between i+1 and j into a tag and attrs - attrs = [] - match = tagfind.match(rawdata, i+1) - assert match, 'unexpected call to parse_starttag()' - k = match.end() - self.lasttag = tag = rawdata[i+1:k].lower() - while k < endpos: - if self.strict: - m = attrfind.match(rawdata, k) - else: - m = attrfind_tolerant.match(rawdata, k) - if not m: - break - attrname, rest, attrvalue = m.group(1, 2, 3) - if not rest: - attrvalue = None - elif attrvalue[:1] == '\'' == attrvalue[-1:] or \ - attrvalue[:1] == '"' == attrvalue[-1:]: - attrvalue = attrvalue[1:-1] - if attrvalue: - attrvalue = self.unescape(attrvalue) - attrs.append((attrname.lower(), attrvalue)) - k = m.end() - - end = rawdata[k:endpos].strip() - if end not in (">", "/>"): - lineno, offset = self.getpos() - if "\n" in self.__starttag_text: - lineno = lineno + self.__starttag_text.count("\n") - offset = len(self.__starttag_text) \ - - self.__starttag_text.rfind("\n") - else: - offset = offset + len(self.__starttag_text) - if self.strict: - self.error("junk characters in start tag: %r" - % (rawdata[k:endpos][:20],)) - self.handle_data(rawdata[i:endpos]) - return endpos - if end.endswith('/>'): - # XHTML-style empty tag: - self.handle_startendtag(tag, attrs) - else: - self.handle_starttag(tag, attrs) - if tag in self.CDATA_CONTENT_ELEMENTS: - self.set_cdata_mode(tag) - return endpos - - def set_cdata_mode(self, elem): - self.cdata_elem = elem.lower() - self.interesting = re.compile(r'' % self.cdata_elem, re.I) - - BeautifulSoupHTMLParser.parse_starttag = parse_starttag - BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode - - CONSTRUCTOR_TAKES_STRICT = True diff --git a/lib/bs4/builder/_lxml.py b/lib/bs4/builder/_lxml.py deleted file mode 100644 index f5257963..00000000 --- a/lib/bs4/builder/_lxml.py +++ /dev/null @@ -1,302 +0,0 @@ -# Use of this source code is governed by the MIT license. 
-__license__ = "MIT" - -__all__ = [ - 'LXMLTreeBuilderForXML', - 'LXMLTreeBuilder', - ] - -try: - from collections.abc import Callable # Python 3.6 -except ImportError as e: - from collections import Callable - -from io import BytesIO -from io import StringIO -from lxml import etree -from bs4.element import ( - Comment, - Doctype, - NamespacedAttribute, - ProcessingInstruction, - XMLProcessingInstruction, -) -from bs4.builder import ( - FAST, - HTML, - HTMLTreeBuilder, - PERMISSIVE, - ParserRejectedMarkup, - TreeBuilder, - XML) -from bs4.dammit import EncodingDetector - -LXML = 'lxml' - -def _invert(d): - "Invert a dictionary." - return dict((v,k) for k, v in list(d.items())) - -class LXMLTreeBuilderForXML(TreeBuilder): - DEFAULT_PARSER_CLASS = etree.XMLParser - - is_xml = True - processing_instruction_class = XMLProcessingInstruction - - NAME = "lxml-xml" - ALTERNATE_NAMES = ["xml"] - - # Well, it's permissive by XML parser standards. - features = [NAME, LXML, XML, FAST, PERMISSIVE] - - CHUNK_SIZE = 512 - - # This namespace mapping is specified in the XML Namespace - # standard. - DEFAULT_NSMAPS = dict(xml='http://www.w3.org/XML/1998/namespace') - - DEFAULT_NSMAPS_INVERTED = _invert(DEFAULT_NSMAPS) - - # NOTE: If we parsed Element objects and looked at .sourceline, - # we'd be able to see the line numbers from the original document. - # But instead we build an XMLParser or HTMLParser object to serve - # as the target of parse messages, and those messages don't include - # line numbers. - - def initialize_soup(self, soup): - """Let the BeautifulSoup object know about the standard namespace - mapping. - """ - super(LXMLTreeBuilderForXML, self).initialize_soup(soup) - self._register_namespaces(self.DEFAULT_NSMAPS) - - def _register_namespaces(self, mapping): - """Let the BeautifulSoup object know about namespaces encountered - while parsing the document. - - This might be useful later on when creating CSS selectors. - """ - for key, value in list(mapping.items()): - if key and key not in self.soup._namespaces: - # Let the BeautifulSoup object know about a new namespace. - # If there are multiple namespaces defined with the same - # prefix, the first one in the document takes precedence. - self.soup._namespaces[key] = value - - def default_parser(self, encoding): - # This can either return a parser object or a class, which - # will be instantiated with default arguments. - if self._default_parser is not None: - return self._default_parser - return etree.XMLParser( - target=self, strip_cdata=False, recover=True, encoding=encoding) - - def parser_for(self, encoding): - # Use the default parser. - parser = self.default_parser(encoding) - - if isinstance(parser, Callable): - # Instantiate the parser with default arguments - parser = parser(target=self, strip_cdata=False, encoding=encoding) - return parser - - def __init__(self, parser=None, empty_element_tags=None, **kwargs): - # TODO: Issue a warning if parser is present but not a - # callable, since that means there's no way to create new - # parsers for different encodings. - self._default_parser = parser - if empty_element_tags is not None: - self.empty_element_tags = set(empty_element_tags) - self.soup = None - self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED] - super(LXMLTreeBuilderForXML, self).__init__(**kwargs) - - def _getNsTag(self, tag): - # Split the namespace URL out of a fully-qualified lxml tag - # name. Copied from lxml's src/lxml/sax.py. 
- if tag[0] == '{': - return tuple(tag[1:].split('}', 1)) - else: - return (None, tag) - - def prepare_markup(self, markup, user_specified_encoding=None, - exclude_encodings=None, - document_declared_encoding=None): - """ - :yield: A series of 4-tuples. - (markup, encoding, declared encoding, - has undergone character replacement) - - Each 4-tuple represents a strategy for parsing the document. - """ - # Instead of using UnicodeDammit to convert the bytestring to - # Unicode using different encodings, use EncodingDetector to - # iterate over the encodings, and tell lxml to try to parse - # the document as each one in turn. - is_html = not self.is_xml - if is_html: - self.processing_instruction_class = ProcessingInstruction - else: - self.processing_instruction_class = XMLProcessingInstruction - - if isinstance(markup, str): - # We were given Unicode. Maybe lxml can parse Unicode on - # this system? - yield markup, None, document_declared_encoding, False - - if isinstance(markup, str): - # No, apparently not. Convert the Unicode to UTF-8 and - # tell lxml to parse it as UTF-8. - yield (markup.encode("utf8"), "utf8", - document_declared_encoding, False) - - try_encodings = [user_specified_encoding, document_declared_encoding] - detector = EncodingDetector( - markup, try_encodings, is_html, exclude_encodings) - for encoding in detector.encodings: - yield (detector.markup, encoding, document_declared_encoding, False) - - def feed(self, markup): - if isinstance(markup, bytes): - markup = BytesIO(markup) - elif isinstance(markup, str): - markup = StringIO(markup) - - # Call feed() at least once, even if the markup is empty, - # or the parser won't be initialized. - data = markup.read(self.CHUNK_SIZE) - try: - self.parser = self.parser_for(self.soup.original_encoding) - self.parser.feed(data) - while len(data) != 0: - # Now call feed() on the rest of the data, chunk by chunk. - data = markup.read(self.CHUNK_SIZE) - if len(data) != 0: - self.parser.feed(data) - self.parser.close() - except (UnicodeDecodeError, LookupError, etree.ParserError) as e: - raise ParserRejectedMarkup(e) - - def close(self): - self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED] - - def start(self, name, attrs, nsmap={}): - # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy. - attrs = dict(attrs) - nsprefix = None - # Invert each namespace map as it comes in. - if len(nsmap) == 0 and len(self.nsmaps) > 1: - # There are no new namespaces for this tag, but - # non-default namespaces are in play, so we need a - # separate tag stack to know when they end. - self.nsmaps.append(None) - elif len(nsmap) > 0: - # A new namespace mapping has come into play. - - # First, Let the BeautifulSoup object know about it. - self._register_namespaces(nsmap) - - # Then, add it to our running list of inverted namespace - # mappings. - self.nsmaps.append(_invert(nsmap)) - - # Also treat the namespace mapping as a set of attributes on the - # tag, so we can recreate it later. - attrs = attrs.copy() - for prefix, namespace in list(nsmap.items()): - attribute = NamespacedAttribute( - "xmlns", prefix, "http://www.w3.org/2000/xmlns/") - attrs[attribute] = namespace - - # Namespaces are in play. Find any attributes that came in - # from lxml with namespaces attached to their names, and - # turn then into NamespacedAttribute objects. 
- new_attrs = {} - for attr, value in list(attrs.items()): - namespace, attr = self._getNsTag(attr) - if namespace is None: - new_attrs[attr] = value - else: - nsprefix = self._prefix_for_namespace(namespace) - attr = NamespacedAttribute(nsprefix, attr, namespace) - new_attrs[attr] = value - attrs = new_attrs - - namespace, name = self._getNsTag(name) - nsprefix = self._prefix_for_namespace(namespace) - self.soup.handle_starttag(name, namespace, nsprefix, attrs) - - def _prefix_for_namespace(self, namespace): - """Find the currently active prefix for the given namespace.""" - if namespace is None: - return None - for inverted_nsmap in reversed(self.nsmaps): - if inverted_nsmap is not None and namespace in inverted_nsmap: - return inverted_nsmap[namespace] - return None - - def end(self, name): - self.soup.endData() - completed_tag = self.soup.tagStack[-1] - namespace, name = self._getNsTag(name) - nsprefix = None - if namespace is not None: - for inverted_nsmap in reversed(self.nsmaps): - if inverted_nsmap is not None and namespace in inverted_nsmap: - nsprefix = inverted_nsmap[namespace] - break - self.soup.handle_endtag(name, nsprefix) - if len(self.nsmaps) > 1: - # This tag, or one of its parents, introduced a namespace - # mapping, so pop it off the stack. - self.nsmaps.pop() - - def pi(self, target, data): - self.soup.endData() - self.soup.handle_data(target + ' ' + data) - self.soup.endData(self.processing_instruction_class) - - def data(self, content): - self.soup.handle_data(content) - - def doctype(self, name, pubid, system): - self.soup.endData() - doctype = Doctype.for_name_and_ids(name, pubid, system) - self.soup.object_was_parsed(doctype) - - def comment(self, content): - "Handle comments as Comment objects." - self.soup.endData() - self.soup.handle_data(content) - self.soup.endData(Comment) - - def test_fragment_to_document(self, fragment): - """See `TreeBuilder`.""" - return '\n%s' % fragment - - -class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML): - - NAME = LXML - ALTERNATE_NAMES = ["lxml-html"] - - features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE] - is_xml = False - processing_instruction_class = ProcessingInstruction - - def default_parser(self, encoding): - return etree.HTMLParser - - def feed(self, markup): - encoding = self.soup.original_encoding - try: - self.parser = self.parser_for(encoding) - self.parser.feed(markup) - self.parser.close() - except (UnicodeDecodeError, LookupError, etree.ParserError) as e: - raise ParserRejectedMarkup(e) - - - def test_fragment_to_document(self, fragment): - """See `TreeBuilder`.""" - return '%s' % fragment diff --git a/lib/bs4/check_block.py b/lib/bs4/check_block.py deleted file mode 100644 index a60a7b74..00000000 --- a/lib/bs4/check_block.py +++ /dev/null @@ -1,4 +0,0 @@ -import requests -data = requests.get("https://www.crummy.com/").content -from bs4 import _s -data = [x for x in _s(data).block_text()] diff --git a/lib/bs4/dammit.py b/lib/bs4/dammit.py deleted file mode 100644 index e8cdd147..00000000 --- a/lib/bs4/dammit.py +++ /dev/null @@ -1,872 +0,0 @@ -# -*- coding: utf-8 -*- -"""Beautiful Soup bonus library: Unicode, Dammit - -This library converts a bytestream to Unicode through any means -necessary. It is heavily based on code from Mark Pilgrim's Universal -Feed Parser. It works best on XML and HTML, but it does not rewrite the -XML or HTML to reflect a new encoding; that's the tree builder's job. -""" -# Use of this source code is governed by the MIT license. 
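[Editor's note: the lxml builder deleted above registers every namespace prefix it sees and stores the prefix, local name, and namespace URI on each tag; it is what backs `features="xml"`. A minimal sketch of that behavior through the public API, assuming lxml is installed so the "xml" features resolve:]

```python
from bs4 import BeautifulSoup  # the "xml" parser below requires lxml

xml = ('<root xmlns:dc="http://purl.org/dc/elements/1.1/">'
       '<dc:title>Example</dc:title></root>')

soup = BeautifulSoup(xml, "xml")
title = soup.find("title")       # local name; the prefix stays on the tag
print(title.prefix, title.name)  # -> dc title
print(title.namespace)           # -> http://purl.org/dc/elements/1.1/
print(title)                     # -> <dc:title>Example</dc:title>
```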
-__license__ = "MIT" - -import codecs -from future.moves.html.entities import codepoint2name -from future.builtins import chr -import re -import logging -import string - -# Import a library to autodetect character encodings. -chardet_type = None -try: - # First try the fast C implementation. - # PyPI package: cchardet - import cchardet - def chardet_dammit(s): - if isinstance(s, str): - return None - return cchardet.detect(s)['encoding'] -except ImportError: - try: - # Fall back to the pure Python implementation - # Debian package: python-chardet - # PyPI package: chardet - import chardet - def chardet_dammit(s): - if isinstance(s, str): - return None - return chardet.detect(s)['encoding'] - #import chardet.constants - #chardet.constants._debug = 1 - except ImportError: - # No chardet available. - def chardet_dammit(s): - return None - -# Available from http://cjkpython.i18n.org/. -try: - import iconv_codec -except ImportError: - pass - -# Build bytestring and Unicode versions of regular expressions for finding -# a declared encoding inside an XML or HTML document. -xml_encoding = '^\s*<\\?.*encoding=[\'"](.*?)[\'"].*\\?>' -html_meta = '<\\s*meta[^>]+charset\\s*=\\s*["\']?([^>]*?)[ /;\'">]' -encoding_res = dict() -encoding_res[bytes] = { - 'html' : re.compile(html_meta.encode("ascii"), re.I), - 'xml' : re.compile(xml_encoding.encode("ascii"), re.I), -} -encoding_res[str] = { - 'html' : re.compile(html_meta, re.I), - 'xml' : re.compile(xml_encoding, re.I) -} - -class EntitySubstitution(object): - - """Substitute XML or HTML entities for the corresponding characters.""" - - def _populate_class_variables(): - lookup = {} - reverse_lookup = {} - characters_for_re = [] - - # &apos is an XHTML entity and an HTML 5, but not an HTML 4 - # entity. We don't want to use it, but we want to recognize it on the way in. - # - # TODO: Ideally we would be able to recognize all HTML 5 named - # entities, but that's a little tricky. - extra = [(39, 'apos')] - for codepoint, name in list(codepoint2name.items()) + extra: - character = chr(codepoint) - if codepoint not in (34, 39): - # There's no point in turning the quotation mark into - # " or the single quote into ', unless it - # happens within an attribute value, which is handled - # elsewhere. - characters_for_re.append(character) - lookup[character] = name - # But we do want to recognize those entities on the way in and - # convert them to Unicode characters. - reverse_lookup[name] = character - re_definition = "[%s]" % "".join(characters_for_re) - return lookup, reverse_lookup, re.compile(re_definition) - (CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER, - CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables() - - CHARACTER_TO_XML_ENTITY = { - "'": "apos", - '"': "quot", - "&": "amp", - "<": "lt", - ">": "gt", - } - - BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" - "&(?!#\\d+;|#x[0-9a-fA-F]+;|\\w+;)" - ")") - - AMPERSAND_OR_BRACKET = re.compile("([<>&])") - - @classmethod - def _substitute_html_entity(cls, matchobj): - entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0)) - return "&%s;" % entity - - @classmethod - def _substitute_xml_entity(cls, matchobj): - """Used with a regular expression to substitute the - appropriate XML entity for an XML special character.""" - entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)] - return "&%s;" % entity - - @classmethod - def quoted_attribute_value(self, value): - """Make a value into a quoted XML attribute, possibly escaping it. - - Most strings will be quoted using double quotes. 
- - Bob's Bar -> "Bob's Bar" - - If a string contains double quotes, it will be quoted using - single quotes. - - Welcome to "my bar" -> 'Welcome to "my bar"' - - If a string contains both single and double quotes, the - double quotes will be escaped, and the string will be quoted - using double quotes. - - Welcome to "Bob's Bar" -> "Welcome to "Bob's bar" - """ - quote_with = '"' - if '"' in value: - if "'" in value: - # The string contains both single and double - # quotes. Turn the double quotes into - # entities. We quote the double quotes rather than - # the single quotes because the entity name is - # """ whether this is HTML or XML. If we - # quoted the single quotes, we'd have to decide - # between ' and &squot;. - replace_with = """ - value = value.replace('"', replace_with) - else: - # There are double quotes but no single quotes. - # We can use single quotes to quote the attribute. - quote_with = "'" - return quote_with + value + quote_with - - @classmethod - def substitute_xml(cls, value, make_quoted_attribute=False): - """Substitute XML entities for special XML characters. - - :param value: A string to be substituted. The less-than sign - will become <, the greater-than sign will become >, - and any ampersands will become &. If you want ampersands - that appear to be part of an entity definition to be left - alone, use substitute_xml_containing_entities() instead. - - :param make_quoted_attribute: If True, then the string will be - quoted, as befits an attribute value. - """ - # Escape angle brackets and ampersands. - value = cls.AMPERSAND_OR_BRACKET.sub( - cls._substitute_xml_entity, value) - - if make_quoted_attribute: - value = cls.quoted_attribute_value(value) - return value - - @classmethod - def substitute_xml_containing_entities( - cls, value, make_quoted_attribute=False): - """Substitute XML entities for special XML characters. - - :param value: A string to be substituted. The less-than sign will - become <, the greater-than sign will become >, and any - ampersands that are not part of an entity defition will - become &. - - :param make_quoted_attribute: If True, then the string will be - quoted, as befits an attribute value. - """ - # Escape angle brackets, and ampersands that aren't part of - # entities. - value = cls.BARE_AMPERSAND_OR_BRACKET.sub( - cls._substitute_xml_entity, value) - - if make_quoted_attribute: - value = cls.quoted_attribute_value(value) - return value - - @classmethod - def substitute_html(cls, s): - """Replace certain Unicode characters with named HTML entities. - - This differs from data.encode(encoding, 'xmlcharrefreplace') - in that the goal is to make the result more readable (to those - with ASCII displays) rather than to recover from - errors. There's absolutely nothing wrong with a UTF-8 string - containg a LATIN SMALL LETTER E WITH ACUTE, but replacing that - character with "é" will make it more readable to some - people. - """ - return cls.CHARACTER_TO_HTML_ENTITY_RE.sub( - cls._substitute_html_entity, s) - - -class EncodingDetector: - """Suggests a number of possible encodings for a bytestring. - - Order of precedence: - - 1. Encodings you specifically tell EncodingDetector to try first - (the override_encodings argument to the constructor). - - 2. An encoding declared within the bytestring itself, either in an - XML declaration (if the bytestring is to be interpreted as an XML - document), or in a tag (if the bytestring is to be - interpreted as an HTML document.) - - 3. 
An encoding detected through textual analysis by chardet, - cchardet, or a similar external library. - - 4. UTF-8. - - 5. Windows-1252. - """ - def __init__(self, markup, override_encodings=None, is_html=False, - exclude_encodings=None): - self.override_encodings = override_encodings or [] - exclude_encodings = exclude_encodings or [] - self.exclude_encodings = set([x.lower() for x in exclude_encodings]) - self.chardet_encoding = None - self.is_html = is_html - self.declared_encoding = None - - # First order of business: strip a byte-order mark. - self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup) - - def _usable(self, encoding, tried): - if encoding is not None: - encoding = encoding.lower() - if encoding in self.exclude_encodings: - return False - if encoding not in tried: - tried.add(encoding) - return True - return False - - @property - def encodings(self): - """Yield a number of encodings that might work for this markup.""" - tried = set() - for e in self.override_encodings: - if self._usable(e, tried): - yield e - - # Did the document originally start with a byte-order mark - # that indicated its encoding? - if self._usable(self.sniffed_encoding, tried): - yield self.sniffed_encoding - - # Look within the document for an XML or HTML encoding - # declaration. - if self.declared_encoding is None: - self.declared_encoding = self.find_declared_encoding( - self.markup, self.is_html) - if self._usable(self.declared_encoding, tried): - yield self.declared_encoding - - # Use third-party character set detection to guess at the - # encoding. - if self.chardet_encoding is None: - self.chardet_encoding = chardet_dammit(self.markup) - if self._usable(self.chardet_encoding, tried): - yield self.chardet_encoding - - # As a last-ditch effort, try utf-8 and windows-1252. - for e in ('utf-8', 'windows-1252'): - if self._usable(e, tried): - yield e - - @classmethod - def strip_byte_order_mark(cls, data): - """If a byte-order mark is present, strip it and return the encoding it implies.""" - encoding = None - if isinstance(data, str): - # Unicode data cannot have a byte-order mark. - return data, encoding - if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \ - and (data[2:4] != '\x00\x00'): - encoding = 'utf-16be' - data = data[2:] - elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \ - and (data[2:4] != '\x00\x00'): - encoding = 'utf-16le' - data = data[2:] - elif data[:3] == b'\xef\xbb\xbf': - encoding = 'utf-8' - data = data[3:] - elif data[:4] == b'\x00\x00\xfe\xff': - encoding = 'utf-32be' - data = data[4:] - elif data[:4] == b'\xff\xfe\x00\x00': - encoding = 'utf-32le' - data = data[4:] - return data, encoding - - @classmethod - def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False): - """Given a document, tries to find its declared encoding. - - An XML encoding is declared at the beginning of the document. - - An HTML encoding is declared in a tag, hopefully near the - beginning of the document. 
- """ - if search_entire_document: - xml_endpos = html_endpos = len(markup) - else: - xml_endpos = 1024 - html_endpos = max(2048, int(len(markup) * 0.05)) - - if isinstance(markup, bytes): - res = encoding_res[bytes] - else: - res = encoding_res[str] - - xml_re = res['xml'] - html_re = res['html'] - declared_encoding = None - declared_encoding_match = xml_re.search(markup, endpos=xml_endpos) - if not declared_encoding_match and is_html: - declared_encoding_match = html_re.search(markup, endpos=html_endpos) - if declared_encoding_match is not None: - declared_encoding = declared_encoding_match.groups()[0] - if declared_encoding: - if isinstance(declared_encoding, bytes): - declared_encoding = declared_encoding.decode('ascii', 'replace') - return declared_encoding.lower() - return None - -class UnicodeDammit: - """A class for detecting the encoding of a *ML document and - converting it to a Unicode string. If the source encoding is - windows-1252, can replace MS smart quotes with their HTML or XML - equivalents.""" - - # This dictionary maps commonly seen values for "charset" in HTML - # meta tags to the corresponding Python codec names. It only covers - # values that aren't in Python's aliases and can't be determined - # by the heuristics in find_codec. - CHARSET_ALIASES = {"macintosh": "mac-roman", - "x-sjis": "shift-jis"} - - ENCODINGS_WITH_SMART_QUOTES = [ - "windows-1252", - "iso-8859-1", - "iso-8859-2", - ] - - def __init__(self, markup, override_encodings=[], - smart_quotes_to=None, is_html=False, exclude_encodings=[]): - self.smart_quotes_to = smart_quotes_to - self.tried_encodings = [] - self.contains_replacement_characters = False - self.is_html = is_html - self.log = logging.getLogger(__name__) - self.detector = EncodingDetector( - markup, override_encodings, is_html, exclude_encodings) - - # Short-circuit if the data is in Unicode to begin with. - if isinstance(markup, str) or markup == '': - self.markup = markup - self.unicode_markup = str(markup) - self.original_encoding = None - return - - # The encoding detector may have stripped a byte-order mark. - # Use the stripped markup from this point on. - self.markup = self.detector.markup - - u = None - for encoding in self.detector.encodings: - markup = self.detector.markup - u = self._convert_from(encoding) - if u is not None: - break - - if not u: - # None of the encodings worked. As an absolute last resort, - # try them again with character replacement. - - for encoding in self.detector.encodings: - if encoding != "ascii": - u = self._convert_from(encoding, "replace") - if u is not None: - self.log.warning( - "Some characters could not be decoded, and were " - "replaced with REPLACEMENT CHARACTER." - ) - self.contains_replacement_characters = True - break - - # If none of that worked, we could at this point force it to - # ASCII, but that would destroy so much data that I think - # giving up is better. 
- self.unicode_markup = u - if not u: - self.original_encoding = None - - def _sub_ms_char(self, match): - """Changes a MS smart quote character to an XML or HTML - entity, or an ASCII character.""" - orig = match.group(1) - if self.smart_quotes_to == 'ascii': - sub = self.MS_CHARS_TO_ASCII.get(orig).encode() - else: - sub = self.MS_CHARS.get(orig) - if type(sub) == tuple: - if self.smart_quotes_to == 'xml': - sub = '&#x'.encode() + sub[1].encode() + ';'.encode() - else: - sub = '&'.encode() + sub[0].encode() + ';'.encode() - else: - sub = sub.encode() - return sub - - def _convert_from(self, proposed, errors="strict"): - proposed = self.find_codec(proposed) - if not proposed or (proposed, errors) in self.tried_encodings: - return None - self.tried_encodings.append((proposed, errors)) - markup = self.markup - # Convert smart quotes to HTML if coming from an encoding - # that might have them. - if (self.smart_quotes_to is not None - and proposed in self.ENCODINGS_WITH_SMART_QUOTES): - smart_quotes_re = b"([\x80-\x9f])" - smart_quotes_compiled = re.compile(smart_quotes_re) - markup = smart_quotes_compiled.sub(self._sub_ms_char, markup) - - try: - #print "Trying to convert document to %s (errors=%s)" % ( - # proposed, errors) - u = self._to_unicode(markup, proposed, errors) - self.markup = u - self.original_encoding = proposed - except Exception as e: - #print "That didn't work!" - #print e - return None - #print "Correct encoding: %s" % proposed - return self.markup - - def _to_unicode(self, data, encoding, errors="strict"): - '''Given a string and its encoding, decodes the string into Unicode. - %encoding is a string recognized by encodings.aliases''' - return str(data, encoding, errors) - - @property - def declared_html_encoding(self): - if not self.is_html: - return None - return self.detector.declared_encoding - - def find_codec(self, charset): - value = (self._codec(self.CHARSET_ALIASES.get(charset, charset)) - or (charset and self._codec(charset.replace("-", ""))) - or (charset and self._codec(charset.replace("-", "_"))) - or (charset and charset.lower()) - or charset - ) - if value: - return value.lower() - return None - - def _codec(self, charset): - if not charset: - return charset - codec = None - try: - codecs.lookup(charset) - codec = charset - except (LookupError, ValueError): - pass - return codec - - - # A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities. - MS_CHARS = {b'\x80': ('euro', '20AC'), - b'\x81': ' ', - b'\x82': ('sbquo', '201A'), - b'\x83': ('fnof', '192'), - b'\x84': ('bdquo', '201E'), - b'\x85': ('hellip', '2026'), - b'\x86': ('dagger', '2020'), - b'\x87': ('Dagger', '2021'), - b'\x88': ('circ', '2C6'), - b'\x89': ('permil', '2030'), - b'\x8A': ('Scaron', '160'), - b'\x8B': ('lsaquo', '2039'), - b'\x8C': ('OElig', '152'), - b'\x8D': '?', - b'\x8E': ('#x17D', '17D'), - b'\x8F': '?', - b'\x90': '?', - b'\x91': ('lsquo', '2018'), - b'\x92': ('rsquo', '2019'), - b'\x93': ('ldquo', '201C'), - b'\x94': ('rdquo', '201D'), - b'\x95': ('bull', '2022'), - b'\x96': ('ndash', '2013'), - b'\x97': ('mdash', '2014'), - b'\x98': ('tilde', '2DC'), - b'\x99': ('trade', '2122'), - b'\x9a': ('scaron', '161'), - b'\x9b': ('rsaquo', '203A'), - b'\x9c': ('oelig', '153'), - b'\x9d': '?', - b'\x9e': ('#x17E', '17E'), - b'\x9f': ('Yuml', ''),} - - # A parochial partial mapping of ISO-Latin-1 to ASCII. Contains - # horrors like stripping diacritical marks to turn á into a, but also - # contains non-horrors like turning “ into ". 
- MS_CHARS_TO_ASCII = { - b'\x80' : 'EUR', - b'\x81' : ' ', - b'\x82' : ',', - b'\x83' : 'f', - b'\x84' : ',,', - b'\x85' : '...', - b'\x86' : '+', - b'\x87' : '++', - b'\x88' : '^', - b'\x89' : '%', - b'\x8a' : 'S', - b'\x8b' : '<', - b'\x8c' : 'OE', - b'\x8d' : '?', - b'\x8e' : 'Z', - b'\x8f' : '?', - b'\x90' : '?', - b'\x91' : "'", - b'\x92' : "'", - b'\x93' : '"', - b'\x94' : '"', - b'\x95' : '*', - b'\x96' : '-', - b'\x97' : '--', - b'\x98' : '~', - b'\x99' : '(TM)', - b'\x9a' : 's', - b'\x9b' : '>', - b'\x9c' : 'oe', - b'\x9d' : '?', - b'\x9e' : 'z', - b'\x9f' : 'Y', - b'\xa0' : ' ', - b'\xa1' : '!', - b'\xa2' : 'c', - b'\xa3' : 'GBP', - b'\xa4' : '$', #This approximation is especially parochial--this is the - #generic currency symbol. - b'\xa5' : 'YEN', - b'\xa6' : '|', - b'\xa7' : 'S', - b'\xa8' : '..', - b'\xa9' : '', - b'\xaa' : '(th)', - b'\xab' : '<<', - b'\xac' : '!', - b'\xad' : ' ', - b'\xae' : '(R)', - b'\xaf' : '-', - b'\xb0' : 'o', - b'\xb1' : '+-', - b'\xb2' : '2', - b'\xb3' : '3', - b'\xb4' : ("'", 'acute'), - b'\xb5' : 'u', - b'\xb6' : 'P', - b'\xb7' : '*', - b'\xb8' : ',', - b'\xb9' : '1', - b'\xba' : '(th)', - b'\xbb' : '>>', - b'\xbc' : '1/4', - b'\xbd' : '1/2', - b'\xbe' : '3/4', - b'\xbf' : '?', - b'\xc0' : 'A', - b'\xc1' : 'A', - b'\xc2' : 'A', - b'\xc3' : 'A', - b'\xc4' : 'A', - b'\xc5' : 'A', - b'\xc6' : 'AE', - b'\xc7' : 'C', - b'\xc8' : 'E', - b'\xc9' : 'E', - b'\xca' : 'E', - b'\xcb' : 'E', - b'\xcc' : 'I', - b'\xcd' : 'I', - b'\xce' : 'I', - b'\xcf' : 'I', - b'\xd0' : 'D', - b'\xd1' : 'N', - b'\xd2' : 'O', - b'\xd3' : 'O', - b'\xd4' : 'O', - b'\xd5' : 'O', - b'\xd6' : 'O', - b'\xd7' : '*', - b'\xd8' : 'O', - b'\xd9' : 'U', - b'\xda' : 'U', - b'\xdb' : 'U', - b'\xdc' : 'U', - b'\xdd' : 'Y', - b'\xde' : 'b', - b'\xdf' : 'B', - b'\xe0' : 'a', - b'\xe1' : 'a', - b'\xe2' : 'a', - b'\xe3' : 'a', - b'\xe4' : 'a', - b'\xe5' : 'a', - b'\xe6' : 'ae', - b'\xe7' : 'c', - b'\xe8' : 'e', - b'\xe9' : 'e', - b'\xea' : 'e', - b'\xeb' : 'e', - b'\xec' : 'i', - b'\xed' : 'i', - b'\xee' : 'i', - b'\xef' : 'i', - b'\xf0' : 'o', - b'\xf1' : 'n', - b'\xf2' : 'o', - b'\xf3' : 'o', - b'\xf4' : 'o', - b'\xf5' : 'o', - b'\xf6' : 'o', - b'\xf7' : '/', - b'\xf8' : 'o', - b'\xf9' : 'u', - b'\xfa' : 'u', - b'\xfb' : 'u', - b'\xfc' : 'u', - b'\xfd' : 'y', - b'\xfe' : 'b', - b'\xff' : 'y', - } - - # A map used when removing rogue Windows-1252/ISO-8859-1 - # characters in otherwise UTF-8 documents. - # - # Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in - # Windows-1252. 
- WINDOWS_1252_TO_UTF8 = { - 0x80 : b'\xe2\x82\xac', # € - 0x82 : b'\xe2\x80\x9a', # ‚ - 0x83 : b'\xc6\x92', # ƒ - 0x84 : b'\xe2\x80\x9e', # „ - 0x85 : b'\xe2\x80\xa6', # … - 0x86 : b'\xe2\x80\xa0', # † - 0x87 : b'\xe2\x80\xa1', # ‡ - 0x88 : b'\xcb\x86', # ˆ - 0x89 : b'\xe2\x80\xb0', # ‰ - 0x8a : b'\xc5\xa0', # Š - 0x8b : b'\xe2\x80\xb9', # ‹ - 0x8c : b'\xc5\x92', # Œ - 0x8e : b'\xc5\xbd', # Ž - 0x91 : b'\xe2\x80\x98', # ‘ - 0x92 : b'\xe2\x80\x99', # ’ - 0x93 : b'\xe2\x80\x9c', # “ - 0x94 : b'\xe2\x80\x9d', # ” - 0x95 : b'\xe2\x80\xa2', # • - 0x96 : b'\xe2\x80\x93', # – - 0x97 : b'\xe2\x80\x94', # — - 0x98 : b'\xcb\x9c', # ˜ - 0x99 : b'\xe2\x84\xa2', # ™ - 0x9a : b'\xc5\xa1', # š - 0x9b : b'\xe2\x80\xba', # › - 0x9c : b'\xc5\x93', # œ - 0x9e : b'\xc5\xbe', # ž - 0x9f : b'\xc5\xb8', # Ÿ - 0xa0 : b'\xc2\xa0', #   - 0xa1 : b'\xc2\xa1', # ¡ - 0xa2 : b'\xc2\xa2', # ¢ - 0xa3 : b'\xc2\xa3', # £ - 0xa4 : b'\xc2\xa4', # ¤ - 0xa5 : b'\xc2\xa5', # ¥ - 0xa6 : b'\xc2\xa6', # ¦ - 0xa7 : b'\xc2\xa7', # § - 0xa8 : b'\xc2\xa8', # ¨ - 0xa9 : b'\xc2\xa9', # © - 0xaa : b'\xc2\xaa', # ª - 0xab : b'\xc2\xab', # « - 0xac : b'\xc2\xac', # ¬ - 0xad : b'\xc2\xad', # ­ - 0xae : b'\xc2\xae', # ® - 0xaf : b'\xc2\xaf', # ¯ - 0xb0 : b'\xc2\xb0', # ° - 0xb1 : b'\xc2\xb1', # ± - 0xb2 : b'\xc2\xb2', # ² - 0xb3 : b'\xc2\xb3', # ³ - 0xb4 : b'\xc2\xb4', # ´ - 0xb5 : b'\xc2\xb5', # µ - 0xb6 : b'\xc2\xb6', # ¶ - 0xb7 : b'\xc2\xb7', # · - 0xb8 : b'\xc2\xb8', # ¸ - 0xb9 : b'\xc2\xb9', # ¹ - 0xba : b'\xc2\xba', # º - 0xbb : b'\xc2\xbb', # » - 0xbc : b'\xc2\xbc', # ¼ - 0xbd : b'\xc2\xbd', # ½ - 0xbe : b'\xc2\xbe', # ¾ - 0xbf : b'\xc2\xbf', # ¿ - 0xc0 : b'\xc3\x80', # À - 0xc1 : b'\xc3\x81', # Á - 0xc2 : b'\xc3\x82', #  - 0xc3 : b'\xc3\x83', # à - 0xc4 : b'\xc3\x84', # Ä - 0xc5 : b'\xc3\x85', # Å - 0xc6 : b'\xc3\x86', # Æ - 0xc7 : b'\xc3\x87', # Ç - 0xc8 : b'\xc3\x88', # È - 0xc9 : b'\xc3\x89', # É - 0xca : b'\xc3\x8a', # Ê - 0xcb : b'\xc3\x8b', # Ë - 0xcc : b'\xc3\x8c', # Ì - 0xcd : b'\xc3\x8d', # Í - 0xce : b'\xc3\x8e', # Î - 0xcf : b'\xc3\x8f', # Ï - 0xd0 : b'\xc3\x90', # Ð - 0xd1 : b'\xc3\x91', # Ñ - 0xd2 : b'\xc3\x92', # Ò - 0xd3 : b'\xc3\x93', # Ó - 0xd4 : b'\xc3\x94', # Ô - 0xd5 : b'\xc3\x95', # Õ - 0xd6 : b'\xc3\x96', # Ö - 0xd7 : b'\xc3\x97', # × - 0xd8 : b'\xc3\x98', # Ø - 0xd9 : b'\xc3\x99', # Ù - 0xda : b'\xc3\x9a', # Ú - 0xdb : b'\xc3\x9b', # Û - 0xdc : b'\xc3\x9c', # Ü - 0xdd : b'\xc3\x9d', # Ý - 0xde : b'\xc3\x9e', # Þ - 0xdf : b'\xc3\x9f', # ß - 0xe0 : b'\xc3\xa0', # à - 0xe1 : b'\xa1', # á - 0xe2 : b'\xc3\xa2', # â - 0xe3 : b'\xc3\xa3', # ã - 0xe4 : b'\xc3\xa4', # ä - 0xe5 : b'\xc3\xa5', # å - 0xe6 : b'\xc3\xa6', # æ - 0xe7 : b'\xc3\xa7', # ç - 0xe8 : b'\xc3\xa8', # è - 0xe9 : b'\xc3\xa9', # é - 0xea : b'\xc3\xaa', # ê - 0xeb : b'\xc3\xab', # ë - 0xec : b'\xc3\xac', # ì - 0xed : b'\xc3\xad', # í - 0xee : b'\xc3\xae', # î - 0xef : b'\xc3\xaf', # ï - 0xf0 : b'\xc3\xb0', # ð - 0xf1 : b'\xc3\xb1', # ñ - 0xf2 : b'\xc3\xb2', # ò - 0xf3 : b'\xc3\xb3', # ó - 0xf4 : b'\xc3\xb4', # ô - 0xf5 : b'\xc3\xb5', # õ - 0xf6 : b'\xc3\xb6', # ö - 0xf7 : b'\xc3\xb7', # ÷ - 0xf8 : b'\xc3\xb8', # ø - 0xf9 : b'\xc3\xb9', # ù - 0xfa : b'\xc3\xba', # ú - 0xfb : b'\xc3\xbb', # û - 0xfc : b'\xc3\xbc', # ü - 0xfd : b'\xc3\xbd', # ý - 0xfe : b'\xc3\xbe', # þ - } - - MULTIBYTE_MARKERS_AND_SIZES = [ - (0xc2, 0xdf, 2), # 2-byte characters start with a byte C2-DF - (0xe0, 0xef, 3), # 3-byte characters start with E0-EF - (0xf0, 0xf4, 4), # 4-byte characters start with F0-F4 - ] - - FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0] - 
LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1] - - @classmethod - def detwingle(cls, in_bytes, main_encoding="utf8", - embedded_encoding="windows-1252"): - """Fix characters from one encoding embedded in some other encoding. - - Currently the only situation supported is Windows-1252 (or its - subset ISO-8859-1), embedded in UTF-8. - - The input must be a bytestring. If you've already converted - the document to Unicode, you're too late. - - The output is a bytestring in which `embedded_encoding` - characters have been converted to their `main_encoding` - equivalents. - """ - if embedded_encoding.replace('_', '-').lower() not in ( - 'windows-1252', 'windows_1252'): - raise NotImplementedError( - "Windows-1252 and ISO-8859-1 are the only currently supported " - "embedded encodings.") - - if main_encoding.lower() not in ('utf8', 'utf-8'): - raise NotImplementedError( - "UTF-8 is the only currently supported main encoding.") - - byte_chunks = [] - - chunk_start = 0 - pos = 0 - while pos < len(in_bytes): - byte = in_bytes[pos] - if not isinstance(byte, int): - # Python 2.x - byte = ord(byte) - if (byte >= cls.FIRST_MULTIBYTE_MARKER - and byte <= cls.LAST_MULTIBYTE_MARKER): - # This is the start of a UTF-8 multibyte character. Skip - # to the end. - for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES: - if byte >= start and byte <= end: - pos += size - break - elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8: - # We found a Windows-1252 character! - # Save the string up to this point as a chunk. - byte_chunks.append(in_bytes[chunk_start:pos]) - - # Now translate the Windows-1252 character into UTF-8 - # and add it as another, one-byte chunk. - byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte]) - pos += 1 - chunk_start = pos - else: - # Go on to the next character. - pos += 1 - if chunk_start == 0: - # The string is unchanged. - return in_bytes - else: - # Store the final chunk. - byte_chunks.append(in_bytes[chunk_start:]) - return b''.join(byte_chunks) - diff --git a/lib/bs4/diagnose.py b/lib/bs4/diagnose.py deleted file mode 100644 index a1ae23dc..00000000 --- a/lib/bs4/diagnose.py +++ /dev/null @@ -1,224 +0,0 @@ -"""Diagnostic functions, mainly for use when doing tech support.""" - -# Use of this source code is governed by the MIT license. -__license__ = "MIT" - -import cProfile -from io import StringIO -from html.parser import HTMLParser -import bs4 -from bs4 import BeautifulSoup, __version__ -from bs4.builder import builder_registry - -import os -import pstats -import random -import tempfile -import time -import traceback -import sys -import cProfile - -def diagnose(data): - """Diagnostic suite for isolating common problems.""" - print("Diagnostic running on Beautiful Soup %s" % __version__) - print("Python version %s" % sys.version) - - basic_parsers = ["html.parser", "html5lib", "lxml"] - for name in basic_parsers: - for builder in builder_registry.builders: - if name in builder.features: - break - else: - basic_parsers.remove(name) - print(( - "I noticed that %s is not installed. Installing it may help." 
% - name)) - - if 'lxml' in basic_parsers: - basic_parsers.append("lxml-xml") - try: - from lxml import etree - print("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))) - except ImportError as e: - print ( - "lxml is not installed or couldn't be imported.") - - - if 'html5lib' in basic_parsers: - try: - import html5lib - print("Found html5lib version %s" % html5lib.__version__) - except ImportError as e: - print ( - "html5lib is not installed or couldn't be imported.") - - if hasattr(data, 'read'): - data = data.read() - elif data.startswith("http:") or data.startswith("https:"): - print('"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data) - print("You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup.") - return - else: - try: - if os.path.exists(data): - print('"%s" looks like a filename. Reading data from the file.' % data) - with open(data) as fp: - data = fp.read() - except ValueError: - # This can happen on some platforms when the 'filename' is - # too long. Assume it's data and not a filename. - pass - print() - - for parser in basic_parsers: - print("Trying to parse your markup with %s" % parser) - success = False - try: - soup = BeautifulSoup(data, features=parser) - success = True - except Exception as e: - print("%s could not parse the markup." % parser) - traceback.print_exc() - if success: - print("Here's what %s did with the markup:" % parser) - print(soup.prettify()) - - print("-" * 80) - -def lxml_trace(data, html=True, **kwargs): - """Print out the lxml events that occur during parsing. - - This lets you see how lxml parses a document when no Beautiful - Soup code is running. - """ - from lxml import etree - for event, element in etree.iterparse(StringIO(data), html=html, **kwargs): - print(("%s, %4s, %s" % (event, element.tag, element.text))) - -class AnnouncingParser(HTMLParser): - """Announces HTMLParser parse events, without doing anything else.""" - - def _p(self, s): - print(s) - - def handle_starttag(self, name, attrs): - self._p("%s START" % name) - - def handle_endtag(self, name): - self._p("%s END" % name) - - def handle_data(self, data): - self._p("%s DATA" % data) - - def handle_charref(self, name): - self._p("%s CHARREF" % name) - - def handle_entityref(self, name): - self._p("%s ENTITYREF" % name) - - def handle_comment(self, data): - self._p("%s COMMENT" % data) - - def handle_decl(self, data): - self._p("%s DECL" % data) - - def unknown_decl(self, data): - self._p("%s UNKNOWN-DECL" % data) - - def handle_pi(self, data): - self._p("%s PI" % data) - -def htmlparser_trace(data): - """Print out the HTMLParser events that occur during parsing. - - This lets you see how HTMLParser parses a document when no - Beautiful Soup code is running. - """ - parser = AnnouncingParser() - parser.feed(data) - -_vowels = "aeiou" -_consonants = "bcdfghjklmnpqrstvwxyz" - -def rword(length=5): - "Generate a random word-like string." - s = '' - for i in range(length): - if i % 2 == 0: - t = _consonants - else: - t = _vowels - s += random.choice(t) - return s - -def rsentence(length=4): - "Generate a random sentence-like string." - return " ".join(rword(random.randint(4,9)) for i in range(length)) - -def rdoc(num_elements=1000): - """Randomly generate an invalid HTML document.""" - tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table'] - elements = [] - for i in range(num_elements): - choice = random.randint(0,3) - if choice == 0: - # New tag. 
- tag_name = random.choice(tag_names) - elements.append("<%s>" % tag_name) - elif choice == 1: - elements.append(rsentence(random.randint(1,4))) - elif choice == 2: - # Close a tag. - tag_name = random.choice(tag_names) - elements.append("" % tag_name) - return "" + "\n".join(elements) + "" - -def benchmark_parsers(num_elements=100000): - """Very basic head-to-head performance benchmark.""" - print("Comparative parser benchmark on Beautiful Soup %s" % __version__) - data = rdoc(num_elements) - print("Generated a large invalid HTML document (%d bytes)." % len(data)) - - for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: - success = False - try: - a = time.time() - soup = BeautifulSoup(data, parser) - b = time.time() - success = True - except Exception as e: - print("%s could not parse the markup." % parser) - traceback.print_exc() - if success: - print("BS4+%s parsed the markup in %.2fs." % (parser, b-a)) - - from lxml import etree - a = time.time() - etree.HTML(data) - b = time.time() - print("Raw lxml parsed the markup in %.2fs." % (b-a)) - - import html5lib - parser = html5lib.HTMLParser() - a = time.time() - parser.parse(data) - b = time.time() - print("Raw html5lib parsed the markup in %.2fs." % (b-a)) - -def profile(num_elements=100000, parser="lxml"): - - filehandle = tempfile.NamedTemporaryFile() - filename = filehandle.name - - data = rdoc(num_elements) - vars = dict(bs4=bs4, data=data, parser=parser) - cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename) - - stats = pstats.Stats(filename) - # stats.strip_dirs() - stats.sort_stats("cumulative") - stats.print_stats('_html5lib|bs4', 50) - -if __name__ == '__main__': - diagnose(sys.stdin.read()) diff --git a/lib/bs4/element.py b/lib/bs4/element.py deleted file mode 100644 index 69399e5c..00000000 --- a/lib/bs4/element.py +++ /dev/null @@ -1,1602 +0,0 @@ -# Use of this source code is governed by the MIT license. -__license__ = "MIT" - -try: - from collections.abc import Callable # Python 3.6 -except ImportError as e: - from collections import Callable -import re -import sys -import warnings -try: - import soupsieve -except ImportError as e: - soupsieve = None - warnings.warn( - 'The soupsieve package is not installed. CSS selectors cannot be used.' - ) - -from bs4.formatter import ( - Formatter, - HTMLFormatter, - XMLFormatter, -) - -DEFAULT_OUTPUT_ENCODING = "utf-8" -PY3K = (sys.version_info[0] > 2) - -nonwhitespace_re = re.compile(r"\S+") - -# NOTE: This isn't used as of 4.7.0. I'm leaving it for a little bit on -# the off chance someone imported it for their own use. -whitespace_re = re.compile(r"\s+") - -def _alias(attr): - """Alias one attribute name to another for backward compatibility""" - @property - def alias(self): - return getattr(self, attr) - - @alias.setter - def alias(self): - return setattr(self, attr) - return alias - - -class NamespacedAttribute(str): - - def __new__(cls, prefix, name=None, namespace=None): - if not name: - # This is the default namespace. Its name "has no value" - # per https://www.w3.org/TR/xml-names/#defaulting - name = None - - if name is None: - obj = str.__new__(cls, prefix) - elif prefix is None: - # Not really namespaced. 
- obj = str.__new__(cls, name) - else: - obj = str.__new__(cls, prefix + ":" + name) - obj.prefix = prefix - obj.name = name - obj.namespace = namespace - return obj - -class AttributeValueWithCharsetSubstitution(str): - """A stand-in object for a character encoding specified in HTML.""" - -class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution): - """A generic stand-in for the value of a meta tag's 'charset' attribute. - - When Beautiful Soup parses the markup '', the - value of the 'charset' attribute will be one of these objects. - """ - - def __new__(cls, original_value): - obj = str.__new__(cls, original_value) - obj.original_value = original_value - return obj - - def encode(self, encoding): - return encoding - - -class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution): - """A generic stand-in for the value of a meta tag's 'content' attribute. - - When Beautiful Soup parses the markup: - - - The value of the 'content' attribute will be one of these objects. - """ - - CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M) - - def __new__(cls, original_value): - match = cls.CHARSET_RE.search(original_value) - if match is None: - # No substitution necessary. - return str.__new__(str, original_value) - - obj = str.__new__(cls, original_value) - obj.original_value = original_value - return obj - - def encode(self, encoding): - def rewrite(match): - return match.group(1) + encoding - return self.CHARSET_RE.sub(rewrite, self.original_value) - - -class PageElement(object): - """Contains the navigational information for some part of the page - (either a tag or a piece of text)""" - - def setup(self, parent=None, previous_element=None, next_element=None, - previous_sibling=None, next_sibling=None): - """Sets up the initial relations between this element and - other elements.""" - self.parent = parent - - self.previous_element = previous_element - if previous_element is not None: - self.previous_element.next_element = self - - self.next_element = next_element - if self.next_element is not None: - self.next_element.previous_element = self - - self.next_sibling = next_sibling - if self.next_sibling is not None: - self.next_sibling.previous_sibling = self - - if (previous_sibling is None - and self.parent is not None and self.parent.contents): - previous_sibling = self.parent.contents[-1] - - self.previous_sibling = previous_sibling - if previous_sibling is not None: - self.previous_sibling.next_sibling = self - - def format_string(self, s, formatter): - """Format the given string using the given formatter.""" - if formatter is None: - return s - if not isinstance(formatter, Formatter): - formatter = self.formatter_for_name(formatter) - output = formatter.substitute(s) - return output - - def formatter_for_name(self, formatter): - """Look up or create a Formatter for the given identifier, - if necessary. - - :param formatter: Can be a Formatter object (used as-is), a - function (used as the entity substitution hook for an - XMLFormatter or HTMLFormatter), or a string (used to look up - an XMLFormatter or HTMLFormatter in the appropriate registry. - """ - if isinstance(formatter, Formatter): - return formatter - if self._is_xml: - c = XMLFormatter - else: - c = HTMLFormatter - if callable(formatter): - return c(entity_substitution=formatter) - return c.REGISTRY[formatter] - - @property - def _is_xml(self): - """Is this element part of an XML tree or an HTML tree? 
- - This is used in formatter_for_name, when deciding whether an - XMLFormatter or HTMLFormatter is more appropriate. It can be - inefficient, but it should be called very rarely. - """ - if self.known_xml is not None: - # Most of the time we will have determined this when the - # document is parsed. - return self.known_xml - - # Otherwise, it's likely that this element was created by - # direct invocation of the constructor from within the user's - # Python code. - if self.parent is None: - # This is the top-level object. It should have .known_xml set - # from tree creation. If not, take a guess--BS is usually - # used on HTML markup. - return getattr(self, 'is_xml', False) - return self.parent._is_xml - - nextSibling = _alias("next_sibling") # BS3 - previousSibling = _alias("previous_sibling") # BS3 - - def replace_with(self, replace_with): - if self.parent is None: - raise ValueError( - "Cannot replace one element with another when the " - "element to be replaced is not part of a tree.") - if replace_with is self: - return - if replace_with is self.parent: - raise ValueError("Cannot replace a Tag with its parent.") - old_parent = self.parent - my_index = self.parent.index(self) - self.extract() - old_parent.insert(my_index, replace_with) - return self - replaceWith = replace_with # BS3 - - def unwrap(self): - my_parent = self.parent - if self.parent is None: - raise ValueError( - "Cannot replace an element with its contents when that" - "element is not part of a tree.") - my_index = self.parent.index(self) - self.extract() - for child in reversed(self.contents[:]): - my_parent.insert(my_index, child) - return self - replace_with_children = unwrap - replaceWithChildren = unwrap # BS3 - - def wrap(self, wrap_inside): - me = self.replace_with(wrap_inside) - wrap_inside.append(me) - return wrap_inside - - def extract(self): - """Destructively rips this element out of the tree.""" - if self.parent is not None: - del self.parent.contents[self.parent.index(self)] - - #Find the two elements that would be next to each other if - #this element (and any children) hadn't been parsed. Connect - #the two. - last_child = self._last_descendant() - next_element = last_child.next_element - - if (self.previous_element is not None and - self.previous_element is not next_element): - self.previous_element.next_element = next_element - if next_element is not None and next_element is not self.previous_element: - next_element.previous_element = self.previous_element - self.previous_element = None - last_child.next_element = None - - self.parent = None - if (self.previous_sibling is not None - and self.previous_sibling is not self.next_sibling): - self.previous_sibling.next_sibling = self.next_sibling - if (self.next_sibling is not None - and self.next_sibling is not self.previous_sibling): - self.next_sibling.previous_sibling = self.previous_sibling - self.previous_sibling = self.next_sibling = None - return self - - def _last_descendant(self, is_initialized=True, accept_self=True): - "Finds the last element beneath this object to be parsed." - if is_initialized and self.next_sibling is not None: - last_child = self.next_sibling.previous_element - else: - last_child = self - while isinstance(last_child, Tag) and last_child.contents: - last_child = last_child.contents[-1] - if not accept_self and last_child is self: - last_child = None - return last_child - # BS3: Not part of the API! 
- _lastRecursiveChild = _last_descendant - - def insert(self, position, new_child): - if new_child is None: - raise ValueError("Cannot insert None into a tag.") - if new_child is self: - raise ValueError("Cannot insert a tag into itself.") - if (isinstance(new_child, str) - and not isinstance(new_child, NavigableString)): - new_child = NavigableString(new_child) - - from bs4 import BeautifulSoup - if isinstance(new_child, BeautifulSoup): - # We don't want to end up with a situation where one BeautifulSoup - # object contains another. Insert the children one at a time. - for subchild in list(new_child.contents): - self.insert(position, subchild) - position += 1 - return - position = min(position, len(self.contents)) - if hasattr(new_child, 'parent') and new_child.parent is not None: - # We're 'inserting' an element that's already one - # of this object's children. - if new_child.parent is self: - current_index = self.index(new_child) - if current_index < position: - # We're moving this element further down the list - # of this object's children. That means that when - # we extract this element, our target index will - # jump down one. - position -= 1 - new_child.extract() - - new_child.parent = self - previous_child = None - if position == 0: - new_child.previous_sibling = None - new_child.previous_element = self - else: - previous_child = self.contents[position - 1] - new_child.previous_sibling = previous_child - new_child.previous_sibling.next_sibling = new_child - new_child.previous_element = previous_child._last_descendant(False) - if new_child.previous_element is not None: - new_child.previous_element.next_element = new_child - - new_childs_last_element = new_child._last_descendant(False) - - if position >= len(self.contents): - new_child.next_sibling = None - - parent = self - parents_next_sibling = None - while parents_next_sibling is None and parent is not None: - parents_next_sibling = parent.next_sibling - parent = parent.parent - if parents_next_sibling is not None: - # We found the element that comes next in the document. - break - if parents_next_sibling is not None: - new_childs_last_element.next_element = parents_next_sibling - else: - # The last element of this tag is the last element in - # the document. - new_childs_last_element.next_element = None - else: - next_child = self.contents[position] - new_child.next_sibling = next_child - if new_child.next_sibling is not None: - new_child.next_sibling.previous_sibling = new_child - new_childs_last_element.next_element = next_child - - if new_childs_last_element.next_element is not None: - new_childs_last_element.next_element.previous_element = new_childs_last_element - self.contents.insert(position, new_child) - - def append(self, tag): - """Appends the given tag to the contents of this tag.""" - self.insert(len(self.contents), tag) - - def extend(self, tags): - """Appends the given tags to the contents of this tag.""" - for tag in tags: - self.append(tag) - - def insert_before(self, *args): - """Makes the given element(s) the immediate predecessor of this one. - - The elements will have the same parent, and the given elements - will be immediately before this one. - """ - parent = self.parent - if parent is None: - raise ValueError( - "Element has no parent, so 'before' has no meaning.") - if any(x is self for x in args): - raise ValueError("Can't insert an element before itself.") - for predecessor in args: - # Extract first so that the index won't be screwed up if they - # are siblings. 
- if isinstance(predecessor, PageElement): - predecessor.extract() - index = parent.index(self) - parent.insert(index, predecessor) - - def insert_after(self, *args): - """Makes the given element(s) the immediate successor of this one. - - The elements will have the same parent, and the given elements - will be immediately after this one. - """ - # Do all error checking before modifying the tree. - parent = self.parent - if parent is None: - raise ValueError( - "Element has no parent, so 'after' has no meaning.") - if any(x is self for x in args): - raise ValueError("Can't insert an element after itself.") - - offset = 0 - for successor in args: - # Extract first so that the index won't be screwed up if they - # are siblings. - if isinstance(successor, PageElement): - successor.extract() - index = parent.index(self) - parent.insert(index+1+offset, successor) - offset += 1 - - def find_next(self, name=None, attrs={}, text=None, **kwargs): - """Returns the first item that matches the given criteria and - appears after this Tag in the document.""" - return self._find_one(self.find_all_next, name, attrs, text, **kwargs) - findNext = find_next # BS3 - - def find_all_next(self, name=None, attrs={}, text=None, limit=None, - **kwargs): - """Returns all items that match the given criteria and appear - after this Tag in the document.""" - return self._find_all(name, attrs, text, limit, self.next_elements, - **kwargs) - findAllNext = find_all_next # BS3 - - def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs): - """Returns the closest sibling to this Tag that matches the - given criteria and appears after this Tag in the document.""" - return self._find_one(self.find_next_siblings, name, attrs, text, - **kwargs) - findNextSibling = find_next_sibling # BS3 - - def find_next_siblings(self, name=None, attrs={}, text=None, limit=None, - **kwargs): - """Returns the siblings of this Tag that match the given - criteria and appear after this Tag in the document.""" - return self._find_all(name, attrs, text, limit, - self.next_siblings, **kwargs) - findNextSiblings = find_next_siblings # BS3 - fetchNextSiblings = find_next_siblings # BS2 - - def find_previous(self, name=None, attrs={}, text=None, **kwargs): - """Returns the first item that matches the given criteria and - appears before this Tag in the document.""" - return self._find_one( - self.find_all_previous, name, attrs, text, **kwargs) - findPrevious = find_previous # BS3 - - def find_all_previous(self, name=None, attrs={}, text=None, limit=None, - **kwargs): - """Returns all items that match the given criteria and appear - before this Tag in the document.""" - return self._find_all(name, attrs, text, limit, self.previous_elements, - **kwargs) - findAllPrevious = find_all_previous # BS3 - fetchPrevious = find_all_previous # BS2 - - def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs): - """Returns the closest sibling to this Tag that matches the - given criteria and appears before this Tag in the document.""" - return self._find_one(self.find_previous_siblings, name, attrs, text, - **kwargs) - findPreviousSibling = find_previous_sibling # BS3 - - def find_previous_siblings(self, name=None, attrs={}, text=None, - limit=None, **kwargs): - """Returns the siblings of this Tag that match the given - criteria and appear before this Tag in the document.""" - return self._find_all(name, attrs, text, limit, - self.previous_siblings, **kwargs) - findPreviousSiblings = find_previous_siblings # BS3 - fetchPreviousSiblings = 
find_previous_siblings # BS2 - - def find_parent(self, name=None, attrs={}, **kwargs): - """Returns the closest parent of this Tag that matches the given - criteria.""" - # NOTE: We can't use _find_one because findParents takes a different - # set of arguments. - r = None - l = self.find_parents(name, attrs, 1, **kwargs) - if l: - r = l[0] - return r - findParent = find_parent # BS3 - - def find_parents(self, name=None, attrs={}, limit=None, **kwargs): - """Returns the parents of this Tag that match the given - criteria.""" - - return self._find_all(name, attrs, None, limit, self.parents, - **kwargs) - findParents = find_parents # BS3 - fetchParents = find_parents # BS2 - - @property - def next(self): - return self.next_element - - @property - def previous(self): - return self.previous_element - - #These methods do the real heavy lifting. - - def _find_one(self, method, name, attrs, text, **kwargs): - r = None - l = method(name, attrs, text, 1, **kwargs) - if l: - r = l[0] - return r - - def _find_all(self, name, attrs, text, limit, generator, **kwargs): - "Iterates over a generator looking for things that match." - - if text is None and 'string' in kwargs: - text = kwargs['string'] - del kwargs['string'] - - if isinstance(name, SoupStrainer): - strainer = name - else: - strainer = SoupStrainer(name, attrs, text, **kwargs) - - if text is None and not limit and not attrs and not kwargs: - if name is True or name is None: - # Optimization to find all tags. - result = (element for element in generator - if isinstance(element, Tag)) - return ResultSet(strainer, result) - elif isinstance(name, str): - # Optimization to find all tags with a given name. - if name.count(':') == 1: - # This is a name with a prefix. If this is a namespace-aware document, - # we need to match the local name against tag.name. If not, - # we need to match the fully-qualified name against tag.name. - prefix, local_name = name.split(':', 1) - else: - prefix = None - local_name = name - result = (element for element in generator - if isinstance(element, Tag) - and ( - element.name == name - ) or ( - element.name == local_name - and (prefix is None or element.prefix == prefix) - ) - ) - return ResultSet(strainer, result) - results = ResultSet(strainer) - while True: - try: - i = next(generator) - except StopIteration: - break - if i: - found = strainer.search(i) - if found: - results.append(found) - if limit and len(results) >= limit: - break - return results - - #These generators can be used to navigate starting from both - #NavigableStrings and Tags. - @property - def next_elements(self): - i = self.next_element - while i is not None: - yield i - i = i.next_element - - @property - def next_siblings(self): - i = self.next_sibling - while i is not None: - yield i - i = i.next_sibling - - @property - def previous_elements(self): - i = self.previous_element - while i is not None: - yield i - i = i.previous_element - - @property - def previous_siblings(self): - i = self.previous_sibling - while i is not None: - yield i - i = i.previous_sibling - - @property - def parents(self): - i = self.parent - while i is not None: - yield i - i = i.parent - - # Old non-property versions of the generators, for backwards - # compatibility with BS3. 
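[Editor's note: the `_find_all` machinery above wraps keyword filters in a `SoupStrainer` and feeds it one of these generators; the directional `find_*` variants differ only in which generator they iterate. A brief usage sketch with illustrative markup:]

```python
from bs4 import BeautifulSoup

soup = BeautifulSoup(
    '<div><p class="a">one</p><p class="b">two</p><p class="a">three</p></div>',
    "html.parser")

# Keyword filters are wrapped in a SoupStrainer internally.
print([p.string for p in soup.find_all("p", class_="a")])  # -> ['one', 'three']

# Directional variants iterate the sibling generators defined above.
first = soup.find("p")
print([p.string for p in first.find_next_siblings("p")])   # -> ['two', 'three']
```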
- def nextGenerator(self): - return self.next_elements - - def nextSiblingGenerator(self): - return self.next_siblings - - def previousGenerator(self): - return self.previous_elements - - def previousSiblingGenerator(self): - return self.previous_siblings - - def parentGenerator(self): - return self.parents - - -class NavigableString(str, PageElement): - - PREFIX = '' - SUFFIX = '' - - # We can't tell just by looking at a string whether it's contained - # in an XML document or an HTML document. - - known_xml = None - - def __new__(cls, value): - """Create a new NavigableString. - - When unpickling a NavigableString, this method is called with - the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be - passed in to the superclass's __new__ or the superclass won't know - how to handle non-ASCII characters. - """ - if isinstance(value, str): - u = str.__new__(cls, value) - else: - u = str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) - u.setup() - return u - - def __copy__(self): - """A copy of a NavigableString has the same contents and class - as the original, but it is not connected to the parse tree. - """ - return type(self)(self) - - def __getnewargs__(self): - return (str(self),) - - def __getattr__(self, attr): - """text.string gives you text. This is for backwards - compatibility for Navigable*String, but for CData* it lets you - get the string without the CData wrapper.""" - if attr == 'string': - return self - else: - raise AttributeError( - "'%s' object has no attribute '%s'" % ( - self.__class__.__name__, attr)) - - def output_ready(self, formatter="minimal"): - """Run the string through the provided formatter.""" - output = self.format_string(self, formatter) - return self.PREFIX + output + self.SUFFIX - - @property - def name(self): - return None - - @name.setter - def name(self, name): - raise AttributeError("A NavigableString cannot be given a name.") - -class PreformattedString(NavigableString): - """A NavigableString not subject to the normal formatting rules. - - The string will be passed into the formatter (to trigger side effects), - but the return value will be ignored. - """ - - def output_ready(self, formatter=None): - """CData strings are passed into the formatter, purely - for any side effects. The return value is ignored. 
- """ - if formatter is not None: - ignore = self.format_string(self, formatter) - return self.PREFIX + self + self.SUFFIX - -class CData(PreformattedString): - - PREFIX = '' - -class ProcessingInstruction(PreformattedString): - """A SGML processing instruction.""" - - PREFIX = '' - -class XMLProcessingInstruction(ProcessingInstruction): - """An XML processing instruction.""" - PREFIX = '' - -class Comment(PreformattedString): - - PREFIX = '' - - -class Declaration(PreformattedString): - PREFIX = '' - - -class Doctype(PreformattedString): - - @classmethod - def for_name_and_ids(cls, name, pub_id, system_id): - value = name or '' - if pub_id is not None: - value += ' PUBLIC "%s"' % pub_id - if system_id is not None: - value += ' "%s"' % system_id - elif system_id is not None: - value += ' SYSTEM "%s"' % system_id - - return Doctype(value) - - PREFIX = '\n' - - -class Tag(PageElement): - - """Represents a found HTML tag with its attributes and contents.""" - - def __init__(self, parser=None, builder=None, name=None, namespace=None, - prefix=None, attrs=None, parent=None, previous=None, - is_xml=None, sourceline=None, sourcepos=None, - can_be_empty_element=None, cdata_list_attributes=None, - preserve_whitespace_tags=None - ): - "Basic constructor." - - if parser is None: - self.parser_class = None - else: - # We don't actually store the parser object: that lets extracted - # chunks be garbage-collected. - self.parser_class = parser.__class__ - if name is None: - raise ValueError("No value provided for new tag's name.") - self.name = name - self.namespace = namespace - self.prefix = prefix - if ((not builder or builder.store_line_numbers) - and (sourceline is not None or sourcepos is not None)): - self.sourceline = sourceline - self.sourcepos = sourcepos - if attrs is None: - attrs = {} - elif attrs: - if builder is not None and builder.cdata_list_attributes: - attrs = builder._replace_cdata_list_attribute_values( - self.name, attrs) - else: - attrs = dict(attrs) - else: - attrs = dict(attrs) - - # If possible, determine ahead of time whether this tag is an - # XML tag. - if builder: - self.known_xml = builder.is_xml - else: - self.known_xml = is_xml - self.attrs = attrs - self.contents = [] - self.setup(parent, previous) - self.hidden = False - - if builder is None: - # In the absence of a TreeBuilder, use whatever values were - # passed in here. They're probably None, unless this is a copy of some - # other tag. - self.can_be_empty_element = can_be_empty_element - self.cdata_list_attributes = cdata_list_attributes - self.preserve_whitespace_tags = preserve_whitespace_tags - else: - # Set up any substitutions for this tag, such as the charset in a META tag. - builder.set_up_substitutions(self) - - # Ask the TreeBuilder whether this tag might be an empty-element tag. - self.can_be_empty_element = builder.can_be_empty_element(name) - - # Keep track of the list of attributes of this tag that - # might need to be treated as a list. - # - # For performance reasons, we store the whole data structure - # rather than asking the question of every tag. Asking would - # require building a new data structure every time, and - # (unlike can_be_empty_element), we almost never need - # to check this. - self.cdata_list_attributes = builder.cdata_list_attributes - - # Keep track of the names that might cause this tag to be treated as a - # whitespace-preserved tag. 
- self.preserve_whitespace_tags = builder.preserve_whitespace_tags - - parserClass = _alias("parser_class") # BS3 - - def __copy__(self): - """A copy of a Tag is a new Tag, unconnected to the parse tree. - Its contents are a copy of the old Tag's contents. - """ - clone = type(self)( - None, self.builder, self.name, self.namespace, - self.prefix, self.attrs, is_xml=self._is_xml, - sourceline=self.sourceline, sourcepos=self.sourcepos, - can_be_empty_element=self.can_be_empty_element, - cdata_list_attributes=self.cdata_list_attributes, - preserve_whitespace_tags=self.preserve_whitespace_tags - ) - for attr in ('can_be_empty_element', 'hidden'): - setattr(clone, attr, getattr(self, attr)) - for child in self.contents: - clone.append(child.__copy__()) - return clone - - @property - def is_empty_element(self): - """Is this tag an empty-element tag? (aka a self-closing tag) - - A tag that has contents is never an empty-element tag. - - A tag that has no contents may or may not be an empty-element - tag. It depends on the builder used to create the tag. If the - builder has a designated list of empty-element tags, then only - a tag whose name shows up in that list is considered an - empty-element tag. - - If the builder has no designated list of empty-element tags, - then any tag with no contents is an empty-element tag. - """ - return len(self.contents) == 0 and self.can_be_empty_element - isSelfClosing = is_empty_element # BS3 - - @property - def string(self): - """Convenience property to get the single string within this tag. - - :Return: If this tag has a single string child, return value - is that string. If this tag has no children, or more than one - child, return value is None. If this tag has one child tag, - return value is the 'string' attribute of the child tag, - recursively. - """ - if len(self.contents) != 1: - return None - child = self.contents[0] - if isinstance(child, NavigableString): - return child - return child.string - - @string.setter - def string(self, string): - self.clear() - self.append(string.__class__(string)) - - def _all_strings(self, strip=False, types=(NavigableString, CData)): - """Yield all strings of certain classes, possibly stripping them. - - By default, yields only NavigableString and CData objects. So - no comments, processing instructions, etc. - """ - for descendant in self.descendants: - if ( - (types is None and not isinstance(descendant, NavigableString)) - or - (types is not None and type(descendant) not in types)): - continue - if strip: - descendant = descendant.strip() - if len(descendant) == 0: - continue - yield descendant - - strings = property(_all_strings) - - @property - def stripped_strings(self): - for string in self._all_strings(True): - yield string - - def get_text(self, separator="", strip=False, - types=(NavigableString, CData)): - """ - Get all child strings, concatenated using the given separator. - """ - return separator.join([s for s in self._all_strings( - strip, types=types)]) - getText = get_text - text = property(get_text) - - def decompose(self): - """Recursively destroys the contents of this tree.""" - self.extract() - i = self - while i is not None: - next = i.next_element - i.__dict__.clear() - i.contents = [] - i = next - - def clear(self, decompose=False): - """ - Extract all children. If decompose is True, decompose instead. 
- """ - if decompose: - for element in self.contents[:]: - if isinstance(element, Tag): - element.decompose() - else: - element.extract() - else: - for element in self.contents[:]: - element.extract() - - def smooth(self): - """Smooth out this element's children by consolidating consecutive strings. - - This makes pretty-printed output look more natural following a - lot of operations that modified the tree. - """ - # Mark the first position of every pair of children that need - # to be consolidated. Do this rather than making a copy of - # self.contents, since in most cases very few strings will be - # affected. - marked = [] - for i, a in enumerate(self.contents): - if isinstance(a, Tag): - # Recursively smooth children. - a.smooth() - if i == len(self.contents)-1: - # This is the last item in .contents, and it's not a - # tag. There's no chance it needs any work. - continue - b = self.contents[i+1] - if (isinstance(a, NavigableString) - and isinstance(b, NavigableString) - and not isinstance(a, PreformattedString) - and not isinstance(b, PreformattedString) - ): - marked.append(i) - - # Go over the marked positions in reverse order, so that - # removing items from .contents won't affect the remaining - # positions. - for i in reversed(marked): - a = self.contents[i] - b = self.contents[i+1] - b.extract() - n = NavigableString(a+b) - a.replace_with(n) - - def index(self, element): - """ - Find the index of a child by identity, not value. Avoids issues with - tag.contents.index(element) getting the index of equal elements. - """ - for i, child in enumerate(self.contents): - if child is element: - return i - raise ValueError("Tag.index: element not in tag") - - def get(self, key, default=None): - """Returns the value of the 'key' attribute for the tag, or - the value given for 'default' if it doesn't have that - attribute.""" - return self.attrs.get(key, default) - - def get_attribute_list(self, key, default=None): - """The same as get(), but always returns a list.""" - value = self.get(key, default) - if not isinstance(value, list): - value = [value] - return value - - def has_attr(self, key): - return key in self.attrs - - def __hash__(self): - return str(self).__hash__() - - def __getitem__(self, key): - """tag[key] returns the value of the 'key' attribute for the tag, - and throws an exception if it's not there.""" - return self.attrs[key] - - def __iter__(self): - "Iterating over a tag iterates over its contents." - return iter(self.contents) - - def __len__(self): - "The length of a tag is the length of its list of contents." - return len(self.contents) - - def __contains__(self, x): - return x in self.contents - - def __bool__(self): - "A tag is non-None even if it has no contents." - return True - - def __setitem__(self, key, value): - """Setting tag[key] sets the value of the 'key' attribute for the - tag.""" - self.attrs[key] = value - - def __delitem__(self, key): - "Deleting tag[key] deletes all 'key' attributes for the tag." - self.attrs.pop(key, None) - - def __call__(self, *args, **kwargs): - """Calling a tag like a function is the same as calling its - find_all() method. Eg. tag('a') returns a list of all the A tags - found within this tag.""" - return self.find_all(*args, **kwargs) - - def __getattr__(self, tag): - #print "Getattr %s.%s" % (self.__class__, tag) - if len(tag) > 3 and tag.endswith('Tag'): - # BS3: soup.aTag -> "soup.find("a") - tag_name = tag[:-3] - warnings.warn( - '.%(name)sTag is deprecated, use .find("%(name)s") instead. 
If you really were looking for a tag called %(name)sTag, use .find("%(name)sTag")' % dict( - name=tag_name - ) - ) - return self.find(tag_name) - # We special case contents to avoid recursion. - elif not tag.startswith("__") and not tag == "contents": - return self.find(tag) - raise AttributeError( - "'%s' object has no attribute '%s'" % (self.__class__, tag)) - - def __eq__(self, other): - """Returns true iff this tag has the same name, the same attributes, - and the same contents (recursively) as the given tag.""" - if self is other: - return True - if (not hasattr(other, 'name') or - not hasattr(other, 'attrs') or - not hasattr(other, 'contents') or - self.name != other.name or - self.attrs != other.attrs or - len(self) != len(other)): - return False - for i, my_child in enumerate(self.contents): - if my_child != other.contents[i]: - return False - return True - - def __ne__(self, other): - """Returns true iff this tag is not identical to the other tag, - as defined in __eq__.""" - return not self == other - - def __repr__(self, encoding="unicode-escape"): - """Renders this tag as a string.""" - if PY3K: - # "The return value must be a string object", i.e. Unicode - return self.decode() - else: - # "The return value must be a string object", i.e. a bytestring. - # By convention, the return value of __repr__ should also be - # an ASCII string. - return self.encode(encoding) - - def __unicode__(self): - return self.decode() - - def __str__(self): - if PY3K: - return self.decode() - else: - return self.encode() - - if PY3K: - __str__ = __repr__ = __unicode__ - - def encode(self, encoding=DEFAULT_OUTPUT_ENCODING, - indent_level=None, formatter="minimal", - errors="xmlcharrefreplace"): - # Turn the data structure into Unicode, then encode the - # Unicode. - u = self.decode(indent_level, encoding, formatter) - return u.encode(encoding, errors) - - def decode(self, indent_level=None, - eventual_encoding=DEFAULT_OUTPUT_ENCODING, - formatter="minimal"): - """Returns a Unicode representation of this tag and its contents. - - :param eventual_encoding: The tag is destined to be - encoded into this encoding. This method is _not_ - responsible for performing that encoding. This information - is passed in so that it can be substituted in if the - document contains a tag that mentions the document's - encoding. - """ - - # First off, turn a non-Formatter `formatter` into a Formatter - # object. This will stop the lookup from happening over and - # over again. 
-        if not isinstance(formatter, Formatter):
-            formatter = self.formatter_for_name(formatter)
-        attributes = formatter.attributes(self)
-        attrs = []
-        for key, val in attributes:
-            if val is None:
-                decoded = key
-            else:
-                if isinstance(val, list) or isinstance(val, tuple):
-                    val = ' '.join(val)
-                elif not isinstance(val, str):
-                    val = str(val)
-                elif (
-                        isinstance(val, AttributeValueWithCharsetSubstitution)
-                        and eventual_encoding is not None
-                ):
-                    val = val.encode(eventual_encoding)
-
-                text = formatter.attribute_value(val)
-                decoded = (
-                    str(key) + '='
-                    + formatter.quoted_attribute_value(text))
-            attrs.append(decoded)
-        close = ''
-        closeTag = ''
-
-        prefix = ''
-        if self.prefix:
-            prefix = self.prefix + ":"
-
-        if self.is_empty_element:
-            close = formatter.void_element_close_prefix or ''
-        else:
-            closeTag = '</%s%s>' % (prefix, self.name)
-
-        pretty_print = self._should_pretty_print(indent_level)
-        space = ''
-        indent_space = ''
-        if indent_level is not None:
-            indent_space = (' ' * (indent_level - 1))
-        if pretty_print:
-            space = indent_space
-            indent_contents = indent_level + 1
-        else:
-            indent_contents = None
-        contents = self.decode_contents(
-            indent_contents, eventual_encoding, formatter
-        )
-
-        if self.hidden:
-            # This is the 'document root' object.
-            s = contents
-        else:
-            s = []
-            attribute_string = ''
-            if attrs:
-                attribute_string = ' ' + ' '.join(attrs)
-            if indent_level is not None:
-                # Even if this particular tag is not pretty-printed,
-                # we should indent up to the start of the tag.
-                s.append(indent_space)
-            s.append('<%s%s%s%s>' % (
-                prefix, self.name, attribute_string, close))
-            if pretty_print:
-                s.append("\n")
-            s.append(contents)
-            if pretty_print and contents and contents[-1] != "\n":
-                s.append("\n")
-            if pretty_print and closeTag:
-                s.append(space)
-            s.append(closeTag)
-            if indent_level is not None and closeTag and self.next_sibling:
-                # Even if this particular tag is not pretty-printed,
-                # we're now done with the tag, and we should add a
-                # newline if appropriate.
-                s.append("\n")
-            s = ''.join(s)
-        return s
-
-    def _should_pretty_print(self, indent_level):
-        """Should this tag be pretty-printed?"""
-        return (
-            indent_level is not None
-            and (
-                not self.preserve_whitespace_tags
-                or self.name not in self.preserve_whitespace_tags
-            )
-        )
-
-    def prettify(self, encoding=None, formatter="minimal"):
-        if encoding is None:
-            return self.decode(True, formatter=formatter)
-        else:
-            return self.encode(encoding, True, formatter=formatter)
-
-    def decode_contents(self, indent_level=None,
-                        eventual_encoding=DEFAULT_OUTPUT_ENCODING,
-                        formatter="minimal"):
-        """Renders the contents of this tag as a Unicode string.
-
-        :param indent_level: Each line of the rendering will be
-           indented this many spaces.
-
-        :param eventual_encoding: The tag is destined to be
-           encoded into this encoding. decode_contents() is _not_
-           responsible for performing that encoding. This information
-           is passed in so that it can be substituted in if the
-           document contains a <META> tag that mentions the document's
-           encoding.
-
-        :param formatter: A Formatter object, or a string naming one of
-            the standard Formatters.
-        """
-        # First off, turn a string formatter into a Formatter object. This
-        # will stop the lookup from happening over and over again.
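As a reference point for the decode() machinery above, a minimal sketch of how the formatter choice changes output — stock bs4 API, markup invented for illustration:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>caf&eacute;</p>", "html.parser")
    # "minimal" (the default) only escapes &, <, and > on the way out.
    print(soup.p.decode(formatter="minimal"))  # <p>café</p>
    # "html" re-escapes to named HTML entities instead.
    print(soup.p.decode(formatter="html"))     # <p>caf&eacute;</p>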
- if not isinstance(formatter, Formatter): - formatter = self.formatter_for_name(formatter) - - pretty_print = (indent_level is not None) - s = [] - for c in self: - text = None - if isinstance(c, NavigableString): - text = c.output_ready(formatter) - elif isinstance(c, Tag): - s.append(c.decode(indent_level, eventual_encoding, - formatter)) - preserve_whitespace = ( - self.preserve_whitespace_tags and self.name in self.preserve_whitespace_tags - ) - if text and indent_level and not preserve_whitespace: - text = text.strip() - if text: - if pretty_print and not preserve_whitespace: - s.append(" " * (indent_level - 1)) - s.append(text) - if pretty_print and not preserve_whitespace: - s.append("\n") - return ''.join(s) - - def encode_contents( - self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING, - formatter="minimal"): - """Renders the contents of this tag as a bytestring. - - :param indent_level: Each line of the rendering will be - indented this many spaces. - - :param eventual_encoding: The bytestring will be in this encoding. - - :param formatter: The output formatter responsible for converting - entities to Unicode characters. - """ - - contents = self.decode_contents(indent_level, encoding, formatter) - return contents.encode(encoding) - - # Old method for BS3 compatibility - def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, - prettyPrint=False, indentLevel=0): - if not prettyPrint: - indentLevel = None - return self.encode_contents( - indent_level=indentLevel, encoding=encoding) - - #Soup methods - - def find(self, name=None, attrs={}, recursive=True, text=None, - **kwargs): - """Return only the first child of this Tag matching the given - criteria.""" - r = None - l = self.find_all(name, attrs, recursive, text, 1, **kwargs) - if l: - r = l[0] - return r - findChild = find - - def find_all(self, name=None, attrs={}, recursive=True, text=None, - limit=None, **kwargs): - """Extracts a list of Tag objects that match the given - criteria. You can specify the name of the Tag and any - attributes you want the Tag to have. - - The value of a key-value pair in the 'attrs' map can be a - string, a list of strings, a regular expression object, or a - callable that takes a string and returns whether or not the - string matches for some custom definition of 'matches'. The - same is true of the tag name.""" - - generator = self.descendants - if not recursive: - generator = self.children - return self._find_all(name, attrs, text, limit, generator, **kwargs) - findAll = find_all # BS3 - findChildren = find_all # BS2 - - #Generator methods - @property - def children(self): - # return iter() to make the purpose of the method clear - return iter(self.contents) # XXX This seems to be untested. - - @property - def descendants(self): - if not len(self.contents): - return - stopNode = self._last_descendant().next_element - current = self.contents[0] - while current is not stopNode: - yield current - current = current.next_element - - # CSS selector code - def select_one(self, selector, namespaces=None, **kwargs): - """Perform a CSS selection operation on the current element.""" - value = self.select(selector, namespaces, 1, **kwargs) - if value: - return value[0] - return None - - def select(self, selector, namespaces=None, limit=None, **kwargs): - """Perform a CSS selection operation on the current element. - - This uses the SoupSieve library. - - :param selector: A string containing a CSS selector. 
- - :param namespaces: A dictionary mapping namespace prefixes - used in the CSS selector to namespace URIs. By default, - Beautiful Soup will use the prefixes it encountered while - parsing the document. - - :param limit: After finding this number of results, stop looking. - - :param kwargs: Any extra arguments you'd like to pass in to - soupsieve.select(). - """ - if namespaces is None: - namespaces = self._namespaces - - if limit is None: - limit = 0 - if soupsieve is None: - raise NotImplementedError( - "Cannot execute CSS selectors because the soupsieve package is not installed." - ) - - return soupsieve.select(selector, self, namespaces, limit, **kwargs) - - # Old names for backwards compatibility - def childGenerator(self): - return self.children - - def recursiveChildGenerator(self): - return self.descendants - - def has_key(self, key): - """This was kind of misleading because has_key() (attributes) - was different from __in__ (contents). has_key() is gone in - Python 3, anyway.""" - warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % ( - key)) - return self.has_attr(key) - -# Next, a couple classes to represent queries and their results. -class SoupStrainer(object): - """Encapsulates a number of ways of matching a markup element (tag or - text).""" - - def __init__(self, name=None, attrs={}, text=None, **kwargs): - self.name = self._normalize_search_value(name) - if not isinstance(attrs, dict): - # Treat a non-dict value for attrs as a search for the 'class' - # attribute. - kwargs['class'] = attrs - attrs = None - - if 'class_' in kwargs: - # Treat class_="foo" as a search for the 'class' - # attribute, overriding any non-dict value for attrs. - kwargs['class'] = kwargs['class_'] - del kwargs['class_'] - - if kwargs: - if attrs: - attrs = attrs.copy() - attrs.update(kwargs) - else: - attrs = kwargs - normalized_attrs = {} - for key, value in list(attrs.items()): - normalized_attrs[key] = self._normalize_search_value(value) - - self.attrs = normalized_attrs - self.text = self._normalize_search_value(text) - - def _normalize_search_value(self, value): - # Leave it alone if it's a Unicode string, a callable, a - # regular expression, a boolean, or None. - if (isinstance(value, str) or isinstance(value, Callable) or hasattr(value, 'match') - or isinstance(value, bool) or value is None): - return value - - # If it's a bytestring, convert it to Unicode, treating it as UTF-8. - if isinstance(value, bytes): - return value.decode("utf8") - - # If it's listlike, convert it into a list of strings. - if hasattr(value, '__iter__'): - new_value = [] - for v in value: - if (hasattr(v, '__iter__') and not isinstance(v, bytes) - and not isinstance(v, str)): - # This is almost certainly the user's mistake. In the - # interests of avoiding infinite loops, we'll let - # it through as-is rather than doing a recursive call. - new_value.append(v) - else: - new_value.append(self._normalize_search_value(v)) - return new_value - - # Otherwise, convert it into a Unicode string. - # The unicode(str()) thing is so this will do the same thing on Python 2 - # and Python 3. 
- return str(str(value)) - - def __str__(self): - if self.text: - return self.text - else: - return "%s|%s" % (self.name, self.attrs) - - def search_tag(self, markup_name=None, markup_attrs={}): - found = None - markup = None - if isinstance(markup_name, Tag): - markup = markup_name - markup_attrs = markup - call_function_with_tag_data = ( - isinstance(self.name, Callable) - and not isinstance(markup_name, Tag)) - - if ((not self.name) - or call_function_with_tag_data - or (markup and self._matches(markup, self.name)) - or (not markup and self._matches(markup_name, self.name))): - if call_function_with_tag_data: - match = self.name(markup_name, markup_attrs) - else: - match = True - markup_attr_map = None - for attr, match_against in list(self.attrs.items()): - if not markup_attr_map: - if hasattr(markup_attrs, 'get'): - markup_attr_map = markup_attrs - else: - markup_attr_map = {} - for k, v in markup_attrs: - markup_attr_map[k] = v - attr_value = markup_attr_map.get(attr) - if not self._matches(attr_value, match_against): - match = False - break - if match: - if markup: - found = markup - else: - found = markup_name - if found and self.text and not self._matches(found.string, self.text): - found = None - return found - searchTag = search_tag - - def search(self, markup): - # print 'looking for %s in %s' % (self, markup) - found = None - # If given a list of items, scan it for a text element that - # matches. - if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, str)): - for element in markup: - if isinstance(element, NavigableString) \ - and self.search(element): - found = element - break - # If it's a Tag, make sure its name or attributes match. - # Don't bother with Tags if we're searching for text. - elif isinstance(markup, Tag): - if not self.text or self.name or self.attrs: - found = self.search_tag(markup) - # If it's text, make sure the text matches. - elif isinstance(markup, NavigableString) or \ - isinstance(markup, str): - if not self.name and not self.attrs and self._matches(markup, self.text): - found = markup - else: - raise Exception( - "I don't know how to match against a %s" % markup.__class__) - return found - - def _matches(self, markup, match_against, already_tried=None): - # print u"Matching %s against %s" % (markup, match_against) - result = False - if isinstance(markup, list) or isinstance(markup, tuple): - # This should only happen when searching a multi-valued attribute - # like 'class'. - for item in markup: - if self._matches(item, match_against): - return True - # We didn't match any particular value of the multivalue - # attribute, but maybe we match the attribute value when - # considered as a string. - if self._matches(' '.join(markup), match_against): - return True - return False - - if match_against is True: - # True matches any non-None value. - return markup is not None - - if isinstance(match_against, Callable): - return match_against(markup) - - # Custom callables take the tag as an argument, but all - # other ways of matching match the tag name as a string. - original_markup = markup - if isinstance(markup, Tag): - markup = markup.name - - # Ensure that `markup` is either a Unicode string, or None. - markup = self._normalize_search_value(markup) - - if markup is None: - # None matches None, False, an empty string, an empty list, and so on. - return not match_against - - if (hasattr(match_against, '__iter__') - and not isinstance(match_against, str)): - # We're asked to match against an iterable of items. 
- # The markup must be match at least one item in the - # iterable. We'll try each one in turn. - # - # To avoid infinite recursion we need to keep track of - # items we've already seen. - if not already_tried: - already_tried = set() - for item in match_against: - if item.__hash__: - key = item - else: - key = id(item) - if key in already_tried: - continue - else: - already_tried.add(key) - if self._matches(original_markup, item, already_tried): - return True - else: - return False - - # Beyond this point we might need to run the test twice: once against - # the tag's name and once against its prefixed name. - match = False - - if not match and isinstance(match_against, str): - # Exact string match - match = markup == match_against - - if not match and hasattr(match_against, 'search'): - # Regexp match - return match_against.search(markup) - - if (not match - and isinstance(original_markup, Tag) - and original_markup.prefix): - # Try the whole thing again with the prefixed tag name. - return self._matches( - original_markup.prefix + ':' + original_markup.name, match_against - ) - - return match - - -class ResultSet(list): - """A ResultSet is just a list that keeps track of the SoupStrainer - that created it.""" - def __init__(self, source, result=()): - super(ResultSet, self).__init__(result) - self.source = source - - def __getattr__(self, key): - raise AttributeError( - "ResultSet object has no attribute '%s'. You're probably treating a list of items like a single item. Did you call find_all() when you meant to call find()?" % key - ) diff --git a/lib/bs4/formatter.py b/lib/bs4/formatter.py deleted file mode 100644 index 7dbaa385..00000000 --- a/lib/bs4/formatter.py +++ /dev/null @@ -1,99 +0,0 @@ -from bs4.dammit import EntitySubstitution - -class Formatter(EntitySubstitution): - """Describes a strategy to use when outputting a parse tree to a string. - - Some parts of this strategy come from the distinction between - HTML4, HTML5, and XML. Others are configurable by the user. - """ - # Registries of XML and HTML formatters. - XML_FORMATTERS = {} - HTML_FORMATTERS = {} - - HTML = 'html' - XML = 'xml' - - HTML_DEFAULTS = dict( - cdata_containing_tags=set(["script", "style"]), - ) - - def _default(self, language, value, kwarg): - if value is not None: - return value - if language == self.XML: - return set() - return self.HTML_DEFAULTS[kwarg] - - def __init__( - self, language=None, entity_substitution=None, - void_element_close_prefix='/', cdata_containing_tags=None, - ): - """ - - :param void_element_close_prefix: By default, represent void - elements as rather than - """ - self.language = language - self.entity_substitution = entity_substitution - self.void_element_close_prefix = void_element_close_prefix - self.cdata_containing_tags = self._default( - language, cdata_containing_tags, 'cdata_containing_tags' - ) - - def substitute(self, ns): - """Process a string that needs to undergo entity substitution.""" - if not self.entity_substitution: - return ns - from .element import NavigableString - if (isinstance(ns, NavigableString) - and ns.parent is not None - and ns.parent.name in self.cdata_containing_tags): - # Do nothing. - return ns - # Substitute. 
-        return self.entity_substitution(ns)
-
-    def attribute_value(self, value):
-        """Process the value of an attribute."""
-        return self.substitute(value)
-
-    def attributes(self, tag):
-        """Reorder a tag's attributes however you want."""
-        return sorted(tag.attrs.items())
-
-
-class HTMLFormatter(Formatter):
-    REGISTRY = {}
-    def __init__(self, *args, **kwargs):
-        return super(HTMLFormatter, self).__init__(self.HTML, *args, **kwargs)
-
-
-class XMLFormatter(Formatter):
-    REGISTRY = {}
-    def __init__(self, *args, **kwargs):
-        return super(XMLFormatter, self).__init__(self.XML, *args, **kwargs)
-
-
-# Set up aliases for the default formatters.
-HTMLFormatter.REGISTRY['html'] = HTMLFormatter(
-    entity_substitution=EntitySubstitution.substitute_html
-)
-HTMLFormatter.REGISTRY["html5"] = HTMLFormatter(
-    entity_substitution=EntitySubstitution.substitute_html,
-    void_element_close_prefix = None
-)
-HTMLFormatter.REGISTRY["minimal"] = HTMLFormatter(
-    entity_substitution=EntitySubstitution.substitute_xml
-)
-HTMLFormatter.REGISTRY[None] = HTMLFormatter(
-    entity_substitution=None
-)
-XMLFormatter.REGISTRY["html"] = XMLFormatter(
-    entity_substitution=EntitySubstitution.substitute_html
-)
-XMLFormatter.REGISTRY["minimal"] = XMLFormatter(
-    entity_substitution=EntitySubstitution.substitute_xml
-)
-XMLFormatter.REGISTRY[None] = Formatter(
-    Formatter.XML, entity_substitution=None
-)
diff --git a/lib/bs4/testing.py b/lib/bs4/testing.py
deleted file mode 100644
index cc996660..00000000
--- a/lib/bs4/testing.py
+++ /dev/null
@@ -1,992 +0,0 @@
-# encoding: utf-8
-"""Helper classes for tests."""
-
-# Use of this source code is governed by the MIT license.
-__license__ = "MIT"
-
-import pickle
-import copy
-import functools
-import unittest
-from unittest import TestCase
-from bs4 import BeautifulSoup
-from bs4.element import (
-    CharsetMetaAttributeValue,
-    Comment,
-    ContentMetaAttributeValue,
-    Doctype,
-    SoupStrainer,
-    Tag
-)
-
-from bs4.builder import HTMLParserTreeBuilder
-default_builder = HTMLParserTreeBuilder
-
-BAD_DOCUMENT = """A bare string
-<!DOCTYPE xsl:stylesheet SYSTEM "htmlent.dtd">
-<!DOCTYPE xsl:stylesheet PUBLIC "htmlent.dtd">
-<div><![CDATA[A CDATA section where it doesn't belong]]></div>
-<div><svg><![CDATA[HTML5 does allow CDATA sections in SVG]]></svg></div>
-<div>A <meta> tag</div>
-<div>A <br> tag that supposedly has contents.</br></div>
-<div>AT&amp;T</div>
-<div><textarea>Within a textarea, markup like <b> tags and <&<&amp; should be treated as literal</textarea></div>
-<div><script>if (i < 2) { alert("<b>Markup within script tags should be treated as literal.</b>"); }</script></div>
-<div>This numeric entity is missing the final semicolon: <x t="pi&#241ata"></div>
-<div><a href="http://example.com/</a> that attribute value never got closed</div>
-<div><a href="foo</a>, </a><a href="bar">that attribute value was closed by the subsequent tag</a></div>
-<! This document contains <!an incomplete declaration <div></div></div>
-<div>This document contains <!an incomplete declaration</div> (do you see it?)</div>
-<div>This document ends with <!an incomplete declaration
-<div><a style={height:21px;}>That attribute value was bogus</a></div>
-<! DOCTYPE html>The doctype is invalid because it contains extra whitespace
-<div><table><td nowrap>That boolean attribute had no value</td></table></div>
-<div>Here's a nonexistent entity: &#foo; (do you see it?)</div>
-<div>This document ends before the entity finishes: &gt
-<div><p>Paragraphs shouldn't contain block display elements, but this one does: <div>you see?</div></p>
-<b b="20" a="1" b="10" a="2" a="3" a="4">Multiple values for the same attribute.</b>
-<div><table><tr><td>Here's a table</td></tr></table></div>
-<div>This tag contains nothing but whitespace: <b>    </b></div>
-<div><blockquote><p><b>This p tag is cut off by</blockquote></p>the end of the blockquote tag</div>
-<div><table id="1"><tr><td>Here's a nested table:<table id="2"><tr><td>foo</td></tr></table></td></div>
-<div><table><div>This table contains bare markup</div></table></div>
-<div>This document contains a <!DOCTYPE surprise>surprise doctype</div>
-<div><our\u2603>Tag name contains Unicode characters</our\u2603></div>
- - -""" - - -class SoupTest(unittest.TestCase): - - @property - def default_builder(self): - return default_builder - - def soup(self, markup, **kwargs): - """Build a Beautiful Soup object from markup.""" - builder = kwargs.pop('builder', self.default_builder) - return BeautifulSoup(markup, builder=builder, **kwargs) - - def document_for(self, markup, **kwargs): - """Turn an HTML fragment into a document. - - The details depend on the builder. - """ - return self.default_builder(**kwargs).test_fragment_to_document(markup) - - def assertSoupEquals(self, to_parse, compare_parsed_to=None): - builder = self.default_builder - obj = BeautifulSoup(to_parse, builder=builder) - if compare_parsed_to is None: - compare_parsed_to = to_parse - - self.assertEqual(obj.decode(), self.document_for(compare_parsed_to)) - - def assertConnectedness(self, element): - """Ensure that next_element and previous_element are properly - set for all descendants of the given element. - """ - earlier = None - for e in element.descendants: - if earlier: - self.assertEqual(e, earlier.next_element) - self.assertEqual(earlier, e.previous_element) - earlier = e - - def linkage_validator(self, el, _recursive_call=False): - """Ensure proper linkage throughout the document.""" - descendant = None - # Document element should have no previous element or previous sibling. - # It also shouldn't have a next sibling. - if el.parent is None: - assert el.previous_element is None,\ - "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( - el, el.previous_element, None - ) - assert el.previous_sibling is None,\ - "Bad previous_sibling\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( - el, el.previous_sibling, None - ) - assert el.next_sibling is None,\ - "Bad next_sibling\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format( - el, el.next_sibling, None - ) - - idx = 0 - child = None - last_child = None - last_idx = len(el.contents) - 1 - for child in el.contents: - descendant = None - - # Parent should link next element to their first child - # That child should have no previous sibling - if idx == 0: - if el.parent is not None: - assert el.next_element is child,\ - "Bad next_element\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format( - el, el.next_element, child - ) - assert child.previous_element is el,\ - "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( - child, child.previous_element, el - ) - assert child.previous_sibling is None,\ - "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED: {}".format( - child, child.previous_sibling, None - ) - - # If not the first child, previous index should link as sibling to this index - # Previous element should match the last index or the last bubbled up descendant - else: - assert child.previous_sibling is el.contents[idx - 1],\ - "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED {}".format( - child, child.previous_sibling, el.contents[idx - 1] - ) - assert el.contents[idx - 1].next_sibling is child,\ - "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( - el.contents[idx - 1], el.contents[idx - 1].next_sibling, child - ) - - if last_child is not None: - assert child.previous_element is last_child,\ - "Bad previous_element\nNODE: {}\nPREV {}\nEXPECTED {}\nCONTENTS {}".format( - child, child.previous_element, last_child, child.parent.contents - ) - assert last_child.next_element is child,\ - "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( - last_child, last_child.next_element, child - ) - - if isinstance(child, Tag) and child.contents: - descendant = 
self.linkage_validator(child, True) - # A bubbled up descendant should have no next siblings - assert descendant.next_sibling is None,\ - "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( - descendant, descendant.next_sibling, None - ) - - # Mark last child as either the bubbled up descendant or the current child - if descendant is not None: - last_child = descendant - else: - last_child = child - - # If last child, there are non next siblings - if idx == last_idx: - assert child.next_sibling is None,\ - "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( - child, child.next_sibling, None - ) - idx += 1 - - child = descendant if descendant is not None else child - if child is None: - child = el - - if not _recursive_call and child is not None: - target = el - while True: - if target is None: - assert child.next_element is None, \ - "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( - child, child.next_element, None - ) - break - elif target.next_sibling is not None: - assert child.next_element is target.next_sibling, \ - "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( - child, child.next_element, target.next_sibling - ) - break - target = target.parent - - # We are done, so nothing to return - return None - else: - # Return the child to the recursive caller - return child - - -class HTMLTreeBuilderSmokeTest(object): - - """A basic test of a treebuilder's competence. - - Any HTML treebuilder, present or future, should be able to pass - these tests. With invalid markup, there's room for interpretation, - and different parsers can handle it differently. But with the - markup in these tests, there's not much room for interpretation. - """ - - def test_empty_element_tags(self): - """Verify that all HTML4 and HTML5 empty element (aka void element) tags - are handled correctly. - """ - for name in [ - 'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr', - 'spacer', 'frame' - ]: - soup = self.soup("") - new_tag = soup.new_tag(name) - self.assertEqual(True, new_tag.is_empty_element) - - def test_pickle_and_unpickle_identity(self): - # Pickling a tree, then unpickling it, yields a tree identical - # to the original. - tree = self.soup("foo") - dumped = pickle.dumps(tree, 2) - loaded = pickle.loads(dumped) - self.assertEqual(loaded.__class__, BeautifulSoup) - self.assertEqual(loaded.decode(), tree.decode()) - - def assertDoctypeHandled(self, doctype_fragment): - """Assert that a given doctype string is handled correctly.""" - doctype_str, soup = self._document_with_doctype(doctype_fragment) - - # Make sure a Doctype object was created. - doctype = soup.contents[0] - self.assertEqual(doctype.__class__, Doctype) - self.assertEqual(doctype, doctype_fragment) - self.assertEqual(str(soup)[:len(doctype_str)], doctype_str) - - # Make sure that the doctype was correctly associated with the - # parse tree and that the rest of the document parsed. - self.assertEqual(soup.p.contents[0], 'foo') - - def _document_with_doctype(self, doctype_fragment): - """Generate and parse a document with the given doctype.""" - doctype = '' % doctype_fragment - markup = doctype + '\n

<p>foo</p>

' - soup = self.soup(markup) - return doctype, soup - - def test_normal_doctypes(self): - """Make sure normal, everyday HTML doctypes are handled correctly.""" - self.assertDoctypeHandled("html") - self.assertDoctypeHandled( - 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"') - - def test_empty_doctype(self): - soup = self.soup("") - doctype = soup.contents[0] - self.assertEqual("", doctype.strip()) - - def test_public_doctype_with_url(self): - doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"' - self.assertDoctypeHandled(doctype) - - def test_system_doctype(self): - self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"') - - def test_namespaced_system_doctype(self): - # We can handle a namespaced doctype with a system ID. - self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"') - - def test_namespaced_public_doctype(self): - # Test a namespaced doctype with a public id. - self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"') - - def test_real_xhtml_document(self): - """A real XHTML document should come out more or less the same as it went in.""" - markup = b""" - - -Hello. -Goodbye. -""" - soup = self.soup(markup) - self.assertEqual( - soup.encode("utf-8").replace(b"\n", b""), - markup.replace(b"\n", b"")) - - def test_namespaced_html(self): - """When a namespaced XML document is parsed as HTML it should - be treated as HTML with weird tag names. - """ - markup = b"""content""" - soup = self.soup(markup) - self.assertEqual(2, len(soup.find_all("ns1:foo"))) - - def test_processing_instruction(self): - # We test both Unicode and bytestring to verify that - # process_markup correctly sets processing_instruction_class - # even when the markup is already Unicode and there is no - # need to process anything. - markup = """""" - soup = self.soup(markup) - self.assertEqual(markup, soup.decode()) - - markup = b"""""" - soup = self.soup(markup) - self.assertEqual(markup, soup.encode("utf8")) - - def test_deepcopy(self): - """Make sure you can copy the tree builder. - - This is important because the builder is part of a - BeautifulSoup object, and we want to be able to copy that. - """ - copy.deepcopy(self.default_builder) - - def test_p_tag_is_never_empty_element(self): - """A

<p> tag is never designated as an empty-element tag.
-
-        Even if the markup shows it as an empty-element tag, it
-        shouldn't be presented that way.
-        """
-        soup = self.soup("<p/>")
-        self.assertFalse(soup.p.is_empty_element)
-        self.assertEqual(str(soup.p), "<p></p>")
-
-    def test_unclosed_tags_get_closed(self):
-        """A tag that's not closed by the end of the document should be closed.
-
-        This applies to all tags except empty-element tags.
-        """
-        self.assertSoupEquals("<p>", "<p></p>")
-        self.assertSoupEquals("<b>", "<b></b>")
-
-        self.assertSoupEquals("<br>", "<br/>")
-
-    def test_br_is_always_empty_element_tag(self):
-        """A <br> tag is designated as an empty-element tag.
-
-        Some parsers treat <br></br> as one <br/> tag, some parsers as
-        two tags, but it should always be an empty-element tag.
-        """
-        soup = self.soup("<br></br>")
-        self.assertTrue(soup.br.is_empty_element)
-        self.assertEqual(str(soup.br), "<br/>")
-
-    def test_nested_formatting_elements(self):
-        self.assertSoupEquals("<em><em></em></em>")
-
-    def test_double_head(self):
-        html = '''<!DOCTYPE html>
-<html>
-<head>
-<title>Ordinary HEAD element test</title>
-</head>
-<script type="text/javascript">
-alert("Help!");
-</script>
-<body>
-Hello, world!
-</body>
-</html>
-'''
-        soup = self.soup(html)
-        self.assertEqual("text/javascript", soup.find('script')['type'])
-
-    def test_comment(self):
-        # Comments are represented as Comment objects.
-        markup = "<p>foo<!--foobar-->baz</p>"
-        self.assertSoupEquals(markup)
-
-        soup = self.soup(markup)
-        comment = soup.find(text="foobar")
-        self.assertEqual(comment.__class__, Comment)
-
-        # The comment is properly integrated into the tree.
-        foo = soup.find(text="foo")
-        self.assertEqual(comment, foo.next_element)
-        baz = soup.find(text="baz")
-        self.assertEqual(comment, baz.previous_element)
-
-    def test_preserved_whitespace_in_pre_and_textarea(self):
-        """Whitespace must be preserved in <pre> and <textarea> tags,
-        even if that would mean not prettifying the markup.
-        """
-        pre_markup = "<pre>   </pre>"
-        textarea_markup = "<textarea> woo\nwoo  </textarea>"
-        self.assertSoupEquals(pre_markup)
-        self.assertSoupEquals(textarea_markup)
-
-        soup = self.soup(pre_markup)
-        self.assertEqual(soup.pre.prettify(), pre_markup)
-
-        soup = self.soup(textarea_markup)
-        self.assertEqual(soup.textarea.prettify(), textarea_markup)
-
-        soup = self.soup("")
-        self.assertEqual(soup.textarea.prettify(), "")
-
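The whitespace-preservation behaviour tested above comes from the builder's preserve_whitespace_tags set; a minimal sketch, assuming the stock html.parser builder (markup invented for illustration):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<div><pre>  two  spaces  </pre></div>", "html.parser")
    # prettify() indents <div> but leaves the <pre> contents untouched,
    # because "pre" is in the builder's preserve_whitespace_tags.
    print(soup.div.prettify())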
-    def test_nested_inline_elements(self):
-        """Inline elements can be nested indefinitely."""
-        b_tag = "Inside a B tag"
-        self.assertSoupEquals(b_tag)
-
-        nested_b_tag = "

A nested tag

" - self.assertSoupEquals(nested_b_tag) - - double_nested_b_tag = "

A doubly nested tag

" - self.assertSoupEquals(nested_b_tag) - - def test_nested_block_level_elements(self): - """Block elements can be nested.""" - soup = self.soup('

Foo

') - blockquote = soup.blockquote - self.assertEqual(blockquote.p.b.string, 'Foo') - self.assertEqual(blockquote.b.string, 'Foo') - - def test_correctly_nested_tables(self): - """One table can go inside another one.""" - markup = ('' - '' - "') - - self.assertSoupEquals( - markup, - '
Here's another table:" - '' - '' - '
foo
Here\'s another table:' - '
foo
' - '
') - - self.assertSoupEquals( - "" - "" - "
Foo
Bar
Baz
") - - def test_multivalued_attribute_with_whitespace(self): - # Whitespace separating the values of a multi-valued attribute - # should be ignored. - - markup = '
' - soup = self.soup(markup) - self.assertEqual(['foo', 'bar'], soup.div['class']) - - # If you search by the literal name of the class it's like the whitespace - # wasn't there. - self.assertEqual(soup.div, soup.find('div', class_="foo bar")) - - def test_deeply_nested_multivalued_attribute(self): - # html5lib can set the attributes of the same tag many times - # as it rearranges the tree. This has caused problems with - # multivalued attributes. - markup = '
' - soup = self.soup(markup) - self.assertEqual(["css"], soup.div.div['class']) - - def test_multivalued_attribute_on_html(self): - # html5lib uses a different API to set the attributes ot the - # tag. This has caused problems with multivalued - # attributes. - markup = '' - soup = self.soup(markup) - self.assertEqual(["a", "b"], soup.html['class']) - - def test_angle_brackets_in_attribute_values_are_escaped(self): - self.assertSoupEquals('', '') - - def test_strings_resembling_character_entity_references(self): - # "&T" and "&p" look like incomplete character entities, but they are - # not. - self.assertSoupEquals( - "

<p>&bull; AT&T is in the s&p 500</p>

", - "

<p>\u2022 AT&amp;T is in the s&p 500</p>

" - ) - - def test_apos_entity(self): - self.assertSoupEquals( - "

<p>Bob&apos;s Bar</p>

", - "

<p>Bob's Bar</p>

", - ) - - def test_entities_in_foreign_document_encoding(self): - # “ and ” are invalid numeric entities referencing - # Windows-1252 characters. - references a character common - # to Windows-1252 and Unicode, and ☃ references a - # character only found in Unicode. - # - # All of these entities should be converted to Unicode - # characters. - markup = "

<p>&#147;Hello&#148; -&#9731;</p>

" - soup = self.soup(markup) - self.assertEqual("“Hello” -☃", soup.p.string) - - def test_entities_in_attributes_converted_to_unicode(self): - expect = '

' - self.assertSoupEquals('

', expect) - self.assertSoupEquals('

', expect) - self.assertSoupEquals('

', expect) - self.assertSoupEquals('

', expect) - - def test_entities_in_text_converted_to_unicode(self): - expect = '

<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>

' - self.assertSoupEquals("

<p>pi&#241;ata</p>

", expect) - self.assertSoupEquals("

<p>pi&#xf1;ata</p>

", expect) - self.assertSoupEquals("

<p>pi&#Xf1;ata</p>

", expect) - self.assertSoupEquals("

<p>pi&ntilde;ata</p>

", expect) - - def test_quot_entity_converted_to_quotation_mark(self): - self.assertSoupEquals("

I said "good day!"

", - '

I said "good day!"

') - - def test_out_of_range_entity(self): - expect = "\N{REPLACEMENT CHARACTER}" - self.assertSoupEquals("�", expect) - self.assertSoupEquals("�", expect) - self.assertSoupEquals("�", expect) - - def test_multipart_strings(self): - "Mostly to prevent a recurrence of a bug in the html5lib treebuilder." - soup = self.soup("

<html><h2>\nfoo</h2><p></p></html>

") - self.assertEqual("p", soup.h2.string.next_element.name) - self.assertEqual("p", soup.p.name) - self.assertConnectedness(soup) - - def test_empty_element_tags(self): - """Verify consistent handling of empty-element tags, - no matter how they come in through the markup. - """ - self.assertSoupEquals('


', "


") - self.assertSoupEquals('


', "


") - - def test_head_tag_between_head_and_body(self): - "Prevent recurrence of a bug in the html5lib treebuilder." - content = """ - - foo - -""" - soup = self.soup(content) - self.assertNotEqual(None, soup.html.body) - self.assertConnectedness(soup) - - def test_multiple_copies_of_a_tag(self): - "Prevent recurrence of a bug in the html5lib treebuilder." - content = """ - - - - - -""" - soup = self.soup(content) - self.assertConnectedness(soup.article) - - def test_basic_namespaces(self): - """Parsers don't need to *understand* namespaces, but at the - very least they should not choke on namespaces or lose - data.""" - - markup = b'4' - soup = self.soup(markup) - self.assertEqual(markup, soup.encode()) - html = soup.html - self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns']) - self.assertEqual( - 'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml']) - self.assertEqual( - 'http://www.w3.org/2000/svg', soup.html['xmlns:svg']) - - def test_multivalued_attribute_value_becomes_list(self): - markup = b'' - soup = self.soup(markup) - self.assertEqual(['foo', 'bar'], soup.a['class']) - - # - # Generally speaking, tests below this point are more tests of - # Beautiful Soup than tests of the tree builders. But parsers are - # weird, so we run these tests separately for every tree builder - # to detect any differences between them. - # - - def test_can_parse_unicode_document(self): - # A seemingly innocuous document... but it's in Unicode! And - # it contains characters that can't be represented in the - # encoding found in the declaration! The horror! - markup = 'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' - soup = self.soup(markup) - self.assertEqual('Sacr\xe9 bleu!', soup.body.string) - - def test_soupstrainer(self): - """Parsers should be able to work with SoupStrainers.""" - strainer = SoupStrainer("b") - soup = self.soup("A bold statement", - parse_only=strainer) - self.assertEqual(soup.decode(), "bold") - - def test_single_quote_attribute_values_become_double_quotes(self): - self.assertSoupEquals("", - '') - - def test_attribute_values_with_nested_quotes_are_left_alone(self): - text = """a""" - self.assertSoupEquals(text) - - def test_attribute_values_with_double_nested_quotes_get_quoted(self): - text = """a""" - soup = self.soup(text) - soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"' - self.assertSoupEquals( - soup.foo.decode(), - """a""") - - def test_ampersand_in_attribute_value_gets_escaped(self): - self.assertSoupEquals('', - '') - - self.assertSoupEquals( - 'foo', - 'foo') - - def test_escaped_ampersand_in_attribute_value_is_left_alone(self): - self.assertSoupEquals('') - - def test_entities_in_strings_converted_during_parsing(self): - # Both XML and HTML entities are converted to Unicode characters - # during parsing. - text = "

<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>

" - expected = "

<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>

" - self.assertSoupEquals(text, expected) - - def test_smart_quotes_converted_on_the_way_in(self): - # Microsoft smart quotes are converted to Unicode characters during - # parsing. - quote = b"

<p>\x91Foo\x92</p>

" - soup = self.soup(quote) - self.assertEqual( - soup.p.string, - "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}") - - def test_non_breaking_spaces_converted_on_the_way_in(self): - soup = self.soup("  ") - self.assertEqual(soup.a.string, "\N{NO-BREAK SPACE}" * 2) - - def test_entities_converted_on_the_way_out(self): - text = "

<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>

" - expected = "

<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>

".encode("utf-8") - soup = self.soup(text) - self.assertEqual(soup.p.encode("utf-8"), expected) - - def test_real_iso_latin_document(self): - # Smoke test of interrelated functionality, using an - # easy-to-understand document. - - # Here it is in Unicode. Note that it claims to be in ISO-Latin-1. - unicode_html = '

<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>

' - - # That's because we're going to encode it into ISO-Latin-1, and use - # that to test. - iso_latin_html = unicode_html.encode("iso-8859-1") - - # Parse the ISO-Latin-1 HTML. - soup = self.soup(iso_latin_html) - # Encode it to UTF-8. - result = soup.encode("utf-8") - - # What do we expect the result to look like? Well, it would - # look like unicode_html, except that the META tag would say - # UTF-8 instead of ISO-Latin-1. - expected = unicode_html.replace("ISO-Latin-1", "utf-8") - - # And, of course, it would be in UTF-8, not Unicode. - expected = expected.encode("utf-8") - - # Ta-da! - self.assertEqual(result, expected) - - def test_real_shift_jis_document(self): - # Smoke test to make sure the parser can handle a document in - # Shift-JIS encoding, without choking. - shift_jis_html = ( - b'
<html><head></head><body><pre>'
-            b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
-            b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
-            b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
-            b'</pre></body></html>
') - unicode_html = shift_jis_html.decode("shift-jis") - soup = self.soup(unicode_html) - - # Make sure the parse tree is correctly encoded to various - # encodings. - self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8")) - self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp")) - - def test_real_hebrew_document(self): - # A real-world test to make sure we can convert ISO-8859-9 (a - # Hebrew encoding) to UTF-8. - hebrew_document = b'Hebrew (ISO 8859-8) in Visual Directionality

</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>

\xed\xe5\xec\xf9' - soup = self.soup( - hebrew_document, from_encoding="iso8859-8") - # Some tree builders call it iso8859-8, others call it iso-8859-9. - # That's not a difference we really care about. - assert soup.original_encoding in ('iso8859-8', 'iso-8859-8') - self.assertEqual( - soup.encode('utf-8'), - hebrew_document.decode("iso8859-8").encode("utf-8")) - - def test_meta_tag_reflects_current_encoding(self): - # Here's the tag saying that a document is - # encoded in Shift-JIS. - meta_tag = ('') - - # Here's a document incorporating that meta tag. - shift_jis_html = ( - '\n%s\n' - '' - 'Shift-JIS markup goes here.') % meta_tag - soup = self.soup(shift_jis_html) - - # Parse the document, and the charset is seemingly unaffected. - parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'}) - content = parsed_meta['content'] - self.assertEqual('text/html; charset=x-sjis', content) - - # But that value is actually a ContentMetaAttributeValue object. - self.assertTrue(isinstance(content, ContentMetaAttributeValue)) - - # And it will take on a value that reflects its current - # encoding. - self.assertEqual('text/html; charset=utf8', content.encode("utf8")) - - # For the rest of the story, see TestSubstitutions in - # test_tree.py. - - def test_html5_style_meta_tag_reflects_current_encoding(self): - # Here's the tag saying that a document is - # encoded in Shift-JIS. - meta_tag = ('') - - # Here's a document incorporating that meta tag. - shift_jis_html = ( - '\n%s\n' - '' - 'Shift-JIS markup goes here.') % meta_tag - soup = self.soup(shift_jis_html) - - # Parse the document, and the charset is seemingly unaffected. - parsed_meta = soup.find('meta', id="encoding") - charset = parsed_meta['charset'] - self.assertEqual('x-sjis', charset) - - # But that value is actually a CharsetMetaAttributeValue object. - self.assertTrue(isinstance(charset, CharsetMetaAttributeValue)) - - # And it will take on a value that reflects its current - # encoding. - self.assertEqual('utf8', charset.encode("utf8")) - - def test_tag_with_no_attributes_can_have_attributes_added(self): - data = self.soup("text") - data.a['foo'] = 'bar' - self.assertEqual('text', data.a.decode()) - - def test_worst_case(self): - """Test the worst case (currently) for linking issues.""" - - soup = self.soup(BAD_DOCUMENT) - self.linkage_validator(soup) - - -class XMLTreeBuilderSmokeTest(object): - - def test_pickle_and_unpickle_identity(self): - # Pickling a tree, then unpickling it, yields a tree identical - # to the original. - tree = self.soup("foo") - dumped = pickle.dumps(tree, 2) - loaded = pickle.loads(dumped) - self.assertEqual(loaded.__class__, BeautifulSoup) - self.assertEqual(loaded.decode(), tree.decode()) - - def test_docstring_generated(self): - soup = self.soup("") - self.assertEqual( - soup.encode(), b'\n') - - def test_xml_declaration(self): - markup = b"""\n""" - soup = self.soup(markup) - self.assertEqual(markup, soup.encode("utf8")) - - def test_processing_instruction(self): - markup = b"""\n""" - soup = self.soup(markup) - self.assertEqual(markup, soup.encode("utf8")) - - def test_real_xhtml_document(self): - """A real XHTML document should come out *exactly* the same as it went in.""" - markup = b""" - - -Hello. -Goodbye. 
-""" - soup = self.soup(markup) - self.assertEqual( - soup.encode("utf-8"), markup) - - def test_nested_namespaces(self): - doc = b""" - - - - - -""" - soup = self.soup(doc) - self.assertEqual(doc, soup.encode()) - - def test_formatter_processes_script_tag_for_xml_documents(self): - doc = """ - -""" - soup = BeautifulSoup(doc, "lxml-xml") - # lxml would have stripped this while parsing, but we can add - # it later. - soup.script.string = 'console.log("< < hey > > ");' - encoded = soup.encode() - self.assertTrue(b"< < hey > >" in encoded) - - def test_can_parse_unicode_document(self): - markup = 'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' - soup = self.soup(markup) - self.assertEqual('Sacr\xe9 bleu!', soup.root.string) - - def test_popping_namespaced_tag(self): - markup = 'b2012-07-02T20:33:42Zcd' - soup = self.soup(markup) - self.assertEqual( - str(soup.rss), markup) - - def test_docstring_includes_correct_encoding(self): - soup = self.soup("") - self.assertEqual( - soup.encode("latin1"), - b'\n') - - def test_large_xml_document(self): - """A large XML document should come out the same as it went in.""" - markup = (b'\n' - + b'0' * (2**12) - + b'') - soup = self.soup(markup) - self.assertEqual(soup.encode("utf-8"), markup) - - - def test_tags_are_empty_element_if_and_only_if_they_are_empty(self): - self.assertSoupEquals("

", "

") - self.assertSoupEquals("

foo

") - - def test_namespaces_are_preserved(self): - markup = 'This tag is in the a namespaceThis tag is in the b namespace' - soup = self.soup(markup) - root = soup.root - self.assertEqual("http://example.com/", root['xmlns:a']) - self.assertEqual("http://example.net/", root['xmlns:b']) - - def test_closing_namespaced_tag(self): - markup = '

20010504

' - soup = self.soup(markup) - self.assertEqual(str(soup.p), markup) - - def test_namespaced_attributes(self): - markup = '' - soup = self.soup(markup) - self.assertEqual(str(soup.foo), markup) - - def test_namespaced_attributes_xml_namespace(self): - markup = 'bar' - soup = self.soup(markup) - self.assertEqual(str(soup.foo), markup) - - def test_find_by_prefixed_name(self): - doc = """ -foo - bar - baz - -""" - soup = self.soup(doc) - - # There are three tags. - self.assertEqual(3, len(soup.find_all('tag'))) - - # But two of them are ns1:tag and one of them is ns2:tag. - self.assertEqual(2, len(soup.find_all('ns1:tag'))) - self.assertEqual(1, len(soup.find_all('ns2:tag'))) - - self.assertEqual(1, len(soup.find_all('ns2:tag', key='value'))) - self.assertEqual(3, len(soup.find_all(['ns1:tag', 'ns2:tag']))) - - def test_copy_tag_preserves_namespace(self): - xml = """ -""" - - soup = self.soup(xml) - tag = soup.document - duplicate = copy.copy(tag) - - # The two tags have the same namespace prefix. - self.assertEqual(tag.prefix, duplicate.prefix) - - def test_worst_case(self): - """Test the worst case (currently) for linking issues.""" - - soup = self.soup(BAD_DOCUMENT) - self.linkage_validator(soup) - - -class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest): - """Smoke test for a tree builder that supports HTML5.""" - - def test_real_xhtml_document(self): - # Since XHTML is not HTML5, HTML5 parsers are not tested to handle - # XHTML documents in any particular way. - pass - - def test_html_tags_have_namespace(self): - markup = "" - soup = self.soup(markup) - self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace) - - def test_svg_tags_have_namespace(self): - markup = '' - soup = self.soup(markup) - namespace = "http://www.w3.org/2000/svg" - self.assertEqual(namespace, soup.svg.namespace) - self.assertEqual(namespace, soup.circle.namespace) - - - def test_mathml_tags_have_namespace(self): - markup = '5' - soup = self.soup(markup) - namespace = 'http://www.w3.org/1998/Math/MathML' - self.assertEqual(namespace, soup.math.namespace) - self.assertEqual(namespace, soup.msqrt.namespace) - - def test_xml_declaration_becomes_comment(self): - markup = '' - soup = self.soup(markup) - self.assertTrue(isinstance(soup.contents[0], Comment)) - self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?') - self.assertEqual("html", soup.contents[0].next_element.name) - -def skipIf(condition, reason): - def nothing(test, *args, **kwargs): - return None - - def decorator(test_item): - if condition: - return nothing - else: - return test_item - - return decorator diff --git a/lib/bs4/tests/__init__.py b/lib/bs4/tests/__init__.py deleted file mode 100644 index 142c8cc3..00000000 --- a/lib/bs4/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"The beautifulsoup tests." 
diff --git a/lib/bs4/tests/test_builder_registry.py b/lib/bs4/tests/test_builder_registry.py
deleted file mode 100644
index 90cad829..00000000
--- a/lib/bs4/tests/test_builder_registry.py
+++ /dev/null
@@ -1,147 +0,0 @@
-"""Tests of the builder registry."""
-
-import unittest
-import warnings
-
-from bs4 import BeautifulSoup
-from bs4.builder import (
-    builder_registry as registry,
-    HTMLParserTreeBuilder,
-    TreeBuilderRegistry,
-)
-
-try:
-    from bs4.builder import HTML5TreeBuilder
-    HTML5LIB_PRESENT = True
-except ImportError:
-    HTML5LIB_PRESENT = False
-
-try:
-    from bs4.builder import (
-        LXMLTreeBuilderForXML,
-        LXMLTreeBuilder,
-        )
-    LXML_PRESENT = True
-except ImportError:
-    LXML_PRESENT = False
-
-
-class BuiltInRegistryTest(unittest.TestCase):
-    """Test the built-in registry with the default builders registered."""
-
-    def test_combination(self):
-        if LXML_PRESENT:
-            self.assertEqual(registry.lookup('fast', 'html'),
-                             LXMLTreeBuilder)
-
-        if LXML_PRESENT:
-            self.assertEqual(registry.lookup('permissive', 'xml'),
-                             LXMLTreeBuilderForXML)
-        self.assertEqual(registry.lookup('strict', 'html'),
-                         HTMLParserTreeBuilder)
-        if HTML5LIB_PRESENT:
-            self.assertEqual(registry.lookup('html5lib', 'html'),
-                             HTML5TreeBuilder)
-
-    def test_lookup_by_markup_type(self):
-        if LXML_PRESENT:
-            self.assertEqual(registry.lookup('html'), LXMLTreeBuilder)
-            self.assertEqual(registry.lookup('xml'), LXMLTreeBuilderForXML)
-        else:
-            self.assertEqual(registry.lookup('xml'), None)
-            if HTML5LIB_PRESENT:
-                self.assertEqual(registry.lookup('html'), HTML5TreeBuilder)
-            else:
-                self.assertEqual(registry.lookup('html'), HTMLParserTreeBuilder)
-
-    def test_named_library(self):
-        if LXML_PRESENT:
-            self.assertEqual(registry.lookup('lxml', 'xml'),
-                             LXMLTreeBuilderForXML)
-            self.assertEqual(registry.lookup('lxml', 'html'),
-                             LXMLTreeBuilder)
-        if HTML5LIB_PRESENT:
-            self.assertEqual(registry.lookup('html5lib'),
-                             HTML5TreeBuilder)
-
-        self.assertEqual(registry.lookup('html.parser'),
-                         HTMLParserTreeBuilder)
-
-    def test_beautifulsoup_constructor_does_lookup(self):
-
-        with warnings.catch_warnings(record=True) as w:
-            # This will create a warning about not explicitly
-            # specifying a parser, but we'll ignore it.
-
-            # You can pass in a string.
-            BeautifulSoup("", features="html")
-            # Or a list of strings.
-            BeautifulSoup("", features=["html", "fast"])
-
-        # You'll get an exception if BS can't find an appropriate
-        # builder.
-        self.assertRaises(ValueError, BeautifulSoup,
-                          "", features="no-such-feature")
-
-class RegistryTest(unittest.TestCase):
-    """Test the TreeBuilderRegistry class in general."""
-
-    def setUp(self):
-        self.registry = TreeBuilderRegistry()
-
-    def builder_for_features(self, *feature_list):
-        cls = type('Builder_' + '_'.join(feature_list),
-                   (object,), {'features' : feature_list})
-
-        self.registry.register(cls)
-        return cls
-
-    def test_register_with_no_features(self):
-        builder = self.builder_for_features()
-
-        # Since the builder advertises no features, you can't find it
-        # by looking up features.
-        self.assertEqual(self.registry.lookup('foo'), None)
-
-        # But you can find it by doing a lookup with no features, if
-        # this happens to be the only registered builder.
-        self.assertEqual(self.registry.lookup(), builder)
-
-    def test_register_with_features_makes_lookup_succeed(self):
-        builder = self.builder_for_features('foo', 'bar')
-        self.assertEqual(self.registry.lookup('foo'), builder)
-        self.assertEqual(self.registry.lookup('bar'), builder)
-
-    def test_lookup_fails_when_no_builder_implements_feature(self):
-        builder = self.builder_for_features('foo', 'bar')
-        self.assertEqual(self.registry.lookup('baz'), None)
-
-    def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
-        builder1 = self.builder_for_features('foo')
-        builder2 = self.builder_for_features('bar')
-        self.assertEqual(self.registry.lookup(), builder2)
-
-    def test_lookup_fails_when_no_tree_builders_registered(self):
-        self.assertEqual(self.registry.lookup(), None)
-
-    def test_lookup_gets_most_recent_builder_supporting_all_features(self):
-        has_one = self.builder_for_features('foo')
-        has_the_other = self.builder_for_features('bar')
-        has_both_early = self.builder_for_features('foo', 'bar', 'baz')
-        has_both_late = self.builder_for_features('foo', 'bar', 'quux')
-        lacks_one = self.builder_for_features('bar')
-        has_the_other = self.builder_for_features('foo')
-
-        # There are two builders featuring 'foo' and 'bar', but
-        # the one that also features 'quux' was registered later.
-        self.assertEqual(self.registry.lookup('foo', 'bar'),
-                         has_both_late)
-
-        # There is only one builder featuring 'foo', 'bar', and 'baz'.
-        self.assertEqual(self.registry.lookup('foo', 'bar', 'baz'),
-                         has_both_early)
-
-    def test_lookup_fails_when_cannot_reconcile_requested_features(self):
-        builder1 = self.builder_for_features('foo', 'bar')
-        builder2 = self.builder_for_features('foo', 'baz')
-        self.assertEqual(self.registry.lookup('bar', 'baz'), None)
diff --git a/lib/bs4/tests/test_docs.py b/lib/bs4/tests/test_docs.py
deleted file mode 100644
index 5b9f6770..00000000
--- a/lib/bs4/tests/test_docs.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"Test harness for doctests."
-
-# pylint: disable-msg=E0611,W0142
-
-__metaclass__ = type
-__all__ = [
-    'additional_tests',
-    ]
-
-import atexit
-import doctest
-import os
-#from pkg_resources import (
-#    resource_filename, resource_exists, resource_listdir, cleanup_resources)
-import unittest
-
-DOCTEST_FLAGS = (
-    doctest.ELLIPSIS |
-    doctest.NORMALIZE_WHITESPACE |
-    doctest.REPORT_NDIFF)
-
-
-# def additional_tests():
-#     "Run the doc tests (README.txt and docs/*, if any exist)"
-#     doctest_files = [
-#         os.path.abspath(resource_filename('bs4', 'README.txt'))]
-#     if resource_exists('bs4', 'docs'):
-#         for name in resource_listdir('bs4', 'docs'):
-#             if name.endswith('.txt'):
-#                 doctest_files.append(
-#                     os.path.abspath(
-#                         resource_filename('bs4', 'docs/%s' % name)))
-#     kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS)
-#     atexit.register(cleanup_resources)
-#     return unittest.TestSuite((
-#         doctest.DocFileSuite(*doctest_files, **kwargs)))
diff --git a/lib/bs4/tests/test_html5lib.py b/lib/bs4/tests/test_html5lib.py
deleted file mode 100644
index d7a0b298..00000000
--- a/lib/bs4/tests/test_html5lib.py
+++ /dev/null
@@ -1,184 +0,0 @@
-"""Tests to ensure that the html5lib tree builder generates good trees."""
-
-import warnings
-
-try:
-    from bs4.builder import HTML5TreeBuilder
-    HTML5LIB_PRESENT = True
-except ImportError as e:
-    HTML5LIB_PRESENT = False
-from bs4.element import SoupStrainer
-from bs4.testing import (
-    HTML5TreeBuilderSmokeTest,
-    SoupTest,
-    skipIf,
-)
-
-@skipIf(
-    not HTML5LIB_PRESENT,
-    "html5lib seems not to be present, not testing its tree builder.")
-class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
-    """See ``HTML5TreeBuilderSmokeTest``."""
-
-    @property
-    def default_builder(self):
-        return HTML5TreeBuilder
-
-    def test_soupstrainer(self):
-        # The html5lib tree builder does not support SoupStrainers.
-        strainer = SoupStrainer("b")
-        markup = "<p>A <b>bold</b> statement.</p>"
-        with warnings.catch_warnings(record=True) as w:
-            soup = self.soup(markup, parse_only=strainer)
-        self.assertEqual(
-            soup.decode(), self.document_for(markup))
-
-        self.assertTrue(
-            "the html5lib tree builder doesn't support parse_only" in
-            str(w[0].message))
-
-    def test_correctly_nested_tables(self):
-        """html5lib inserts <tbody> tags where other parsers don't."""
-        markup = ('<table id="1">'
-                  '<tr>'
-                  "<td>Here's another table:"
-                  '<table id="2">'
-                  '<tr><td>foo</td></tr>'
-                  '</table></td>')
-
-        self.assertSoupEquals(
-            markup,
-            '<table id="1"><tbody><tr><td>Here\'s another table:'
-            '<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
-            '</td></tr></tbody></table>')
-
-        self.assertSoupEquals(
-            "<table><thead><tr><td>Foo</td></tr></thead>"
-            "<tbody><tr><td>Bar</td></tr></tbody>"
-            "<tfoot><tr><td>Baz</td></tr></tfoot></table>")
-
-    def test_xml_declaration_followed_by_doctype(self):
-        markup = '''<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE html>
-<html>
-  <head>
-  </head>
-  <body>
-   <p>foo</p>
-  </body>
-</html>'''
-        soup = self.soup(markup)
-        # Verify that we can reach the <p> tag; this means the tree is connected.
-        self.assertEqual(b"<p>foo</p>", soup.p.encode())
-
-    def test_reparented_markup(self):
-        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
-        soup = self.soup(markup)
-        self.assertEqual("<body><p><em>foo</em></p>\n<p><em>bar<a></a></em></p></body>", soup.body.decode())
", soup.body.decode()) - self.assertEqual(2, len(soup.find_all('p'))) - - - def test_reparented_markup_ends_with_whitespace(self): - markup = '

foo

\n

bar

\n' - soup = self.soup(markup) - self.assertEqual("

foo

\n

bar

\n", soup.body.decode()) - self.assertEqual(2, len(soup.find_all('p'))) - - def test_reparented_markup_containing_identical_whitespace_nodes(self): - """Verify that we keep the two whitespace nodes in this - document distinct when reparenting the adjacent tags. - """ - markup = '
' - soup = self.soup(markup) - space1, space2 = soup.find_all(string=' ') - tbody1, tbody2 = soup.find_all('tbody') - assert space1.next_element is tbody1 - assert tbody2.next_element is space2 - - def test_reparented_markup_containing_children(self): - markup = '' - soup = self.soup(markup) - noscript = soup.noscript - self.assertEqual("target", noscript.next_element) - target = soup.find(string='target') - - # The 'aftermath' string was duplicated; we want the second one. - final_aftermath = soup.find_all(string='aftermath')[-1] - - # The