From 4320d0ce95599d79f5c3475ea1c6329472bedc87 Mon Sep 17 00:00:00 2001 From: Hydrus Network Developer Date: Thu, 11 Jun 2020 07:01:08 -0500 Subject: [PATCH] Version 400 --- bin/swfrender license.txt | 674 +++++ bin/upnpc license.txt | 26 + help/changelog.html | 71 + help/client_api.html | 6 + help/duplicates.html | 2 +- help/getting_started_subscriptions.html | 17 +- hydrus/client/ClientController.py | 4 +- hydrus/client/ClientDB.py | 334 +-- hydrus/client/ClientFiles.py | 6 +- hydrus/client/ClientLocalServerResources.py | 8 +- hydrus/client/ClientOptions.py | 2 + hydrus/client/gui/ClientGUI.py | 171 +- hydrus/client/gui/ClientGUIACDropdown.py | 27 +- hydrus/client/gui/ClientGUICanvas.py | 36 +- hydrus/client/gui/ClientGUICanvasMedia.py | 2 + hydrus/client/gui/ClientGUIFileSeedCache.py | 16 +- hydrus/client/gui/ClientGUIGallerySeedLog.py | 2 +- hydrus/client/gui/ClientGUIListBoxes.py | 14 +- hydrus/client/gui/ClientGUIManagement.py | 28 +- hydrus/client/gui/ClientGUIPages.py | 20 +- hydrus/client/gui/ClientGUIPanels.py | 8 +- hydrus/client/gui/ClientGUIResults.py | 17 +- .../client/gui/ClientGUIScrolledPanelsEdit.py | 1866 +------------- hydrus/client/gui/ClientGUIShortcuts.py | 2 +- hydrus/client/gui/ClientGUISubscriptions.py | 2196 +++++++++++++++++ hydrus/client/gui/ClientGUITagSuggestions.py | 12 +- hydrus/client/gui/ClientGUITags.py | 67 +- .../client/importing/ClientImportFileSeeds.py | 477 ++-- .../client/importing/ClientImportGallery.py | 10 +- .../importing/ClientImportGallerySeeds.py | 58 +- .../importing/ClientImportSimpleURLs.py | 8 - .../ClientImportSubscriptionLegacy.py | 1962 +++++++++++++++ .../ClientImportSubscriptionQuery.py | 738 ++++-- .../importing/ClientImportSubscriptions.py | 1823 +++++++------- .../client/importing/ClientImportWatchers.py | 10 +- .../networking/ClientNetworkingDomain.py | 25 +- hydrus/core/HydrusConstants.py | 4 +- hydrus/core/HydrusData.py | 28 +- hydrus/core/HydrusPaths.py | 2 + hydrus/core/HydrusSerialisable.py | 30 +- hydrus/core/HydrusServerResources.py | 2 +- hydrus/test/TestClientAPI.py | 8 +- hydrus/test/TestDialogs.py | 3 +- hydrus/test/TestHydrusSerialisable.py | 18 +- requirementspy3.8.txt | 23 + .../derpibooru tag search - no filter.png | Bin 1933 -> 1937 bytes static/default/gugs/derpibooru tag search.png | Bin 1786 -> 1783 bytes .../gugs/nitter media and retweets lookup.png | Bin 0 -> 2040 bytes static/default/gugs/nitter media lookup.png | Bin 0 -> 1613 bytes .../default/gugs/nitter retweets lookup.png | Bin 0 -> 1685 bytes .../default/gugs/twitter username lookup.png | Bin 1758 -> 0 bytes ...oru file page parser - get webm ugoira.png | Bin 2743 -> 2740 bytes .../parsers/danbooru file page parser.png | Bin 2358 -> 2356 bytes .../derpibooru gallery page api parser.png | Bin 2052 -> 2077 bytes .../default/parsers/nitter media parser.png | Bin 0 -> 1804 bytes .../default/parsers/nitter retweet parser.png | Bin 0 -> 1922 bytes ... tweet parser (video from koto.reisen).png | Bin 0 -> 2732 bytes .../default/parsers/nitter tweet parser.png | Bin 0 -> 1918 bytes .../twitter media tweets api parser.png | Bin 2335 -> 0 bytes ... 
tweet parser (video from koto.reisen).png | Bin 2729 -> 0 bytes .../default/parsers/twitter tweet parser.png | Bin 2448 -> 0 bytes .../parsers/twitter tweets api parser.png | Bin 2879 -> 0 bytes ...mage tweet (single or multiple images).png | Bin 3355 -> 0 bytes .../derpibooru gallery page api.png | Bin 1854 -> 1912 bytes .../url_classes/derpibooru gallery page.png | Bin 1812 -> 1816 bytes .../url_classes/nitter media timeline.png | Bin 0 -> 1539 bytes .../default/url_classes/nitter timeline.png | Bin 0 -> 1328 bytes .../url_classes/nitter tweet media.png | Bin 0 -> 1606 bytes static/default/url_classes/nitter tweet.png | Bin 0 -> 1421 bytes static/default/url_classes/twitter tweet.png | Bin 1906 -> 1432 bytes .../twitter tweets api - media only.png | Bin 1959 -> 0 bytes .../url_classes/twitter tweets api.png | Bin 2138 -> 0 bytes 72 files changed, 7259 insertions(+), 3604 deletions(-) create mode 100644 bin/swfrender license.txt create mode 100644 bin/upnpc license.txt create mode 100644 hydrus/client/gui/ClientGUISubscriptions.py create mode 100644 hydrus/client/importing/ClientImportSubscriptionLegacy.py create mode 100644 requirementspy3.8.txt create mode 100644 static/default/gugs/nitter media and retweets lookup.png create mode 100644 static/default/gugs/nitter media lookup.png create mode 100644 static/default/gugs/nitter retweets lookup.png delete mode 100644 static/default/gugs/twitter username lookup.png create mode 100644 static/default/parsers/nitter media parser.png create mode 100644 static/default/parsers/nitter retweet parser.png create mode 100644 static/default/parsers/nitter tweet parser (video from koto.reisen).png create mode 100644 static/default/parsers/nitter tweet parser.png delete mode 100644 static/default/parsers/twitter media tweets api parser.png delete mode 100644 static/default/parsers/twitter tweet parser (video from koto.reisen).png delete mode 100644 static/default/parsers/twitter tweet parser.png delete mode 100644 static/default/parsers/twitter tweets api parser.png delete mode 100644 static/default/simple_downloader_formulae/twitter image tweet (single or multiple images).png create mode 100644 static/default/url_classes/nitter media timeline.png create mode 100644 static/default/url_classes/nitter timeline.png create mode 100644 static/default/url_classes/nitter tweet media.png create mode 100644 static/default/url_classes/nitter tweet.png delete mode 100644 static/default/url_classes/twitter tweets api - media only.png delete mode 100644 static/default/url_classes/twitter tweets api.png diff --git a/bin/swfrender license.txt b/bin/swfrender license.txt new file mode 100644 index 00000000..f288702d --- /dev/null +++ b/bin/swfrender license.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. 
+ + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. 
This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/bin/upnpc license.txt b/bin/upnpc license.txt new file mode 100644 index 00000000..7f0fe3d8 --- /dev/null +++ b/bin/upnpc license.txt @@ -0,0 +1,26 @@ +MiniUPnP Project +Copyright (c) 2005-2019, Thomas BERNARD +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/help/changelog.html b/help/changelog.html index 067dd901..f971144e 100755 --- a/help/changelog.html +++ b/help/changelog.html @@ -8,6 +8,77 @@

changelog

    +
  • version 400

  • +
      +
    • subscription data overhaul:
    • +
    • the formerly monolithic subscription object is finally broken up into smaller pieces, reducing work and load lag and total db read/write for all actions
    • +
    • subscriptions work the same as before, no user input is required. they just work better now™
    • +
    • depending on the size and number of your subscriptions, the db update may take a minute or two this week. a backup of your old subscription objects will be created in your db directory, under a new 'legacy_subscriptions_backup' subdirectory
    • +
    • the manage subscriptions dialog should now open within a second (assuming subs are not currently running). it should save just as fast, only with a little lag if you decide to make significant changes or go into many queries' logs, which are now fetched on demand inside the dialog
    • +
    • when subscriptions run, they similarly only have to load the query they are currently working on. boot lag is now almost nothing, and total drive read/write data for a typical sub run is massively reduced
    • +
    • the 'total files in a sub' limits no longer apply. you can have a sub with a thousand queries and half a million urls if you like
    • +
    • basic subscription data is now held in memory at all times, opening up future fast access such as client api and general UI editing of subs. more work will happen here in coming weeks
    • +
    • if due to hard drive fault or other unusual situations some subscription file/gallery log data is missing from the db, a running sub will note this, pause the sub, and provide a popup error for the user. the manage subscription dialog will correct it on launch by resetting the affected queries with new empty data
    • +
    • similarly, if you launch the manage subs dialog and there is orphaned file/gallery log data in the db, this will be noticed, with the surplus data then backed up to the database directory and deleted from the database proper
    • +
    • subscription queries can now handle domain and bandwidth tests for downloaders that host files/posts on a different domain to the gallery search step
    • +
    • if subs are running when manage subs is booted, long delays while waiting for them to pause are less likely
    • +
    • some subscription 'should run?' tests are improved for odd situations such as subs that have no queries or all DEAD queries
    • +
    • improved some error handling in merge/separate code
    • +
    • the 'show/copy quality info' buttons now work off the main thread, disabling the sub edit dialog while they work
    • +
    • updated a little of the subs help
    • +
    • .
    • +
    • boring actual code changes for subs:
    • +
    • wrote a query log container object to store bulky file and gallery log info
    • +
    • wrote a query header object to store options and cache log summary info
    • +
    • wrote a file cache status object to summarise important info so check timings and similar can be decided upon without needing to load a log
    • +
    • the new cache is now used across the program for all file import summary presentation
    • +
    • wrote a new subscription object to hold the new query headers and load logs as needed
    • +
    • updated subscription management to deal with the new subscription objects. it now also keeps them in memory all the time
    • +
    • wrote a fail-safe update from the old subscription objects to the new, which also saves a backup to disk, just in case of unforeseen problems in the near future
    • +
    • updated the subscription ui code to deal with all the new objects
    • +
    • updated the subscription ui to deal with asynchronous log fetching as needed
    • +
    • cleaned up some file import status code
    • +
    • moved old subscription code to a new legacy file
    • +
    • refactored subscription ui code to a new file
    • +
    • refactored and improved sub sync code
    • +
    • misc subscription cleanup
    • +
    • misc subscription ui cleanup
    • +
    • added type hints to multiple subscription locations
    • +
    • improved how missing serialisable object errors are handled at the db level
    • +
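as a quick illustration, the new shape looks roughly like this (an illustrative python sketch with hypothetical names--the real hydrus classes differ in detail): a small header per query stays in memory at all times, while the bulky log container is only loaded from the db when actually needed

    import uuid

    class QueryHeader:

        # small: held in memory at all times

        def __init__( self, query_text ):

            self.query_text = query_text
            self.file_cache_status = { 'num_urls' : 0, 'num_successful' : 0 }  # cached summary, enough to decide check timing
            self.log_container_key = uuid.uuid4().hex  # pointer to the bulky data in the db

    class QueryLogContainer:

        # bulky: only loaded when the query is synced or its logs are opened

        def __init__( self ):

            self.file_seed_cache = []  # every file url this query has seen
            self.gallery_seed_log = []  # every gallery page this query has hit

    def sync_query( db, header ):

        log_container = db.load( header.log_container_key )  # the only heavy read for this query

        # ... the gallery search runs here, appending new urls to the log ...

        header.file_cache_status[ 'num_urls' ] = len( log_container.file_seed_cache )  # refresh the cached summary

        db.save( header.log_container_key, log_container )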
    • .
    • +
    • client api:
    • +
    • the client api now delivers 'is_inbox', 'is_local', 'is_trashed' for 'GET /get_files/file_metadata'
    • +
    • the client api's Access-Control-Allow-Headers CORS header is now '*', allowing all
    • +
    • client api version is now 12
    • +
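for illustration, fetching the new fields looks like this (a sketch assuming a local client on the default port 45869 and a valid access key):

    import json
    import requests

    api_url = 'http://127.0.0.1:45869'
    headers = { 'Hydrus-Client-API-Access-Key' : 'your 64-character access key here' }
    params = { 'file_ids' : json.dumps( [ 1234 ] ) }  # a json-encoded list of file ids

    response = requests.get( api_url + '/get_files/file_metadata', headers = headers, params = params )

    for metadata in response.json()[ 'metadata' ]:

        print( metadata[ 'is_inbox' ], metadata[ 'is_local' ], metadata[ 'is_trashed' ] )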
    • .
    • +
    • downloaders:
    • +
    • twitter retired their old api on the 1st of June, and there is unfortunately no good hydrus solution for the new one. however thanks to a user's efforts, a nice new parser for nitter, a twitter wrapper, is added in today's update. please play with it--it has three downloaders, one for a user's media, one for retweets, and one for both together--and adjust your twitter subscriptions to use the new downloader as needed. the twitter downloader is no longer included for new hydrus users
    • +
    • thanks to a user's submission, fixed the md5 hash fetching for default danbooru parsers
    • +
    • derpibooru gallery searching _should_ be fixed to use their current api
    • +
    • .
    • +
    • the rest:
    • +
    • when the client exits or gets a 'modal' maintenance popup window, all currently playing media windows will now pause
    • +
regrettably, due to some content merging issues that are too complicated to improve at the moment, the duplicate filter will no longer show the files of processed pairs more than once per batch. you won't get a series of AB, AC, AD any more. this will return in future
    • +
    • the weird bug where double-clicking the topmost recent tags suggestion would actually remove the top two items _should_ be fixed. general selection-setting on this column should also be improved
    • +
    • middle-clicking on a parent tag in a 'write' autocomplete dropdown no longer launches a page with that invalid parent 'label' tag included--it just does the base tag. the same is true of label tags (such as 'loading...') and namespace tags
    • +
    • when changing 'expand parents on autocomplete' in the cog button on manage tags, the respective autocomplete now changes whether it displays parents
    • +
    • this is slightly complicated: a tag 'write' context (like manage tags) now presents its autocomplete tags (filtering, siblings, parents) according to the tag service of the parent panel, not the current tag service of the autocomplete. so, if you are on 'my tags' panel and switch to 'all known tags' for the a/c, you will no longer get 'all known tags' siblings and parents and so on presented if 'my tags' is not set to take them. this was sometimes causing confusion when a list showed a parent but the underlying panel did not add it on tag entry
    • +
    • to reduce blacklist confusion, when you launch the edit blacklist dialog from an edit tag import options panel, now only the 'blacklist' tab shows, the summary text is blacklist-specific, and the top intro message is improved. a separate 'whitelist' filter will be added in the near future to allow downloading of files only if they have certain tags
    • +
    • 'hard-replace siblings and parents' in _manage tags_ should now correctly remove bad siblings when they are currently pending
    • +
    • network->downloaders->manage downloader and url display now has a checkbox to make the media viewer top-right hover show unmatched urls
    • +
the '... elide page tab names' option now applies instantly to all pages on options dialog ok
    • +
added 'copy_bmp_or_file_if_not_bmpable' shortcut command to the 'media' shortcut set. it tries copy_bmp first, then falls back to copy_file if the file is not a static image
    • +
    • fixed some edit tag filter layout to stop long intro messages making it super wide
    • +
    • fixed an issue where tag filters could accept non-whitespace-stripped entries and entries with uppercase characters
    • +
    • fixed a display typo where the 'clear orphan files' maintenance job, when set to delete orphans, was accidentally reporting (total number of thumbnails)/(number of files to delete) text in the file delete step instead of the correct (num_done/num_to_do)
    • +
    • clarified the 'reset repository' commands in review services
    • +
    • when launching an external program, the child process's environment's PATH is reset to what it was at hydrus boot (removing hydrus base dir)
    • +
    • when launching an external program from the frozen build, if some Qt/SSL specific PATH variables have been set to hydrus subdirectories by pyinstaller or otherwise, they are now removed. (this hopefully fixes issues launching some Qt programs as external file launchers)
    • +
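(a minimal sketch of the idea behind the previous two items, not the actual hydrus code--the variable names and the env var list here are hypothetical examples):

    import os
    import subprocess

    ORIGINAL_PATH = os.environ.get( 'PATH', '' )  # captured once, at program boot

    HYDRUS_BASE_DIR = os.getcwd()  # hypothetical stand-in for the install dir

    def launch_external_program( cmd ):

        env = os.environ.copy()

        env[ 'PATH' ] = ORIGINAL_PATH  # drop any hydrus dirs added to PATH since boot

        # frozen builds can set Qt/SSL vars pointing into the install dir; scrubbing
        # them stops external Qt programs trying to load our bundled libraries
        for var in ( 'QT_PLUGIN_PATH', 'QML2_IMPORT_PATH', 'SSL_CERT_FILE' ):

            if var in env and env[ var ].startswith( HYDRUS_BASE_DIR ):

                del env[ var ]

        subprocess.Popen( cmd, env = env )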
    • added a separate requirements.txt for python 3.8, which can't handle PySide2 5.13.0
    • +
    • updated help->about to deal better with missing mpv
    • +
    • updated windows mpv to 2020-05-31 build, api version is now 1.108
    • +
    • updated windows sqlite to 3.32.2
    • +
  • version 399

    • improvements:
    • diff --git a/help/client_api.html b/help/client_api.html index b4bda4a1..91688f0f 100644 --- a/help/client_api.html +++ b/help/client_api.html @@ -893,6 +893,9 @@ "has_audio" : false, "num_frames" : null, "num_words" : null, + "is_inbox" : true, + "is_local" : true, + "is_trashed" : false, "known_urls" : [], "service_names_to_statuses_to_tags" : {} }, @@ -908,6 +911,9 @@ "has_audio" : true, "num_frames" : 102, "num_words" : null, + "is_inbox" : false, + "is_local" : true, + "is_trashed" : false, "known_urls" : [ "https://gelbooru.com/index.php?page=post&s=view&id=4841557", "https://img2.gelbooru.com//images/80/c8/80c8646b4a49395fb36c805f316c49a9.jpg", diff --git a/help/duplicates.html b/help/duplicates.html index 65d35671..c5479d63 100644 --- a/help/duplicates.html +++ b/help/duplicates.html @@ -17,7 +17,7 @@

      Let's go to the preparation page first:

The 'similar shape' algorithm works on distance. Two files with 0 distance are likely exact matches, such as resizes of the same file or lower/higher quality jpegs, whereas those with distance 4 tend to be hairstyle or costume changes. You will start on distance 0 and should not expect to ever go above 4 or 8 or so. Going too high increases the danger of being overwhelmed by false positives.

      -

      If you are interested, the current version of this system uses a 64-bit phash to represent the image shape and a VPTree to search different files' phashes' relative hamming distance. I expect to extend it in future with multiple phash generation (flips, rotations, and 'interesting' image crops and video frames) and most-common colour comparisons.

      +

      If you are interested, the current version of this system uses a 64-bit phash to represent the image shape and a VPTree to search different files' phashes' relative hamming distance. I expect to extend it in future with multiple phash generation (flips, rotations, and 'interesting' image crops and video frames) and most-common colour comparisons.
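If you want a concrete sense of what 'distance' means, here is the test in miniature (a toy sketch with made-up phashes; the real system searches a VPTree rather than comparing every pair):

    def hamming_distance( phash_a, phash_b ):

        return bin( phash_a ^ phash_b ).count( '1' )  # number of differing bits between two 64-bit ints

    # 0 = likely exact match, ~4 = hairstyle/costume-level change
    print( hamming_distance( 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F2 ) )  # 1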

      Searching for duplicates is fairly fast per file, but with a large client with hundreds of thousands of files, the total CPU time adds up. You can do a little manual searching if you like, but once you are all settled here, I recommend you hit the cog icon on the preparation page and let hydrus do this page's catch-up search work in your regular maintenance time. It'll swiftly catch up and keep you up to date without you even thinking about it.

      Start searching on the 'exact match' search distance of 0. It is generally easier and more valuable to get exact duplicates out of the way first.

      Once you have some files searched, you should see a potential pair count appear in the 'filtering' page.

      diff --git a/help/getting_started_subscriptions.html b/help/getting_started_subscriptions.html index b452ce1f..6e9f8f31 100644 --- a/help/getting_started_subscriptions.html +++ b/help/getting_started_subscriptions.html @@ -10,16 +10,17 @@

Let's say you found an artist you like. You downloaded everything of theirs from some site, but one or two pieces of new work are posted every week. You'd like to keep up with the new stuff, but you don't want to manually make a new download job every week for every single artist you like.

      what are subs?

      Subscriptions are a way of telling the client to regularly and quietly repeat a gallery search. You set up a number of saved queries, and the client will 'sync' with the latest files in the gallery and download anything new, just as if you were running the download yourself.

      +

      Subscriptions only work for booru-like galleries that put the newest files first, and they only keep up with new content--once they have done their first sync, which usually gets the most recent hundred files or so, they will never reach further into the past. Getting older files, as you will see later, is a job best done with a normal download page.
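If you are curious, you can think of a single sync as roughly this loop (a toy sketch of the idea, not actual hydrus code):

    def sync( gallery_pages, seen_urls, first_sync_limit = 100 ):

        new_urls = []

        for page in gallery_pages:  # newest files first

            for url in page:

                if url in seen_urls:

                    return new_urls  # caught up--everything older is already known

                new_urls.append( url )

                if not seen_urls and len( new_urls ) >= first_sync_limit:

                    return new_urls  # first sync stops around the most recent hundred files

        return new_urls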

      Here's the dialog, which is under network->downloaders->manage subscriptions:

      This is a very simple example--there is only one subscription, for safebooru. It has two 'queries' (i.e. searches to keep up with).

      -

      It is important to note that while subscriptions can have multiple queries (even hundreds!), they generally only work on one site. Expect to create one subscription for safebooru, one for artstation, one for paheal, and so on for every site you care about. Advanced users may be able to think of ways to get subscriptions to work on multiple sites at once, but I recommend against this as it throws off some of the internal check timing calculations.

      +

      It is important to note that while subscriptions can have multiple queries (even hundreds!), they generally only work on one site. Expect to create one subscription for safebooru, one for artstation, one for paheal, and so on for every site you care about. Advanced users may be able to think of ways to get around this, but I recommend against it as it throws off some of the internal check timing calculations.

      Before we trip over the advanced buttons here, let's zoom in on the actual subscription:

      This is a big and powerful panel! I recommend you open the screenshot up in a new browser tab, or in the actual client, so you can refer to it.

      Despite all the controls, the basic idea is simple: Up top, I have selected the 'safebooru tag search' download source, and then I have added two artists--"hong_soon-jae" and "houtengeki". These two queries have their own panels for reviewing what URLs they have worked on and further customising their behaviour, but all they really are is little bits of search text. When the subscription runs, it will put the given search text into the given download source just as if you were running the regular downloader.

      For the most part, all you need to do to set up a good subscription is give it a name, select the download source, and use the 'paste queries' button to paste what you want to search. Subscriptions have great default options for almost all query types, so you don't have to go any deeper than that to get started.
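For example, the text you paste is just one query per line, so for the safebooru sub above the clipboard would simply hold:

hong_soon-jae
houtengeki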

      -

      Do not change the 'file limits' options until you know exactly what they do and have a good reason to alter them!

      +

      Do not change the max number of new files options until you know exactly what they do and have a good reason to alter them!

      how do subscriptions work?

      Once you hit ok on the main subscription dialog, the subscription system should immediately come alive. If any queries are due for a 'check', they will perform their search and look for new files (i.e. URLs it has not seen before). Once that is finished, the file download queue will be worked through as normal. Typically, the sub will make a popup like this while it works:
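In very rough pseudo-Python (the names here are made up for illustration--this is not hydrus's real code), a sub run looks like:

def run_subscription( subscription ):
    
    for query in subscription.queries:
        
        if query.is_due_for_a_check():
            
            # the check is a gallery search that looks for URLs the query has not seen before
            query.check_for_new_urls()
            
        
    
    # once checking is finished, the file download queue is worked through as normal
    subscription.work_on_file_queue()
    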

      @@ -29,16 +30,16 @@

      This can often be a nice surprise!

      what makes a good subscription?

      -

      The same rules as for downloaders apply: start slow, be hesitant, and plan for the long-term. Artist queries make great subscriptions as they don't update reliably but not too often and have very stable quality. Pick the artists you like most, see where their stuff is posted, and set up your subs like that.

      +

      The same rules as for downloaders apply: start slow, be hesitant, and plan for the long-term. Artist queries make great subscriptions as they update reliably but not too often and have very stable quality. Pick the artists you like most, see where their stuff is posted, and set up your subs like that.

      Series and character subscriptions are sometimes valuable, but they can be difficult to keep up with and have highly variable quality. It is not uncommon for users to only keep 15% of what a character sub produces. I do not recommend them for anything but your waifu.

      -

      Attribute subscriptions like 'blue_eyes' or 'smile' make for terrible subs as the quality is all over the place and you will be inundated by way too much content. The only exceptions are for specific, low-count searches that really matter to you, like 'contrapposto' or 'gothic trap thighhighs'.

      +

      Attribute subscriptions like 'blue_eyes' or 'smile' make for terrible subs as the quality is all over the place and you will be inundated by too much content. The only exceptions are for specific, low-count searches that really matter to you, like 'contrapposto' or 'gothic trap thighhighs'.

      If you end up subscribing to eight hundred things and get ten thousand new files a week, you made a mistake. Subscriptions are for keeping up with things you like. If you let them overwhelm you, you'll resent them.

      It is a good idea to run a 'full' download for a search before you set up a subscription. As well as making sure you have the exact right query text and that you have everything ever posted (beyond the 100 files deep a sub will typically look), it saves the bulk of the work (and waiting on bandwidth) for the manual downloader, where it belongs. When a new subscription picks up off a freshly completed download queue, its initial subscription sync only takes thirty seconds since its initial URLs are those that were already processed by the manual downloader. I recommend you stack artist searches up in the manual downloader using 'no limit' file limit, and when they are all finished, select them in the list and right-click->copy queries, which will put the search texts in your clipboard, newline-separated. This list can be pasted into the subscription dialog in one go with the 'paste queries' button again!

      The entire subscription system assumes the source is a typical 'newest first' booru-style search. If you dick around with some order_by:rating/random metatag, it won't work.

      how often do subscriptions check?

      Hydrus subscriptions use the same variable-rate checking system as its thread watchers, just on a larger timescale. If you subscribe to a busy feed, it might check for new files once a day, but if you enter an artist who rarely posts, it might only check once every month. You don't have to do anything. The fine details of this are governed by the 'checker options' button. This is one of the things you should not mess with as you start out.
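The broad idea, with made-up numbers and names (a sketch of the concept, not the actual checker options code), is to aim for a handful of new files per check and clamp the result:

def get_check_period( files_in_previous_30_days, files_per_check = 5 ):
    
    min_period = 86400 # a day
    max_period = 30 * 86400 # a month
    
    if files_in_previous_30_days == 0:
        
        return max_period
        
    
    seconds_per_file = ( 30 * 86400 ) / files_in_previous_30_days
    
    ideal_period = int( files_per_check * seconds_per_file )
    
    # busy feeds end up checking about daily, quiet artists stretch towards monthly
    return min( max( ideal_period, min_period ), max_period )
    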

      If a query goes too 'slow' (typically, this means no new files for 180 days), it will be marked DEAD in the same way a thread will, and it will not be checked again. You will get a little popup when this happens. This is all editable as you get a better feel for the system--if you wish, it is completely possible to set up a sub that never dies and only checks once a year.
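The DEAD test itself is simple--something like this sketch (again illustrative, using the default 180-day figure from above):

def query_is_dead( seconds_since_last_new_file ):
    
    death_period = 180 * 86400
    
    # no new files for the whole death period means the query is marked DEAD and not checked again
    return seconds_since_last_new_file >= death_period
    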

      -

      I do not recommend setting up a sub that needs to check more than once a day. The system tends only to wake up a few times per day anyway, and any search that is producing that many files is probably a bad fit for a subscription. Subscriptions are for lightweight searches that are updated every now and then.

      +

      I do not recommend setting up a sub that needs to check more than once a day. Any search that is producing that many files is probably a bad fit for a subscription. Subscriptions are for lightweight searches that are updated every now and then.


      (you might like to come back to this point once you have tried subs for a week or so and want to refine your workflow)


      @@ -46,7 +47,7 @@

On the edit subscription panel, the 'presentation' options let you publish files to a page. The page will have the subscription's name, just like the popup button does, but it cuts out the middle-man and 'locks it in' more than the button, which will be forgotten if you restart the client. Also, if a page with that name already exists, the new files will be appended to it, just like a normal import page! I strongly recommend moving to this once you have several subs going. Make a 'page of pages' called 'subs' and put all your subscription landing pages in there, and then you can check it whenever is convenient.

      If you discover your subscription workflow tends to be the same for each sub, you can also customise the publication 'label' used. If multiple subs all publish to the 'nsfw subs' label, they will all end up on the same 'nsfw subs' popup button or landing page. Sending multiple subscriptions' import streams into just one or two locations like this can be great.

      You can also hide the main working popup. I don't recommend this unless you are really having a problem with it, since it is useful to have that 'active' feedback if something goes wrong.

      -

      Note that subscription file import options will, by default, only present 'new' files. Anything already in the db will still be recorded in the internal import cache and used to calculate next check times and so on, but it won't clutter your import stream. This is different to the default for all the other importers, but when you are ready to enter the ranks of the patricians, you will know to edit your 'loud' default file import options under options->importing to behave this way as well. Efficient workflows only care about new files.

      +

      Note that subscription file import options will, by default, only present 'new' files. Anything already in the db will still be recorded in the internal import cache and used to calculate next check times and so on, but it won't clutter your import stream. This is different to the default for all the other importers, but when you are ready to enter the ranks of the Patricians, you will know to edit your 'loud' default file import options under options->importing to behave this way as well. Efficient workflows only care about new files.
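As a sketch of that filter (hypothetical attribute names, not the real import options code):

def get_files_to_present( import_results ):
    
    # the default subscription presentation: only files that were genuinely new to the db
    return [ result for result in import_results if result.status == 'new' ]
    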

      how exactly does the sync work?

Figuring out when a repeating search has 'caught up' can be a tricky problem to solve. It sounds simple, but unusual situations like 'a file got tagged late, so it inserted deeper than it ideally should in the gallery search' or 'the website changed its URL format completely, help' can cause problems. Subscriptions are automatic systems, so they tend to be a bit more careful and paranoid about problems, lest they burn 10GB on 10,000 unexpected diaperfur images.

      The initial sync is simple. It does a regular search, stopping if it reaches the 'initial file limit' or the last file in the gallery, whichever comes first. The default initial file sync is 100, which is a great number for almost all situations.
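In sketch form (illustrative Python, assuming a gallery object that yields URLs newest-first):

def initial_sync( gallery, initial_file_limit = 100 ):
    
    urls = []
    
    for url in gallery.newest_first():
        
        # stop at the limit or at the last file in the gallery, whichever comes first
        if len( urls ) >= initial_file_limit:
            
            break
            
        
        urls.append( url )
        
    
    return urls
    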

      @@ -54,8 +55,8 @@

      If the sub keeps finding apparently new URLs on a regular sync, it will stop upon hitting its 'periodic file limit', which is also usually 100. This is a safety stopgap, and usually happens when the site's URL format itself has changed, which may or may not require attention from you to figure out. If a user just went nuts and uploaded 500 new files to that tag in one day, you'll have a 'gap' in your sub sync, which you'll want to fill in with a manual download. If a sub hits its periodic file limit and thinks something like this happened, it will give you a popup explaining the situation.

      Please note that subscriptions only keep up with new content. They cannot search backwards in time in order to 'fill out' a search, nor can they fill in gaps. Do not change the file limits or check times to try to make this happen. If you want to ensure complete sync with all existing content for a particular search, please use the manual downloader.

In practice, most subs only need to check the first page of a gallery since only the first two or three URLs are new.
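The regular sync can be sketched the same way (again illustrative, not the real code)--walk the gallery newest-first until hitting a URL already seen, with the periodic file limit as the safety stopgap:

def periodic_sync( gallery, seen_urls, periodic_file_limit = 100 ):
    
    new_urls = []
    
    for url in gallery.newest_first():
        
        if url in seen_urls:
            
            break # we have caught up with the previous sync
            
        
        new_urls.append( url )
        
        if len( new_urls ) >= periodic_file_limit:
            
            # safety stopgap--the site's URL format probably changed, or there was a
            # flood of uploads and there is now a gap to fill with a manual download
            break
            
        
    
    seen_urls.update( new_urls )
    
    return new_urls
    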

      -

      I put character queries in my artist sub, and now things are all mixed up

      -

      On the main subscription dialog, there are 'merge' and 'split' buttons. These are powerful, but they will walk you through the process of pulling queries out of a sub and merging them back into a different one. Only subs that use the same download source can be merged. Give them a go, and if it all goes wrong, just hit the cancel button.

      +

      I put character queries in my artist sub, and now things are all mixed up

      +

      On the main subscription dialog, there are 'merge' and 'separate' buttons. These are powerful, but they will walk you through the process of pulling queries out of a sub and merging them back into a different one. Only subs that use the same download source can be merged. Give them a go, and if it all goes wrong, just hit the cancel button on the dialog.

diff --git a/hydrus/client/ClientController.py b/hydrus/client/ClientController.py index 623f7cb6..fcfb7c4e 100644 --- a/hydrus/client/ClientController.py +++ b/hydrus/client/ClientController.py @@ -1012,7 +1012,9 @@ class Controller( HydrusController.HydrusController ): self.pub( 'splash_set_title_text', 'booting gui\u2026' ) - self.subscriptions_manager = ClientImportSubscriptions.SubscriptionsManager( self ) + subscriptions = HG.client_controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION ) + + self.subscriptions_manager = ClientImportSubscriptions.SubscriptionsManager( self, subscriptions ) def qt_code_gui(): diff --git a/hydrus/client/ClientDB.py b/hydrus/client/ClientDB.py index 82b8e7ee..4dd6a987 100644 --- a/hydrus/client/ClientDB.py +++ b/hydrus/client/ClientDB.py @@ -1512,7 +1512,6 @@ class DB( HydrusDB.HydrusDB ): def _CleanUpCaches( self ): - self._subscriptions_cache = {} self._service_cache = {} @@ -7185,13 +7184,20 @@ class DB( HydrusDB.HydrusDB ): if timestamp is None: - ( version, dump, object_timestamp ) = self._c.execute( 'SELECT version, dump, timestamp FROM json_dumps_named WHERE dump_type = ? AND dump_name = ? ORDER BY timestamp DESC;', ( dump_type, dump_name ) ).fetchone() + result = self._c.execute( 'SELECT version, dump, timestamp FROM json_dumps_named WHERE dump_type = ? AND dump_name = ? ORDER BY timestamp DESC;', ( dump_type, dump_name ) ).fetchone() else: - ( version, dump, object_timestamp ) = self._c.execute( 'SELECT version, dump, timestamp FROM json_dumps_named WHERE dump_type = ? AND dump_name = ? AND timestamp = ?;', ( dump_type, dump_name, timestamp ) ).fetchone() + result = self._c.execute( 'SELECT version, dump, timestamp FROM json_dumps_named WHERE dump_type = ? AND dump_name = ? AND timestamp = ?;', ( dump_type, dump_name, timestamp ) ).fetchone() + if result is None: + + raise HydrusExceptions.DataMissing( 'Could not find the object of type "{}" and name "{}" and timestamp "{}".'.format( dump_type, dump_name, str( timestamp ) ) ) + + + ( version, dump, object_timestamp ) = result + try: if isinstance( dump, bytes ): @@ -9012,7 +9018,6 @@ class DB( HydrusDB.HydrusDB ): self._combined_file_service_id = self._GetServiceId( CC.COMBINED_FILE_SERVICE_KEY ) self._combined_tag_service_id = self._GetServiceId( CC.COMBINED_TAG_SERVICE_KEY ) - self._subscriptions_cache = {} self._service_cache = {} self._weakref_media_result_cache = ClientMediaResultCache.MediaResultCache() @@ -11727,7 +11732,6 @@ class DB( HydrusDB.HydrusDB ): self._combined_file_service_id = self._GetServiceId( CC.COMBINED_FILE_SERVICE_KEY ) self._combined_tag_service_id = self._GetServiceId( CC.COMBINED_TAG_SERVICE_KEY ) - self._subscriptions_cache = {} self._service_cache = {} self._weakref_media_result_cache = ClientMediaResultCache.MediaResultCache() @@ -12723,227 +12727,6 @@ class DB( HydrusDB.HydrusDB ): self._controller.pub( 'splash_set_status_text', 'updating db to v' + str( version + 1 ) ) - if version == 341: - - try: - - domain_manager = self._GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER ) - - domain_manager.Initialise() - - # - - domain_manager.OverwriteDefaultParsers( [ 'gelbooru 0.2.5 file page parser' ] ) - - # - - domain_manager.TryToLinkURLClassesAndParsers() - - # - - self._SetJSONDump( domain_manager ) - - except Exception as e: - - HydrusData.PrintException( e ) - - message = 'Trying to update some url classes and parsers failed! Please let hydrus dev know!' 
- - self.pub_initial_message( message ) - - - - if version == 344: - - message = 'The client now only uses one thumbnail per file (previously it needed two). Your \'resized\' thumbnails will now be deleted. This is a significant step that could take some time to complete. It will also significantly impact your next backup run.' - message += os.linesep * 2 - message += 'In order to keep your recycle bin sane, the thumbnails will be permanently deleted. Therefore, this operation cannot be undone. If you are not ready to do this yet (for instance if you do not have a recent backup), kill the hydrus process in Task Manager now.' - message += os.linesep * 2 - message += 'BTW: If you previously put your resized thumbnails on an SSD but not your \'full-size\' ones, you should check the \'migrate database\' dialog once the client boots so you can move the remaining thumbnail directories to fast storage.' - - BlockingSafeShowMessage( message ) - - new_options = self._GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_CLIENT_OPTIONS ) - - self._c.execute( 'CREATE TABLE IF NOT EXISTS ideal_client_files_locations ( location TEXT, weight INTEGER );' ) - self._c.execute( 'CREATE TABLE IF NOT EXISTS ideal_thumbnail_override_location ( location TEXT );' ) - - for ( location, weight ) in new_options._dictionary[ 'client_files_locations_ideal_weights' ]: - - self._c.execute( 'INSERT INTO ideal_client_files_locations ( location, weight ) VALUES ( ?, ? );', ( location, weight ) ) - - - thumbnail_override_location = new_options._dictionary[ 'client_files_locations_full_size_thumbnail_override' ] - - if thumbnail_override_location is not None: - - self._c.execute( 'INSERT INTO ideal_thumbnail_override_location ( location ) VALUES ( ? );', ( thumbnail_override_location, ) ) - - - self._SetJSONDump( new_options ) - - # - - error_occurred = False - - for ( i, prefix ) in enumerate( HydrusData.IterateHexPrefixes() ): - - self._controller.pub( 'splash_set_status_subtext', 'deleting resized thumbnails {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, 256 ) ) ) - - resized_prefix = 'r' + prefix - - try: - - ( location, ) = self._c.execute( 'SELECT location FROM client_files_locations WHERE prefix = ?;', ( resized_prefix, ) ).fetchone() - - except: - - continue - - - full_path = os.path.join( HydrusPaths.ConvertPortablePathToAbsPath( location ), resized_prefix ) - - if os.path.exists( full_path ): - - try: - - HydrusPaths.DeletePath( full_path ) - - except Exception as e: - - HydrusData.PrintException( e ) - - if not error_occurred: - - error_occurred = True - - message = 'There was a problem deleting one or more of your old \'rxx\' resized thumbnail directories, perhaps because of some old read-only files. There is no big harm here, since the old directories are no longer needed, but you will want to delete them yourself. Additional error information has been written to the log. Please contact hydrus dev if you need help.' 
- - self.pub_initial_message( message ) - - - - - self._c.execute( 'DELETE FROM client_files_locations WHERE prefix = ?;', ( resized_prefix, ) ) - - - - if version == 345: - - # I screwed up the permissions setting on 344 update so that certain non-windows users got de-execution-permissioned rxx folders, which then made them non-traversable and -deletable - # so, let's give it another spin, albeit with less information since we have to guess potential location from remaining locations - - if not HC.PLATFORM_WINDOWS: - - locations_where_r_folders_were_found = set() - - locations = self._STL( self._c.execute( 'SELECT DISTINCT location FROM client_files_locations;' ) ) - - possible_resized_paths = [] - - error_occurred = False - - for prefix in HydrusData.IterateHexPrefixes(): - - resized_prefix = 'r' + prefix - - for location in locations: - - full_path = os.path.join( HydrusPaths.ConvertPortablePathToAbsPath( location ), resized_prefix ) - - if os.path.exists( full_path ): - - possible_resized_paths.append( full_path ) - - locations_where_r_folders_were_found.add( location ) - - - - - num_possible_resized_paths = len( possible_resized_paths ) - - if num_possible_resized_paths > 0: - - message = 'It appears that the update code from last week\'s release, 345, did not successfully delete all your old (and now unneeded) resized thumbnail directories.' - message += os.linesep * 2 - message += 'I have found {} spare \'rxx\' directories (this number should be less than or equal to 256) in these current locations:'.format( num_possible_resized_paths ) - message += os.linesep * 2 - message += os.linesep.join( [ HydrusPaths.ConvertPortablePathToAbsPath( location ) for location in locations_where_r_folders_were_found ] ) - message += os.linesep * 2 - message += 'I will now attempt to delete these directories again, this time with fixed permissions. If you are not ready to do this, kill the hydrus process now.' - - BlockingSafeShowMessage( message ) - - for ( i, full_path ) in enumerate( possible_resized_paths ): - - self._controller.pub( 'splash_set_status_subtext', 'deleting resized thumbnails 2: electric boogaloo {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, num_possible_resized_paths ) ) ) - - try: - - stat_result = os.stat( full_path ) - - current_bits = stat_result.st_mode - - if not stat.S_IXUSR & current_bits: - - os.chmod( full_path, current_bits | stat.S_IXUSR ) - - - HydrusPaths.DeletePath( full_path ) - - except Exception as e: - - HydrusData.PrintException( e ) - - if not error_occurred: - - error_occurred = True - - message = 'The second attempt to delete old resized directories also failed. Error information has been written to the log. Please consult hydrus dev if you cannot figure this out on your own.' - - self.pub_initial_message( message ) - - - - - - - - if version == 346: - - self._c.execute( 'CREATE TABLE IF NOT EXISTS local_file_deletion_reasons ( hash_id INTEGER PRIMARY KEY, reason_id INTEGER );' ) - - - if version == 347: - - try: - - domain_manager = self._GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER ) - - domain_manager.Initialise() - - # - - domain_manager.OverwriteDefaultURLClasses( [ 'yiff.party file attachment long' ] ) - - # - - domain_manager.TryToLinkURLClassesAndParsers() - - # - - self._SetJSONDump( domain_manager ) - - except Exception as e: - - HydrusData.PrintException( e ) - - message = 'Trying to update some url classes and parsers failed! Please let hydrus dev know!' 
- - self.pub_initial_message( message ) - - - if version == 349: try: @@ -14893,6 +14676,105 @@ class DB( HydrusDB.HydrusDB ): + if version == 399: + + try: + + legacy_subscription_names = self._GetJSONDumpNames( HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_LEGACY ) + + if len( legacy_subscription_names ) > 0: + + try: + + HydrusPaths.CheckHasSpaceForDBTransaction( self._db_dir, 500 * 1024 * 1024 ) + + except: + + message = 'The big subscription update for v400 will now start. However this update is heavy and will also try to make a backup of your old subs, and it looks like your system drive or hydrus drive are a bit short on space right now. If your drives are truly currently real tight, please free up some space now. If you have thousands of subs with hundreds of thousands of URLs, you will need a few GB.' + + BlockingSafeShowMessage( message ) + + + from hydrus.client.importing import ClientImportSubscriptionLegacy + + sub_dir = os.path.join( self._db_dir, 'legacy_subscriptions_backup' ) + + HydrusPaths.MakeSureDirectoryExists( sub_dir ) + + for ( i, legacy_subscription_name ) in enumerate( legacy_subscription_names ): + + self._controller.pub( 'splash_set_status_subtext', 'updating subscriptions: {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, len( legacy_subscription_names ) ) ) ) + + legacy_subscription = self._GetJSONDumpNamed( HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_LEGACY, legacy_subscription_name ) + + backup_path = os.path.join( sub_dir, 'sub_{}.json'.format( i ) ) + + with open( backup_path, 'w', encoding = 'utf-8' ) as f: + + f.write( legacy_subscription.DumpToString() ) + + + ( subscription, query_log_containers ) = ClientImportSubscriptionLegacy.ConvertLegacySubscriptionToNew( legacy_subscription ) + + self._SetJSONDump( subscription ) + + for query_log_container in query_log_containers: + + self._SetJSONDump( query_log_container ) + + + self._DeleteJSONDumpNamed( HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_LEGACY, legacy_subscription_name ) + + + + except Exception as e: + + message = 'Damn, the big subscription update for v400 did not work for you! No changes have been saved, your database is still on v399. You will get an error next, please send it to hydrus dev and go back to using v399 for now!' + + BlockingSafeShowMessage( message ) + + raise + + + # + + try: + + domain_manager = self._GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER ) + + domain_manager.Initialise() + + # + + domain_manager.OverwriteDefaultURLClasses( [ 'twitter tweet', 'nitter tweet media', 'nitter tweet', 'nitter timeline', 'nitter media timeline', 'derpibooru gallery page', 'derpibooru gallery page api' ] ) + + # + + domain_manager.OverwriteDefaultGUGs( [ 'nitter media lookup', 'nitter retweets lookup', 'nitter media and retweets lookup', 'derpibooru tag search', 'derpibooru tag search - no filter' ] ) + + # + + domain_manager.OverwriteDefaultParsers( [ 'nitter media parser', 'nitter retweet parser', 'nitter tweet parser', 'nitter tweet parser (video from koto.reisen)', 'danbooru file page parser', 'danbooru file page parser - get webm ugoira', 'derpibooru gallery page api parser' ] ) + + # + + domain_manager.TryToLinkURLClassesAndParsers() + + # + + self._SetJSONDump( domain_manager ) + + except Exception as e: + + HydrusData.PrintException( e ) + + message = 'Trying to update some downloaders failed! Please let hydrus dev know!' 
+ + self.pub_initial_message( message ) + + + + self._controller.pub( 'splash_set_title_text', 'updated db to v{}'.format( HydrusData.ToHumanInt( version + 1 ) ) ) self._c.execute( 'UPDATE version SET version = ?;', ( version + 1, ) ) diff --git a/hydrus/client/ClientFiles.py b/hydrus/client/ClientFiles.py index 7cd6ca07..e8c65bf7 100644 --- a/hydrus/client/ClientFiles.py +++ b/hydrus/client/ClientFiles.py @@ -535,7 +535,7 @@ class ClientFilesManager( object ): dir = os.path.join( location, prefix ) - filenames = os.listdir( dir ) + filenames = list( os.listdir( dir ) ) for filename in filenames: @@ -553,7 +553,7 @@ class ClientFilesManager( object ): dir = os.path.join( location, prefix ) - filenames = os.listdir( dir ) + filenames = list( os.listdir( dir ) ) for filename in filenames: @@ -893,7 +893,7 @@ class ClientFilesManager( object ): time.sleep( 5 ) - for path in orphan_paths: + for ( i, path ) in enumerate( orphan_paths ): ( i_paused, should_quit ) = job_key.WaitIfNeeded() diff --git a/hydrus/client/ClientLocalServerResources.py b/hydrus/client/ClientLocalServerResources.py index f3e332a1..eecf0fd7 100644 --- a/hydrus/client/ClientLocalServerResources.py +++ b/hydrus/client/ClientLocalServerResources.py @@ -1479,7 +1479,13 @@ class HydrusResourceClientAPIRestrictedGetFilesFileMetadata( HydrusResourceClien metadata_row[ 'num_words' ] = file_info_manager.num_words metadata_row[ 'has_audio' ] = file_info_manager.has_audio - known_urls = sorted( media_result.GetLocationsManager().GetURLs() ) + locations_manager = media_result.GetLocationsManager() + + metadata_row[ 'is_inbox' ] = locations_manager.inbox + metadata_row[ 'is_local' ] = locations_manager.IsLocal() + metadata_row[ 'is_trashed' ] = locations_manager.IsTrashed() + + known_urls = sorted( locations_manager.GetURLs() ) metadata_row[ 'known_urls' ] = known_urls diff --git a/hydrus/client/ClientOptions.py b/hydrus/client/ClientOptions.py index adf37b4d..82aecaaf 100644 --- a/hydrus/client/ClientOptions.py +++ b/hydrus/client/ClientOptions.py @@ -127,6 +127,8 @@ class ClientOptions( HydrusSerialisable.SerialisableBase ): self._dictionary[ 'booleans' ][ 'disable_cv_for_gifs' ] = False + self._dictionary[ 'booleans' ][ 'show_unmatched_urls_in_media_viewer' ] = False + self._dictionary[ 'booleans' ][ 'set_search_focus_on_page_change' ] = False self._dictionary[ 'booleans' ][ 'allow_remove_on_manage_tags_input' ] = True diff --git a/hydrus/client/gui/ClientGUI.py b/hydrus/client/gui/ClientGUI.py index a4677613..2bfd25cf 100644 --- a/hydrus/client/gui/ClientGUI.py +++ b/hydrus/client/gui/ClientGUI.py @@ -66,6 +66,7 @@ from hydrus.client.gui import ClientGUIScrolledPanelsReview from hydrus.client.gui import ClientGUIShortcuts from hydrus.client.gui import ClientGUIShortcutControls from hydrus.client.gui import ClientGUIStyle +from hydrus.client.gui import ClientGUISubscriptions from hydrus.client.gui import ClientGUISystemTray from hydrus.client.gui import ClientGUITags from hydrus.client.gui import ClientGUITopLevelWindows @@ -532,16 +533,23 @@ class FrameGUI( ClientGUITopLevelWindows.MainFrameThatResizes ): library_versions.append( ( 'OpenCV', cv2.__version__ ) ) library_versions.append( ( 'Pillow', PIL.__version__ ) ) - if ClientGUIMPV.MPV_IS_AVAILABLE: + if HC.RUNNING_FROM_FROZEN_BUILD and HC.PLATFORM_MACOS: - library_versions.append( ( 'mpv api version: ', ClientGUIMPV.GetClientAPIVersionString() ) ) + library_versions.append( ( 'mpv: ', 'is not currently available on macOS' ) ) else: - HydrusData.ShowText( 'MPV failed to import 
because:' ) - HydrusData.ShowText( ClientGUIMPV.mpv_failed_reason ) - - library_versions.append( ( 'mpv', 'not available' ) ) + if ClientGUIMPV.MPV_IS_AVAILABLE: + + library_versions.append( ( 'mpv api version: ', ClientGUIMPV.GetClientAPIVersionString() ) ) + + else: + + HydrusData.ShowText( 'If this information helps, MPV failed to import because:' ) + HydrusData.ShowText( ClientGUIMPV.mpv_failed_reason ) + + library_versions.append( ( 'mpv', 'not available' ) ) + library_versions.append( ( 'FFMPEG', HydrusVideoHandling.GetFFMPEGVersion() ) ) @@ -1698,15 +1706,10 @@ class FrameGUI( ClientGUITopLevelWindows.MainFrameThatResizes ): page.PageHidden() - from hydrus.client.gui import ClientGUICanvasFrame + HG.client_controller.pub( 'pause_all_media' ) for tlw in visible_tlws: - if isinstance( tlw, ClientGUICanvasFrame.CanvasFrame ): - - tlw.PauseMedia() - - tlw.hide() self._system_tray_hidden_tlws.append( ( tlw.isMaximized(), tlw ) ) @@ -2162,17 +2165,21 @@ class FrameGUI( ClientGUITopLevelWindows.MainFrameThatResizes ): url_class_keys_to_display = domain_manager.GetURLClassKeysToDisplay() - panel = ClientGUIScrolledPanelsEdit.EditDownloaderDisplayPanel( dlg, self._controller.network_engine, gugs, gug_keys_to_display, url_classes, url_class_keys_to_display ) + show_unmatched_urls_in_media_viewer = HG.client_controller.new_options.GetBoolean( 'show_unmatched_urls_in_media_viewer' ) + + panel = ClientGUIScrolledPanelsEdit.EditDownloaderDisplayPanel( dlg, self._controller.network_engine, gugs, gug_keys_to_display, url_classes, url_class_keys_to_display, show_unmatched_urls_in_media_viewer ) dlg.SetPanel( panel ) if dlg.exec() == QW.QDialog.Accepted: - ( gug_keys_to_display, url_class_keys_to_display ) = panel.GetValue() + ( gug_keys_to_display, url_class_keys_to_display, show_unmatched_urls_in_media_viewer ) = panel.GetValue() domain_manager.SetGUGKeysToDisplay( gug_keys_to_display ) domain_manager.SetURLClassKeysToDisplay( url_class_keys_to_display ) + HG.client_controller.new_options.SetBoolean( 'show_unmatched_urls_in_media_viewer', show_unmatched_urls_in_media_viewer ) + @@ -2606,24 +2613,110 @@ class FrameGUI( ClientGUITopLevelWindows.MainFrameThatResizes ): def _ManageSubscriptions( self ): - def qt_do_it( subscriptions, original_pause_status ): + def qt_do_it( subscriptions, missing_query_log_container_names, surplus_query_log_container_names, original_pause_status ): + + if len( missing_query_log_container_names ) > 0: + + text = '{} subscription queries had missing database data! This is a serious error!'.format( HydrusData.ToHumanInt( len( missing_query_log_container_names ) ) ) + text += os.linesep * 2 + text += 'If you continue, the client will now create and save empty file/gallery logs for those queries, essentially resetting them, but if you know you need to exit and fix your database in a different way, cancel out now.' + text += os.linesep * 2 + text += 'If you do not know why this happened, you may have had a hard drive fault. Please consult "install_dir/db/help my db is broke.txt", and you may want to contact hydrus dev.' 
+ + result = ClientGUIDialogsQuick.GetYesNo( self, text, title = 'Missing Query Logs!', yes_label = 'continue', no_label = 'back out' ) + + if result == QW.QDialog.Accepted: + + from hydrus.client.importing import ClientImportSubscriptionQuery + + for missing_query_log_container_name in missing_query_log_container_names: + + query_log_container = ClientImportSubscriptionQuery.SubscriptionQueryLogContainer( missing_query_log_container_name ) + + HG.client_controller.WriteSynchronous( 'serialisable', query_log_container ) + + + for subscription in subscriptions: + + for query_header in subscription.GetQueryHeaders(): + + if query_header.GetQueryLogContainerName() in missing_query_log_container_names: + + query_header.Reset( query_log_container ) + + + + + HG.client_controller.subscriptions_manager.SetSubscriptions( subscriptions ) # save the reset + + else: + + return + + + + if len( surplus_query_log_container_names ) > 0: + + text = 'When loading subscription data, the client discovered surplus orphaned subscription data for {} queries! This data is harmless and no longer used. The situation is however unusual, and probably due to an unusual deletion routine or a bug.'.format( HydrusData.ToHumanInt( len( surplus_query_log_container_names ) ) ) + text += os.linesep * 2 + text += 'If you continue, this surplus data will backed up to your database directory and then safely deleted from the database itself, but if you recently did manual database editing and know you need to exit and fix your database in a different way, cancel out now.' + text += os.linesep * 2 + text += 'If you do not know why this happened, hydrus dev would be interested in being told about it and the surrounding circumstances.' + + result = ClientGUIDialogsQuick.GetYesNo( self, text, title = 'Orphan Query Logs!', yes_label = 'continue', no_label = 'back out' ) + + if result == QW.QDialog.Accepted: + + sub_dir = os.path.join( self._controller.GetDBDir(), 'orphaned_query_log_containers' ) + + HydrusPaths.MakeSureDirectoryExists( sub_dir ) + + for surplus_query_log_container_name in surplus_query_log_container_names: + + surplus_query_log_container = HG.client_controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER, surplus_query_log_container_name ) + + backup_path = os.path.join( sub_dir, 'qlc_{}.json'.format( surplus_query_log_container_name ) ) + + with open( backup_path, 'w', encoding = 'utf-8' ) as f: + + f.write( surplus_query_log_container.DumpToString() ) + + + HG.client_controller.WriteSynchronous( 'delete_serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER, surplus_query_log_container_name ) + + + else: + + return + + title = 'manage subscriptions' frame_key = 'manage_subscriptions_dialog' with ClientGUITopLevelWindowsPanels.DialogEdit( self, title, frame_key ) as dlg: - panel = ClientGUIScrolledPanelsEdit.EditSubscriptionsPanel( dlg, subscriptions, original_pause_status ) + panel = ClientGUISubscriptions.EditSubscriptionsPanel( dlg, subscriptions, original_pause_status ) dlg.SetPanel( panel ) if dlg.exec() == QW.QDialog.Accepted: - subscriptions = panel.GetValue() + ( subscriptions, edited_query_log_containers, deletee_query_log_container_names ) = panel.GetValue() + + for edited_query_log_container in edited_query_log_containers: + + HG.client_controller.Write( 'serialisable', edited_query_log_container ) + HG.client_controller.Write( 'serialisables_overwrite', [ HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION ], 
subscriptions ) - HG.client_controller.subscriptions_manager.NewSubscriptions( subscriptions ) + for deletee_query_log_container_name in deletee_query_log_container_names: + + HG.client_controller.Write( 'delete_serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER, deletee_query_log_container_name ) + + + HG.client_controller.subscriptions_manager.SetSubscriptions( subscriptions ) @@ -2664,40 +2757,24 @@ class FrameGUI( ClientGUITopLevelWindows.MainFrameThatResizes ): - job_key = ClientThreading.JobKey( cancellable = True ) + subscriptions = HG.client_controller.subscriptions_manager.GetSubscriptions() - job_key.SetVariable( 'popup_title', 'loading subscriptions' ) + expected_query_log_container_names = set() - controller.CallLater( 1.0, controller.pub, 'message', job_key ) - - subscription_names = HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION ) - - num_to_do = len( subscription_names ) - - subscriptions = [] - - for ( i, name ) in enumerate( subscription_names ): + for subscription in subscriptions: - if job_key.IsCancelled(): - - job_key.Delete() - - return - - - job_key.SetVariable( 'popup_text_1', HydrusData.ConvertValueRangeToPrettyString( i + 1, num_to_do ) + ': ' + name ) - job_key.SetVariable( 'popup_gauge_1', ( i + 1, num_to_do ) ) - - subscription = HG.client_controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION, name ) - - subscriptions.append( subscription ) + expected_query_log_container_names.update( subscription.GetAllQueryLogContainerNames() ) - job_key.Delete() + actual_query_log_container_names = set( HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER ) ) + + missing_query_log_container_names = expected_query_log_container_names.difference( actual_query_log_container_names ) + + surplus_query_log_container_names = actual_query_log_container_names.difference( expected_query_log_container_names ) try: - controller.CallBlockingToQt( self, qt_do_it, subscriptions, original_pause_status ) + controller.CallBlockingToQt( self, qt_do_it, subscriptions, missing_query_log_container_names, surplus_query_log_container_names, original_pause_status ) except HydrusExceptions.QtDeadWindowException: @@ -3860,6 +3937,8 @@ The password is cleartext here but obscured in the entry dialog. Enter a blank p else: + HG.client_controller.pub( 'pause_all_media' ) + title = job_key.GetIfHasVariable( 'popup_title' ) if title is None: @@ -4372,7 +4451,7 @@ The password is cleartext here but obscured in the entry dialog. Enter a blank p if self._controller.new_options.GetBoolean( 'advanced_mode' ): - ClientGUIMenus.AppendMenuItem( submenu, 'nudge subscriptions awake', 'Tell the subs daemon to wake up, just in case any subs are due.', self._controller.subscriptions_manager.ClearCacheAndWake ) + ClientGUIMenus.AppendMenuItem( submenu, 'nudge subscriptions awake', 'Tell the subs daemon to wake up, just in case any subs are due.', self._controller.subscriptions_manager.Wake ) ClientGUIMenus.AppendSeparator( submenu ) @@ -5995,6 +6074,8 @@ The password is cleartext here but obscured in the entry dialog. 
Enter a blank p self._controller.CreateSplash( 'hydrus client exiting' ) + HG.client_controller.pub( 'pause_all_media' ) + try: if QP.isValid( self._message_manager ): diff --git a/hydrus/client/gui/ClientGUIACDropdown.py b/hydrus/client/gui/ClientGUIACDropdown.py index 2a399f74..a704be68 100644 --- a/hydrus/client/gui/ClientGUIACDropdown.py +++ b/hydrus/client/gui/ClientGUIACDropdown.py @@ -361,7 +361,7 @@ def ShouldDoExactSearch( entry_text ): return 0 < len( test_text ) <= autocomplete_exact_match_threshold -def WriteFetch( win, job_key, results_callable, parsed_autocomplete_text: ClientSearch.ParsedAutocompleteText, file_service_key: bytes, tag_service_key: bytes, expand_parents: bool, results_cache: ClientSearch.PredicateResultsCache ): +def WriteFetch( win, job_key, results_callable, parsed_autocomplete_text: ClientSearch.ParsedAutocompleteText, file_service_key: bytes, tag_service_key: bytes, expand_parents: bool, display_tag_service_key, results_cache: ClientSearch.PredicateResultsCache ): tag_search_context = ClientSearch.TagSearchContext( service_key = tag_service_key ) @@ -387,7 +387,7 @@ def WriteFetch( win, job_key, results_callable, parsed_autocomplete_text: Client results_cache = ClientSearch.PredicateResultsCacheTag( predicates, strict_search_text, True ) - matches = results_cache.FilterPredicates( tag_service_key, strict_search_text ) + matches = results_cache.FilterPredicates( display_tag_service_key, strict_search_text ) else: @@ -402,7 +402,7 @@ def WriteFetch( win, job_key, results_callable, parsed_autocomplete_text: Client if cache_valid: - matches = results_cache.FilterPredicates( tag_service_key, autocomplete_search_text ) + matches = results_cache.FilterPredicates( display_tag_service_key, autocomplete_search_text ) else: @@ -412,13 +412,13 @@ def WriteFetch( win, job_key, results_callable, parsed_autocomplete_text: Client if is_explicit_wildcard: - matches = ClientSearch.FilterPredicatesBySearchText( tag_service_key, autocomplete_search_text, predicates ) + matches = ClientSearch.FilterPredicatesBySearchText( display_tag_service_key, autocomplete_search_text, predicates ) else: results_cache = ClientSearch.PredicateResultsCacheTag( predicates, strict_search_text, False ) - matches = results_cache.FilterPredicates( tag_service_key, autocomplete_search_text ) + matches = results_cache.FilterPredicates( display_tag_service_key, autocomplete_search_text ) @@ -426,11 +426,11 @@ def WriteFetch( win, job_key, results_callable, parsed_autocomplete_text: Client matches = ClientSearch.SortPredicates( matches ) - InsertTagPredicates( matches, tag_service_key, parsed_autocomplete_text ) + InsertTagPredicates( matches, display_tag_service_key, parsed_autocomplete_text ) if expand_parents: - matches = HG.client_controller.tag_parents_manager.ExpandPredicates( tag_service_key, matches ) + matches = HG.client_controller.tag_parents_manager.ExpandPredicates( display_tag_service_key, matches ) HG.client_controller.CallLaterQtSafe( win, 0.0, results_callable, job_key, parsed_autocomplete_text, results_cache, matches ) @@ -2195,6 +2195,8 @@ class AutoCompleteDropdownTagsWrite( AutoCompleteDropdownTags ): def __init__( self, parent, chosen_tag_callable, expand_parents, file_service_key, tag_service_key, null_entry_callable = None, tag_service_key_changed_callable = None, show_paste_button = False ): + self._display_tag_service_key = tag_service_key + self._chosen_tag_callable = chosen_tag_callable self._expand_parents = expand_parents self._null_entry_callable = null_entry_callable @@ 
-2278,7 +2280,7 @@ class AutoCompleteDropdownTagsWrite( AutoCompleteDropdownTags ): def _InitFavouritesList( self ): - favs_list = ListBoxTagsACWrite( self._dropdown_notebook, self.BroadcastChoices, self._tag_service_key, self._float_mode, height_num_chars = self._list_height_num_chars ) + favs_list = ListBoxTagsACWrite( self._dropdown_notebook, self.BroadcastChoices, self._display_tag_service_key, self._float_mode, height_num_chars = self._list_height_num_chars ) return favs_list @@ -2287,7 +2289,7 @@ class AutoCompleteDropdownTagsWrite( AutoCompleteDropdownTags ): self._list_height_num_chars = 8 - return ListBoxTagsACWrite( self._dropdown_notebook, self.BroadcastChoices, self._tag_service_key, self._float_mode, height_num_chars = self._list_height_num_chars ) + return ListBoxTagsACWrite( self._dropdown_notebook, self.BroadcastChoices, self._display_tag_service_key, self._float_mode, height_num_chars = self._list_height_num_chars ) def _Paste( self ): @@ -2326,7 +2328,6 @@ class AutoCompleteDropdownTagsWrite( AutoCompleteDropdownTags ): - def _ShouldTakeResponsibilityForEnter( self ): parsed_autocomplete_text = self._GetParsedAutocompleteText() @@ -2355,18 +2356,18 @@ class AutoCompleteDropdownTagsWrite( AutoCompleteDropdownTags ): stub_predicates = [] - InsertTagPredicates( stub_predicates, self._tag_service_key, parsed_autocomplete_text ) + InsertTagPredicates( stub_predicates, self._display_tag_service_key, parsed_autocomplete_text ) if self._expand_parents: - stub_predicates = HG.client_controller.tag_parents_manager.ExpandPredicates( self._tag_service_key, stub_predicates ) + stub_predicates = HG.client_controller.tag_parents_manager.ExpandPredicates( self._display_tag_service_key, stub_predicates ) AppendLoadingPredicate( stub_predicates ) HG.client_controller.CallLaterQtSafe( self, 0.2, self.SetStubPredicates, job_key, stub_predicates, parsed_autocomplete_text ) - HG.client_controller.CallToThread( WriteFetch, self, job_key, self.SetFetchedResults, parsed_autocomplete_text, self._file_service_key, self._tag_service_key, self._expand_parents, self._results_cache ) + HG.client_controller.CallToThread( WriteFetch, self, job_key, self.SetFetchedResults, parsed_autocomplete_text, self._file_service_key, self._tag_service_key, self._expand_parents, self._display_tag_service_key, self._results_cache ) def _TakeResponsibilityForEnter( self, shift_down ): diff --git a/hydrus/client/gui/ClientGUICanvas.py b/hydrus/client/gui/ClientGUICanvas.py index 32ecf912..29dda4b8 100644 --- a/hydrus/client/gui/ClientGUICanvas.py +++ b/hydrus/client/gui/ClientGUICanvas.py @@ -442,18 +442,20 @@ class Canvas( QW.QWidget ): def _CopyBMPToClipboard( self ): + copied = False + if self._current_media is not None: if self._current_media.GetMime() in HC.IMAGES: HG.client_controller.pub( 'clipboard', 'bmp', self._current_media ) - else: - - QW.QMessageBox.critical( self, 'Error', 'Sorry, cannot take bmps of anything but static images right now!' 
) + copied = True + return copied + def _CopyHashToClipboard( self, hash_type ): @@ -1459,6 +1461,15 @@ class Canvas( QW.QWidget ): self._CopyBMPToClipboard() + elif action == 'copy_bmp_or_file_if_not_bmpable': + + copied = self._CopyBMPToClipboard() + + if not copied: + + self._CopyFileToClipboard() + + elif action == 'copy_file': self._CopyFileToClipboard() @@ -2476,6 +2487,13 @@ class CanvasFilterDuplicates( CanvasWithHovers ): self._processed_pairs = [] self._hashes_due_to_be_deleted_in_this_batch = set() + # ok we started excluding pairs if they had been deleted, now I am extending it to any files that have been processed. + # main thing is if you have AB, AC, that's neat and a bunch of people want it, but current processing system doesn't do B->A->C merge if it happens in a single batch + # I need to store dupe merge options rather than content updates apply them in db transaction or do the retroactive sync or similar to get this done properly + # so regrettably I turn it off for now + + self._hashes_processed_in_this_batch = set() + file_service_key = self._file_search_context.GetFileServiceKey() self._media_list = ClientMedia.ListeningMediaList( file_service_key, [] ) @@ -2523,6 +2541,7 @@ class CanvasFilterDuplicates( CanvasWithHovers ): self._processed_pairs = [] self._hashes_due_to_be_deleted_in_this_batch = set() + self._hashes_processed_in_this_batch = set() def _CurrentMediaIsBetter( self, delete_second = True ): @@ -2787,6 +2806,7 @@ class CanvasFilterDuplicates( CanvasWithHovers ): self._hashes_due_to_be_deleted_in_this_batch.difference_update( hash_pair ) + self._hashes_processed_in_this_batch.difference_update( hash_pair ) self._ShowNewPair() @@ -2833,6 +2853,9 @@ class CanvasFilterDuplicates( CanvasWithHovers ): was_auto_skipped = False + self._hashes_processed_in_this_batch.update( first_media.GetHashes() ) + self._hashes_processed_in_this_batch.update( second_media.GetHashes() ) + if delete_first or delete_second or delete_both: if delete_first or delete_both: @@ -2935,6 +2958,7 @@ class CanvasFilterDuplicates( CanvasWithHovers ): self._hashes_due_to_be_deleted_in_this_batch.difference_update( hash_pair ) + self._hashes_processed_in_this_batch.difference_update( hash_pair ) @@ -2943,6 +2967,7 @@ class CanvasFilterDuplicates( CanvasWithHovers ): if len( self._unprocessed_pairs ) == 0: self._hashes_due_to_be_deleted_in_this_batch = set() + self._hashes_processed_in_this_batch = set() self._processed_pairs = [] # just in case someone 'skip'ed everything in the last batch, so this never got cleared above self.ClearMedia() @@ -2961,6 +2986,11 @@ class CanvasFilterDuplicates( CanvasWithHovers ): ( first_hash, second_hash ) = pair + if first_hash in self._hashes_processed_in_this_batch or second_hash in self._hashes_processed_in_this_batch: + + return False + + if first_hash in self._hashes_due_to_be_deleted_in_this_batch or second_hash in self._hashes_due_to_be_deleted_in_this_batch: return False diff --git a/hydrus/client/gui/ClientGUICanvasMedia.py b/hydrus/client/gui/ClientGUICanvasMedia.py index 2186c356..eb489eb3 100644 --- a/hydrus/client/gui/ClientGUICanvasMedia.py +++ b/hydrus/client/gui/ClientGUICanvasMedia.py @@ -917,6 +917,8 @@ class MediaContainer( QW.QWidget ): self.hide() + HG.client_controller.sub( self, 'Pause', 'pause_all_media' ) + def _DestroyOrHideThisMediaWindow( self, media_window ): diff --git a/hydrus/client/gui/ClientGUIFileSeedCache.py b/hydrus/client/gui/ClientGUIFileSeedCache.py index 8b4a20d0..208596ee 100644 --- 
a/hydrus/client/gui/ClientGUIFileSeedCache.py +++ b/hydrus/client/gui/ClientGUIFileSeedCache.py @@ -342,9 +342,9 @@ class EditFileSeedCachePanel( ClientGUIScrolledPanels.EditPanel ): def _UpdateText( self ): - ( status, simple_status, ( total_processed, total ) ) = self._file_seed_cache.GetStatus() + file_seed_cache_status = self._file_seed_cache.GetStatus() - self._text.setText( status ) + self._text.setText( file_seed_cache_status.GetStatusText() ) def GetValue( self ): @@ -510,7 +510,7 @@ class FileSeedCacheButton( ClientGUICommon.BetterBitmapButton ): file_seed_cache = self._file_seed_cache_get_callable() - file_seed_cache.RetryFailures() + file_seed_cache.RetryFailed() @@ -758,9 +758,11 @@ class FileSeedCacheStatusControl( QW.QFrame ): else: - ( import_summary, simple_status, ( num_done, num_to_do ) ) = self._file_seed_cache.GetStatus() + file_seed_cache_status = self._file_seed_cache.GetStatus() - self._import_summary_st.setText( import_summary ) + ( num_done, num_to_do ) = file_seed_cache_status.GetValueRange() + + self._import_summary_st.setText( file_seed_cache_status.GetStatusText() ) if num_to_do == 0: @@ -797,7 +799,9 @@ class FileSeedCacheStatusControl( QW.QFrame ): if self._file_seed_cache is not None: - ( import_summary, simple_status, ( num_done, num_to_do ) ) = self._file_seed_cache.GetStatus() + file_seed_cache_status = self._file_seed_cache.GetStatus() + + ( num_done, num_to_do ) = file_seed_cache_status.GetValueRange() ( old_num_done, old_num_to_do ) = self._progress_gauge.GetValueRange() diff --git a/hydrus/client/gui/ClientGUIGallerySeedLog.py b/hydrus/client/gui/ClientGUIGallerySeedLog.py index c7a71a43..03f938b8 100644 --- a/hydrus/client/gui/ClientGUIGallerySeedLog.py +++ b/hydrus/client/gui/ClientGUIGallerySeedLog.py @@ -466,7 +466,7 @@ class GallerySeedLogButton( ClientGUICommon.BetterBitmapButton ): gallery_seed_log = self._gallery_seed_log_get_callable() - gallery_seed_log.RetryFailures() + gallery_seed_log.RetryFailed() diff --git a/hydrus/client/gui/ClientGUIListBoxes.py b/hydrus/client/gui/ClientGUIListBoxes.py index 09240e4c..5d826cc7 100644 --- a/hydrus/client/gui/ClientGUIListBoxes.py +++ b/hydrus/client/gui/ClientGUIListBoxes.py @@ -1635,10 +1635,17 @@ class ListBox( QW.QScrollArea ): if len( self._ordered_terms ) > 0: - self._selected_terms = set() + if len( self._selected_terms ) == 1 and self._IsSelected( 0 ): + + return + + + self._DeselectAll() self._Hit( False, False, 0 ) + self.widget().update() + def SetMinimumHeightNumChars( self, minimum_height_num_chars ): @@ -1821,7 +1828,10 @@ class ListBoxTags( ListBox ): if isinstance( term, ClientSearch.Predicate ): - predicates.append( term ) + if term.GetType() not in ( ClientSearch.PREDICATE_TYPE_LABEL, ClientSearch.PREDICATE_TYPE_NAMESPACE, ClientSearch.PREDICATE_TYPE_PARENT ): + + predicates.append( term ) + else: diff --git a/hydrus/client/gui/ClientGUIManagement.py b/hydrus/client/gui/ClientGUIManagement.py index 057cf494..bd50c9e8 100644 --- a/hydrus/client/gui/ClientGUIManagement.py +++ b/hydrus/client/gui/ClientGUIManagement.py @@ -1856,11 +1856,13 @@ class ManagementPanelImporterMultipleGallery( ManagementPanelImporter ): pretty_status = status - ( file_seed_cache_status, file_seed_cache_simple_status, ( num_done, num_total ) ) = gallery_import.GetFileSeedCache().GetStatus() + file_seed_cache_status = gallery_import.GetFileSeedCache().GetStatus() + + ( num_done, num_total ) = file_seed_cache_status.GetValueRange() progress = ( num_total, num_done ) - pretty_progress = 
file_seed_cache_simple_status + pretty_progress = file_seed_cache_status.GetStatusText( simple = True ) added = gallery_import.GetCreationTime() @@ -2232,10 +2234,12 @@ class ManagementPanelImporterMultipleGallery( ManagementPanelImporter ): else: - ( status, simple_status, ( value, range ) ) = self._multiple_gallery_import.GetTotalStatus() + file_seed_cache_status = self._multiple_gallery_import.GetTotalStatus() - text_top = HydrusData.ToHumanInt( num_gallery_imports ) + ' queries - ' + HydrusData.ConvertValueRangeToPrettyString( value, range ) - text_bottom = status + ( num_done, num_total ) = file_seed_cache_status.GetValueRange() + + text_top = '{} queries - {}'.format( HydrusData.ToHumanInt( num_gallery_imports ), HydrusData.ConvertValueRangeToPrettyString( num_done, num_total ) ) + text_bottom = file_seed_cache_status.GetStatusText() self._gallery_importers_status_st_top.setText( text_top ) @@ -2561,11 +2565,13 @@ class ManagementPanelImporterMultipleWatcher( ManagementPanelImporter ): pretty_checking_paused = '' - ( status, simple_status, ( num_done, num_total ) ) = watcher.GetFileSeedCache().GetStatus() + file_seed_cache_status = watcher.GetFileSeedCache().GetStatus() + + ( num_done, num_total ) = file_seed_cache_status.GetValueRange() progress = ( num_total, num_done ) - pretty_progress = simple_status + pretty_progress = file_seed_cache_status.GetStatusText( simple = True ) added = watcher.GetCreationTime() @@ -2953,10 +2959,12 @@ class ManagementPanelImporterMultipleWatcher( ManagementPanelImporter ): num_dead_text = HydrusData.ToHumanInt( num_dead ) + ' DEAD - ' - ( status, simple_status, ( value, range ) ) = self._multiple_watcher_import.GetTotalStatus() + file_seed_cache_status = self._multiple_watcher_import.GetTotalStatus() - text_top = HydrusData.ToHumanInt( num_watchers ) + ' watchers - ' + num_dead_text + HydrusData.ConvertValueRangeToPrettyString( value, range ) - text_bottom = status + ( num_done, num_total ) = file_seed_cache_status.GetValueRange() + + text_top = '{} watchers - {}'.format( HydrusData.ToHumanInt( num_watchers ), HydrusData.ConvertValueRangeToPrettyString( num_done, num_total ) ) + text_bottom = file_seed_cache_status.GetStatusText() self._watchers_status_st_top.setText( text_top ) diff --git a/hydrus/client/gui/ClientGUIPages.py b/hydrus/client/gui/ClientGUIPages.py index 92911a51..c891ac75 100644 --- a/hydrus/client/gui/ClientGUIPages.py +++ b/hydrus/client/gui/ClientGUIPages.py @@ -943,11 +943,6 @@ class PagesNotebook( QP.TabWidgetWithDnD ): QP.TabWidgetWithDnD.__init__( self, parent ) - if HG.client_controller.new_options.GetBoolean( 'elide_page_tab_names' ): - - self.tabBar().setElideMode( QC.Qt.ElideMiddle ) - - self._parent_notebook = parent # this is disabled for now because it seems borked in Qt @@ -973,6 +968,7 @@ class PagesNotebook( QP.TabWidgetWithDnD ): self._controller.sub( self, 'RefreshPageName', 'refresh_page_name' ) self._controller.sub( self, 'NotifyPageUnclosed', 'notify_page_unclosed' ) + self._controller.sub( self, '_UpdatePageTabEliding', 'notify_new_options' ) self._widget_event_filter = QP.WidgetEventFilter( self ) self._widget_event_filter.EVT_LEFT_DCLICK( self.EventLeftDoubleClick ) @@ -987,6 +983,8 @@ class PagesNotebook( QP.TabWidgetWithDnD ): self._previous_page_index = -1 + self._UpdatePageTabEliding() + def _RefreshPageNamesAfterDnD( self, page_widget, source_widget ): @@ -1003,6 +1001,18 @@ class PagesNotebook( QP.TabWidgetWithDnD ): + def _UpdatePageTabEliding( self ): + + if 
HG.client_controller.new_options.GetBoolean( 'elide_page_tab_names' ): + + self.tabBar().setElideMode( QC.Qt.ElideMiddle ) + + else: + + self.tabBar().setElideMode( QC.Qt.ElideNone ) + + + def _UpdatePreviousPageIndex( self ): self._previous_page_index = self.currentIndex() diff --git a/hydrus/client/gui/ClientGUIPanels.py b/hydrus/client/gui/ClientGUIPanels.py index b0dfaccf..5be9a331 100644 --- a/hydrus/client/gui/ClientGUIPanels.py +++ b/hydrus/client/gui/ClientGUIPanels.py @@ -1141,10 +1141,10 @@ class ReviewServicePanel( QW.QWidget ): reset_menu_items = [] - reset_menu_items.append( ( 'normal', 'reprocess definitions', 'Reprocess all definitions.', self._ReprocessDefinitions ) ) - reset_menu_items.append( ( 'normal', 'reprocess content', 'Reprocess all content.', self._ReprocessContent ) ) + reset_menu_items.append( ( 'normal', 'fill in definition gaps', 'Reprocess all definitions.', self._ReprocessDefinitions ) ) + reset_menu_items.append( ( 'normal', 'fill in content gaps', 'Reprocess all content.', self._ReprocessContent ) ) reset_menu_items.append( ( 'separator', None, None, None ) ) - reset_menu_items.append( ( 'normal', 'complete wipe and reset', 'Reset entire repository.', self._Reset ) ) + reset_menu_items.append( ( 'normal', 'wipe database data and reprocess from update files', 'Reset entire repository.', self._Reset ) ) self._reset_button = ClientGUICommon.MenuButton( self, 'reset processing', reset_menu_items ) @@ -1364,7 +1364,7 @@ class ReviewServicePanel( QW.QWidget ): name = self._service.GetName() - message = 'This will delete all the processed information for ' + name + ' from the database.' + os.linesep * 2 + 'Once the service is reset, you will have to reprocess everything that has been downloaded over again. The client will naturally do this in its idle time as before, just starting over from the beginning.' + os.linesep * 2 + 'If you do not understand what this does, click no!' + message = 'This will delete all the processed information for ' + name + ' from the database.' + os.linesep * 2 + 'Once the service is reset, you will have to reprocess everything from your downloaded update files. The client will naturally do this in its idle time as before, just starting over from the beginning.' + os.linesep * 2 + 'If you do not understand what this does, click no!' result = ClientGUIDialogsQuick.GetYesNo( self, message ) diff --git a/hydrus/client/gui/ClientGUIResults.py b/hydrus/client/gui/ClientGUIResults.py index 95fa3679..913115f4 100644 --- a/hydrus/client/gui/ClientGUIResults.py +++ b/hydrus/client/gui/ClientGUIResults.py @@ -146,6 +146,8 @@ class MediaPanel( ClientMedia.ListeningMediaList, QW.QScrollArea ): def _CopyBMPToClipboard( self ): + copied = False + if self._focused_media is not None: if self._HasFocusSingleton(): @@ -156,13 +158,13 @@ class MediaPanel( ClientMedia.ListeningMediaList, QW.QScrollArea ): HG.client_controller.pub( 'clipboard', 'bmp', media ) - else: - - QW.QMessageBox.critical( self, 'Error', 'Sorry, cannot take bmps of anything but static images right now!' 
) + copied = True + return copied + def _CopyFilesToClipboard( self ): @@ -1852,6 +1854,15 @@ class MediaPanel( ClientMedia.ListeningMediaList, QW.QScrollArea ): self._CopyBMPToClipboard() + elif action == 'copy_bmp_or_file_if_not_bmpable': + + copied = self._CopyBMPToClipboard() + + if not copied: + + self._CopyFilesToClipboard() + + elif action == 'copy_file': self._CopyFilesToClipboard() diff --git a/hydrus/client/gui/ClientGUIScrolledPanelsEdit.py b/hydrus/client/gui/ClientGUIScrolledPanelsEdit.py index 99381c51..e26443f6 100644 --- a/hydrus/client/gui/ClientGUIScrolledPanelsEdit.py +++ b/hydrus/client/gui/ClientGUIScrolledPanelsEdit.py @@ -32,19 +32,13 @@ from hydrus.client.gui import ClientGUIListBoxes from hydrus.client.gui import ClientGUIListCtrl from hydrus.client.gui import ClientGUIScrolledPanels from hydrus.client.gui import ClientGUIShortcuts -from hydrus.client.gui import ClientGUIFileSeedCache -from hydrus.client.gui import ClientGUIGallerySeedLog from hydrus.client.gui import ClientGUIMPV from hydrus.client.gui import ClientGUIStringControls from hydrus.client.gui import ClientGUITags from hydrus.client.gui import ClientGUITime from hydrus.client.gui import ClientGUITopLevelWindowsPanels from hydrus.client.gui import QtPorting as QP -from hydrus.client.importing import ClientImporting -from hydrus.client.importing import ClientImportFileSeeds from hydrus.client.importing import ClientImportOptions -from hydrus.client.importing import ClientImportSubscriptions -from hydrus.client.importing import ClientImportSubscriptionQuery from hydrus.client.networking import ClientNetworkingContexts from hydrus.client.networking import ClientNetworkingDomain @@ -927,7 +921,7 @@ class EditDomainManagerInfoPanel( ClientGUIScrolledPanels.EditPanel ): class EditDownloaderDisplayPanel( ClientGUIScrolledPanels.EditPanel ): - def __init__( self, parent: QW.QWidget, network_engine, gugs, gug_keys_to_display, url_classes, url_class_keys_to_display ): + def __init__( self, parent: QW.QWidget, network_engine, gugs, gug_keys_to_display, url_classes, url_class_keys_to_display, show_unmatched_urls_in_media_viewer ): ClientGUIScrolledPanels.EditPanel.__init__( self, parent ) @@ -957,7 +951,9 @@ class EditDownloaderDisplayPanel( ClientGUIScrolledPanels.EditPanel ): # - self._url_display_list_ctrl_panel = ClientGUIListCtrl.BetterListCtrlPanel( self._notebook ) + media_viewer_urls_panel = QW.QWidget( self._notebook ) + + self._url_display_list_ctrl_panel = ClientGUIListCtrl.BetterListCtrlPanel( media_viewer_urls_panel ) columns = [ ( 'url class', -1 ), ( 'url type', 20 ), ( 'display on media viewer?', 36 ) ] @@ -967,6 +963,8 @@ class EditDownloaderDisplayPanel( ClientGUIScrolledPanels.EditPanel ): self._url_display_list_ctrl_panel.AddButton( 'edit', self._EditURLDisplay, enabled_only_on_selection = True ) + self._show_unmatched_urls_in_media_viewer = QW.QCheckBox( media_viewer_urls_panel ) + # listctrl_data = [] @@ -986,7 +984,7 @@ class EditDownloaderDisplayPanel( ClientGUIScrolledPanels.EditPanel ): listctrl_data = [] - for ( url_class_key, url_class ) in list(self._url_class_keys_to_url_classes.items()): + for ( url_class_key, url_class ) in self._url_class_keys_to_url_classes.items(): display = url_class_key in url_class_keys_to_display @@ -997,11 +995,28 @@ class EditDownloaderDisplayPanel( ClientGUIScrolledPanels.EditPanel ): self._url_display_list_ctrl.Sort( 1 ) + self._show_unmatched_urls_in_media_viewer.setChecked( show_unmatched_urls_in_media_viewer ) + + # + + vbox = QP.VBoxLayout() + + rows = 
[] + + rows.append( ( 'show urls that do not have a matching url class?: ', self._show_unmatched_urls_in_media_viewer ) ) + + gridbox = ClientGUICommon.WrapInGrid( media_viewer_urls_panel, rows ) + + QP.AddToLayout( vbox, self._url_display_list_ctrl, CC.FLAGS_EXPAND_BOTH_WAYS ) + QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_PERPENDICULAR ) + + media_viewer_urls_panel.setLayout( vbox ) + # self._notebook.addTab( self._gug_display_list_ctrl_panel, 'downloaders selector' ) self._notebook.setCurrentWidget( self._gug_display_list_ctrl_panel ) - self._notebook.addTab( self._url_display_list_ctrl_panel, 'media viewer urls' ) + self._notebook.addTab( media_viewer_urls_panel, 'media viewer urls' ) # @@ -1131,7 +1146,9 @@ class EditDownloaderDisplayPanel( ClientGUIScrolledPanels.EditPanel ): gug_keys_to_display = { gug_key for ( gug_key, display ) in self._gug_display_list_ctrl.GetData() if display } url_class_keys_to_display = { url_class_key for ( url_class_key, display ) in self._url_display_list_ctrl.GetData() if display } - return ( gug_keys_to_display, url_class_keys_to_display ) + show_unmatched_urls_in_media_viewer = self._show_unmatched_urls_in_media_viewer.isChecked() + + return ( gug_keys_to_display, url_class_keys_to_display, show_unmatched_urls_in_media_viewer ) class EditDuplicateActionOptionsPanel( ClientGUIScrolledPanels.EditPanel ): @@ -3045,7 +3062,7 @@ class EditNetworkContextPanel( ClientGUIScrolledPanels.EditPanel ): self._context_data_none.setVisible( False ) - names = HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION ) + names = HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_LEGACY ) for name in names: @@ -3727,1823 +3744,6 @@ class EditServersideService( ClientGUIScrolledPanels.EditPanel ): -class EditSubscriptionPanel( ClientGUIScrolledPanels.EditPanel ): - - def __init__( self, parent: QW.QWidget, subscription: ClientImportSubscriptions.Subscription ): - - subscription = subscription.Duplicate() - - ClientGUIScrolledPanels.EditPanel.__init__( self, parent ) - - self._original_subscription = subscription - - # - - self._name = QW.QLineEdit( self ) - self._delay_st = ClientGUICommon.BetterStaticText( self ) - - # - - ( name, gug_key_and_name, queries, checker_options, initial_file_limit, periodic_file_limit, paused, file_import_options, tag_import_options, self._no_work_until, self._no_work_until_reason ) = subscription.ToTuple() - - self._query_panel = ClientGUICommon.StaticBox( self, 'site and queries' ) - - self._gug_key_and_name = ClientGUIImport.GUGKeyAndNameSelector( self._query_panel, gug_key_and_name ) - - queries_panel = ClientGUIListCtrl.BetterListCtrlPanel( self._query_panel ) - - columns = [ ( 'name/query', 20 ), ( 'paused', 8 ), ( 'status', 8 ), ( 'last new file time', 20 ), ( 'last check time', 20 ), ( 'next check time', 20 ), ( 'file velocity', 20 ), ( 'recent delays', 20 ), ( 'items', 13 ) ] - - self._queries = ClientGUIListCtrl.BetterListCtrl( queries_panel, 'subscription_queries', 10, 20, columns, self._ConvertQueryToListCtrlTuples, use_simple_delete = True, activation_callback = self._EditQuery ) - - queries_panel.SetListCtrl( self._queries ) - - queries_panel.AddButton( 'add', self._AddQuery ) - queries_panel.AddButton( 'copy queries', self._CopyQueries, enabled_only_on_selection = True ) - queries_panel.AddButton( 'paste queries', self._PasteQueries ) - queries_panel.AddButton( 'edit', self._EditQuery, enabled_only_on_selection = True ) - 
queries_panel.AddDeleteButton() - queries_panel.AddSeparator() - queries_panel.AddButton( 'pause/play', self._PausePlay, enabled_only_on_selection = True ) - queries_panel.AddButton( 'retry failed', self._RetryFailed, enabled_check_func = self._ListCtrlCanRetryFailed ) - queries_panel.AddButton( 'retry ignored', self._RetryIgnored, enabled_check_func = self._ListCtrlCanRetryIgnored ) - queries_panel.AddButton( 'check now', self._CheckNow, enabled_check_func = self._ListCtrlCanCheckNow ) - queries_panel.AddButton( 'reset cache', self._ResetCache, enabled_check_func = self._ListCtrlCanResetCache ) - - if HG.client_controller.new_options.GetBoolean( 'advanced_mode' ): - - queries_panel.AddSeparator() - - menu_items = [] - - menu_items.append( ( 'normal', 'show', 'Show quality info.', self._ShowQualityInfo ) ) - menu_items.append( ( 'normal', 'copy csv data to clipboard', 'Copy quality info to clipboard.', self._CopyQualityInfo ) ) - - queries_panel.AddMenuButton( 'quality info', menu_items, enabled_only_on_selection = True ) - - - # - - self._file_limits_panel = ClientGUICommon.StaticBox( self, 'file limits' ) - - message = '''****Subscriptions are not for large one-time syncs**** - -tl;dr: Do not change the checker options or file limits until you really know what you are doing. The limits are now only 1000 (10000 in advanced mode) anyway, but you should leave them at 100/100. - -A subscription will start at a site's newest files and keep searching further and further back into the past. It will stop naturally if it reaches the end of results or starts to see files it saw in a previous check (and so assumes it has 'caught up' to where it was before). It will stop 'artificially' if it finds enough new files to hit the file limits here. - -Unless you have a very special reason, it is important to keep these file limit numbers low. Being automated, subscriptions typically run when you are not looking at the client, and if they go wrong, it is good to have some brakes to stop them going very wrong. - -First of all, making sure you only get a few dozen or hundred on the first check means you do not spend twenty minutes fetching all the search's thousands of file URLs that you may well have previously downloaded, but it is even more important for regular checks, where the sub is trying to find where it got to before: if a site changes its URL format (say from artistname.deviantart.com to deviantart.com/artistname) or changes its markup or otherwise starts delivering unusual results, the subscription may not realise it is seeing the wrong urls and will keep syncing until it hits its regular limit. If the periodic limit is 100, this is no big deal--you'll likely get a popup message out of it and might need to update the respective downloader--but if it were 60000 (or infinite, and the site were somehow serving you random/full results!), you could run into a huge problem completely by accident. - -Subscription sync searches are somewhat 'fragile' (they cannot pause/resume the gallery pagewalk, only completely cancel), so it is best if they are short--say, no more than five pages. It is better for a sub to pick up a small number of new files every few weeks than trying to catch up in a giant rush once a year. - -If you are not experienced with subscriptions, I strongly suggest you set these to something like 100 for the first check and 100 thereafter, which is likely your default. This works great for typical artist and character queries. 
- -If you want to get all of an artist's files from a site, use the manual gallery download page first. A good routine is to check that you have the right search text and it all works correctly and that you know what tags you want, and then once that big queue is fully downloaded and synced, start a new sub with the same settings to continue checking for anything posted in future.''' - - help_button = ClientGUICommon.BetterBitmapButton( self._file_limits_panel, CC.global_pixmaps().help, QW.QMessageBox.information, None, 'Information', message ) - - help_hbox_1 = ClientGUICommon.WrapInText( help_button, self._file_limits_panel, 'help about file limits -->', QG.QColor( 0, 0, 255 ) ) - - message = '''****Hitting the normal/periodic limit may or may not be a big deal**** - -If one of your subscriptions hits the file limit just doing a normal sync, you will get a little popup telling you. It is likely because of: - -1) The query has not run in a while, or many new files were suddenly posted, so the backlog of to-be-synced files has built up. - -2) The site has changed how it formats file post urls, so the subscription thinks it is seeing new files when it truly is not. - -If 1 is true, you might want to increase its periodic limit a little, or speed up its checking times, and fill in whatever gap of files you are missing with a manual download page. - -But if 2 is true--and is also perhaps accompanied by many 'could not parse' errors--the maintainer for the site's download parser (hydrus dev or whoever) would be interested in knowing what has happened so they can roll out a fix.''' - - help_button = ClientGUICommon.BetterBitmapButton( self._file_limits_panel, CC.global_pixmaps().help, QW.QMessageBox.information, None, 'Information', message ) - - help_hbox_2 = ClientGUICommon.WrapInText( help_button, self._file_limits_panel, 'help about hitting the normal file limit -->', QG.QColor( 0, 0, 255 ) ) - - if HG.client_controller.new_options.GetBoolean( 'advanced_mode' ): - - limits_max = 10000 - - else: - - limits_max = 1000 - - - self._initial_file_limit = QP.MakeQSpinBox( self._file_limits_panel, min=1, max=limits_max ) - self._initial_file_limit.setToolTip( 'The first sync will add no more than this many URLs.' ) - - self._periodic_file_limit = QP.MakeQSpinBox( self._file_limits_panel, min=1, max=limits_max ) - self._periodic_file_limit.setToolTip( 'Normal syncs will add no more than this many URLs, stopping early if they find several URLs the query has seen before.' ) - - self._file_presentation_panel = ClientGUICommon.StaticBox( self, 'presentation' ) - - self._show_a_popup_while_working = QW.QCheckBox( self._file_presentation_panel ) - self._show_a_popup_while_working.setToolTip( 'Careful with this! Leave it on to begin with, just in case it goes wrong!' ) - - self._publish_files_to_popup_button = QW.QCheckBox( self._file_presentation_panel ) - self._publish_files_to_page = QW.QCheckBox( self._file_presentation_panel ) - self._publish_label_override = ClientGUICommon.NoneableTextCtrl( self._file_presentation_panel, none_phrase = 'no, use subscription name' ) - self._merge_query_publish_events = QW.QCheckBox( self._file_presentation_panel ) - - tt = 'This is great to merge multiple subs to a combined location!' - - self._publish_label_override.setToolTip( tt ) - - tt = 'If unchecked, each query will produce its own \'subscription_name: query\' button or page.'
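The periodic-limit tooltip above ('stopping early if they find several URLs the query has seen before') is the heart of the sync behaviour both help messages describe. A minimal sketch of that rule, assuming newest-first gallery URLs and an invented stop_after_seen threshold--the real logic lives in the importing modules and is more involved:

def plan_urls_to_add( gallery_urls, seen_urls, periodic_file_limit, stop_after_seen = 5 ):
    
    # gallery_urls: newest-first, as a gallery pagewalk would deliver them (assumed)
    # seen_urls: the set of urls this query found in previous checks
    
    urls_to_add = []
    num_seen_in_a_row = 0
    
    for url in gallery_urls:
        
        if url in seen_urls:
            
            num_seen_in_a_row += 1
            
            # a run of familiar urls suggests we have 'caught up' to the previous check
            if num_seen_in_a_row >= stop_after_seen:
                
                break
                
            
        else:
            
            num_seen_in_a_row = 0
            
            urls_to_add.append( url )
            
            # the periodic limit is the artificial brake the help text describes
            if len( urls_to_add ) >= periodic_file_limit:
                
                break
                
            
        
    
    return urls_to_add

With a periodic limit of 100, a healthy query stops on the 'caught up' branch; stopping on the limit itself is exactly the warning sign the second help message walks through.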
- - self._merge_query_publish_events.setToolTip( tt ) - - # - - self._control_panel = ClientGUICommon.StaticBox( self, 'control' ) - - self._paused = QW.QCheckBox( self._control_panel ) - - # - - show_downloader_options = True - - self._checker_options = ClientGUIImport.CheckerOptionsButton( self, checker_options, update_callable = self._CheckerOptionsUpdated ) - self._file_import_options = ClientGUIImport.FileImportOptionsButton( self, file_import_options, show_downloader_options ) - self._tag_import_options = ClientGUIImport.TagImportOptionsButton( self, tag_import_options, show_downloader_options, allow_default_selection = True ) - - # - - self._name.setText( name ) - - self._queries.AddDatas( queries ) - - self._queries.Sort() - - self._initial_file_limit.setValue( initial_file_limit ) - self._periodic_file_limit.setValue( periodic_file_limit ) - - ( show_a_popup_while_working, publish_files_to_popup_button, publish_files_to_page, publish_label_override, merge_query_publish_events ) = subscription.GetPresentationOptions() - - self._show_a_popup_while_working.setChecked( show_a_popup_while_working ) - self._publish_files_to_popup_button.setChecked( publish_files_to_popup_button ) - self._publish_files_to_page.setChecked( publish_files_to_page ) - self._publish_label_override.SetValue( publish_label_override ) - self._merge_query_publish_events.setChecked( merge_query_publish_events ) - - self._paused.setChecked( paused ) - - # - - self._query_panel.Add( self._gug_key_and_name, CC.FLAGS_EXPAND_PERPENDICULAR ) - self._query_panel.Add( queries_panel, CC.FLAGS_EXPAND_BOTH_WAYS ) - - # - - rows = [] - - rows.append( ( 'on first check, get at most this many files: ', self._initial_file_limit ) ) - rows.append( ( 'on normal checks, get at most this many newer files: ', self._periodic_file_limit ) ) - - gridbox = ClientGUICommon.WrapInGrid( self._file_limits_panel, rows ) - - self._file_limits_panel.Add( help_hbox_1, CC.FLAGS_EXPAND_PERPENDICULAR ) - self._file_limits_panel.Add( help_hbox_2, CC.FLAGS_EXPAND_PERPENDICULAR ) - self._file_limits_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR ) - - # - - rows = [] - - rows.append( ( 'show a popup while working: ', self._show_a_popup_while_working ) ) - rows.append( ( 'publish new files to a popup button: ', self._publish_files_to_popup_button ) ) - rows.append( ( 'publish new files to a page: ', self._publish_files_to_page ) ) - rows.append( ( 'publish to a specific label: ', self._publish_label_override ) ) - rows.append( ( 'publish all queries to the same page/popup button: ', self._merge_query_publish_events ) ) - - gridbox = ClientGUICommon.WrapInGrid( self._file_presentation_panel, rows ) - - self._file_presentation_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR ) - - # - - rows = [] - - rows.append( ( 'currently paused: ', self._paused ) ) - - gridbox = ClientGUICommon.WrapInGrid( self._control_panel, rows ) - - self._control_panel.Add( gridbox, CC.FLAGS_LONE_BUTTON ) - - # - - vbox = QP.VBoxLayout() - - QP.AddToLayout( vbox, ClientGUICommon.WrapInText(self._name,self,'name: '), CC.FLAGS_EXPAND_SIZER_PERPENDICULAR ) - QP.AddToLayout( vbox, self._delay_st, CC.FLAGS_EXPAND_PERPENDICULAR ) - QP.AddToLayout( vbox, self._query_panel, CC.FLAGS_EXPAND_BOTH_WAYS ) - QP.AddToLayout( vbox, self._control_panel, CC.FLAGS_EXPAND_PERPENDICULAR ) - QP.AddToLayout( vbox, self._file_limits_panel, CC.FLAGS_EXPAND_PERPENDICULAR ) - QP.AddToLayout( vbox, self._file_presentation_panel, CC.FLAGS_EXPAND_PERPENDICULAR ) - QP.AddToLayout( vbox, 
self._checker_options, CC.FLAGS_EXPAND_PERPENDICULAR ) - QP.AddToLayout( vbox, self._file_import_options, CC.FLAGS_EXPAND_PERPENDICULAR ) - QP.AddToLayout( vbox, self._tag_import_options, CC.FLAGS_EXPAND_PERPENDICULAR ) - - self.widget().setLayout( vbox ) - - self._UpdateDelayText() - - - def _AddQuery( self ): - - gug_key_and_name = self._gug_key_and_name.GetValue() - - initial_search_text = HG.client_controller.network_engine.domain_manager.GetInitialSearchText( gug_key_and_name ) - - query = ClientImportSubscriptionQuery.SubscriptionQuery( initial_search_text ) - - with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit subscription query' ) as dlg: - - panel = EditSubscriptionQueryPanel( dlg, query ) - - dlg.SetPanel( panel ) - - if dlg.exec() == QW.QDialog.Accepted: - - query = panel.GetValue() - - query_text = query.GetQueryText() - - if query_text in self._GetCurrentQueryTexts(): - - QW.QMessageBox.warning( self, 'Warning', 'You already have a query for "'+query_text+'", so nothing new has been added.' ) - - return - - - self._queries.AddDatas( ( query, ) ) - - - - - def _CheckerOptionsUpdated( self, checker_options ): - - for query in self._queries.GetData(): - - query.UpdateNextCheckTime( checker_options ) - - - self._queries.UpdateDatas() - - - def _CheckNow( self ): - - selected_queries = self._queries.GetData( only_selected = True ) - - for query in selected_queries: - - query.CheckNow() - - - self._queries.UpdateDatas( selected_queries ) - - self._queries.Sort() - - self._no_work_until = 0 - - self._UpdateDelayText() - - - def _ConvertQueryToListCtrlTuples( self, query ): - - ( query_text, check_now, last_check_time, next_check_time, paused, status ) = query.ToTuple() - - name = query.GetHumanName() - pretty_name = name - - if paused: - - pretty_paused = 'yes' - - else: - - pretty_paused = '' - - - if status == ClientImporting.CHECKER_STATUS_OK: - - pretty_status = 'ok' - - else: - - pretty_status = 'dead' - - - file_seed_cache = query.GetFileSeedCache() - - last_new_file_time = file_seed_cache.GetLatestAddedTime() - - if last_new_file_time is None or last_new_file_time == 0: - - pretty_last_new_file_time = 'n/a' - - else: - - pretty_last_new_file_time = HydrusData.TimestampToPrettyTimeDelta( last_new_file_time ) - - - if last_check_time is None or last_check_time == 0: - - pretty_last_check_time = '(initial check has not yet occurred)' - - else: - - pretty_last_check_time = HydrusData.TimestampToPrettyTimeDelta( last_check_time ) - - - pretty_next_check_time = query.GetNextCheckStatusString() - - checker_options = self._checker_options.GetValue() - - file_velocity = checker_options.GetRawCurrentVelocity( query.GetFileSeedCache(), last_check_time ) - pretty_file_velocity = checker_options.GetPrettyCurrentVelocity( query.GetFileSeedCache(), last_check_time, no_prefix = True ) - - try: - - estimate = query.GetBandwidthWaitingEstimate( self._original_subscription.GetName() ) - - if estimate == 0: - - pretty_delay = '' - delay = 0 - - else: - - pretty_delay = 'bandwidth: ' + HydrusData.TimeDeltaToPrettyTimeDelta( estimate ) - delay = estimate - - - except: - - pretty_delay = 'could not determine bandwidth--there may be a problem with some of the urls in this query' - delay = 0 - - - ( file_status, simple_status, ( num_done, num_total ) ) = file_seed_cache.GetStatus() - - items = ( num_total, num_done ) - - pretty_items = simple_status - - sort_last_new_file_time = ClientGUIListCtrl.SafeNoneInt( last_new_file_time ) - sort_last_check_time = ClientGUIListCtrl.SafeNoneInt( 
last_check_time ) - sort_next_check_time = ClientGUIListCtrl.SafeNoneInt( next_check_time ) - - display_tuple = ( pretty_name, pretty_paused, pretty_status, pretty_last_new_file_time, pretty_last_check_time, pretty_next_check_time, pretty_file_velocity, pretty_delay, pretty_items ) - sort_tuple = ( name, paused, status, sort_last_new_file_time, sort_last_check_time, sort_next_check_time, file_velocity, delay, items ) - - return ( display_tuple, sort_tuple ) - - - def _CopyQueries( self ): - - query_texts = [] - - for query in self._queries.GetData( only_selected = True ): - - query_texts.append( query.GetQueryText() ) - - - clipboard_text = os.linesep.join( query_texts ) - - if len( clipboard_text ) > 0: - - HG.client_controller.pub( 'clipboard', 'text', clipboard_text ) - - - - def _EditQuery( self ): - - selected_queries = self._queries.GetData( only_selected = True ) - - for old_query in selected_queries: - - with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit subscription query' ) as dlg: - - panel = EditSubscriptionQueryPanel( dlg, old_query ) - - dlg.SetPanel( panel ) - - if dlg.exec() == QW.QDialog.Accepted: - - edited_query = panel.GetValue() - - edited_query_text = edited_query.GetQueryText() - - if edited_query_text != old_query.GetQueryText() and edited_query_text in self._GetCurrentQueryTexts(): - - QW.QMessageBox.warning( self, 'Warning', 'You already have a query for "'+edited_query_text+'"! The edit you just made will not be saved.' ) - - break - - - self._queries.DeleteDatas( ( old_query, ) ) - - self._queries.AddDatas( ( edited_query, ) ) - - else: - - break - - - - - self._queries.Sort() - - - def _GetCurrentQueryTexts( self ): - - query_strings = set() - - for query in self._queries.GetData(): - - query_strings.add( query.GetQueryText() ) - - - return query_strings - - - def _CopyQualityInfo( self ): - - data = self._GetQualityInfo() - - data_strings = [] - - for ( name, num_inbox, num_archived, num_deleted ) in data: - - if num_archived + num_deleted > 0: - - percent = HydrusData.ConvertFloatToPercentage( num_archived / ( num_archived + num_deleted ) ) - - else: - - percent = '0.0%' - - - data_string = '{},{},{},{},{}'.format( name, HydrusData.ToHumanInt( num_inbox ), HydrusData.ToHumanInt( num_archived ), HydrusData.ToHumanInt( num_deleted ), percent ) - - data_strings.append( data_string ) - - - text = os.linesep.join( data_strings ) - - HG.client_controller.pub( 'clipboard', 'text', text ) - - - def _GetQualityInfo( self ): - - data = [] - - for query in self._queries.GetData( only_selected = True ): - - fsc = query.GetFileSeedCache() - - hashes = fsc.GetHashes() - - media_results = HG.client_controller.Read( 'media_results', hashes ) - - num_inbox = 0 - num_archived = 0 - num_deleted = 0 - - for media_result in media_results: - - lm = media_result.GetLocationsManager() - - if lm.IsLocal() and not lm.IsTrashed(): - - if media_result.GetInbox(): - - num_inbox += 1 - - else: - - num_archived += 1 - - - else: - - num_deleted += 1 - - - - data.append( ( query.GetHumanName(), num_inbox, num_archived, num_deleted ) ) - - - return data - - - def _ShowQualityInfo( self ): - - data = self._GetQualityInfo() - - data_strings = [] - - for ( name, num_inbox, num_archived, num_deleted ) in data: - - data_string = '{}: inbox {} | archive {} | deleted {}'.format( name, HydrusData.ToHumanInt( num_inbox ), HydrusData.ToHumanInt( num_archived ), HydrusData.ToHumanInt( num_deleted ) ) - - if num_archived + num_deleted > 0: - - data_string += ' | good {}'.format( 
HydrusData.ConvertFloatToPercentage( num_archived / ( num_archived + num_deleted ) ) ) - - - data_strings.append( data_string ) - - - message = os.linesep.join( data_strings ) - - QW.QMessageBox.information( self, 'Information', message ) - - - def _ListCtrlCanCheckNow( self ): - - for query in self._queries.GetData( only_selected = True ): - - if query.CanCheckNow(): - - return True - - - - return False - - - def _ListCtrlCanResetCache( self ): - - for query in self._queries.GetData( only_selected = True ): - - if not query.IsInitialSync(): - - return True - - - - return False - - - def _ListCtrlCanRetryFailed( self ): - - for query in self._queries.GetData( only_selected = True ): - - if query.CanRetryFailed(): - - return True - - - - return False - - - def _ListCtrlCanRetryIgnored( self ): - - for query in self._queries.GetData( only_selected = True ): - - if query.CanRetryIgnored(): - - return True - - - - return False - - - def _PasteQueries( self ): - - message = 'This will add new queries by pulling them from your clipboard. It assumes they are currently in your clipboard and newline separated. Is that ok?' - - result = ClientGUIDialogsQuick.GetYesNo( self, message ) - - if result != QW.QDialog.Accepted: - - return - - - try: - - text = HG.client_controller.GetClipboardText() - - except HydrusExceptions.DataMissing as e: - - QW.QMessageBox.critical( self, 'Error', str(e) ) - - return - - - try: - - query_texts = HydrusText.DeserialiseNewlinedTexts( text ) - - current_query_texts = self._GetCurrentQueryTexts() - - already_existing_query_texts = sorted( current_query_texts.intersection( query_texts ) ) - new_query_texts = sorted( set( query_texts ).difference( current_query_texts ) ) - - if len( already_existing_query_texts ) > 0: - - if len( already_existing_query_texts ) > 50: - - message = '{} queries were already in the subscription, so they need not be added.'.format( HydrusData.ToHumanInt( len( already_existing_query_texts ) ) ) - - else: - - if len( already_existing_query_texts ) > 5: - - aeqt_separator = ', ' - - else: - - aeqt_separator = os.linesep - - - message = 'The queries:' - message += os.linesep * 2 - message += aeqt_separator.join( already_existing_query_texts ) - message += os.linesep * 2 - message += 'Were already in the subscription, so they need not be added.' - - - if len( new_query_texts ) > 0: - - if len( new_query_texts ) > 50: - - message = '{} queries were new and will be added.'.format( HydrusData.ToHumanInt( len( new_query_texts ) ) ) - - else: - - if len( new_query_texts ) > 5: - - nqt_separator = ', ' - - else: - - nqt_separator = os.linesep - - - message += os.linesep * 2 - message += 'The queries:' - message += os.linesep * 2 - message += nqt_separator.join( new_query_texts ) - message += os.linesep * 2 - message += 'Were new and will be added.' 
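Once the clipboard text is deserialised, the paste logic above is a straightforward partition of the pasted texts against the subscription's current query texts. A self-contained sketch, with str.splitlines standing in for HydrusText.DeserialiseNewlinedTexts:

def partition_pasted_queries( clipboard_text, current_query_texts ):
    
    # a simplified stand-in for HydrusText.DeserialiseNewlinedTexts
    pasted = [ line.strip() for line in clipboard_text.splitlines() if line.strip() != '' ]
    
    current = set( current_query_texts )
    
    already_existing = sorted( current.intersection( pasted ) )
    new = sorted( set( pasted ).difference( current ) )
    
    return ( already_existing, new )

For example, partition_pasted_queries( 'samus\nridley', { 'samus' } ) gives ( [ 'samus' ], [ 'ridley' ] ), matching the 'already in the subscription' and 'new and will be added' branches above.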
- - - - QW.QMessageBox.information( self, 'Information', message ) - - - queries = [ ClientImportSubscriptionQuery.SubscriptionQuery( query_text ) for query_text in new_query_texts ] - - self._queries.AddDatas( queries ) - - except: - - QW.QMessageBox.critical( self, 'Error', 'I could not understand what was in the clipboard' ) - - - - def _PausePlay( self ): - - selected_queries = self._queries.GetData( only_selected = True ) - - for query in selected_queries: - - query.PausePlay() - - - self._queries.UpdateDatas( selected_queries ) - - - def _ResetCache( self ): - - message = 'Resetting these queries will delete all their cached urls, meaning when the subscription next runs, they will have to download all those links over again. This may be expensive in time and data. Only do this if you know what it means. Do you want to do it?' - - result = ClientGUIDialogsQuick.GetYesNo( self, message ) - - if result == QW.QDialog.Accepted: - - selected_queries = self._queries.GetData( only_selected = True ) - - for query in selected_queries: - - query.Reset() - - - self._queries.UpdateDatas( selected_queries ) - - - - def _RetryFailed( self ): - - selected_queries = self._queries.GetData( only_selected = True ) - - for query in selected_queries: - - query.RetryFailures() - - - self._queries.UpdateDatas( selected_queries ) - - self._no_work_until = 0 - - self._UpdateDelayText() - - - def _RetryIgnored( self ): - - selected_queries = self._queries.GetData( only_selected = True ) - - for query in selected_queries: - - query.RetryIgnored() - - - self._queries.UpdateDatas( selected_queries ) - - - def _UpdateDelayText( self ): - - if HydrusData.TimeHasPassed( self._no_work_until ): - - status = 'no recent errors' - - else: - - status = 'delayed--retrying ' + HydrusData.TimestampToPrettyTimeDelta( self._no_work_until, just_now_threshold = 0 ) + ' because: ' + self._no_work_until_reason - - - self._delay_st.setText( status ) - - - def GetValue( self ) -> ClientImportSubscriptions.Subscription: - - name = self._name.text() - - subscription = ClientImportSubscriptions.Subscription( name ) - - gug_key_and_name = self._gug_key_and_name.GetValue() - - initial_file_limit = self._initial_file_limit.value() - periodic_file_limit = self._periodic_file_limit.value() - - paused = self._paused.isChecked() - - checker_options = self._checker_options.GetValue() - file_import_options = self._file_import_options.GetValue() - tag_import_options = self._tag_import_options.GetValue() - - queries = self._queries.GetData() - - subscription.SetTuple( gug_key_and_name, checker_options, initial_file_limit, periodic_file_limit, paused, file_import_options, tag_import_options, self._no_work_until ) - - subscription.SetQueries( queries ) - - show_a_popup_while_working = self._show_a_popup_while_working.isChecked() - publish_files_to_popup_button = self._publish_files_to_popup_button.isChecked() - publish_files_to_page = self._publish_files_to_page.isChecked() - publish_label_override = self._publish_label_override.GetValue() - merge_query_publish_events = self._merge_query_publish_events.isChecked() - - subscription.SetPresentationOptions( show_a_popup_while_working, publish_files_to_popup_button, publish_files_to_page, publish_label_override, merge_query_publish_events ) - - return subscription - - -class EditSubscriptionQueryPanel( ClientGUIScrolledPanels.EditPanel ): - - def __init__( self, parent: QW.QWidget, query: ClientImportSubscriptionQuery.SubscriptionQuery ): - - ClientGUIScrolledPanels.EditPanel.__init__( self, parent ) - 
- self._original_query = query - - self._status_st = ClientGUICommon.BetterStaticText( self ) - - st_width = ClientGUIFunctions.ConvertTextToPixelWidth( self._status_st, 50 ) - - self._status_st.setMinimumWidth( st_width ) - - self._display_name = ClientGUICommon.NoneableTextCtrl( self, none_phrase = 'show query text' ) - self._query_text = QW.QLineEdit( self ) - self._check_now = QW.QCheckBox( self ) - self._paused = QW.QCheckBox( self ) - - self._file_seed_cache_control = ClientGUIFileSeedCache.FileSeedCacheStatusControl( self, HG.client_controller ) - - self._gallery_seed_log_control = ClientGUIGallerySeedLog.GallerySeedLogStatusControl( self, HG.client_controller, True, True ) - - tag_import_options = self._original_query.GetTagImportOptions() - show_downloader_options = False # just for additional tags, no parsing gubbins needed - - self._tag_import_options = ClientGUIImport.TagImportOptionsButton( self, tag_import_options, show_downloader_options ) - - # - - ( query_text, check_now, self._last_check_time, self._next_check_time, paused, self._status ) = self._original_query.ToTuple() - - display_name = self._original_query.GetDisplayName() - - self._display_name.SetValue( display_name ) - - self._query_text.setText( query_text ) - - self._check_now.setChecked( check_now ) - - self._paused.setChecked( paused ) - - self._file_seed_cache = self._original_query.GetFileSeedCache().Duplicate() - - self._file_seed_cache_control.SetFileSeedCache( self._file_seed_cache ) - - self._gallery_seed_log = self._original_query.GetGallerySeedLog().Duplicate() - - self._gallery_seed_log_control.SetGallerySeedLog( self._gallery_seed_log ) - - # - - rows = [] - - rows.append( ( 'optional display name: ', self._display_name ) ) - rows.append( ( 'query text: ', self._query_text ) ) - rows.append( ( 'check now: ', self._check_now ) ) - rows.append( ( 'paused: ', self._paused ) ) - - gridbox = ClientGUICommon.WrapInGrid( self, rows ) - - vbox = QP.VBoxLayout() - - QP.AddToLayout( vbox, self._status_st, CC.FLAGS_EXPAND_PERPENDICULAR ) - QP.AddToLayout( vbox, self._file_seed_cache_control, CC.FLAGS_EXPAND_PERPENDICULAR ) - QP.AddToLayout( vbox, self._gallery_seed_log_control, CC.FLAGS_EXPAND_PERPENDICULAR ) - QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR ) - QP.AddToLayout( vbox, self._tag_import_options, CC.FLAGS_EXPAND_PERPENDICULAR ) - - self.widget().setLayout( vbox ) - - # - - self._check_now.clicked.connect( self._UpdateStatus ) - self._paused.clicked.connect( self._UpdateStatus ) - - self._UpdateStatus() - - self._query_text.selectAll() - - HG.client_controller.CallAfterQtSafe( self._query_text, self._query_text.setFocus, QC.Qt.OtherFocusReason ) - - - def _GetValue( self ) -> ClientImportSubscriptionQuery.SubscriptionQuery: - - query = self._original_query.Duplicate() - - query.SetQueryAndSeeds( self._query_text.text(), self._file_seed_cache, self._gallery_seed_log ) - - query.SetPaused( self._paused.isChecked() ) - - query.SetCheckNow( self._check_now.isChecked() ) - - query.SetDisplayName( self._display_name.GetValue() ) - - query.SetTagImportOptions( self._tag_import_options.GetValue() ) - - return query - - - def _UpdateStatus( self ): - - query = self._GetValue() - - self._status_st.setText( 'next check: {}'.format( query.GetNextCheckStatusString() ) ) - - - def GetValue( self ) -> ClientImportSubscriptionQuery.SubscriptionQuery: - - query = self._GetValue() - - return query - - -class EditSubscriptionsPanel( ClientGUIScrolledPanels.EditPanel ): - - def __init__( self, parent: 
QW.QWidget, subscriptions: typing.Iterable[ ClientImportSubscriptions.Subscription ], subs_are_globally_paused: bool = False ): - - ClientGUIScrolledPanels.EditPanel.__init__( self, parent ) - - # - - menu_items = [] - - page_func = HydrusData.Call( ClientPaths.LaunchPathInWebBrowser, os.path.join( HC.HELP_DIR, 'getting_started_subscriptions.html' ) ) - - menu_items.append( ( 'normal', 'open the html subscriptions help', 'Open the help page for subscriptions in your web browser.', page_func ) ) - - help_button = ClientGUICommon.MenuBitmapButton( self, CC.global_pixmaps().help, menu_items ) - - help_hbox = ClientGUICommon.WrapInText( help_button, self, 'help for this panel -->', QG.QColor( 0, 0, 255 ) ) - - subscriptions_panel = ClientGUIListCtrl.BetterListCtrlPanel( self ) - - columns = [ ( 'name', -1 ), ( 'source', 20 ), ( 'query status', 25 ), ( 'last new file time', 20 ), ( 'last checked', 20 ), ( 'recent error/delay?', 20 ), ( 'items', 13 ), ( 'paused', 8 ) ] - - self._subscriptions = ClientGUIListCtrl.BetterListCtrl( subscriptions_panel, 'subscriptions', 12, 20, columns, self._ConvertSubscriptionToListCtrlTuples, use_simple_delete = True, activation_callback = self.Edit ) - - subscriptions_panel.SetListCtrl( self._subscriptions ) - - subscriptions_panel.AddButton( 'add', self.Add ) - subscriptions_panel.AddButton( 'edit', self.Edit, enabled_only_on_selection = True ) - subscriptions_panel.AddDeleteButton() - - subscriptions_panel.AddSeparator() - - subscriptions_panel.AddImportExportButtons( ( ClientImportSubscriptions.Subscription, ), self._AddSubscription ) - - subscriptions_panel.NewButtonRow() - - subscriptions_panel.AddButton( 'merge', self.Merge, enabled_check_func = self._CanMerge ) - subscriptions_panel.AddButton( 'separate', self.Separate, enabled_check_func = self._CanSeparate ) - - subscriptions_panel.AddSeparator() - - subscriptions_panel.AddButton( 'pause/resume', self.PauseResume, enabled_only_on_selection = True ) - subscriptions_panel.AddButton( 'retry failures', self.RetryFailures, enabled_check_func = self._CanRetryFailures ) - subscriptions_panel.AddButton( 'retry ignored', self.RetryIgnored, enabled_check_func = self._CanRetryIgnored ) - subscriptions_panel.AddButton( 'scrub delays', self.ScrubDelays, enabled_check_func = self._CanScrubDelays ) - subscriptions_panel.AddButton( 'check queries now', self.CheckNow, enabled_check_func = self._CanCheckNow ) - - subscriptions_panel.AddButton( 'reset', self.Reset, enabled_check_func = self._CanReset ) - - subscriptions_panel.NewButtonRow() - - subscriptions_panel.AddButton( 'select subscriptions', self.SelectSubscriptions ) - subscriptions_panel.AddButton( 'overwrite checker timings', self.SetCheckerOptions, enabled_only_on_selection = True ) - subscriptions_panel.AddButton( 'overwrite tag import options', self.SetTagImportOptions, enabled_only_on_selection = True ) - - # - - self._subscriptions.AddDatas( subscriptions ) - - self._subscriptions.Sort( 0 ) - - # - - vbox = QP.VBoxLayout() - - QP.AddToLayout( vbox, help_hbox, CC.FLAGS_BUTTON_SIZER ) - - message = 'Subscriptions do not work well if they get too large! If any sub has >200,000 items, separate it into smaller pieces immediately!' - - st = ClientGUICommon.BetterStaticText( self, message ) - st.setObjectName( 'HydrusWarning' ) - - QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR ) - - if subs_are_globally_paused: - - message = 'SUBSCRIPTIONS ARE CURRENTLY GLOBALLY PAUSED! CHECK THE NETWORK MENU TO UNPAUSE THEM.' 
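The '>200,000 items' warning above refers to the total item count across all of a subscription's queries--in this patch's terms, the sum of each query's file seed cache num_total, as returned by GetStatus().GetValueRange(). A trivial sketch of that check, taking the per-query totals as plain ints:

def subscription_seems_too_big( query_num_totals, max_items = 200000 ):
    
    # query_num_totals: one num_total per query, i.e. GetValueRange()[1] in the code above
    return sum( query_num_totals ) > max_items

So subscription_seems_too_big( [ 150000, 80000 ] ) is True, and such a subscription should be separated into smaller pieces.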
- - st = ClientGUICommon.BetterStaticText( self, message ) - st.setObjectName( 'HydrusWarning' ) - - QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR ) - - - QP.AddToLayout( vbox, subscriptions_panel, CC.FLAGS_EXPAND_BOTH_WAYS ) - - self.widget().setLayout( vbox ) - - - def _AddSubscription( self, subscription ): - - subscription.SetNonDupeName( self._GetExistingNames() ) - - self._subscriptions.AddDatas( ( subscription, ) ) - - - def _CanCheckNow( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - return True in ( subscription.CanCheckNow() for subscription in subscriptions ) - - - def _CanMerge( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - # only subs with queries can be merged - - mergeable_subscriptions = [ subscription for subscription in subscriptions if len( subscription.GetQueries() ) > 0 ] - - unique_gug_names = { subscription.GetGUGKeyAndName()[1] for subscription in mergeable_subscriptions } - - # if there are fewer, there must be dupes, so we must be able to merge - - return len( unique_gug_names ) < len( subscriptions ) - - - def _CanReset( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - return True in ( subscription.CanReset() for subscription in subscriptions ) - - - def _CanRetryFailures( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - return True in ( subscription.CanRetryFailures() for subscription in subscriptions ) - - - def _CanRetryIgnored( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - return True in ( subscription.CanRetryIgnored() for subscription in subscriptions ) - - - def _CanScrubDelays( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - return True in ( subscription.CanScrubDelay() for subscription in subscriptions ) - - - def _CanSeparate( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - if len( subscriptions ) != 1: - - return False - - - subscription = subscriptions[0] - - if len( subscription.GetQueries() ) > 1: - - return True - - - return False - - - def _ConvertSubscriptionToListCtrlTuples( self, subscription ): - - ( name, gug_key_and_name, queries, checker_options, initial_file_limit, periodic_file_limit, paused, file_import_options, tag_import_options, no_work_until, no_work_until_reason ) = subscription.ToTuple() - - pretty_site = gug_key_and_name[1] - - period = 100 - pretty_period = 'fix this' - - if len( queries ) > 0: - - last_new_file_time = max( ( query.GetLatestAddedTime() for query in queries ) ) - - last_checked = max( ( query.GetLastChecked() for query in queries ) ) - - - else: - - last_new_file_time = 0 - - last_checked = 0 - - - if last_new_file_time is None or last_new_file_time == 0: - - pretty_last_new_file_time = 'n/a' - - else: - - pretty_last_new_file_time = HydrusData.TimestampToPrettyTimeDelta( last_new_file_time ) - - - if last_checked is None or last_checked == 0: - - pretty_last_checked = 'n/a' - - else: - - pretty_last_checked = HydrusData.TimestampToPrettyTimeDelta( last_checked ) - - - # - - num_queries = len( queries ) - num_dead = 0 - num_paused = 0 - - for query in queries: - - if query.IsDead(): - - num_dead += 1 - - elif query.IsPaused(): - - num_paused += 1 - - - - num_ok = num_queries - ( num_dead + num_paused ) - - status = ( num_queries, num_paused, num_dead ) - - if num_queries == 0: - - pretty_status = 'no queries' - - else: - - status_components = [ 
HydrusData.ToHumanInt( num_ok ) + ' working' ] - - if num_paused > 0: - - status_components.append( HydrusData.ToHumanInt( num_paused ) + ' paused' ) - - - if num_dead > 0: - - status_components.append( HydrusData.ToHumanInt( num_dead ) + ' dead' ) - - - pretty_status = ', '.join( status_components ) - - - # - - if HydrusData.TimeHasPassed( no_work_until ): - - try: - - ( min_estimate, max_estimate ) = subscription.GetBandwidthWaitingEstimateMinMax() - - if max_estimate == 0: # don't seem to be any delays of any kind - - pretty_delay = '' - delay = 0 - - elif min_estimate == 0: # some are good to go, but there are delays - - pretty_delay = 'bandwidth: some ok, some up to ' + HydrusData.TimeDeltaToPrettyTimeDelta( max_estimate ) - delay = max_estimate - - else: - - if min_estimate == max_estimate: # probably just one query, and it is delayed - - pretty_delay = 'bandwidth: up to ' + HydrusData.TimeDeltaToPrettyTimeDelta( max_estimate ) - delay = max_estimate - - else: - - pretty_delay = 'bandwidth: from ' + HydrusData.TimeDeltaToPrettyTimeDelta( min_estimate ) + ' to ' + HydrusData.TimeDeltaToPrettyTimeDelta( max_estimate ) - delay = max_estimate - - - - except: - - pretty_delay = 'could not determine bandwidth, there may be an error with the sub or its urls' - delay = 0 - - - else: - - pretty_delay = 'delayed--retrying ' + HydrusData.TimestampToPrettyTimeDelta( no_work_until, just_now_threshold = 0 ) + ' - because: ' + no_work_until_reason - delay = HydrusData.GetTimeDeltaUntilTime( no_work_until ) - - - file_seed_caches = [ query.GetFileSeedCache() for query in queries ] - - ( queries_status, queries_simple_status, ( num_done, num_total ) ) = ClientImportFileSeeds.GenerateFileSeedCachesStatus( file_seed_caches ) - - items = ( num_total, num_done ) - - pretty_items = queries_simple_status - - if paused: - - pretty_paused = 'yes' - - else: - - pretty_paused = '' - - - sort_last_new_file_time = ClientGUIListCtrl.SafeNoneInt( last_new_file_time ) - sort_last_checked = ClientGUIListCtrl.SafeNoneInt( last_checked ) - - display_tuple = ( name, pretty_site, pretty_status, pretty_last_new_file_time, pretty_last_checked, pretty_delay, pretty_items, pretty_paused ) - sort_tuple = ( name, pretty_site, status, sort_last_new_file_time, sort_last_checked, delay, items, paused ) - - return ( display_tuple, sort_tuple ) - - - def _GetExistingNames( self ): - - subscriptions = self._subscriptions.GetData() - - names = { subscription.GetName() for subscription in subscriptions } - - return names - - - def _GetExportObject( self ): - - to_export = HydrusSerialisable.SerialisableList() - - for subscription in self._subscriptions.GetData( only_selected = True ): - - to_export.append( subscription ) - - - if len( to_export ) == 0: - - return None - - elif len( to_export ) == 1: - - return to_export[0] - - else: - - return to_export - - - - def _ImportObject( self, obj ): - - if isinstance( obj, HydrusSerialisable.SerialisableList ): - - for sub_obj in obj: - - self._ImportObject( sub_obj ) - - - else: - - if isinstance( obj, ClientImportSubscriptions.Subscription ): - - subscription = obj - - subscription.SetNonDupeName( self._GetExistingNames() ) - - self._subscriptions.AddDatas( ( subscription, ) ) - - else: - - QW.QMessageBox.warning( self, 'Warning', 'That was not a subscription--it was a: '+type(obj).__name__ ) - - - - - def Add( self ): - - gug_key_and_name = HG.client_controller.network_engine.domain_manager.GetDefaultGUGKeyAndName() - - empty_subscription = ClientImportSubscriptions.Subscription( 'new 
subscription', gug_key_and_name = gug_key_and_name ) - - frame_key = 'edit_subscription_dialog' - - with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit subscription', frame_key ) as dlg_edit: - - panel = EditSubscriptionPanel( dlg_edit, empty_subscription ) - - dlg_edit.SetPanel( panel ) - - if dlg_edit.exec() == QW.QDialog.Accepted: - - new_subscription = panel.GetValue() - - self._AddSubscription( new_subscription ) - - self._subscriptions.Sort() - - - - def CheckNow( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - for subscription in subscriptions: - - subscription.CheckNow() - - - self._subscriptions.UpdateDatas( subscriptions ) - - - def Edit( self ): - - subs_to_edit = self._subscriptions.GetData( only_selected = True ) - - for subscription in subs_to_edit: - - frame_key = 'edit_subscription_dialog' - - with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit subscription', frame_key ) as dlg: - - original_name = subscription.GetName() - - panel = EditSubscriptionPanel( dlg, subscription ) - - dlg.SetPanel( panel ) - - result = dlg.exec() - - if result == QW.QDialog.Accepted: - - self._subscriptions.DeleteDatas( ( subscription, ) ) - - edited_subscription = panel.GetValue() - - edited_subscription.SetNonDupeName( self._GetExistingNames() ) - - self._subscriptions.AddDatas( ( edited_subscription, ) ) - - elif dlg.WasCancelled(): - - break - - - - - self._subscriptions.Sort() - - - def GetValue( self ) -> typing.List[ ClientImportSubscriptions.Subscription ]: - - subscriptions = self._subscriptions.GetData() - - return subscriptions - - - def Merge( self ): - - message = 'Are you sure you want to merge the selected subscriptions? This will combine all selected subscriptions that share the same downloader, wrapping all their different queries into one subscription.' - message += os.linesep * 2 - message += 'This is a big operation, so if it does not do what you expect, hit cancel afterwards!' - message += os.linesep * 2 - message += 'Please note that all other subscription settings (like paused status and file limits and tag options) will be merged as well, so double-check your merged subs\' settings afterwards.' - - result = ClientGUIDialogsQuick.GetYesNo( self, message ) - - if result == QW.QDialog.Accepted: - - original_subs = self._subscriptions.GetData( only_selected = True ) - - potential_mergees = [ sub.Duplicate() for sub in original_subs ] - - mergeable_groups = [] - merged_subs = [] - unmergeable_subs = [] - - while len( potential_mergees ) > 0: - - potential_primary = potential_mergees.pop() - - ( mergeables_with_our_primary, not_mergeable_with_our_primary ) = potential_primary.GetMergeable( potential_mergees ) - - if len( mergeables_with_our_primary ) > 0: - - mergeable_group = [] - - mergeable_group.append( potential_primary ) - mergeable_group.extend( mergeables_with_our_primary ) - - mergeable_groups.append( mergeable_group ) - - else: - - unmergeable_subs.append( potential_primary ) - - - potential_mergees = not_mergeable_with_our_primary - - - if len( mergeable_groups ) == 0: - - QW.QMessageBox.information( self, 'Information', 'Unfortunately, none of those subscriptions appear to be mergeable!'
) - - return - - - for mergeable_group in mergeable_groups: - - mergeable_group.sort( key = lambda sub: sub.GetName() ) - - choice_tuples = [ ( sub.GetName(), sub ) for sub in mergeable_group ] - - try: - - primary_sub = ClientGUIDialogsQuick.SelectFromList( self, 'select the primary subscription--into which to merge the others', choice_tuples ) - - except HydrusExceptions.CancelledException: - - return - - - mergeable_group.remove( primary_sub ) - - primary_sub.Merge( mergeable_group ) - - primary_sub_name = primary_sub.GetName() - - message = primary_sub_name + ' was able to merge ' + HydrusData.ToHumanInt( len( mergeable_group ) ) + ' other subscriptions. If you wish to change its name, do so here.' - - with ClientGUIDialogs.DialogTextEntry( self, message, default = primary_sub_name ) as dlg: - - if dlg.exec() == QW.QDialog.Accepted: - - name = dlg.GetValue() - - primary_sub.SetName( name ) - - - # don't care about a cancel here--we'll take that as 'I didn't want to change its name', not 'abort' - - - merged_subs.append( primary_sub ) - - - # we are ready to do it - - self._subscriptions.DeleteDatas( original_subs ) - - self._subscriptions.AddDatas( unmergeable_subs ) - - for merged_sub in merged_subs: - - merged_sub.SetNonDupeName( self._GetExistingNames() ) - - self._subscriptions.AddDatas( ( merged_sub, ) ) - - - self._subscriptions.Sort() - - - - def PauseResume( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - for subscription in subscriptions: - - subscription.PauseResume() - - - self._subscriptions.UpdateDatas( subscriptions ) - - - def Reset( self ): - - message = 'Resetting these subscriptions will delete all their remembered urls, meaning when they next run, they will try to download them all over again. This may be expensive in time and data. Only do it if you are willing to wait. Do you want to do it?' - - result = ClientGUIDialogsQuick.GetYesNo( self, message ) - - if result == QW.QDialog.Accepted: - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - for subscription in subscriptions: - - subscription.Reset() - - - self._subscriptions.UpdateDatas( subscriptions ) - - - - def RetryFailures( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - for subscription in subscriptions: - - subscription.RetryFailures() - - - self._subscriptions.UpdateDatas( subscriptions ) - - - def RetryIgnored( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - for subscription in subscriptions: - - subscription.RetryIgnored() - - - self._subscriptions.UpdateDatas( subscriptions ) - - - def ScrubDelays( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - for subscription in subscriptions: - - subscription.ScrubDelay() - - - self._subscriptions.UpdateDatas( subscriptions ) - - - def SelectSubscriptions( self ): - - message = 'This selects subscriptions based on query text. Please enter some search text, and any subscription that has a query that includes that text will be selected.' 
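SelectSubscriptions above selects any subscription that has a query containing the entered text, via HasQuerySearchTextFragment. A plain equivalent of that match (the case-insensitive comparison is an assumption here, not something this patch confirms):

def has_query_search_text_fragment( query_texts, search_text ):
    
    search_text = search_text.lower()
    
    # substring match over the subscription's query texts
    return any( search_text in query_text.lower() for query_text in query_texts )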
- - with ClientGUIDialogs.DialogTextEntry( self, message ) as dlg: - - if dlg.exec() == QW.QDialog.Accepted: - - search_text = dlg.GetValue() - - self._subscriptions.clearSelection() - - selectee_subscriptions = [] - - for subscription in self._subscriptions.GetData(): - - if subscription.HasQuerySearchTextFragment( search_text ): - - selectee_subscriptions.append( subscription ) - - - - self._subscriptions.SelectDatas( selectee_subscriptions ) - - - - - def Separate( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - if len( subscriptions ) != 1: - - QW.QMessageBox.critical( self, 'Error', 'Separate only works if one subscription is selected!' ) - - return - - - subscription = subscriptions[0] - - num_queries = len( subscription.GetQueries() ) - - if num_queries <= 1: - - QW.QMessageBox.critical( self, 'Error', 'Separate only works if the selected subscription has more than one query!' ) - - return - - - if num_queries > 100: - - message = 'This is a large subscription. It is difficult to separate it on a per-query basis, so instead the system will automatically cut it into two halves. Is this ok?' - - result = ClientGUIDialogsQuick.GetYesNo( self, message ) - - if result != QW.QDialog.Accepted: - - return - - - action = 'half' - - elif num_queries > 2: - - message = 'Are you sure you want to separate the selected subscriptions? Separating breaks merged subscriptions apart into smaller pieces.' - yes_tuples = [ ( 'break it in half', 'half' ), ( 'break it all into single-query subscriptions', 'whole' ), ( 'only extract some of the subscription', 'part' ) ] - - with ClientGUIDialogs.DialogYesYesNo( self, message, yes_tuples = yes_tuples, no_label = 'forget it' ) as dlg: - - if dlg.exec() == QW.QDialog.Accepted: - - action = dlg.GetValue() - - else: - - return - - - - else: - - action = 'whole' - - - want_post_merge = False - - if action == 'part': - - queries = subscription.GetQueries() - - choice_tuples = [ ( query.GetHumanName(), query, False ) for query in queries ] - - with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'select the queries to extract' ) as dlg: - - panel = EditChooseMultiple( dlg, choice_tuples ) - - dlg.SetPanel( panel ) - - if dlg.exec() == QW.QDialog.Accepted: - - queries_to_extract = panel.GetValue() - - else: - - return - - - - if len( queries_to_extract ) == num_queries: # the madman selected them all - - action = 'whole' - - elif len( queries_to_extract ) > 1: - - yes_tuples = [ ( 'one new merged subscription', True ), ( 'many subscriptions with only one query', False ) ] - - message = 'Do you want the extracted queries to be a new merged subscription, or many subscriptions with only one query?' - - with ClientGUIDialogs.DialogYesYesNo( self, message, yes_tuples = yes_tuples, no_label = 'forget it' ) as dlg: - - if dlg.exec() == QW.QDialog.Accepted: - - want_post_merge = dlg.GetValue() - - else: - - return - - - - - - if action != 'half': - - if want_post_merge: - - message = 'Please enter the name for the new subscription.' - - else: - - message = 'Please enter the base name for the new subscriptions. They will be named \'[NAME]: query\'.' 
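For the 'half' action described above, the split itself is a simple bisection of the query list: the first half is extracted and merged into a new '(A)' subscription, and the remainder stays in the renamed '(B)' original, as the implementation that follows shows. In isolation:

def split_queries_in_half( queries ):
    
    midpoint = len( queries ) // 2
    
    # first half extracted into the '(A)' sub, remainder kept by the '(B)' original
    return ( queries[ : midpoint ], queries[ midpoint : ] )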
- - - with ClientGUIDialogs.DialogTextEntry( self, message, default = subscription.GetName() ) as dlg: - - if dlg.exec() == QW.QDialog.Accepted: - - name = dlg.GetValue() - - else: - - return - - - - - # ok, let's do it - - final_subscriptions = [] - - self._subscriptions.DeleteDatas( ( subscription, ) ) - - if action == 'whole': - - final_subscriptions.extend( subscription.Separate( name ) ) - - elif action == 'part': - - extracted_subscriptions = list( subscription.Separate( name, queries_to_extract ) ) - - if want_post_merge: - - # it is ok to do a blind merge here since they all share the same settings and will get a new name - - primary_sub = extracted_subscriptions.pop() - - primary_sub.Merge( extracted_subscriptions ) - - primary_sub.SetName( name ) - - final_subscriptions.append( primary_sub ) - - else: - - final_subscriptions.extend( extracted_subscriptions ) - - - final_subscriptions.append( subscription ) - - elif action == 'half': - - queries = subscription.GetQueries() - - queries_to_extract = queries[ : len( queries ) // 2 ] - - name = subscription.GetName() - - extracted_subscriptions = list( subscription.Separate( name, queries_to_extract ) ) - - primary_sub = extracted_subscriptions.pop() - - primary_sub.Merge( extracted_subscriptions ) - - primary_sub.SetName( '{} (A)'.format( name ) ) - subscription.SetName( '{} (B)'.format( name ) ) - - final_subscriptions.append( primary_sub ) - final_subscriptions.append( subscription ) - - - for final_subscription in final_subscriptions: - - final_subscription.SetNonDupeName( self._GetExistingNames() ) - - self._subscriptions.AddDatas( ( final_subscription, ) ) - - - self._subscriptions.Sort() - - - def SetCheckerOptions( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - if len( subscriptions ) == 0: - - return - - - checker_options = subscriptions[0].GetCheckerOptions() - - with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit check timings' ) as dlg: - - panel = ClientGUITime.EditCheckerOptions( dlg, checker_options ) - - dlg.SetPanel( panel ) - - if dlg.exec() == QW.QDialog.Accepted: - - checker_options = panel.GetValue() - - for subscription in subscriptions: - - subscription.SetCheckerOptions( checker_options ) - - - self._subscriptions.UpdateDatas( subscriptions ) - - - - - def SetTagImportOptions( self ): - - subscriptions = self._subscriptions.GetData( only_selected = True ) - - if len( subscriptions ) == 0: - - return - - - tag_import_options = subscriptions[0].GetTagImportOptions() - show_downloader_options = True - - with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit tag import options' ) as dlg: - - panel = EditTagImportOptionsPanel( dlg, tag_import_options, show_downloader_options, allow_default_selection = True ) - - dlg.SetPanel( panel ) - - if dlg.exec() == QW.QDialog.Accepted: - - tag_import_options = panel.GetValue() - - for subscription in subscriptions: - - subscription.SetTagImportOptions( tag_import_options ) - - - self._subscriptions.UpdateDatas( subscriptions ) - - - - class EditTagImportOptionsPanel( ClientGUIScrolledPanels.EditPanel ): def __init__( self, parent: QW.QWidget, tag_import_options: ClientImportOptions.TagImportOptions, show_downloader_options: bool, allow_default_selection: bool = False ): @@ -5588,15 +3788,15 @@ class EditTagImportOptionsPanel( ClientGUIScrolledPanels.EditPanel ): tag_blacklist = tag_import_options.GetTagBlacklist() - message = 'Any tag that this filter _excludes_ will be considered a blacklisted tag and will stop the file 
importing. So if you only want to stop \'scat\' or \'gore\', just add them to the simple blacklist and hit ok.' + message = 'If a file about to be downloaded has a tag on the site that this blacklist blocks, the file will not be downloaded and imported. If you want to stop \'scat\' or \'gore\', just type them into the list.' message += os.linesep * 2 - message += 'This system tests the tags that are parsed from the site, as hydrus would end up getting them. Siblings of all the tags will also be tested. If you do not have excellent siblings, it is worth adding multiple versions of your tag, just to catch different sites terms. Add \'gore\', \'guro\', \'violence\', etc...' + message += 'This system tests all the tags that are parsed from the site, not any other tags the files may have in different places. Siblings of all those tags will also be tested. If none of your tag services have excellent siblings, it is worth adding multiple versions of your tag, just to catch different sites\' terms. Link up \'gore\', \'guro\', \'violence\', etc...' message += os.linesep * 2 - message += 'Additionally, for blacklists, unnamespaced rules will apply to namespaced tags. \'metroid\' in the blacklist will catch \'series:metroid\' as parsed from a site.' + message += 'Additionally, unnamespaced rules will apply to namespaced tags. \'metroid\' in the blacklist will catch \'series:metroid\' as parsed from a site.' message += os.linesep * 2 message += 'It is worth doing a small test here, just to make sure it is all set up how you want.' - self._tag_filter_button = ClientGUITags.TagFilterButton( downloader_options_panel, message, tag_blacklist, is_blacklist = True ) + self._tag_filter_button = ClientGUITags.TagFilterButton( downloader_options_panel, message, tag_blacklist, only_show_blacklist = True ) self._services_vbox = QP.VBoxLayout() diff --git a/hydrus/client/gui/ClientGUIShortcuts.py b/hydrus/client/gui/ClientGUIShortcuts.py index 09aeacb0..9cb4b861 100644 --- a/hydrus/client/gui/ClientGUIShortcuts.py +++ b/hydrus/client/gui/ClientGUIShortcuts.py @@ -214,7 +214,7 @@ shortcut_names_to_descriptions[ 'preview_media_window' ] = 'Actions for any vide SHORTCUTS_RESERVED_NAMES = [ 'global', 'archive_delete_filter', 'duplicate_filter', 'media', 'main_gui', 'media_viewer_browser', 'media_viewer', 'media_viewer_media_window', 'preview_media_window' ] SHORTCUTS_GLOBAL_ACTIONS = [ 'global_audio_mute', 'global_audio_unmute', 'global_audio_mute_flip', 'exit_application', 'exit_application_force_maintenance', 'restart_application', 'hide_to_system_tray' ] -SHORTCUTS_MEDIA_ACTIONS = [ 'manage_file_tags', 'manage_file_ratings', 'manage_file_urls', 'manage_file_notes', 'archive_file', 'inbox_file', 'delete_file', 'undelete_file', 'export_files', 'export_files_quick_auto_export', 'remove_file_from_view', 'open_file_in_external_program', 'open_selection_in_new_page', 'launch_the_archive_delete_filter', 'copy_bmp', 'copy_file', 'copy_path', 'copy_sha256_hash', 'get_similar_to_exact', 'get_similar_to_very_similar', 'get_similar_to_similar', 'get_similar_to_speculative', 'duplicate_media_set_alternate', 'duplicate_media_set_alternate_collections', 'duplicate_media_set_custom', 'duplicate_media_set_focused_better', 'duplicate_media_set_focused_king', 'duplicate_media_set_same_quality', 'open_known_url' ] +SHORTCUTS_MEDIA_ACTIONS = [ 'manage_file_tags', 'manage_file_ratings', 'manage_file_urls', 'manage_file_notes', 'archive_file', 'inbox_file', 'delete_file', 'undelete_file', 'export_files', 'export_files_quick_auto_export',
'remove_file_from_view', 'open_file_in_external_program', 'open_selection_in_new_page', 'launch_the_archive_delete_filter', 'copy_bmp', 'copy_bmp_or_file_if_not_bmpable', 'copy_file', 'copy_path', 'copy_sha256_hash', 'get_similar_to_exact', 'get_similar_to_very_similar', 'get_similar_to_similar', 'get_similar_to_speculative', 'duplicate_media_set_alternate', 'duplicate_media_set_alternate_collections', 'duplicate_media_set_custom', 'duplicate_media_set_focused_better', 'duplicate_media_set_focused_king', 'duplicate_media_set_same_quality', 'open_known_url' ] SHORTCUTS_MEDIA_VIEWER_ACTIONS = [ 'pause_media', 'pause_play_media', 'move_animation_to_previous_frame', 'move_animation_to_next_frame', 'switch_between_fullscreen_borderless_and_regular_framed_window', 'pan_up', 'pan_down', 'pan_left', 'pan_right', 'pan_top_edge', 'pan_bottom_edge', 'pan_left_edge', 'pan_right_edge', 'pan_vertical_center', 'pan_horizontal_center', 'zoom_in', 'zoom_out', 'switch_between_100_percent_and_canvas_zoom', 'flip_darkmode', 'close_media_viewer' ] SHORTCUTS_MEDIA_VIEWER_BROWSER_ACTIONS = [ 'view_next', 'view_first', 'view_last', 'view_previous', 'pause_play_slideshow', 'show_menu', 'close_media_viewer' ] SHORTCUTS_MAIN_GUI_ACTIONS = [ 'refresh', 'refresh_all_pages', 'refresh_page_of_pages_pages', 'new_page', 'new_page_of_pages', 'new_duplicate_filter_page', 'new_gallery_downloader_page', 'new_url_downloader_page', 'new_simple_downloader_page', 'new_watcher_downloader_page', 'synchronised_wait_switch', 'set_media_focus', 'show_hide_splitters', 'set_search_focus', 'unclose_page', 'close_page', 'redo', 'undo', 'flip_darkmode', 'check_all_import_folders', 'flip_debug_force_idle_mode_do_not_set_this', 'show_and_focus_manage_tags_favourite_tags', 'show_and_focus_manage_tags_related_tags', 'show_and_focus_manage_tags_file_lookup_script_tags', 'show_and_focus_manage_tags_recent_tags', 'focus_media_viewer' ] diff --git a/hydrus/client/gui/ClientGUISubscriptions.py b/hydrus/client/gui/ClientGUISubscriptions.py new file mode 100644 index 00000000..d209edac --- /dev/null +++ b/hydrus/client/gui/ClientGUISubscriptions.py @@ -0,0 +1,2196 @@ +import os +import typing + +from qtpy import QtCore as QC +from qtpy import QtWidgets as QW +from qtpy import QtGui as QG + +from hydrus.core import HydrusConstants as HC +from hydrus.core import HydrusData +from hydrus.core import HydrusExceptions +from hydrus.core import HydrusGlobals as HG +from hydrus.core import HydrusSerialisable +from hydrus.core import HydrusText +from hydrus.client import ClientConstants as CC +from hydrus.client import ClientPaths +from hydrus.client.gui import ClientGUICommon +from hydrus.client.gui import ClientGUIDialogs +from hydrus.client.gui import ClientGUIDialogsQuick +from hydrus.client.gui import ClientGUIFunctions +from hydrus.client.gui import ClientGUIImport +from hydrus.client.gui import ClientGUIListCtrl +from hydrus.client.gui import ClientGUIScrolledPanels +from hydrus.client.gui import ClientGUIFileSeedCache +from hydrus.client.gui import ClientGUIGallerySeedLog +from hydrus.client.gui import ClientGUIScrolledPanelsEdit +from hydrus.client.gui import ClientGUITime +from hydrus.client.gui import ClientGUITopLevelWindowsPanels +from hydrus.client.gui import QtPorting as QP +from hydrus.client.importing import ClientImporting +from hydrus.client.importing import ClientImportFileSeeds +from hydrus.client.importing import ClientImportSubscriptions +from hydrus.client.importing import ClientImportSubscriptionQuery +from hydrus.client.importing 
import ClientImportSubscriptionLegacy # keep this here so the serialisable stuff is registered, it has to be imported somewhere + +def AsyncGetQueryHeadersQualityInfo( win: QW.QWidget, query_headers: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ], call ): + + data = [] + + for query_header in query_headers: + + try: + + query_log_container = HG.client_controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER, query_header.GetQueryLogContainerName() ) + + except HydrusExceptions.DataMissing: + + continue + + + fsc = query_log_container.GetFileSeedCache() + + hashes = fsc.GetHashes() + + media_results = HG.client_controller.Read( 'media_results', hashes ) + + num_inbox = 0 + num_archived = 0 + num_deleted = 0 + + for media_result in media_results: + + lm = media_result.GetLocationsManager() + + if lm.IsLocal() and not lm.IsTrashed(): + + if media_result.GetInbox(): + + num_inbox += 1 + + else: + + num_archived += 1 + + + else: + + num_deleted += 1 + + + + data.append( ( query_header.GetHumanName(), num_inbox, num_archived, num_deleted ) ) + + + try: + + HG.client_controller.CallBlockingToQt( win, call, data ) + + except HydrusExceptions.QtDeadWindowException: + + pass + + +def AsyncGetQueryLogContainers( win: QW.QWidget, query_headers: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ], receiving_call, action_call ): + + query_log_containers = [] + + for query_header in query_headers: + + try: + + query_log_container = HG.client_controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER, query_header.GetQueryLogContainerName() ) + + except HydrusExceptions.DataMissing: + + continue + + + query_log_containers.append( query_log_container ) + + + try: + + HG.client_controller.CallBlockingToQt( win, receiving_call, query_log_containers, action_call ) + + except HydrusExceptions.QtDeadWindowException: + + pass + + +class EditSubscriptionPanel( ClientGUIScrolledPanels.EditPanel ): + + def __init__( self, parent: QW.QWidget, subscription: ClientImportSubscriptions.Subscription, names_to_edited_query_log_containers: typing.Mapping[ str, ClientImportSubscriptionQuery.SubscriptionQueryLogContainer ] ): + + subscription = subscription.Duplicate() + + ClientGUIScrolledPanels.EditPanel.__init__( self, parent ) + + self._original_subscription = subscription + self._names_to_edited_query_log_containers = dict( names_to_edited_query_log_containers ) + + # + + self._name = QW.QLineEdit( self ) + self._delay_st = ClientGUICommon.BetterStaticText( self ) + + # + + ( name, gug_key_and_name, query_headers, checker_options, initial_file_limit, periodic_file_limit, paused, file_import_options, tag_import_options, self._no_work_until, self._no_work_until_reason ) = subscription.ToTuple() + + self._query_panel = ClientGUICommon.StaticBox( self, 'site and queries' ) + + self._gug_key_and_name = ClientGUIImport.GUGKeyAndNameSelector( self._query_panel, gug_key_and_name ) + + queries_panel = ClientGUIListCtrl.BetterListCtrlPanel( self._query_panel ) + + columns = [ ( 'name/query', 20 ), ( 'paused', 8 ), ( 'status', 8 ), ( 'last new file time', 20 ), ( 'last check time', 20 ), ( 'next check time', 20 ), ( 'file velocity', 20 ), ( 'recent delays', 20 ), ( 'items', 13 ) ] + + self._query_headers = ClientGUIListCtrl.BetterListCtrl( queries_panel, 'subscription_queries', 10, 20, columns, self._ConvertQueryHeaderToListCtrlTuples, use_simple_delete = True, activation_callback = 
self._EditQuery ) + + queries_panel.SetListCtrl( self._query_headers ) + + queries_panel.AddButton( 'add', self._AddQuery ) + queries_panel.AddButton( 'copy queries', self._CopyQueries, enabled_only_on_selection = True ) + queries_panel.AddButton( 'paste queries', self._PasteQueries ) + queries_panel.AddButton( 'edit', self._EditQuery, enabled_only_on_selection = True ) + queries_panel.AddDeleteButton() + queries_panel.AddSeparator() + queries_panel.AddButton( 'pause/play', self._PausePlay, enabled_only_on_selection = True ) + queries_panel.AddButton( 'retry failed', self._STARTRetryFailed, enabled_check_func = self._ListCtrlCanRetryFailed ) + queries_panel.AddButton( 'retry ignored', self._STARTRetryIgnored, enabled_check_func = self._ListCtrlCanRetryIgnored ) + queries_panel.AddButton( 'check now', self._CheckNow, enabled_check_func = self._ListCtrlCanCheckNow ) + queries_panel.AddButton( 'reset', self._STARTReset, enabled_check_func = self._ListCtrlCanResetCache ) + + if HG.client_controller.new_options.GetBoolean( 'advanced_mode' ): + + queries_panel.AddSeparator() + + menu_items = [] + + menu_items.append( ( 'normal', 'show', 'Show quality info.', self._STARTShowQualityInfo ) ) + menu_items.append( ( 'normal', 'copy csv data to clipboard', 'Copy quality info to clipboard.', self._STARTCopyQualityInfo ) ) + + queries_panel.AddMenuButton( 'quality info', menu_items, enabled_only_on_selection = True ) + + + # + + self._file_limits_panel = ClientGUICommon.StaticBox( self, 'file limits' ) + + message = '''****Subscriptions are not for large one-time syncs**** + +tl;dr: Do not change the checker options or file limits until you really know what you are doing. The limits are now only 1000 (10000 in advanced mode) anyway, but you should leave them at 100/100. + +A subscription will start at a site's newest files and keep searching further and further back into the past. It will stop naturally if it reaches the end of results or starts to see files it saw in a previous check (and so assumes it has 'caught up' to where it was before). It will stop 'artificially' if it finds enough new files to hit the file limits here. + +Unless you have a very special reason, it is important to keep these file limit numbers low. Being automated, subscriptions typically run when you are not looking at the client, and if they go wrong, it is good to have some brakes to stop them going very wrong. + +First of all, making sure you only get a few dozen or hundred on the first check means you do not spend twenty minutes fetching all the search's thousands of file URLs that you may well have previously downloaded, but it is even more important for regular checks, where the sub is trying to find where it got to before: if a site changes its URL format (say from artistname.deviantart.com to deviantart.com/artistname) or changes its markup or otherwise starts delivering unusual results, the subscription may not realise it is seeing the wrong urls and will keep syncing until it hits its regular limit. If the periodic limit is 100, this is no big deal--you'll likely get a popup message out of it and might need to update the respective downloader--but if it were 60000 (or infinite, and the site were somehow serving you random/full results!), you could run into a huge problem completely by accident. + +Subscription sync searches are somewhat 'fragile' (they cannot pause/resume the gallery pagewalk, only completely cancel), so it is best if they are short--say, no more than five pages. 
It is better for a sub to pick up a small number of new files every few weeks than trying to catch up in a giant rush once a year. + +If you are not experienced with subscriptions, I strongly suggest you set these to something like 100 for the first check and 100 thereafter, which is likely your default. This works great for typical artist and character queries. + +If you want to get all of an artist's files from a site, use the manual gallery download page first. A good routine is to check that you have the right search text and it all works correctly and that you know what tags you want, and then once that big queue is fully downloaded and synced, start a new sub with the same settings to continue checking for anything posted in future.''' + + help_button = ClientGUICommon.BetterBitmapButton( self._file_limits_panel, CC.global_pixmaps().help, QW.QMessageBox.information, None, 'Information', message ) + + help_hbox_1 = ClientGUICommon.WrapInText( help_button, self._file_limits_panel, 'help about file limits -->', QG.QColor( 0, 0, 255 ) ) + + message = '''****Hitting the normal/periodic limit may or may not be a big deal**** + +If one of your subscriptions hits the file limit just doing a normal sync, you will get a little popup telling you. It is likely because of: + +1) The query has not run in a while, or many new files were suddenly posted, so the backlog of to-be-synced files has built up. + +2) The site has changed how it formats file post urls, so the subscription thinks it is seeing new files when it truly is not. + +If 1 is true, you might want to increase its periodic limit a little, or speed up its checking times, and fill in whatever gap of files you are missing with a manual download page. + +But if 2 is true--and is also perhaps accompanied by many 'could not parse' errors--the maintainer for the site's download parser (hydrus dev or whoever) would be interested in knowing what has happened so they can roll out a fix.''' + + help_button = ClientGUICommon.BetterBitmapButton( self._file_limits_panel, CC.global_pixmaps().help, QW.QMessageBox.information, None, 'Information', message ) + + help_hbox_2 = ClientGUICommon.WrapInText( help_button, self._file_limits_panel, 'help about hitting the normal file limit -->', QG.QColor( 0, 0, 255 ) ) + + if HG.client_controller.new_options.GetBoolean( 'advanced_mode' ): + + limits_max = 10000 + + else: + + limits_max = 1000 + + + self._initial_file_limit = QP.MakeQSpinBox( self._file_limits_panel, min=1, max=limits_max ) + self._initial_file_limit.setToolTip( 'The first sync will add no more than this many URLs.' ) + + self._periodic_file_limit = QP.MakeQSpinBox( self._file_limits_panel, min=1, max=limits_max ) + self._periodic_file_limit.setToolTip( 'Normal syncs will add no more than this many URLs, stopping early if they find several URLs the query has seen before.' ) + + self._file_presentation_panel = ClientGUICommon.StaticBox( self, 'presentation' ) + + self._show_a_popup_while_working = QW.QCheckBox( self._file_presentation_panel ) + self._show_a_popup_while_working.setToolTip( 'Careful with this! Leave it on to begin with, just in case it goes wrong!'
) + + self._publish_files_to_popup_button = QW.QCheckBox( self._file_presentation_panel ) + self._publish_files_to_page = QW.QCheckBox( self._file_presentation_panel ) + self._publish_label_override = ClientGUICommon.NoneableTextCtrl( self._file_presentation_panel, none_phrase = 'no, use subscription name' ) + self._merge_query_publish_events = QW.QCheckBox( self._file_presentation_panel ) + + tt = 'This is great to merge multiple subs to a combined location!' + + self._publish_label_override.setToolTip( tt ) + + tt = 'If unchecked, each query will produce its own \'subscription_name: query\' button or page.' + + self._merge_query_publish_events.setToolTip( tt ) + + # + + self._control_panel = ClientGUICommon.StaticBox( self, 'control' ) + + self._paused = QW.QCheckBox( self._control_panel ) + + # + + show_downloader_options = True + + self._checker_options = ClientGUIImport.CheckerOptionsButton( self, checker_options, update_callable = self._CheckerOptionsUpdated ) + self._file_import_options = ClientGUIImport.FileImportOptionsButton( self, file_import_options, show_downloader_options ) + self._tag_import_options = ClientGUIImport.TagImportOptionsButton( self, tag_import_options, show_downloader_options, allow_default_selection = True ) + + # + + self._name.setText( name ) + + self._query_headers.AddDatas( query_headers ) + + self._query_headers.Sort() + + self._initial_file_limit.setValue( initial_file_limit ) + self._periodic_file_limit.setValue( periodic_file_limit ) + + ( show_a_popup_while_working, publish_files_to_popup_button, publish_files_to_page, publish_label_override, merge_query_publish_events ) = subscription.GetPresentationOptions() + + self._show_a_popup_while_working.setChecked( show_a_popup_while_working ) + self._publish_files_to_popup_button.setChecked( publish_files_to_popup_button ) + self._publish_files_to_page.setChecked( publish_files_to_page ) + self._publish_label_override.SetValue( publish_label_override ) + self._merge_query_publish_events.setChecked( merge_query_publish_events ) + + self._paused.setChecked( paused ) + + # + + self._query_panel.Add( self._gug_key_and_name, CC.FLAGS_EXPAND_PERPENDICULAR ) + self._query_panel.Add( queries_panel, CC.FLAGS_EXPAND_BOTH_WAYS ) + + # + + rows = [] + + rows.append( ( 'on first check, get at most this many files: ', self._initial_file_limit ) ) + rows.append( ( 'on normal checks, get at most this many newer files: ', self._periodic_file_limit ) ) + + gridbox = ClientGUICommon.WrapInGrid( self._file_limits_panel, rows ) + + self._file_limits_panel.Add( help_hbox_1, CC.FLAGS_EXPAND_PERPENDICULAR ) + self._file_limits_panel.Add( help_hbox_2, CC.FLAGS_EXPAND_PERPENDICULAR ) + self._file_limits_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR ) + + # + + rows = [] + + rows.append( ( 'show a popup while working: ', self._show_a_popup_while_working ) ) + rows.append( ( 'publish new files to a popup button: ', self._publish_files_to_popup_button ) ) + rows.append( ( 'publish new files to a page: ', self._publish_files_to_page ) ) + rows.append( ( 'publish to a specific label: ', self._publish_label_override ) ) + rows.append( ( 'publish all queries to the same page/popup button: ', self._merge_query_publish_events ) ) + + gridbox = ClientGUICommon.WrapInGrid( self._file_presentation_panel, rows ) + + self._file_presentation_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR ) + + # + + rows = [] + + rows.append( ( 'currently paused: ', self._paused ) ) + + gridbox = ClientGUICommon.WrapInGrid( self._control_panel, 
rows ) + + self._control_panel.Add( gridbox, CC.FLAGS_LONE_BUTTON ) + + # + + vbox = QP.VBoxLayout() + + QP.AddToLayout( vbox, ClientGUICommon.WrapInText(self._name,self,'name: '), CC.FLAGS_EXPAND_SIZER_PERPENDICULAR ) + QP.AddToLayout( vbox, self._delay_st, CC.FLAGS_EXPAND_PERPENDICULAR ) + QP.AddToLayout( vbox, self._query_panel, CC.FLAGS_EXPAND_BOTH_WAYS ) + QP.AddToLayout( vbox, self._control_panel, CC.FLAGS_EXPAND_PERPENDICULAR ) + QP.AddToLayout( vbox, self._file_limits_panel, CC.FLAGS_EXPAND_PERPENDICULAR ) + QP.AddToLayout( vbox, self._file_presentation_panel, CC.FLAGS_EXPAND_PERPENDICULAR ) + QP.AddToLayout( vbox, self._checker_options, CC.FLAGS_EXPAND_PERPENDICULAR ) + QP.AddToLayout( vbox, self._file_import_options, CC.FLAGS_EXPAND_PERPENDICULAR ) + QP.AddToLayout( vbox, self._tag_import_options, CC.FLAGS_EXPAND_PERPENDICULAR ) + + self.widget().setLayout( vbox ) + + self._UpdateDelayText() + + + def _AddQuery( self ): + + gug_key_and_name = self._gug_key_and_name.GetValue() + + initial_search_text = HG.client_controller.network_engine.domain_manager.GetInitialSearchText( gug_key_and_name ) + + query_header = ClientImportSubscriptionQuery.SubscriptionQueryHeader() + + query_header.SetQueryText( initial_search_text ) + + query_log_container = ClientImportSubscriptionQuery.SubscriptionQueryLogContainer( query_header.GetQueryLogContainerName() ) + + with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit subscription query' ) as dlg: + + panel = EditSubscriptionQueryPanel( dlg, query_header, query_log_container ) + + dlg.SetPanel( panel ) + + if dlg.exec() == QW.QDialog.Accepted: + + ( query_header, query_log_container ) = panel.GetValue() + + query_text = query_header.GetQueryText() + + if query_text in self._GetCurrentQueryTexts(): + + QW.QMessageBox.warning( self, 'Warning', 'You already have a query for "{}", so nothing new has been added.'.format( query_text ) ) + + return + + + self._query_headers.AddDatas( ( query_header, ) ) + + self._names_to_edited_query_log_containers[ query_log_container.GetName() ] = query_log_container + + + + + def _CATCHQueryLogContainers( self, query_log_containers: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQueryLogContainer ], action_call: HydrusData.Call ): + + self.setEnabled( True ) + + for query_log_container in query_log_containers: + + self._names_to_edited_query_log_containers[ query_log_container.GetName() ] = query_log_container + + + action_call() + + + def _CheckerOptionsUpdated( self, checker_options ): + + checker_options = self._checker_options.GetValue() + + for query_header in self._query_headers.GetData(): + + query_log_container_name = query_header.GetQueryLogContainerName() + + if query_log_container_name in self._names_to_edited_query_log_containers: + + query_log_container = self._names_to_edited_query_log_containers[ query_log_container_name ] + + query_header.SyncToQueryLogContainer( checker_options, query_log_container ) + + else: + + query_header.SetQueryLogContainerStatus( ClientImportSubscriptionQuery.LOG_CONTAINER_UNSYNCED ) + + + + self._query_headers.UpdateDatas() + + + def _CheckNow( self ): + + selected_queries = self._query_headers.GetData( only_selected = True ) + + for query_header in selected_queries: + + query_header.CheckNow() + + + self._query_headers.UpdateDatas( selected_queries ) + + self._query_headers.Sort() + + self._no_work_until = 0 + + self._UpdateDelayText() + + + def _ConvertQueryHeaderToListCtrlTuples( self, query_header: ClientImportSubscriptionQuery.SubscriptionQueryHeader 
): + + last_check_time = query_header.GetLastCheckTime() + next_check_time = query_header.GetNextCheckTime() + paused = query_header.IsPaused() + checker_status = query_header.GetCheckerStatus() + + name = query_header.GetHumanName() + pretty_name = name + + if paused: + + pretty_paused = 'yes' + + else: + + pretty_paused = '' + + + if checker_status == ClientImporting.CHECKER_STATUS_OK: + + pretty_status = 'ok' + + else: + + pretty_status = 'dead' + + + file_seed_cache_status = query_header.GetFileSeedCacheStatus() + + latest_new_file_time = file_seed_cache_status.GetLatestAddedTime() + + if latest_new_file_time is None or latest_new_file_time == 0: + + pretty_latest_new_file_time = 'n/a' + + else: + + pretty_latest_new_file_time = HydrusData.TimestampToPrettyTimeDelta( latest_new_file_time ) + + + if last_check_time is None or last_check_time == 0: + + pretty_last_check_time = '(initial check has not yet occurred)' + + else: + + pretty_last_check_time = HydrusData.TimestampToPrettyTimeDelta( last_check_time ) + + + pretty_next_check_time = query_header.GetNextCheckStatusString() + + checker_options = self._checker_options.GetValue() + + ( file_velocity, pretty_file_velocity ) = query_header.GetFileVelocityInfo() + + file_velocity = tuple( file_velocity ) # for sorting, list/tuple -> tuple + + try: + + estimate = query_header.GetBandwidthWaitingEstimate( HG.client_controller.network_engine.bandwidth_manager, self._original_subscription.GetName() ) + + if estimate == 0: + + pretty_delay = '' + delay = 0 + + else: + + pretty_delay = 'bandwidth: ' + HydrusData.TimeDeltaToPrettyTimeDelta( estimate ) + delay = estimate + + + except: + + pretty_delay = 'could not determine bandwidth--there may be a problem with some of the urls in this query' + delay = 0 + + + ( num_done, num_total ) = file_seed_cache_status.GetValueRange() + + items = ( num_total, num_done ) + + pretty_items = file_seed_cache_status.GetStatusText( simple = True ) + + sort_latest_new_file_time = ClientGUIListCtrl.SafeNoneInt( latest_new_file_time ) + sort_last_check_time = ClientGUIListCtrl.SafeNoneInt( last_check_time ) + sort_next_check_time = ClientGUIListCtrl.SafeNoneInt( next_check_time ) + + display_tuple = ( pretty_name, pretty_paused, pretty_status, pretty_latest_new_file_time, pretty_last_check_time, pretty_next_check_time, pretty_file_velocity, pretty_delay, pretty_items ) + sort_tuple = ( name, paused, checker_status, sort_latest_new_file_time, sort_last_check_time, sort_next_check_time, file_velocity, delay, items ) + + return ( display_tuple, sort_tuple ) + + + def _CopyQueries( self ): + + query_texts = [] + + for query_header in self._query_headers.GetData( only_selected = True ): + + query_texts.append( query_header.GetQueryText() ) + + + clipboard_text = os.linesep.join( query_texts ) + + if len( clipboard_text ) > 0: + + HG.client_controller.pub( 'clipboard', 'text', clipboard_text ) + + + + def _DoAsyncGetQueryLogContainers( self, query_headers: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ], call: HydrusData.Call ): + + missing_query_headers = [ query_header for query_header in query_headers if query_header.GetQueryLogContainerName() not in self._names_to_edited_query_log_containers ] + + if len( missing_query_headers ) > 0: + + self.setEnabled( False ) + + HG.client_controller.CallToThread( AsyncGetQueryLogContainers, self, query_headers, self._CATCHQueryLogContainers, call ) + + else: + + call() + + + + def _EditQuery( self ): + + selected_query_headers = 
self._query_headers.GetData( only_selected = True ) + + for old_query_header in selected_query_headers: + + query_log_container_name = old_query_header.GetQueryLogContainerName() + + if query_log_container_name not in self._names_to_edited_query_log_containers: + + try: + + old_query_log_container = HG.client_controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER, query_log_container_name ) + + except HydrusExceptions.DataMissing: + + QW.QMessageBox.critical( self, 'Error', 'Some data for this query, "{}", was missing! This should have been dealt with when the dialog launched, so something is very wrong! Please exit the manage subscriptions dialog immediately, pause your subs, and contact hydrus dev!'.format( old_query_header.GetHumanName() ) ) + + return + + + self._names_to_edited_query_log_containers[ query_log_container_name ] = old_query_log_container + + + old_query_log_container = self._names_to_edited_query_log_containers[ query_log_container_name ] + + with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit subscription query' ) as dlg: + + panel = EditSubscriptionQueryPanel( dlg, old_query_header, old_query_log_container ) + + dlg.SetPanel( panel ) + + if dlg.exec() == QW.QDialog.Accepted: + + ( edited_query_header, edited_query_log_container ) = panel.GetValue() + + edited_query_header.SyncToQueryLogContainer( self._checker_options.GetValue(), edited_query_log_container ) + + edited_query_text = edited_query_header.GetQueryText() + + if edited_query_text != old_query_header.GetQueryText() and edited_query_text in self._GetCurrentQueryTexts(): + + QW.QMessageBox.warning( self, 'Warning', 'You already have a query for "'+edited_query_text+'"! The edit you just made will not be saved.' ) + + break + + + self._query_headers.DeleteDatas( ( old_query_header, ) ) + + self._query_headers.AddDatas( ( edited_query_header, ) ) + + self._names_to_edited_query_log_containers[ query_log_container_name ] = edited_query_log_container + + else: + + break + + + + + self._query_headers.Sort() + + + def _GetCurrentQueryTexts( self ): + + query_strings = set() + + for query_header in self._query_headers.GetData(): + + query_strings.add( query_header.GetQueryText() ) + + + return query_strings + + + def _STARTCopyQualityInfo( self ): + + self.setEnabled( False ) + + query_headers = self._query_headers.GetData( only_selected = True ) + + HG.client_controller.CallToThread( AsyncGetQueryHeadersQualityInfo, self, query_headers, self._CopyQualityInfo ) + + + def _CopyQualityInfo( self, data ): + + self.setEnabled( True ) + + data_strings = [] + + for ( name, num_inbox, num_archived, num_deleted ) in data: + + if num_archived + num_deleted > 0: + + percent = HydrusData.ConvertFloatToPercentage( num_archived / ( num_archived + num_deleted ) ) + + else: + + percent = '0.0%' + + + data_string = '{},{},{},{},{}'.format( name, HydrusData.ToHumanInt( num_inbox ), HydrusData.ToHumanInt( num_archived ), HydrusData.ToHumanInt( num_deleted ), percent ) + + data_strings.append( data_string ) + + + text = os.linesep.join( data_strings ) + + HG.client_controller.pub( 'clipboard', 'text', text ) + + + def _STARTShowQualityInfo( self ): + + self.setEnabled( False ) + + query_headers = self._query_headers.GetData( only_selected = True ) + + HG.client_controller.CallToThread( AsyncGetQueryHeadersQualityInfo, self, query_headers, self._ShowQualityInfo ) + + + def _ShowQualityInfo( self, data ): + + self.setEnabled( True ) + + data_strings = [] + + for ( name, num_inbox, num_archived, num_deleted ) in data: +
+ data_string = '{}: inbox {} | archive {} | deleted {}'.format( name, HydrusData.ToHumanInt( num_inbox ), HydrusData.ToHumanInt( num_archived ), HydrusData.ToHumanInt( num_deleted ) ) + + if num_archived + num_deleted > 0: + + data_string += ' | good {}'.format( HydrusData.ConvertFloatToPercentage( num_archived / ( num_archived + num_deleted ) ) ) + + + data_strings.append( data_string ) + + + message = os.linesep.join( data_strings ) + + QW.QMessageBox.information( self, 'Information', message ) + + + def _ListCtrlCanCheckNow( self ): + + for query_header in self._query_headers.GetData( only_selected = True ): + + if query_header.CanCheckNow(): + + return True + + + + return False + + + def _ListCtrlCanResetCache( self ): + + for query_header in self._query_headers.GetData( only_selected = True ): + + if not query_header.IsInitialSync(): + + return True + + + + return False + + + def _ListCtrlCanRetryFailed( self ): + + for query_header in self._query_headers.GetData( only_selected = True ): + + if query_header.CanRetryFailed(): + + return True + + + + return False + + + def _ListCtrlCanRetryIgnored( self ): + + for query_header in self._query_headers.GetData( only_selected = True ): + + if query_header.CanRetryIgnored(): + + return True + + + + return False + + + def _PasteQueries( self ): + + message = 'This will add new queries by pulling them from your clipboard. It assumes they are currently in your clipboard and newline separated. Is that ok?' + + result = ClientGUIDialogsQuick.GetYesNo( self, message ) + + if result != QW.QDialog.Accepted: + + return + + + try: + + text = HG.client_controller.GetClipboardText() + + except HydrusExceptions.DataMissing as e: + + QW.QMessageBox.critical( self, 'Error', str(e) ) + + return + + + try: + + query_texts = HydrusText.DeserialiseNewlinedTexts( text ) + + current_query_texts = self._GetCurrentQueryTexts() + + already_existing_query_texts = sorted( current_query_texts.intersection( query_texts ) ) + new_query_texts = sorted( set( query_texts ).difference( current_query_texts ) ) + + if len( already_existing_query_texts ) > 0: + + if len( already_existing_query_texts ) > 50: + + message = '{} queries were already in the subscription, so they need not be added.'.format( HydrusData.ToHumanInt( len( already_existing_query_texts ) ) ) + + else: + + if len( already_existing_query_texts ) > 5: + + aeqt_separator = ', ' + + else: + + aeqt_separator = os.linesep + + + message = 'The queries:' + message += os.linesep * 2 + message += aeqt_separator.join( already_existing_query_texts ) + message += os.linesep * 2 + message += 'Were already in the subscription, so they need not be added.' + + + if len( new_query_texts ) > 0: + + if len( new_query_texts ) > 50: + + message = '{} queries were new and will be added.'.format( HydrusData.ToHumanInt( len( new_query_texts ) ) ) + + else: + + if len( new_query_texts ) > 5: + + nqt_separator = ', ' + + else: + + nqt_separator = os.linesep + + + message += os.linesep * 2 + message += 'The queries:' + message += os.linesep * 2 + message += nqt_separator.join( new_query_texts ) + message += os.linesep * 2 + message += 'Were new and will be added.' 
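# A small worked example of the clipboard dedupe above, assuming the clipboard
# held newline-separated query texts (the data here is made up):

current_query_texts = { 'samus_aran', 'metroid' }

clipboard_text = 'samus_aran\nridley\nmother_brain'

# HydrusText.DeserialiseNewlinedTexts boils down to roughly this: split on
# newlines and discard empty lines
query_texts = [ line.strip() for line in clipboard_text.splitlines() if line.strip() != '' ]

already_existing_query_texts = sorted( current_query_texts.intersection( query_texts ) )
new_query_texts = sorted( set( query_texts ).difference( current_query_texts ) )

print( already_existing_query_texts ) # [ 'samus_aran' ]
print( new_query_texts ) # [ 'mother_brain', 'ridley' ]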
+ + + + QW.QMessageBox.information( self, 'Information', message ) + + + query_headers = [] + + for query_text in query_texts: + + query_header = ClientImportSubscriptionQuery.SubscriptionQueryHeader() + + query_header.SetQueryText( query_text ) + + query_headers.append( query_header ) + + query_log_container_name = query_header.GetQueryLogContainerName() + + query_log_container = ClientImportSubscriptionQuery.SubscriptionQueryLogContainer( query_log_container_name ) + + self._names_to_edited_query_log_containers[ query_log_container_name ] = query_log_container + + + self._query_headers.AddDatas( query_headers ) + + except: + + QW.QMessageBox.critical( self, 'Error', 'I could not understand what was in the clipboard' ) + + + + def _PausePlay( self ): + + selected_query_headers = self._query_headers.GetData( only_selected = True ) + + for query_header in selected_query_headers: + + query_header.PausePlay() + + + self._query_headers.UpdateDatas( selected_query_headers ) + + + def _STARTReset( self ): + + message = 'Resetting these queries will delete all their cached urls, meaning when the subscription next runs, they will have to download all those links over again. This may be expensive in time and data. Only do this if you know what it means. Do you want to do it?' + + result = ClientGUIDialogsQuick.GetYesNo( self, message ) + + if result == QW.QDialog.Accepted: + + selected_query_headers = self._query_headers.GetData( only_selected = True ) + + call = HydrusData.Call( self._Reset, selected_query_headers ) + + self._DoAsyncGetQueryLogContainers( selected_query_headers, call ) + + + + def _Reset( self, query_headers ): + + for query_header in query_headers: + + query_log_container_name = query_header.GetQueryLogContainerName() + + if query_log_container_name not in self._names_to_edited_query_log_containers: + + continue + + + query_log_container = self._names_to_edited_query_log_containers[ query_log_container_name ] + + query_header.Reset( query_log_container ) + + + self._query_headers.UpdateDatas( query_headers ) + + + def _STARTRetryFailed( self ): + + selected_query_headers = self._query_headers.GetData( only_selected = True ) + + query_headers = [ query_header for query_header in selected_query_headers if query_header.CanRetryFailed() ] + + call = HydrusData.Call( self._RetryFailed, query_headers ) + + self._DoAsyncGetQueryLogContainers( query_headers, call ) + + + def _RetryFailed( self, query_headers: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ] ): + + for query_header in query_headers: + + query_log_container_name = query_header.GetQueryLogContainerName() + + if query_log_container_name not in self._names_to_edited_query_log_containers: + + continue + + + query_log_container = self._names_to_edited_query_log_containers[ query_log_container_name ] + + query_log_container.GetFileSeedCache().RetryFailed() + + query_header.UpdateFileStatus( query_log_container ) + + + self._query_headers.UpdateDatas( query_headers ) + + self._no_work_until = 0 + + self._UpdateDelayText() + + + def _STARTRetryIgnored( self ): + + selected_query_headers = self._query_headers.GetData( only_selected = True ) + + query_headers = [ query_header for query_header in selected_query_headers if query_header.CanRetryIgnored() ] + + call = HydrusData.Call( self._RetryIgnored, query_headers ) + + self._DoAsyncGetQueryLogContainers( query_headers, call ) + + + def _RetryIgnored( self, query_headers: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ] ): + + for 
query_header in query_headers: + + query_log_container_name = query_header.GetQueryLogContainerName() + + if query_log_container_name not in self._names_to_edited_query_log_containers: + + continue + + + query_log_container = self._names_to_edited_query_log_containers[ query_log_container_name ] + + query_log_container.GetFileSeedCache().RetryIgnored() + + query_header.UpdateFileStatus( query_log_container ) + + + self._query_headers.UpdateDatas( query_headers ) + + self._no_work_until = 0 + + self._UpdateDelayText() + + + def _UpdateDelayText( self ): + + if HydrusData.TimeHasPassed( self._no_work_until ): + + status = 'no recent errors' + + else: + + status = 'delayed--retrying ' + HydrusData.TimestampToPrettyTimeDelta( self._no_work_until, just_now_threshold = 0 ) + ' because: ' + self._no_work_until_reason + + + self._delay_st.setText( status ) + + + def GetValue( self ) -> ClientImportSubscriptions.Subscription: + + name = self._name.text() + + subscription = ClientImportSubscriptions.Subscription( name ) + + gug_key_and_name = self._gug_key_and_name.GetValue() + + initial_file_limit = self._initial_file_limit.value() + periodic_file_limit = self._periodic_file_limit.value() + + paused = self._paused.isChecked() + + checker_options = self._checker_options.GetValue() + file_import_options = self._file_import_options.GetValue() + tag_import_options = self._tag_import_options.GetValue() + + query_headers = self._query_headers.GetData() + + subscription.SetTuple( gug_key_and_name, checker_options, initial_file_limit, periodic_file_limit, paused, file_import_options, tag_import_options, self._no_work_until ) + + subscription.SetQueryHeaders( query_headers ) + + show_a_popup_while_working = self._show_a_popup_while_working.isChecked() + publish_files_to_popup_button = self._publish_files_to_popup_button.isChecked() + publish_files_to_page = self._publish_files_to_page.isChecked() + publish_label_override = self._publish_label_override.GetValue() + merge_query_publish_events = self._merge_query_publish_events.isChecked() + + subscription.SetPresentationOptions( show_a_popup_while_working, publish_files_to_popup_button, publish_files_to_page, publish_label_override, merge_query_publish_events ) + + return ( subscription, self._names_to_edited_query_log_containers ) + + +class EditSubscriptionQueryPanel( ClientGUIScrolledPanels.EditPanel ): + + def __init__( self, parent: QW.QWidget, query_header: ClientImportSubscriptionQuery.SubscriptionQueryHeader, query_log_container: ClientImportSubscriptionQuery.SubscriptionQueryLogContainer ): + + ClientGUIScrolledPanels.EditPanel.__init__( self, parent ) + + self._original_query_header = query_header + self._original_query_log_container = query_log_container + + query_header = query_header.Duplicate() + query_log_container = query_log_container.Duplicate() + + self._status_st = ClientGUICommon.BetterStaticText( self ) + + st_width = ClientGUIFunctions.ConvertTextToPixelWidth( self._status_st, 50 ) + + self._status_st.setMinimumWidth( st_width ) + + self._display_name = ClientGUICommon.NoneableTextCtrl( self, none_phrase = 'show query text' ) + self._query_text = QW.QLineEdit( self ) + self._check_now = QW.QCheckBox( self ) + self._paused = QW.QCheckBox( self ) + + self._file_seed_cache_control = ClientGUIFileSeedCache.FileSeedCacheStatusControl( self, HG.client_controller ) + + self._gallery_seed_log_control = ClientGUIGallerySeedLog.GallerySeedLogStatusControl( self, HG.client_controller, True, True ) + + tag_import_options = 
query_header.GetTagImportOptions() + show_downloader_options = False # just for additional tags, no parsing gubbins needed + + self._tag_import_options = ClientGUIImport.TagImportOptionsButton( self, tag_import_options, show_downloader_options ) + + # + + display_name = query_header.GetDisplayName() + query_text = query_header.GetQueryText() + check_now = query_header.IsCheckingNow() + paused = query_header.IsPaused() + + self._display_name.SetValue( display_name ) + + self._query_text.setText( query_text ) + + self._check_now.setChecked( check_now ) + + self._paused.setChecked( paused ) + + self._file_seed_cache = query_log_container.GetFileSeedCache() + + self._file_seed_cache_control.SetFileSeedCache( self._file_seed_cache ) + + self._gallery_seed_log = query_log_container.GetGallerySeedLog() + + self._gallery_seed_log_control.SetGallerySeedLog( self._gallery_seed_log ) + + # + + rows = [] + + rows.append( ( 'optional display name: ', self._display_name ) ) + rows.append( ( 'query text: ', self._query_text ) ) + rows.append( ( 'check now: ', self._check_now ) ) + rows.append( ( 'paused: ', self._paused ) ) + + gridbox = ClientGUICommon.WrapInGrid( self, rows ) + + vbox = QP.VBoxLayout() + + QP.AddToLayout( vbox, self._status_st, CC.FLAGS_EXPAND_PERPENDICULAR ) + QP.AddToLayout( vbox, self._file_seed_cache_control, CC.FLAGS_EXPAND_PERPENDICULAR ) + QP.AddToLayout( vbox, self._gallery_seed_log_control, CC.FLAGS_EXPAND_PERPENDICULAR ) + QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR ) + QP.AddToLayout( vbox, self._tag_import_options, CC.FLAGS_EXPAND_PERPENDICULAR ) + + self.widget().setLayout( vbox ) + + # + + self._check_now.clicked.connect( self._UpdateStatus ) + self._paused.clicked.connect( self._UpdateStatus ) + + self._UpdateStatus() + + self._query_text.selectAll() + + HG.client_controller.CallAfterQtSafe( self._query_text, self._query_text.setFocus, QC.Qt.OtherFocusReason ) + + + def _GetValue( self ) -> typing.Tuple[ ClientImportSubscriptionQuery.SubscriptionQueryHeader, ClientImportSubscriptionQuery.SubscriptionQueryLogContainer ]: + + query_header = self._original_query_header.Duplicate() + + query_header.SetQueryText( self._query_text.text() ) + + query_header.SetPaused( self._paused.isChecked() ) + + query_header.SetCheckNow( self._check_now.isChecked() ) + + query_header.SetDisplayName( self._display_name.GetValue() ) + + query_header.SetTagImportOptions( self._tag_import_options.GetValue() ) + + query_log_container = self._original_query_log_container.Duplicate() + + query_log_container.SetFileSeedCache( self._file_seed_cache ) + query_log_container.SetGallerySeedLog( self._gallery_seed_log ) + + return ( query_header, query_log_container ) + + + def _UpdateStatus( self ): + + ( query_header, query_log_container ) = self._GetValue() + + self._status_st.setText( 'next check: {}'.format( query_header.GetNextCheckStatusString() ) ) + + + def GetValue( self ) -> typing.Tuple[ ClientImportSubscriptionQuery.SubscriptionQueryHeader, ClientImportSubscriptionQuery.SubscriptionQueryLogContainer ]: + + return self._GetValue() + + +class EditSubscriptionsPanel( ClientGUIScrolledPanels.EditPanel ): + + def __init__( self, parent: QW.QWidget, subscriptions: typing.Iterable[ ClientImportSubscriptions.Subscription ], subs_are_globally_paused: bool = False ): + + subscriptions = [ subscription.Duplicate() for subscription in subscriptions ] + + ClientGUIScrolledPanels.EditPanel.__init__( self, parent ) + + self._existing_query_log_container_names = set() + + for 
subscription in subscriptions: + + self._existing_query_log_container_names.update( subscription.GetAllQueryLogContainerNames() ) + + + self._names_to_edited_query_log_containers = {} + + # + + menu_items = [] + + page_func = HydrusData.Call( ClientPaths.LaunchPathInWebBrowser, os.path.join( HC.HELP_DIR, 'getting_started_subscriptions.html' ) ) + + menu_items.append( ( 'normal', 'open the html subscriptions help', 'Open the help page for subscriptions in your web browser.', page_func ) ) + + help_button = ClientGUICommon.MenuBitmapButton( self, CC.global_pixmaps().help, menu_items ) + + help_hbox = ClientGUICommon.WrapInText( help_button, self, 'help for this panel -->', QG.QColor( 0, 0, 255 ) ) + + subscriptions_panel = ClientGUIListCtrl.BetterListCtrlPanel( self ) + + columns = [ ( 'name', -1 ), ( 'source', 20 ), ( 'query status', 25 ), ( 'last new file time', 20 ), ( 'last checked', 20 ), ( 'recent error/delay?', 20 ), ( 'items', 13 ), ( 'paused', 8 ) ] + + self._subscriptions = ClientGUIListCtrl.BetterListCtrl( subscriptions_panel, 'subscriptions', 12, 20, columns, self._ConvertSubscriptionToListCtrlTuples, use_simple_delete = True, activation_callback = self.Edit ) + + subscriptions_panel.SetListCtrl( self._subscriptions ) + + subscriptions_panel.AddButton( 'add', self.Add ) + subscriptions_panel.AddButton( 'edit', self.Edit, enabled_only_on_selection = True ) + subscriptions_panel.AddDeleteButton() + + subscriptions_panel.AddSeparator() + + # disabled for now + #subscriptions_panel.AddImportExportButtons( ( ClientImportSubscriptions.Subscription, ), self._AddSubscription ) + + subscriptions_panel.NewButtonRow() + + subscriptions_panel.AddButton( 'merge', self.Merge, enabled_check_func = self._CanMerge ) + subscriptions_panel.AddButton( 'separate', self.Separate, enabled_check_func = self._CanSeparate ) + + subscriptions_panel.AddSeparator() + + subscriptions_panel.AddButton( 'pause/resume', self.PauseResume, enabled_only_on_selection = True ) + subscriptions_panel.AddButton( 'retry failed', self._STARTRetryFailed, enabled_check_func = self._CanRetryFailed ) + subscriptions_panel.AddButton( 'retry ignored', self._STARTRetryIgnored, enabled_check_func = self._CanRetryIgnored ) + subscriptions_panel.AddButton( 'scrub delays', self.ScrubDelays, enabled_check_func = self._CanScrubDelays ) + subscriptions_panel.AddButton( 'check queries now', self.CheckNow, enabled_check_func = self._CanCheckNow ) + + subscriptions_panel.AddButton( 'reset', self._STARTReset, enabled_check_func = self._CanReset ) + + subscriptions_panel.NewButtonRow() + + subscriptions_panel.AddButton( 'select subscriptions', self.SelectSubscriptions ) + subscriptions_panel.AddButton( 'overwrite checker timings', self.SetCheckerOptions, enabled_only_on_selection = True ) + subscriptions_panel.AddButton( 'overwrite tag import options', self.SetTagImportOptions, enabled_only_on_selection = True ) + + # + + self._subscriptions.AddDatas( subscriptions ) + + self._subscriptions.Sort( 0 ) + + # + + vbox = QP.VBoxLayout() + + QP.AddToLayout( vbox, help_hbox, CC.FLAGS_BUTTON_SIZER ) + + if subs_are_globally_paused: + + message = 'SUBSCRIPTIONS ARE CURRENTLY GLOBALLY PAUSED! CHECK THE NETWORK MENU TO UNPAUSE THEM.' 
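# The merge/separate buttons wired up above are gated by _CanMerge and
# _CanSeparate, defined a little further down. _CanMerge is a simple pigeonhole
# check: if the selected subscriptions have fewer unique downloader (GUG) names
# than there are subscriptions, at least two must share a downloader, so a
# merge is possible. A minimal sketch with hypothetical ( name, gug_name ) data:

selected = [ ( 'sub A', 'safebooru tag search' ), ( 'sub B', 'safebooru tag search' ), ( 'sub C', 'gelbooru tag search' ) ]

unique_gug_names = { gug_name for ( name, gug_name ) in selected }

can_merge = len( unique_gug_names ) < len( selected ) # True: sub A and sub B share a downloader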
+ + st = ClientGUICommon.BetterStaticText( self, message ) + st.setObjectName( 'HydrusWarning' ) + + QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR ) + + + QP.AddToLayout( vbox, subscriptions_panel, CC.FLAGS_EXPAND_BOTH_WAYS ) + + self.widget().setLayout( vbox ) + + + def _AddSubscription( self, subscription ): + + subscription.SetNonDupeName( self._GetExistingNames() ) + + self._subscriptions.AddDatas( ( subscription, ) ) + + + def _CanCheckNow( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + return True in ( subscription.CanCheckNow() for subscription in subscriptions ) + + + def _CanMerge( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + # only subs with queries can be merged + + mergeable_subscriptions = [ subscription for subscription in subscriptions if len( subscription.GetQueryHeaders() ) > 0 ] + + unique_gug_names = { subscription.GetGUGKeyAndName()[1] for subscription in mergeable_subscriptions } + + # if there are fewer, there must be dupes, so we must be able to merge + + return len( unique_gug_names ) < len( subscriptions ) + + + def _CanReset( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + return True in ( subscription.CanReset() for subscription in subscriptions ) + + + def _CanRetryFailed( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + return True in ( subscription.CanRetryFailed() for subscription in subscriptions ) + + + def _CanRetryIgnored( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + return True in ( subscription.CanRetryIgnored() for subscription in subscriptions ) + + + def _CanScrubDelays( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + return True in ( subscription.CanScrubDelay() for subscription in subscriptions ) + + + def _CanSeparate( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + if len( subscriptions ) != 1: + + return False + + + subscription = subscriptions[0] + + if len( subscription.GetQueryHeaders() ) > 1: + + return True + + + return False + + + def _CATCHQueryLogContainers( self, query_log_containers: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQueryLogContainer ], action_call: HydrusData.Call ): + + self.setEnabled( True ) + + for query_log_container in query_log_containers: + + self._names_to_edited_query_log_containers[ query_log_container.GetName() ] = query_log_container + + + action_call() + + + def _ConvertSubscriptionToListCtrlTuples( self, subscription ): + + ( name, gug_key_and_name, query_headers, checker_options, initial_file_limit, periodic_file_limit, paused, file_import_options, tag_import_options, no_work_until, no_work_until_reason ) = subscription.ToTuple() + + pretty_site = gug_key_and_name[1] + + period = 100 + pretty_period = 'fix this' + + if len( query_headers ) > 0: + + latest_new_file_time = max( ( query_header.GetLatestAddedTime() for query_header in query_headers ) ) + + last_checked = max( ( query_header.GetLastCheckTime() for query_header in query_headers ) ) + + else: + + latest_new_file_time = 0 + + last_checked = 0 + + + if latest_new_file_time is None or latest_new_file_time == 0: + + pretty_latest_new_file_time = 'n/a' + + else: + + pretty_latest_new_file_time = HydrusData.TimestampToPrettyTimeDelta( latest_new_file_time ) + + + if last_checked is None or last_checked == 0: + + pretty_last_checked = 'n/a' + + else: + + pretty_last_checked = 
HydrusData.TimestampToPrettyTimeDelta( last_checked ) + + + # + + num_queries = len( query_headers ) + num_dead = 0 + num_paused = 0 + + for query_header in query_headers: + + if query_header.IsDead(): + + num_dead += 1 + + elif query_header.IsPaused(): + + num_paused += 1 + + + + num_ok = num_queries - ( num_dead + num_paused ) + + status = ( num_queries, num_paused, num_dead ) + + if num_queries == 0: + + pretty_status = 'no queries' + + else: + + status_components = [ HydrusData.ToHumanInt( num_ok ) + ' working' ] + + if num_paused > 0: + + status_components.append( HydrusData.ToHumanInt( num_paused ) + ' paused' ) + + + if num_dead > 0: + + status_components.append( HydrusData.ToHumanInt( num_dead ) + ' dead' ) + + + pretty_status = ', '.join( status_components ) + + + # + + if HydrusData.TimeHasPassed( no_work_until ): + + try: + + ( min_estimate, max_estimate ) = subscription.GetBandwidthWaitingEstimateMinMax( HG.client_controller.network_engine.bandwidth_manager ) + + if max_estimate == 0: # don't seem to be any delays of any kind + + pretty_delay = '' + delay = 0 + + elif min_estimate == 0: # some are good to go, but there are delays + + pretty_delay = 'bandwidth: some ok, some up to ' + HydrusData.TimeDeltaToPrettyTimeDelta( max_estimate ) + delay = max_estimate + + else: + + if min_estimate == max_estimate: # probably just one query, and it is delayed + + pretty_delay = 'bandwidth: up to ' + HydrusData.TimeDeltaToPrettyTimeDelta( max_estimate ) + delay = max_estimate + + else: + + pretty_delay = 'bandwidth: from ' + HydrusData.TimeDeltaToPrettyTimeDelta( min_estimate ) + ' to ' + HydrusData.TimeDeltaToPrettyTimeDelta( max_estimate ) + delay = max_estimate + + + + except: + + pretty_delay = 'could not determine bandwidth, there may be an error with the sub or its urls' + delay = 0 + + + else: + + pretty_delay = 'delayed--retrying ' + HydrusData.TimestampToPrettyTimeDelta( no_work_until, just_now_threshold = 0 ) + ' - because: ' + no_work_until_reason + delay = HydrusData.GetTimeDeltaUntilTime( no_work_until ) + + + file_seed_cache_status = ClientImportSubscriptionQuery.GenerateQueryHeadersStatus( query_headers ) + + ( num_done, num_total ) = file_seed_cache_status.GetValueRange() + + items = ( num_total, num_done ) + + pretty_items = file_seed_cache_status.GetStatusText( simple = True ) + + if paused: + + pretty_paused = 'yes' + + else: + + pretty_paused = '' + + + sort_latest_new_file_time = ClientGUIListCtrl.SafeNoneInt( latest_new_file_time ) + sort_last_checked = ClientGUIListCtrl.SafeNoneInt( last_checked ) + + display_tuple = ( name, pretty_site, pretty_status, pretty_latest_new_file_time, pretty_last_checked, pretty_delay, pretty_items, pretty_paused ) + sort_tuple = ( name, pretty_site, status, sort_latest_new_file_time, sort_last_checked, delay, items, paused ) + + return ( display_tuple, sort_tuple ) + + + def _DoAsyncGetQueryLogContainers( self, query_headers: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ], call: HydrusData.Call ): + + missing_query_headers = [ query_header for query_header in query_headers if query_header.GetQueryLogContainerName() not in self._names_to_edited_query_log_containers ] + + if len( missing_query_headers ) > 0: + + self.setEnabled( False ) + + HG.client_controller.CallToThread( AsyncGetQueryLogContainers, self, query_headers, self._CATCHQueryLogContainers, call ) + + else: + + call() + + + + def _GetExistingNames( self ): + + subscriptions = self._subscriptions.GetData() + + names = { subscription.GetName() for 
subscription in subscriptions } + + return names + + + def _GetExportObject( self ): + + to_export = HydrusSerialisable.SerialisableList() + + for subscription in self._subscriptions.GetData( only_selected = True ): + + to_export.append( subscription ) + + + if len( to_export ) == 0: + + return None + + elif len( to_export ) == 1: + + return to_export[0] + + else: + + return to_export + + + + def _ImportObject( self, obj ): + + if isinstance( obj, HydrusSerialisable.SerialisableList ): + + for sub_obj in obj: + + self._ImportObject( sub_obj ) + + + else: + + if isinstance( obj, ClientImportSubscriptions.Subscription ): + + subscription = obj + + subscription.SetNonDupeName( self._GetExistingNames() ) + + self._subscriptions.AddDatas( ( subscription, ) ) + + else: + + QW.QMessageBox.warning( self, 'Warning', 'That was not a subscription--it was a: '+type(obj).__name__ ) + + + + + def _STARTReset( self ): + + message = 'Resetting these subscriptions will delete all their remembered urls, meaning when they next run, they will try to download them all over again. This may be expensive in time and data. Only do it if you are willing to wait. Do you want to do it?' + + result = ClientGUIDialogsQuick.GetYesNo( self, message ) + + if result == QW.QDialog.Accepted: + + query_headers = [] + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + for subscription in subscriptions: + + query_headers.extend( subscription.GetQueryHeaders() ) + + + call = HydrusData.Call( self._Reset, query_headers ) + + self._DoAsyncGetQueryLogContainers( query_headers, call ) + + + + def _Reset( self, query_headers: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ] ): + + for query_header in query_headers: + + query_log_container_name = query_header.GetQueryLogContainerName() + + if query_log_container_name not in self._names_to_edited_query_log_containers: + + continue + + + query_log_container = self._names_to_edited_query_log_containers[ query_log_container_name ] + + query_header.Reset( query_log_container ) + + + self._subscriptions.UpdateDatas() + + + def _STARTRetryFailed( self ): + + query_headers = [] + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + for subscription in subscriptions: + + query_headers.extend( subscription.GetQueryHeaders() ) + + + query_headers = [ query_header for query_header in query_headers if query_header.CanRetryFailed() ] + + call = HydrusData.Call( self._RetryFailed, query_headers ) + + self._DoAsyncGetQueryLogContainers( query_headers, call ) + + + def _RetryFailed( self, query_headers: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ] ): + + for query_header in query_headers: + + query_log_container_name = query_header.GetQueryLogContainerName() + + if query_log_container_name not in self._names_to_edited_query_log_containers: + + continue + + + query_log_container = self._names_to_edited_query_log_containers[ query_log_container_name ] + + query_log_container.GetFileSeedCache().RetryFailed() + + query_header.UpdateFileStatus( query_log_container ) + + + self._subscriptions.UpdateDatas() + + + def _STARTRetryIgnored( self ): + + query_headers = [] + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + for subscription in subscriptions: + + query_headers.extend( subscription.GetQueryHeaders() ) + + + query_headers = [ query_header for query_header in query_headers if query_header.CanRetryIgnored() ] + + call = HydrusData.Call( self._RetryIgnored, query_headers ) + + 
self._DoAsyncGetQueryLogContainers( query_headers, call ) + + + def _RetryIgnored( self, query_headers: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ] ): + + for query_header in query_headers: + + query_log_container_name = query_header.GetQueryLogContainerName() + + if query_log_container_name not in self._names_to_edited_query_log_containers: + + continue + + + query_log_container = self._names_to_edited_query_log_containers[ query_log_container_name ] + + query_log_container.GetFileSeedCache().RetryIgnored() + + query_header.UpdateFileStatus( query_log_container ) + + + self._subscriptions.UpdateDatas() + + + def Add( self ): + + gug_key_and_name = HG.client_controller.network_engine.domain_manager.GetDefaultGUGKeyAndName() + + empty_subscription = ClientImportSubscriptions.Subscription( 'new subscription', gug_key_and_name = gug_key_and_name ) + + frame_key = 'edit_subscription_dialog' + + with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit subscription', frame_key ) as dlg_edit: + + panel = EditSubscriptionPanel( dlg_edit, empty_subscription, self._names_to_edited_query_log_containers ) + + dlg_edit.SetPanel( panel ) + + if dlg_edit.exec() == QW.QDialog.Accepted: + + ( new_subscription, self._names_to_edited_query_log_containers ) = panel.GetValue() + + self._AddSubscription( new_subscription ) + + self._subscriptions.Sort() + + + + + def CheckNow( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + for subscription in subscriptions: + + subscription.CheckNow() + + + self._subscriptions.UpdateDatas( subscriptions ) + + + def Edit( self ): + + subs_to_edit = self._subscriptions.GetData( only_selected = True ) + + for subscription in subs_to_edit: + + frame_key = 'edit_subscription_dialog' + + with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit subscription', frame_key ) as dlg: + + original_name = subscription.GetName() + + panel = EditSubscriptionPanel( dlg, subscription, self._names_to_edited_query_log_containers ) + + dlg.SetPanel( panel ) + + result = dlg.exec() + + if result == QW.QDialog.Accepted: + + self._subscriptions.DeleteDatas( ( subscription, ) ) + + ( edited_subscription, self._names_to_edited_query_log_containers ) = panel.GetValue() + + edited_subscription.SetNonDupeName( self._GetExistingNames() ) + + self._subscriptions.AddDatas( ( edited_subscription, ) ) + + elif dlg.WasCancelled(): + + break + + + + + self._subscriptions.Sort() + + + def GetValue( self ) -> typing.List[ ClientImportSubscriptions.Subscription ]: + + subscriptions = self._subscriptions.GetData() + + edited_query_log_containers = list( self._names_to_edited_query_log_containers.values() ) + + new_query_log_container_names = set() + + for subscription in subscriptions: + + new_query_log_container_names.update( subscription.GetAllQueryLogContainerNames() ) + + + deletee_query_log_container_names = self._existing_query_log_container_names.difference( new_query_log_container_names ) + + return ( subscriptions, edited_query_log_containers, deletee_query_log_container_names ) + + + def Merge( self ): + + message = 'Are you sure you want to merge the selected subscriptions? This will combine all selected subscriptions that share the same downloader, wrapping all their different queries into one subscription.' + message += os.linesep * 2 + message += 'This is a big operation, so if it does not do what you expect, hit cancel afterwards!' 
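# a minimal sketch of the grouping pass the Merge code below performs, assuming
# GetMergeable treats subscriptions as mergeable when they share the same downloader;
# the key function and the tuples here are illustrative only.

def group_mergeables( subs, key ):
    
    mergeable_groups = []
    unmergeable = []
    
    pool = list( subs )
    
    while len( pool ) > 0:
        
        # take a potential primary, then split the rest into mergeable/not-mergeable
        primary = pool.pop()
        
        mergeable = [ sub for sub in pool if key( sub ) == key( primary ) ]
        pool = [ sub for sub in pool if key( sub ) != key( primary ) ]
        
        if len( mergeable ) > 0:
            
            mergeable_groups.append( [ primary ] + mergeable )
            
        else:
            
            unmergeable.append( primary )
            
        
    
    return ( mergeable_groups, unmergeable )
    

( groups, leftovers ) = group_mergeables( [ ( 'danbooru', 'sub 1' ), ( 'gelbooru', 'sub 2' ), ( 'danbooru', 'sub 3' ) ], key = lambda sub: sub[0] )

# groups == [ [ ( 'danbooru', 'sub 3' ), ( 'danbooru', 'sub 1' ) ] ] and leftovers == [ ( 'gelbooru', 'sub 2' ) ]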
+ message += os.linesep * 2 + message += 'Please note that all other subscription settings (like paused status and file limits and tag options) will be merged as well, so double-check your merged subs\' settings afterwards.' + + result = ClientGUIDialogsQuick.GetYesNo( self, message ) + + if result == QW.QDialog.Accepted: + + original_subs = self._subscriptions.GetData( only_selected = True ) + + potential_mergees = [ sub.Duplicate() for sub in original_subs ] + + mergeable_groups = [] + merged_subs = [] + unmergeable_subs = [] + + while len( potential_mergees ) > 0: + + potential_primary = potential_mergees.pop() + + ( mergeables_with_our_primary, not_mergeable_with_our_primary ) = potential_primary.GetMergeable( potential_mergees ) + + if len( mergeables_with_our_primary ) > 0: + + mergeable_group = [] + + mergeable_group.append( potential_primary ) + mergeable_group.extend( mergeables_with_our_primary ) + + mergeable_groups.append( mergeable_group ) + + else: + + unmergeable_subs.append( potential_primary ) + + + potential_mergees = not_mergeable_with_our_primary + + + if len( mergeable_groups ) == 0: + + QW.QMessageBox.information( self, 'Information', 'Unfortunately, none of those subscriptions appear to be mergeable!' ) + + return + + + for mergeable_group in mergeable_groups: + + mergeable_group.sort( key = lambda sub: sub.GetName() ) + + choice_tuples = [ ( sub.GetName(), sub ) for sub in mergeable_group ] + + try: + + primary_sub = ClientGUIDialogsQuick.SelectFromList( self, 'select the primary subscription--into which to merge the others', choice_tuples ) + + except HydrusExceptions.CancelledException: + + return + + + mergeable_group.remove( primary_sub ) + + unmerged = primary_sub.Merge( mergeable_group ) + + unmergeable_subs.extend( unmerged ) + + primary_sub_name = primary_sub.GetName() + + message = primary_sub_name + ' was able to merge ' + HydrusData.ToHumanInt( len( mergeable_group ) ) + ' other subscriptions. If you wish to change its name, do so here.' + + with ClientGUIDialogs.DialogTextEntry( self, message, default = primary_sub_name ) as dlg: + + if dlg.exec() == QW.QDialog.Accepted: + + name = dlg.GetValue() + + primary_sub.SetName( name ) + + + # don't care about a cancel here--we'll take that as 'I didn't want to change its name', not 'abort' + + + merged_subs.append( primary_sub ) + + + # we are ready to do it + + self._subscriptions.DeleteDatas( original_subs ) + + self._subscriptions.AddDatas( unmergeable_subs ) + + for merged_sub in merged_subs: + + merged_sub.SetNonDupeName( self._GetExistingNames() ) + + self._subscriptions.AddDatas( ( merged_sub, ) ) + + + self._subscriptions.Sort() + + + + def PauseResume( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + for subscription in subscriptions: + + subscription.PauseResume() + + + self._subscriptions.UpdateDatas( subscriptions ) + + + def ScrubDelays( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + for subscription in subscriptions: + + subscription.ScrubDelay() + + + self._subscriptions.UpdateDatas( subscriptions ) + + + def SelectSubscriptions( self ): + + message = 'This selects subscriptions based on query text. Please enter some search text, and any subscription that has a query that includes that text will be selected.'
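# a minimal sketch of the selection test used just below, assuming
# HasQuerySearchTextFragment does a case-insensitive substring check over a
# subscription's query texts, as the message above describes; this helper and
# its name are hypothetical.

def has_query_search_text_fragment( query_texts, search_text ):
    
    search_text = search_text.lower()
    
    return any( search_text in query_text.lower() for query_text in query_texts )
    

print( has_query_search_text_fragment( [ 'blue_eyes', 'blonde_hair' ], 'blue' ) ) # True
print( has_query_search_text_fragment( [ 'blue_eyes' ], 'samus' ) ) # False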
+ + with ClientGUIDialogs.DialogTextEntry( self, message ) as dlg: + + if dlg.exec() == QW.QDialog.Accepted: + + search_text = dlg.GetValue() + + self._subscriptions.clearSelection() + + selectee_subscriptions = [] + + for subscription in self._subscriptions.GetData(): + + if subscription.HasQuerySearchTextFragment( search_text ): + + selectee_subscriptions.append( subscription ) + + + + self._subscriptions.SelectDatas( selectee_subscriptions ) + + + + + def Separate( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + if len( subscriptions ) != 1: + + QW.QMessageBox.critical( self, 'Error', 'Separate only works if one subscription is selected!' ) + + return + + + subscription = subscriptions[0] + + num_queries = len( subscription.GetQueryHeaders() ) + + if num_queries <= 1: + + QW.QMessageBox.critical( self, 'Error', 'Separate only works if the selected subscription has more than one query!' ) + + return + + + if num_queries > 100: + + message = 'This is a large subscription. It is difficult to separate it on a per-query basis, so instead the system will automatically cut it into two halves. Is this ok?' + + result = ClientGUIDialogsQuick.GetYesNo( self, message ) + + if result != QW.QDialog.Accepted: + + return + + + action = 'half' + + elif num_queries > 2: + + message = 'Are you sure you want to separate the selected subscriptions? Separating breaks merged subscriptions apart into smaller pieces.' + yes_tuples = [ ( 'break it in half', 'half' ), ( 'break it all into single-query subscriptions', 'whole' ), ( 'only extract some of the subscription', 'part' ) ] + + with ClientGUIDialogs.DialogYesYesNo( self, message, yes_tuples = yes_tuples, no_label = 'forget it' ) as dlg: + + if dlg.exec() == QW.QDialog.Accepted: + + action = dlg.GetValue() + + else: + + return + + + + else: + + action = 'whole' + + + want_post_merge = False + + if action == 'part': + + query_headers = subscription.GetQueryHeaders() + + choice_tuples = [ ( query_header.GetHumanName(), query_header, False ) for query_header in query_headers ] + + with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'select the queries to extract' ) as dlg: + + panel = ClientGUIScrolledPanelsEdit.EditChooseMultiple( dlg, choice_tuples ) + + dlg.SetPanel( panel ) + + if dlg.exec() == QW.QDialog.Accepted: + + query_headers_to_extract = panel.GetValue() + + else: + + return + + + + if len( query_headers_to_extract ) == num_queries: # the madman selected them all + + action = 'whole' + + elif len( query_headers_to_extract ) > 1: + + yes_tuples = [ ( 'one new merged subscription', True ), ( 'many subscriptions with only one query', False ) ] + + message = 'Do you want the extracted queries to be a new merged subscription, or many subscriptions with only one query?' + + with ClientGUIDialogs.DialogYesYesNo( self, message, yes_tuples = yes_tuples, no_label = 'forget it' ) as dlg: + + if dlg.exec() == QW.QDialog.Accepted: + + want_post_merge = dlg.GetValue() + + else: + + return + + + + + + if action != 'half': + + if want_post_merge: + + message = 'Please enter the name for the new subscription.' + + else: + + message = 'Please enter the base name for the new subscriptions. They will be named \'[NAME]: query\'.' 
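# a minimal sketch of the '[NAME]: query' naming scheme the message above describes,
# assuming Separate names each extracted single-query subscription this way; the
# helper is illustrative only.

def name_extracted_subscriptions( base_name, query_texts ):
    
    return [ '{}: {}'.format( base_name, query_text ) for query_text in query_texts ]
    

print( name_extracted_subscriptions( 'my artists', [ 'artist a', 'artist b' ] ) )
# -> [ 'my artists: artist a', 'my artists: artist b' ]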
+ + + with ClientGUIDialogs.DialogTextEntry( self, message, default = subscription.GetName() ) as dlg: + + if dlg.exec() == QW.QDialog.Accepted: + + name = dlg.GetValue() + + else: + + return + + + + + # ok, let's do it + + final_subscriptions = [] + + self._subscriptions.DeleteDatas( ( subscription, ) ) + + if action == 'whole': + + final_subscriptions.extend( subscription.Separate( name ) ) + + elif action == 'part': + + extracted_subscriptions = list( subscription.Separate( name, query_headers_to_extract ) ) + + if want_post_merge: + + # it is ok to do a blind merge here since they all share the same settings and will get a new name + + primary_sub = extracted_subscriptions.pop() + + unmerged = primary_sub.Merge( extracted_subscriptions ) + + final_subscriptions.extend( unmerged ) + + primary_sub.SetName( name ) + + final_subscriptions.append( primary_sub ) + + else: + + final_subscriptions.extend( extracted_subscriptions ) + + + final_subscriptions.append( subscription ) + + elif action == 'half': + + query_headers = subscription.GetQueryHeaders() + + query_headers_to_extract = query_headers[ : len( query_headers ) // 2 ] + + name = subscription.GetName() + + extracted_subscriptions = list( subscription.Separate( name, query_headers_to_extract ) ) + + primary_sub = extracted_subscriptions.pop() + + unmerged = primary_sub.Merge( extracted_subscriptions ) + + final_subscriptions.extend( unmerged ) + + primary_sub.SetName( '{} (A)'.format( name ) ) + subscription.SetName( '{} (B)'.format( name ) ) + + final_subscriptions.append( primary_sub ) + final_subscriptions.append( subscription ) + + + for final_subscription in final_subscriptions: + + final_subscription.SetNonDupeName( self._GetExistingNames() ) + + self._subscriptions.AddDatas( ( final_subscription, ) ) + + + self._subscriptions.Sort() + + + def SetCheckerOptions( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + if len( subscriptions ) == 0: + + return + + + checker_options = subscriptions[0].GetCheckerOptions() + + with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit check timings' ) as dlg: + + panel = ClientGUITime.EditCheckerOptions( dlg, checker_options ) + + dlg.SetPanel( panel ) + + if dlg.exec() == QW.QDialog.Accepted: + + checker_options = panel.GetValue() + + for subscription in subscriptions: + + subscription.SetCheckerOptions( checker_options ) + + + self._subscriptions.UpdateDatas( subscriptions ) + + + + + def SetTagImportOptions( self ): + + subscriptions = self._subscriptions.GetData( only_selected = True ) + + if len( subscriptions ) == 0: + + return + + + tag_import_options = subscriptions[0].GetTagImportOptions() + show_downloader_options = True + + with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit tag import options' ) as dlg: + + panel = ClientGUIScrolledPanelsEdit.EditTagImportOptionsPanel( dlg, tag_import_options, show_downloader_options, allow_default_selection = True ) + + dlg.SetPanel( panel ) + + if dlg.exec() == QW.QDialog.Accepted: + + tag_import_options = panel.GetValue() + + for subscription in subscriptions: + + subscription.SetTagImportOptions( tag_import_options ) + + + self._subscriptions.UpdateDatas( subscriptions ) + + + + diff --git a/hydrus/client/gui/ClientGUITagSuggestions.py b/hydrus/client/gui/ClientGUITagSuggestions.py index c882152e..8f82dfaf 100644 --- a/hydrus/client/gui/ClientGUITagSuggestions.py +++ b/hydrus/client/gui/ClientGUITagSuggestions.py @@ -231,6 +231,11 @@ class RecentTagsPanel( QW.QWidget ): self._UpdateTagDisplay() + 
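# note on the hunk below: top-item selection moves out of _UpdateTagDisplay and into
# the fetch callback, so the first recent tag is pre-selected whenever fresh recent
# tags arrive, rather than only when the user already had a selection.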
if len( self._recent_tags.GetTags() ) > 0: + + self._recent_tags.SelectTopItem() + + recent_tags = HG.client_controller.Read( 'recent_tags', service_key ) @@ -242,17 +247,10 @@ class RecentTagsPanel( QW.QWidget ): def _UpdateTagDisplay( self ): - had_selection_before = len( self._recent_tags.GetSelectedTags() ) > 0 - tags = FilterSuggestedTagsForMedia( self._last_fetched_tags, self._media, self._service_key ) self._recent_tags.SetTags( tags ) - if had_selection_before and len( tags ) > 0: - - self._recent_tags.SelectTopItem() - - def EventClear( self ): diff --git a/hydrus/client/gui/ClientGUITags.py b/hydrus/client/gui/ClientGUITags.py index d6a65c9f..5c2ec179 100644 --- a/hydrus/client/gui/ClientGUITags.py +++ b/hydrus/client/gui/ClientGUITags.py @@ -344,11 +344,11 @@ class EditTagFilterPanel( ClientGUIScrolledPanels.EditPanel ): TEST_RESULT_DEFAULT = 'Enter a tag here to test if it passes the current filter:' TEST_RESULT_BLACKLIST_DEFAULT = 'Enter a tag here to test if it passes the current filter in a tag import options blacklist (siblings tested, unnamespaced rules match namespaced tags):' - def __init__( self, parent, tag_filter, prefer_blacklist = False, namespaces = None, message = None ): + def __init__( self, parent, tag_filter, only_show_blacklist = False, namespaces = None, message = None ): ClientGUIScrolledPanels.EditPanel.__init__( self, parent ) - self._prefer_blacklist = prefer_blacklist + self._only_show_blacklist = only_show_blacklist self._namespaces = namespaces self._wildcard_replacements = {} @@ -384,19 +384,19 @@ class EditTagFilterPanel( ClientGUIScrolledPanels.EditPanel ): # - if self._prefer_blacklist: + if self._only_show_blacklist: + self._whitelist_panel.setVisible( False ) self._notebook.addTab( self._blacklist_panel, 'blacklist' ) - self._notebook.addTab( self._whitelist_panel, 'whitelist' ) + self._advanced_panel.setVisible( False ) else: self._notebook.addTab( self._whitelist_panel, 'whitelist' ) self._notebook.addTab( self._blacklist_panel, 'blacklist' ) + self._notebook.addTab( self._advanced_panel, 'advanced' ) - self._notebook.addTab( self._advanced_panel, 'advanced' ) - # self._redundant_st = ClientGUICommon.BetterStaticText( self, '', ellipsize_end = True ) @@ -423,7 +423,11 @@ class EditTagFilterPanel( ClientGUIScrolledPanels.EditPanel ): if message is not None: - QP.AddToLayout( vbox, ClientGUICommon.BetterStaticText(self,message), CC.FLAGS_EXPAND_PERPENDICULAR ) + st = ClientGUICommon.BetterStaticText( self, message ) + + st.setWordWrap( True ) + + QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR ) hbox = QP.HBoxLayout() @@ -592,6 +596,8 @@ class EditTagFilterPanel( ClientGUIScrolledPanels.EditPanel ): def _CleanTagSliceInput( self, tag_slice ): + tag_slice = tag_slice.lower().strip() + while '**' in tag_slice: tag_slice = tag_slice.replace( '**', '*' ) @@ -1246,9 +1252,16 @@ class EditTagFilterPanel( ClientGUIScrolledPanels.EditPanel ): tag_filter = self.GetValue() - pretty_tag_filter = tag_filter.ToPermittedString() + if self._only_show_blacklist: + + pretty_tag_filter = tag_filter.ToBlacklistString() + + else: + + pretty_tag_filter = 'currently keeping: {}'.format( tag_filter.ToPermittedString() ) + - self._current_filter_st.setText( 'currently keeping: '+pretty_tag_filter ) + self._current_filter_st.setText( pretty_tag_filter ) self._UpdateTest() @@ -1388,11 +1401,9 @@ class EditTagFilterPanel( ClientGUIScrolledPanels.EditPanel ): selection_tests = [] - if self._prefer_blacklist: + if self._only_show_blacklist: selection_tests.append( ( 
blacklist_possible, self._blacklist_panel ) ) - selection_tests.append( ( whitelist_possible, self._whitelist_panel ) ) - selection_tests.append( ( True, self._advanced_panel ) ) else: @@ -1738,7 +1749,7 @@ class ManageTagsPanel( ClientGUIScrolledPanels.ManagePanel ): menu_items = [] - check_manager = ClientGUICommon.CheckboxManagerOptions( 'add_parents_on_manage_tags' ) + check_manager = ClientGUICommon.CheckboxManagerCalls( self._FlipExpandParents, lambda: self._new_options.GetBoolean( 'add_parents_on_manage_tags' ) ) menu_items.append( ( 'check', 'auto-add entered tags\' parents on add/pend action', 'If checked, adding any tag that has parents will also add those parents.', check_manager ) ) @@ -2259,6 +2270,7 @@ class ManageTagsPanel( ClientGUIScrolledPanels.ManagePanel ): addee_action = HC.CONTENT_UPDATE_ADD removee_action = HC.CONTENT_UPDATE_DELETE + other_removee_action = HC.CONTENT_UPDATE_RESCIND_PEND reason = None content_updates = [] @@ -2266,6 +2278,7 @@ class ManageTagsPanel( ClientGUIScrolledPanels.ManagePanel ): for ( tag, hashes ) in removee_tags_to_hashes.items(): content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, removee_action, ( tag, hashes ), reason = reason ) ) + content_updates.append( HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, other_removee_action, ( tag, hashes ), reason = reason ) ) for ( tag, hashes ) in addee_tags_to_hashes.items(): @@ -2302,6 +2315,15 @@ class ManageTagsPanel( ClientGUIScrolledPanels.ManagePanel ): self._tags_box.SetTagsByMedia( self._media ) + def _FlipExpandParents( self ): + + value = not self._new_options.GetBoolean( 'add_parents_on_manage_tags' ) + + self._new_options.SetBoolean( 'add_parents_on_manage_tags', value ) + + self._add_tag_box.SetExpandParents( value ) + + def _FlipShowDeleted( self ): self._show_deleted = not self._show_deleted @@ -4258,13 +4280,13 @@ class ManageTagSiblings( ClientGUIScrolledPanels.ManagePanel ): class TagFilterButton( ClientGUICommon.BetterButton ): - def __init__( self, parent, message, tag_filter, is_blacklist = False, label_prefix = None ): + def __init__( self, parent, message, tag_filter, only_show_blacklist = False, label_prefix = None ): ClientGUICommon.BetterButton.__init__( self, parent, 'tag filter', self._EditTagFilter ) self._message = message self._tag_filter = tag_filter - self._is_blacklist = is_blacklist + self._only_show_blacklist = only_show_blacklist self._label_prefix = label_prefix self._UpdateLabel() @@ -4272,11 +4294,20 @@ class TagFilterButton( ClientGUICommon.BetterButton ): def _EditTagFilter( self ): - with ClientGUITopLevelWindowsPanels.DialogEdit( self, 'edit tag filter' ) as dlg: + if self._only_show_blacklist: + + title = 'edit blacklist' + + else: + + title = 'edit tag filter' + + + with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg: namespaces = HG.client_controller.network_engine.domain_manager.GetParserNamespaces() - panel = EditTagFilterPanel( dlg, self._tag_filter, prefer_blacklist = self._is_blacklist, namespaces = namespaces, message = self._message ) + panel = EditTagFilterPanel( dlg, self._tag_filter, only_show_blacklist = self._only_show_blacklist, namespaces = namespaces, message = self._message ) dlg.SetPanel( panel ) @@ -4291,7 +4322,7 @@ class TagFilterButton( ClientGUICommon.BetterButton ): def _UpdateLabel( self ): - if self._is_blacklist: + if self._only_show_blacklist: tt = self._tag_filter.ToBlacklistString() diff --git a/hydrus/client/importing/ClientImportFileSeeds.py 
b/hydrus/client/importing/ClientImportFileSeeds.py index 7303ef4b..076aaee2 100644 --- a/hydrus/client/importing/ClientImportFileSeeds.py +++ b/hydrus/client/importing/ClientImportFileSeeds.py @@ -1,5 +1,6 @@ import collections import os +import random import threading import time import traceback @@ -1443,6 +1444,219 @@ class FileSeed( HydrusSerialisable.SerialisableBase ): HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_FILE_SEED ] = FileSeed +class FileSeedCacheStatus( HydrusSerialisable.SerialisableBase ): + + SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_FILE_SEED_CACHE_STATUS + SERIALISABLE_NAME = 'Import File Status Cache Status' + SERIALISABLE_VERSION = 1 + + def __init__( self ): + + self._generation_time = HydrusData.GetNow() + self._statuses_to_counts = collections.Counter() + self._latest_added_time = 0 + + + def _GetSerialisableInfo( self ): + + serialisable_statuses_to_counts = list( self._statuses_to_counts.items() ) + + return ( self._generation_time, serialisable_statuses_to_counts, self._latest_added_time ) + + + def _InitialiseFromSerialisableInfo( self, serialisable_info ): + + ( self._generation_time, serialisable_statuses_to_counts, self._latest_added_time ) = serialisable_info + + self._statuses_to_counts = collections.Counter() + + self._statuses_to_counts.update( dict( serialisable_statuses_to_counts ) ) + + + def GetFileSeedCount( self, status: typing.Optional[ int ] = None ) -> int: + + if status is None: + + return sum( self._statuses_to_counts.values() ) + + else: + + return self._statuses_to_counts[ status ] + + + + def GetGenerationTime( self ) -> int: + + return self._generation_time + + + def GetLatestAddedTime( self ) -> int: + + return self._latest_added_time + + + def GetStatusText( self, simple = False ) -> str: + + num_successful_and_new = self._statuses_to_counts[ CC.STATUS_SUCCESSFUL_AND_NEW ] + num_successful_but_redundant = self._statuses_to_counts[ CC.STATUS_SUCCESSFUL_BUT_REDUNDANT ] + num_ignored = self._statuses_to_counts[ CC.STATUS_VETOED ] + num_deleted = self._statuses_to_counts[ CC.STATUS_DELETED ] + num_failed = self._statuses_to_counts[ CC.STATUS_ERROR ] + num_skipped = self._statuses_to_counts[ CC.STATUS_SKIPPED ] + num_unknown = self._statuses_to_counts[ CC.STATUS_UNKNOWN ] + + if simple: + + total = sum( self._statuses_to_counts.values() ) + + total_processed = total - num_unknown + + # + + status_text = '' + + if total > 0: + + if num_unknown > 0: + + status_text += HydrusData.ConvertValueRangeToPrettyString( total_processed, total ) + + else: + + status_text += HydrusData.ToHumanInt( total_processed ) + + + show_new_on_file_seed_short_summary = HG.client_controller.new_options.GetBoolean( 'show_new_on_file_seed_short_summary' ) + + if show_new_on_file_seed_short_summary and num_successful_and_new: + + status_text += ' - {}N'.format( HydrusData.ToHumanInt( num_successful_and_new ) ) + + + simple_status_strings = [] + + if num_ignored > 0: + + simple_status_strings.append( '{}Ig'.format( HydrusData.ToHumanInt( num_ignored ) ) ) + + + show_deleted_on_file_seed_short_summary = HG.client_controller.new_options.GetBoolean( 'show_deleted_on_file_seed_short_summary' ) + + if show_deleted_on_file_seed_short_summary and num_deleted > 0: + + simple_status_strings.append( '{}D'.format( HydrusData.ToHumanInt( num_deleted ) ) ) + + + if num_failed > 0: + + simple_status_strings.append( '{}F'.format( HydrusData.ToHumanInt( num_failed ) ) ) + + + if num_skipped > 0: + + 
simple_status_strings.append( '{}S'.format( HydrusData.ToHumanInt( num_skipped ) ) ) + + + if len( simple_status_strings ) > 0: + + status_text += ' - {}'.format( ''.join( simple_status_strings ) ) + + + + else: + + status_strings = [] + + num_successful = num_successful_and_new + num_successful_but_redundant + + if num_successful > 0: + + s = '{} successful'.format( HydrusData.ToHumanInt( num_successful ) ) + + if num_successful_and_new > 0: + + if num_successful_but_redundant > 0: + + s += ' ({} already in db)'.format( HydrusData.ToHumanInt( num_successful_but_redundant ) ) + + + else: + + s += ' (all already in db)' + + + status_strings.append( s ) + + + if num_ignored > 0: + + status_strings.append( '{} ignored'.format( HydrusData.ToHumanInt( num_ignored ) ) ) + + + if num_deleted > 0: + + status_strings.append( '{} previously deleted'.format( HydrusData.ToHumanInt( num_deleted ) ) ) + + + if num_failed > 0: + + status_strings.append( '{} failed'.format( HydrusData.ToHumanInt( num_failed ) ) ) + + + if num_skipped > 0: + + status_strings.append( '{} skipped'.format( HydrusData.ToHumanInt( num_skipped ) ) ) + + + status_text = ', '.join( status_strings ) + + + return status_text + + + def GetStatusesToCounts( self ) -> typing.Mapping[ int, int ]: + + return self._statuses_to_counts + + + def GetValueRange( self ) -> typing.Tuple[ int, int ]: + + total = sum( self._statuses_to_counts.values() ) + + num_unknown = self._statuses_to_counts[ CC.STATUS_UNKNOWN ] + + total_processed = total - num_unknown + + return ( total_processed, total ) + + + def HasWorkToDo( self ): + + ( num_done, num_total ) = self.GetValueRange() + + return num_done < num_total + + + def Merge( self, file_seed_cache_status: "FileSeedCacheStatus" ): + + self._latest_added_time = max( self._latest_added_time, file_seed_cache_status.GetLatestAddedTime() ) + self._statuses_to_counts.update( file_seed_cache_status.GetStatusesToCounts() ) + + + def SetStatusesToCounts( self, statuses_to_counts: typing.Mapping[ int, int ] ): + + self._statuses_to_counts = collections.Counter() + + self._statuses_to_counts.update( statuses_to_counts ) + + + def SetLatestAddedTime( self, latest_added_time: int ): + + self._latest_added_time = latest_added_time + + +HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_FILE_SEED_CACHE_STATUS ] = FileSeedCacheStatus + class FileSeedCache( HydrusSerialisable.SerialisableBase ): SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_FILE_SEED_CACHE @@ -1461,8 +1675,7 @@ class FileSeedCache( HydrusSerialisable.SerialisableBase ): self._file_seed_cache_key = HydrusData.GenerateKey() - self._status_cache = None - self._status_cache_generation_time = 0 + self._status_cache = FileSeedCacheStatus() self._status_dirty = True @@ -1476,8 +1689,12 @@ class FileSeedCache( HydrusSerialisable.SerialisableBase ): def _GenerateStatus( self ): - self._status_cache = GenerateStatusesToCountsStatus( self._GetStatusesToCounts() ) - self._status_cache_generation_time = HydrusData.GetNow() + fscs = FileSeedCacheStatus() + + fscs.SetLatestAddedTime( self._GetLatestAddedTime() ) + fscs.SetStatusesToCounts( self._GetStatusesToCounts() ) + + self._status_cache = fscs self._status_dirty = False @@ -1494,6 +1711,33 @@ class FileSeedCache( HydrusSerialisable.SerialisableBase ): + def _GetLatestAddedTime( self ): + + if len( self._file_seeds ) == 0: + + latest_timestamp = 0 + + else: + + latest_timestamp = max( ( file_seed.created for file_seed in self._file_seeds ) ) + + + return 
latest_timestamp + + + def _GetNextFileSeed( self, status: int ) -> typing.Optional[ FileSeed ]: + + for file_seed in self._file_seeds: + + if file_seed.status == status: + + return file_seed + + + + return None + + def _GetSerialisableInfo( self ): return self._file_seeds.GetSerialisableTuple() @@ -1912,12 +2156,13 @@ class FileSeedCache( HydrusSerialisable.SerialisableBase ): self._GenerateStatus() - ( status, simple_status, ( total_processed, total ) ) = self._status_cache + d[ 'status' ] = self._status_cache.GetStatusText() + d[ 'simple_status' ] = self._status_cache.GetStatusText( simple = True ) - d[ 'status' ] = status - d[ 'simple_status' ] = status - d[ 'total_processed' ] = total_processed - d[ 'total_to_process' ] = total + ( num_done, num_total ) = self._status_cache.GetValueRange() + + d[ 'total_processed' ] = num_done + d[ 'total_to_process' ] = num_total if not simple: @@ -1943,6 +2188,35 @@ class FileSeedCache( HydrusSerialisable.SerialisableBase ): return earliest_timestamp + def GetExampleFileSeed( self ): + + with self._lock: + + if len( self._file_seeds ) == 0: + + return None + + else: + + example_seed = self._GetNextFileSeed( CC.STATUS_UNKNOWN ) + + if example_seed is None: + + example_seed = random.choice( self._file_seeds[-10:] ) + + + if example_seed.file_seed_type == FILE_SEED_TYPE_HDD: + + return None + + else: + + return example_seed + + + + + def GetFileSeedCacheKey( self ): return self._file_seed_cache_key @@ -1999,21 +2273,6 @@ class FileSeedCache( HydrusSerialisable.SerialisableBase ): return hashes - def GetLatestAddedTime( self ): - - with self._lock: - - if len( self._file_seeds ) == 0: - - return 0 - - - latest_timestamp = max( ( file_seed.created for file_seed in self._file_seeds ) ) - - - return latest_timestamp - - def GetLatestSourceTime( self ): with self._lock: @@ -2029,21 +2288,13 @@ class FileSeedCache( HydrusSerialisable.SerialisableBase ): return latest_timestamp - def GetNextFileSeed( self, status ): + def GetNextFileSeed( self, status: int ): with self._lock: - for file_seed in self._file_seeds: - - if file_seed.status == status: - - return file_seed - - + return self._GetNextFileSeed( status ) - return None - def GetNumNewFilesSince( self, since: int ): @@ -2113,27 +2364,6 @@ class FileSeedCache( HydrusSerialisable.SerialisableBase ): - def GetStatusGenerationTime( self ): - - with self._lock: - - if self._status_dirty: - - return HydrusData.GetNow() - - - return self._status_cache_generation_time - - - - def GetStatusesToCounts( self ): - - with self._lock: - - return self._GetStatusesToCounts() - - - def GetValueRange( self ): with self._lock: @@ -2143,9 +2373,7 @@ class FileSeedCache( HydrusSerialisable.SerialisableBase ): self._GenerateStatus() - ( status, simple_status, ( total_processed, total ) ) = self._status_cache - - return ( total_processed, total ) + return self._status_cache.GetValueRange() @@ -2242,7 +2470,7 @@ class FileSeedCache( HydrusSerialisable.SerialisableBase ): self.RemoveFileSeeds( file_seeds_to_delete ) - def RetryFailures( self ): + def RetryFailed( self ): with self._lock: @@ -2281,143 +2509,20 @@ class FileSeedCache( HydrusSerialisable.SerialisableBase ): self._GenerateStatus() - ( status, simple_status, ( total_processed, total ) ) = self._status_cache - - return total_processed < total + return self._status_cache.HasWorkToDo() HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_FILE_SEED_CACHE ] = FileSeedCache -def GenerateFileSeedCacheStatus( file_seed_cache: 
FileSeedCache ): - - statuses_to_counts = file_seed_cache.GetStatusesToCounts() - - return GenerateStatusesToCountsStatus( statuses_to_counts ) - def GenerateFileSeedCachesStatus( file_seed_caches: typing.Iterable[ FileSeedCache ] ): - statuses_to_counts = collections.Counter() + fscs = FileSeedCacheStatus() for file_seed_cache in file_seed_caches: - statuses_to_counts.update( file_seed_cache.GetStatusesToCounts() ) + fscs.Merge( file_seed_cache.GetStatus() ) - return GenerateStatusesToCountsStatus( statuses_to_counts ) - -def GenerateStatusesToCountsStatus( statuses_to_counts: collections.Counter ): - - num_successful_and_new = statuses_to_counts[ CC.STATUS_SUCCESSFUL_AND_NEW ] - num_successful_but_redundant = statuses_to_counts[ CC.STATUS_SUCCESSFUL_BUT_REDUNDANT ] - num_ignored = statuses_to_counts[ CC.STATUS_VETOED ] - num_deleted = statuses_to_counts[ CC.STATUS_DELETED ] - num_failed = statuses_to_counts[ CC.STATUS_ERROR ] - num_skipped = statuses_to_counts[ CC.STATUS_SKIPPED ] - num_unknown = statuses_to_counts[ CC.STATUS_UNKNOWN ] - - status_strings = [] - - num_successful = num_successful_and_new + num_successful_but_redundant - - if num_successful > 0: - - s = HydrusData.ToHumanInt( num_successful ) + ' successful' - - if num_successful_and_new > 0: - - if num_successful_but_redundant > 0: - - s += ' (' + HydrusData.ToHumanInt( num_successful_but_redundant ) + ' already in db)' - - - else: - - s += ' (all already in db)' - - - status_strings.append( s ) - - - if num_ignored > 0: - - status_strings.append( HydrusData.ToHumanInt( num_ignored ) + ' ignored' ) - - - if num_deleted > 0: - - status_strings.append( HydrusData.ToHumanInt( num_deleted ) + ' previously deleted' ) - - - if num_failed > 0: - - status_strings.append( HydrusData.ToHumanInt( num_failed ) + ' failed' ) - - - if num_skipped > 0: - - status_strings.append( HydrusData.ToHumanInt( num_skipped ) + ' skipped' ) - - - status = ', '.join( status_strings ) - - # - - total = sum( statuses_to_counts.values() ) - - total_processed = total - num_unknown - - # - - simple_status = '' - - if total > 0: - - if num_unknown > 0: - - simple_status += HydrusData.ConvertValueRangeToPrettyString( total_processed, total ) - - else: - - simple_status += HydrusData.ToHumanInt( total_processed ) - - - show_new_on_file_seed_short_summary = HG.client_controller.new_options.GetBoolean( 'show_new_on_file_seed_short_summary' ) - - if show_new_on_file_seed_short_summary and num_successful_and_new: - - simple_status += ' - ' + HydrusData.ToHumanInt( num_successful_and_new ) + 'N' - - - simple_status_strings = [] - - if num_ignored > 0: - - simple_status_strings.append( HydrusData.ToHumanInt( num_ignored ) + 'Ig' ) - - - show_deleted_on_file_seed_short_summary = HG.client_controller.new_options.GetBoolean( 'show_deleted_on_file_seed_short_summary' ) - - if show_deleted_on_file_seed_short_summary and num_deleted > 0: - - simple_status_strings.append( HydrusData.ToHumanInt( num_deleted ) + 'D' ) - - - if num_failed > 0: - - simple_status_strings.append( HydrusData.ToHumanInt( num_failed ) + 'F' ) - - - if num_skipped > 0: - - simple_status_strings.append( HydrusData.ToHumanInt( num_skipped ) + 'S' ) - - - if len( simple_status_strings ) > 0: - - simple_status += ' - ' + ''.join( simple_status_strings ) - - - - return ( status, simple_status, ( total_processed, total ) ) + return fscs diff --git a/hydrus/client/importing/ClientImportGallery.py b/hydrus/client/importing/ClientImportGallery.py index 70f1fe1f..3fc926ec 100644 --- 
a/hydrus/client/importing/ClientImportGallery.py +++ b/hydrus/client/importing/ClientImportGallery.py @@ -715,7 +715,7 @@ class GalleryImport( HydrusSerialisable.SerialisableBase ): with self._lock: - self._file_seed_cache.RetryFailures() + self._file_seed_cache.RetryFailed() @@ -935,8 +935,7 @@ class MultipleGalleryImport( HydrusSerialisable.SerialisableBase ): self._gallery_import_keys_to_gallery_imports = {} self._status_dirty = True - self._status_cache = None - self._status_cache_generation_time = 0 + self._status_cache = ClientImportFileSeeds.FileSeedCacheStatus() self._last_time_imports_changed = HydrusData.GetNowPrecise() @@ -1024,7 +1023,6 @@ class MultipleGalleryImport( HydrusSerialisable.SerialisableBase ): self._status_cache = ClientImportFileSeeds.GenerateFileSeedCachesStatus( file_seed_caches ) self._status_dirty = False - self._status_cache_generation_time = HydrusData.GetNow() def _RemoveGalleryImport( self, gallery_import_key ): @@ -1285,7 +1283,7 @@ class MultipleGalleryImport( HydrusSerialisable.SerialisableBase ): - def GetTotalStatus( self ): + def GetTotalStatus( self ) -> ClientImportFileSeeds.FileSeedCacheStatus: with self._lock: @@ -1499,7 +1497,7 @@ class MultipleGalleryImport( HydrusSerialisable.SerialisableBase ): file_seed_cache = gallery_import.GetFileSeedCache() - if file_seed_cache.GetStatusGenerationTime() > self._status_cache_generation_time: # has there has been an update? + if file_seed_cache.GetStatus().GetGenerationTime() > self._status_cache.GetGenerationTime(): # has there been an update? self._SetDirty() diff --git a/hydrus/client/importing/ClientImportGallerySeeds.py b/hydrus/client/importing/ClientImportGallerySeeds.py index cee6e8d1..f33aa92e 100644 --- a/hydrus/client/importing/ClientImportGallerySeeds.py +++ b/hydrus/client/importing/ClientImportGallerySeeds.py @@ -1,9 +1,11 @@ import collections import itertools import os +import random import threading import time import traceback +import typing from hydrus.client import ClientConstants as CC from hydrus.client import ClientParsing @@ -557,7 +559,6 @@ class GallerySeedLog( HydrusSerialisable.SerialisableBase ): self._gallery_seed_log_key = HydrusData.GenerateKey() self._status_cache = None - self._status_cache_generation_time = 0 self._status_dirty = True @@ -574,11 +575,23 @@ class GallerySeedLog( HydrusSerialisable.SerialisableBase ): statuses_to_counts = self._GetStatusesToCounts() self._status_cache = GenerateGallerySeedLogStatus( statuses_to_counts ) - self._status_cache_generation_time = HydrusData.GetNow() self._status_dirty = False + def _GetNextGallerySeed( self, status: int ) -> typing.Optional[ GallerySeed ]: + + for gallery_seed in self._gallery_seeds: + + if gallery_seed.status == status: + + return gallery_seed + + + + return None + + def _GetStatusesToCounts( self ): statuses_to_counts = collections.Counter() @@ -775,21 +788,27 @@ class GallerySeedLog( HydrusSerialisable.SerialisableBase ): self.NotifyGallerySeedsUpdated( ( gallery_seed, ) ) - def GetNextGallerySeed( self, status ): + def GetExampleGallerySeed( self ): with self._lock: - for gallery_seed in self._gallery_seeds: + if len( self._gallery_seeds ) == 0: - if gallery_seed.status == status: + return None + + else: + + example_seed = self._GetNextGallerySeed( CC.STATUS_UNKNOWN ) + + if example_seed is None: - return gallery_seed + example_seed = random.choice( self._gallery_seeds[-10:] ) + return example_seed + - return None - def GetAPIInfoDict( self, simple ): @@ -863,6 +882,14 @@ class GallerySeedLog( 
HydrusSerialisable.SerialisableBase ): + def GetNextGallerySeed( self, status ): + + with self._lock: + + return self._GetNextGallerySeed( status ) + + + def GetStatus( self ): with self._lock: @@ -876,19 +903,6 @@ class GallerySeedLog( HydrusSerialisable.SerialisableBase ): - def GetStatusGenerationTime( self ): - - with self._lock: - - if self._status_dirty: - - return HydrusData.GetNow() - - - return self._status_cache_generation_time - - - def GetStatusesToCounts( self ): with self._lock: @@ -985,7 +999,7 @@ class GallerySeedLog( HydrusSerialisable.SerialisableBase ): self.NotifyGallerySeedsUpdated( new_gallery_seeds ) - def RetryFailures( self ): + def RetryFailed( self ): with self._lock: diff --git a/hydrus/client/importing/ClientImportSimpleURLs.py b/hydrus/client/importing/ClientImportSimpleURLs.py index 5ded141d..c50213f5 100644 --- a/hydrus/client/importing/ClientImportSimpleURLs.py +++ b/hydrus/client/importing/ClientImportSimpleURLs.py @@ -942,14 +942,6 @@ class URLsImport( HydrusSerialisable.SerialisableBase ): - def GetStatus( self ): - - with self._lock: - - return ( self._file_seed_cache.GetStatus(), self._paused ) - - - def GetValueRange( self ): with self._lock: diff --git a/hydrus/client/importing/ClientImportSubscriptionLegacy.py b/hydrus/client/importing/ClientImportSubscriptionLegacy.py new file mode 100644 index 00000000..ef33be63 --- /dev/null +++ b/hydrus/client/importing/ClientImportSubscriptionLegacy.py @@ -0,0 +1,1962 @@ +import os +import random +import time +import typing + +from hydrus.core import HydrusConstants as HC +from hydrus.core import HydrusData +from hydrus.core import HydrusExceptions +from hydrus.core import HydrusGlobals as HG +from hydrus.core import HydrusSerialisable +from hydrus.core import HydrusThreading +from hydrus.client import ClientConstants as CC +from hydrus.client import ClientDownloading +from hydrus.client import ClientThreading +from hydrus.client.importing import ClientImporting +from hydrus.client.importing import ClientImportFileSeeds +from hydrus.client.importing import ClientImportGallerySeeds +from hydrus.client.importing import ClientImportOptions +from hydrus.client.importing import ClientImportSubscriptions +from hydrus.client.importing import ClientImportSubscriptionQuery +from hydrus.client.networking import ClientNetworkingContexts +from hydrus.client.networking import ClientNetworkingJobs + +# this object is no longer used, it exists only to update to the new objects below +class SubscriptionQueryLegacy( HydrusSerialisable.SerialisableBase ): + + SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LEGACY + SERIALISABLE_NAME = 'Legacy Subscription Query' + SERIALISABLE_VERSION = 3 + + def __init__( self, query = 'query text' ): + + HydrusSerialisable.SerialisableBase.__init__( self ) + + self._query = query + self._display_name = None + self._check_now = False + self._last_check_time = 0 + self._next_check_time = 0 + self._paused = False + self._status = ClientImporting.CHECKER_STATUS_OK + self._gallery_seed_log = ClientImportGallerySeeds.GallerySeedLog() + self._file_seed_cache = ClientImportFileSeeds.FileSeedCache() + self._tag_import_options = ClientImportOptions.TagImportOptions() + + + def _GetExampleNetworkContexts( self, subscription_name ): + + file_seed = self._file_seed_cache.GetNextFileSeed( CC.STATUS_UNKNOWN ) + + subscription_key = self.GetNetworkJobSubscriptionKey( subscription_name ) + + if file_seed is None: + + return [ ClientNetworkingContexts.NetworkContext( 
CC.NETWORK_CONTEXT_SUBSCRIPTION, subscription_key ), ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT ] + + + url = file_seed.file_seed_data + + try: # if the url is borked for some reason + + example_nj = ClientNetworkingJobs.NetworkJobSubscription( subscription_key, 'GET', url ) + example_network_contexts = example_nj.GetNetworkContexts() + + except: + + return [ ClientNetworkingContexts.NetworkContext( CC.NETWORK_CONTEXT_SUBSCRIPTION, subscription_key ), ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT ] + + + return example_network_contexts + + + def _GetSerialisableInfo( self ): + + serialisable_gallery_seed_log = self._gallery_seed_log.GetSerialisableTuple() + serialisable_file_seed_cache = self._file_seed_cache.GetSerialisableTuple() + serialisable_tag_import_options = self._tag_import_options.GetSerialisableTuple() + + return ( self._query, self._display_name, self._check_now, self._last_check_time, self._next_check_time, self._paused, self._status, serialisable_gallery_seed_log, serialisable_file_seed_cache, serialisable_tag_import_options ) + + + def _InitialiseFromSerialisableInfo( self, serialisable_info ): + + ( self._query, self._display_name, self._check_now, self._last_check_time, self._next_check_time, self._paused, self._status, serialisable_gallery_seed_log, serialisable_file_seed_cache, serialisable_tag_import_options ) = serialisable_info + + self._gallery_seed_log = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_gallery_seed_log ) + self._file_seed_cache = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_file_seed_cache ) + self._tag_import_options = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_tag_import_options ) + + + def _UpdateSerialisableInfo( self, version, old_serialisable_info ): + + if version == 1: + + ( query, check_now, last_check_time, next_check_time, paused, status, serialisable_file_seed_cache ) = old_serialisable_info + + gallery_seed_log = ClientImportGallerySeeds.GallerySeedLog() + + serialisable_gallery_seed_log = gallery_seed_log.GetSerialisableTuple() + + new_serialisable_info = ( query, check_now, last_check_time, next_check_time, paused, status, serialisable_gallery_seed_log, serialisable_file_seed_cache ) + + return ( 2, new_serialisable_info ) + + + if version == 2: + + ( query, check_now, last_check_time, next_check_time, paused, status, serialisable_gallery_seed_log, serialisable_file_seed_cache ) = old_serialisable_info + + display_name = None + tag_import_options = ClientImportOptions.TagImportOptions() + + serialisable_tag_import_options = tag_import_options.GetSerialisableTuple() + + new_serialisable_info = ( query, display_name, check_now, last_check_time, next_check_time, paused, status, serialisable_gallery_seed_log, serialisable_file_seed_cache, serialisable_tag_import_options ) + + return ( 3, new_serialisable_info ) + + + + def BandwidthOK( self, subscription_name ): + + example_network_contexts = self._GetExampleNetworkContexts( subscription_name ) + + threshold = 90 + + bandwidth_ok = HG.client_controller.network_engine.bandwidth_manager.CanDoWork( example_network_contexts, threshold = threshold ) + + if HG.subscription_report_mode: + + HydrusData.ShowText( 'Query "' + self.GetHumanName() + '" bandwidth/domain test. 
Bandwidth ok: {}'.format( bandwidth_ok ) ) + + + return bandwidth_ok + + + def CanCheckNow( self ): + + return not self._check_now + + + def CanRetryFailed( self ): + + return self._file_seed_cache.GetFileSeedCount( CC.STATUS_ERROR ) > 0 + + + def CanRetryIgnored( self ): + + return self._file_seed_cache.GetFileSeedCount( CC.STATUS_VETOED ) > 0 + + + def CheckNow( self ): + + self._check_now = True + self._paused = False + + self._next_check_time = 0 + self._status = ClientImporting.CHECKER_STATUS_OK + + + def DomainOK( self ): + + file_seed = self._file_seed_cache.GetNextFileSeed( CC.STATUS_UNKNOWN ) + + if file_seed is None: + + return True + + + url = file_seed.file_seed_data + + domain_ok = HG.client_controller.network_engine.domain_manager.DomainOK( url ) + + if HG.subscription_report_mode: + + HydrusData.ShowText( 'Query "' + self.GetHumanName() + '" domain test. Domain ok: {}'.format( domain_ok ) ) + + + return domain_ok + + + def GetBandwidthWaitingEstimate( self, subscription_name ): + + example_network_contexts = self._GetExampleNetworkContexts( subscription_name ) + + ( estimate, bandwidth_network_context ) = HG.client_controller.network_engine.bandwidth_manager.GetWaitingEstimateAndContext( example_network_contexts ) + + return estimate + + + def GetDisplayName( self ): + + return self._display_name + + + def GetFileSeedCache( self ): + + return self._file_seed_cache + + + def GetGallerySeedLog( self ): + + return self._gallery_seed_log + + + def GetHumanName( self ): + + if self._display_name is None: + + return self._query + + else: + + return self._display_name + + + + def GetLastCheckTime( self ): + + return self._last_check_time + + + def GetLatestAddedTime( self ): + + return self._file_seed_cache.GetStatus().GetLatestAddedTime() + + + def GetNextCheckStatusString( self ): + + if self._check_now: + + return 'checking on dialog ok' + + elif self._status == ClientImporting.CHECKER_STATUS_DEAD: + + return 'dead, so not checking' + + else: + + if HydrusData.TimeHasPassed( self._next_check_time ): + + s = 'imminent' + + else: + + s = HydrusData.TimestampToPrettyTimeDelta( self._next_check_time ) + + + if self._paused: + + s = 'paused, but would be ' + s + + + return s + + + + def GetNextWorkTime( self, subscription_name ): + + if self.IsPaused(): + + return None + + + work_times = set() + + if self.HasFileWorkToDo(): + + try: + + file_bandwidth_estimate = self.GetBandwidthWaitingEstimate( subscription_name ) + + except: + + # this is tricky, but if there is a borked url in here causing trouble, we should let it run and error out immediately tbh + + file_bandwidth_estimate = 0 + + + if file_bandwidth_estimate == 0: + + work_times.add( 0 ) + + else: + + file_work_time = HydrusData.GetNow() + file_bandwidth_estimate + + work_times.add( file_work_time ) + + + + if not self.IsDead(): + + work_times.add( self._next_check_time ) + + + if len( work_times ) == 0: + + return None + + + return min( work_times ) + + + def GetNetworkJobSubscriptionKey( self, subscription_name ): + + return subscription_name + ': ' + self.GetHumanName() + + + def GetQueryText( self ): + + return self._query + + + def GetTagImportOptions( self ): + + return self._tag_import_options + + + def HasFileWorkToDo( self ): + + file_seed = self._file_seed_cache.GetNextFileSeed( CC.STATUS_UNKNOWN ) + + if HG.subscription_report_mode: + + HydrusData.ShowText( 'Query "' + self._query + '" HasFileWorkToDo test. Next import is ' + repr( file_seed ) + '.' 
) + + + return file_seed is not None + + + def IsDead( self ): + + return self._status == ClientImporting.CHECKER_STATUS_DEAD + + + def IsInitialSync( self ): + + return self._last_check_time == 0 + + + def IsPaused( self ): + + return self._paused + + + def IsSyncDue( self ): + + if HG.subscription_report_mode: + + HydrusData.ShowText( 'Query "' + self._query + '" IsSyncDue test. Paused/dead status is {}/{}, check time due is {}, and check_now is {}.'.format( self._paused, self.IsDead(), HydrusData.TimeHasPassed( self._next_check_time ), self._check_now ) ) + + + if self._paused or self.IsDead(): + + return False + + + return HydrusData.TimeHasPassed( self._next_check_time ) or self._check_now + + + def PausePlay( self ): + + self._paused = not self._paused + + + def RegisterSyncComplete( self, checker_options: ClientImportOptions.CheckerOptions ): + + self._last_check_time = HydrusData.GetNow() + + self._check_now = False + + death_period = checker_options.GetDeathFileVelocityPeriod() + + compact_before_this_time = self._last_check_time - death_period + + if self._gallery_seed_log.CanCompact( compact_before_this_time ): + + self._gallery_seed_log.Compact( compact_before_this_time ) + + + if self._file_seed_cache.CanCompact( compact_before_this_time ): + + self._file_seed_cache.Compact( compact_before_this_time ) + + + + def Reset( self ): + + self._last_check_time = 0 + self._next_check_time = 0 + self._status = ClientImporting.CHECKER_STATUS_OK + self._paused = False + + self._file_seed_cache = ClientImportFileSeeds.FileSeedCache() + + + def RetryFailed( self ): + + self._file_seed_cache.RetryFailed() + + + def RetryIgnored( self ): + + self._file_seed_cache.RetryIgnored() + + + def SetCheckNow( self, check_now ): + + self._check_now = check_now + + + def SetDisplayName( self, display_name ): + + self._display_name = display_name + + + def SetPaused( self, paused ): + + self._paused = paused + + + def SetQueryAndSeeds( self, query, file_seed_cache, gallery_seed_log ): + + self._query = query + self._file_seed_cache = file_seed_cache + self._gallery_seed_log = gallery_seed_log + + + def SetTagImportOptions( self, tag_import_options ): + + self._tag_import_options = tag_import_options + + + def UpdateNextCheckTime( self, checker_options: ClientImportOptions.CheckerOptions ): + + if self._check_now: + + self._next_check_time = 0 + + self._status = ClientImporting.CHECKER_STATUS_OK + + else: + + if checker_options.IsDead( self._file_seed_cache, self._last_check_time ): + + self._status = ClientImporting.CHECKER_STATUS_DEAD + + if not self.HasFileWorkToDo(): + + self._paused = True + + + + last_next_check_time = self._next_check_time + + self._next_check_time = checker_options.GetNextCheckTime( self._file_seed_cache, self._last_check_time, last_next_check_time ) + + + + def ToTuple( self ): + + return ( self._query, self._check_now, self._last_check_time, self._next_check_time, self._paused, self._status ) + + +HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LEGACY ] = SubscriptionQueryLegacy + +class SubscriptionLegacy( HydrusSerialisable.SerialisableBaseNamed ): + + SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_LEGACY + SERIALISABLE_NAME = 'Legacy Subscription' + SERIALISABLE_VERSION = 10 + + def __init__( self, name, gug_key_and_name = None ): + + HydrusSerialisable.SerialisableBaseNamed.__init__( self, name ) + + if gug_key_and_name is None: + + gug_key_and_name = ( HydrusData.GenerateKey(), 'unknown 
source' ) + + + self._gug_key_and_name = gug_key_and_name + + self._queries: typing.List[ SubscriptionQueryLegacy ] = [] + + new_options = HG.client_controller.new_options + + self._checker_options = new_options.GetDefaultSubscriptionCheckerOptions() + + if HC.options[ 'gallery_file_limit' ] is None: + + self._initial_file_limit = 100 + + else: + + self._initial_file_limit = min( 100, HC.options[ 'gallery_file_limit' ] ) + + + self._periodic_file_limit = 100 + self._paused = False + + self._file_import_options = new_options.GetDefaultFileImportOptions( 'quiet' ) + self._tag_import_options = ClientImportOptions.TagImportOptions( is_default = True ) + + self._no_work_until = 0 + self._no_work_until_reason = '' + + self._show_a_popup_while_working = True + self._publish_files_to_popup_button = True + self._publish_files_to_page = False + self._publish_label_override = None + self._merge_query_publish_events = True + + + def _CanDoWorkNow( self ): + + p1 = not ( self._paused or HG.client_controller.options[ 'pause_subs_sync' ] or HG.client_controller.new_options.GetBoolean( 'pause_all_new_network_traffic' ) ) + p2 = not ( HG.view_shutdown or HydrusThreading.IsThreadShuttingDown() ) + p3 = self._NoDelays() + + if HG.subscription_report_mode: + + message = 'Subscription "{}" CanDoWork check.'.format( self._name ) + message += os.linesep + message += 'Paused/Global/Network Pause: {}/{}/{}'.format( self._paused, HG.client_controller.options[ 'pause_subs_sync' ], HG.client_controller.new_options.GetBoolean( 'pause_all_new_network_traffic' ) ) + message += os.linesep + message += 'View/Thread shutdown: {}/{}'.format( HG.view_shutdown, HydrusThreading.IsThreadShuttingDown() ) + message += os.linesep + message += 'No delays: {}'.format( self._NoDelays() ) + + HydrusData.ShowText( message ) + + + return p1 and p2 and p3 + + + def _DelayWork( self, time_delta, reason ): + + self._no_work_until = HydrusData.GetNow() + time_delta + self._no_work_until_reason = reason + + + def _GetPublishingLabel( self, query ): + + if self._publish_label_override is None: + + label = self._name + + else: + + label = self._publish_label_override + + + if not self._merge_query_publish_events: + + label += ': ' + query.GetHumanName() + + + return label + + + def _GetQueriesForProcessing( self ) -> typing.List[ SubscriptionQueryLegacy ]: + + queries = list( self._queries ) + + if HG.client_controller.new_options.GetBoolean( 'process_subs_in_random_order' ): + + random.shuffle( queries ) + + else: + + def key( q ): + + return q.GetHumanName() + + + queries.sort( key = key ) + + + return queries + + + def _GetSerialisableInfo( self ): + + ( gug_key, gug_name ) = self._gug_key_and_name + + serialisable_gug_key_and_name = ( gug_key.hex(), gug_name ) + serialisable_queries = [ query.GetSerialisableTuple() for query in self._queries ] + serialisable_checker_options = self._checker_options.GetSerialisableTuple() + serialisable_file_import_options = self._file_import_options.GetSerialisableTuple() + serialisable_tag_import_options = self._tag_import_options.GetSerialisableTuple() + + return ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, self._initial_file_limit, self._periodic_file_limit, self._paused, serialisable_file_import_options, serialisable_tag_import_options, self._no_work_until, self._no_work_until_reason, self._show_a_popup_while_working, self._publish_files_to_popup_button, self._publish_files_to_page, self._publish_label_override, self._merge_query_publish_events ) + + + def 
_InitialiseFromSerialisableInfo( self, serialisable_info ): + + ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, self._initial_file_limit, self._periodic_file_limit, self._paused, serialisable_file_import_options, serialisable_tag_import_options, self._no_work_until, self._no_work_until_reason, self._show_a_popup_while_working, self._publish_files_to_popup_button, self._publish_files_to_page, self._publish_label_override, self._merge_query_publish_events ) = serialisable_info + + ( serialisable_gug_key, gug_name ) = serialisable_gug_key_and_name + + self._gug_key_and_name = ( bytes.fromhex( serialisable_gug_key ), gug_name ) + self._queries = [ HydrusSerialisable.CreateFromSerialisableTuple( serialisable_query ) for serialisable_query in serialisable_queries ] + self._checker_options = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_checker_options ) + self._file_import_options = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_file_import_options ) + self._tag_import_options = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_tag_import_options ) + + + def _GenerateNetworkJobFactory( self, query ): + + subscription_key = query.GetNetworkJobSubscriptionKey( self._name ) + + def network_job_factory( *args, **kwargs ): + + network_job = ClientNetworkingJobs.NetworkJobSubscription( subscription_key, *args, **kwargs ) + + network_job.OverrideBandwidth( 30 ) + + return network_job + + + return network_job_factory + + + def _NoDelays( self ): + + return HydrusData.TimeHasPassed( self._no_work_until ) + + + def _QueryFileLoginOK( self, query ): + + file_seed_cache = query.GetFileSeedCache() + + file_seed = file_seed_cache.GetNextFileSeed( CC.STATUS_UNKNOWN ) + + if file_seed is None: + + result = True + + else: + + nj = file_seed.GetExampleNetworkJob( self._GenerateNetworkJobFactory( query ) ) + + nj.engine = HG.client_controller.network_engine + + if nj.NeedsLogin(): + + try: + + nj.CheckCanLogin() + + result = True + + except Exception as e: + + result = False + + if not self._paused: + + login_fail_reason = str( e ) + + message = 'Query "' + query.GetHumanName() + '" for subscription "' + self._name + '" seemed to have an invalid login for one of its file imports. The reason was:' + message += os.linesep * 2 + message += login_fail_reason + message += os.linesep * 2 + message += 'The subscription has paused. Please see if you can fix the problem and then unpause. Hydrus dev would like feedback on this process.' + + HydrusData.ShowText( message ) + + self._DelayWork( 300, login_fail_reason ) + + self._paused = True + + + + else: + + result = True + + + + if HG.subscription_report_mode: + + HydrusData.ShowText( 'Query "' + query.GetHumanName() + '" pre-work file login test. Login ok: ' + str( result ) + '.' ) + + + return result + + + def _QuerySyncLoginOK( self, query ): + + gallery_seed_log = query.GetGallerySeedLog() + + gallery_seed = gallery_seed_log.GetNextGallerySeed( CC.STATUS_UNKNOWN ) + + if gallery_seed is None: + + result = True + + else: + + nj = gallery_seed.GetExampleNetworkJob( self._GenerateNetworkJobFactory( query ) ) + + nj.engine = HG.client_controller.network_engine + + if nj.NeedsLogin(): + + try: + + nj.CheckCanLogin() + + result = True + + except Exception as e: + + result = False + + if not self._paused: + + login_fail_reason = str( e ) + + message = 'Query "' + query.GetHumanName() + '" for subscription "' + self._name + '" seemed to have an invalid login. 
+    def _QuerySyncLoginOK( self, query ):
+
+        gallery_seed_log = query.GetGallerySeedLog()
+
+        gallery_seed = gallery_seed_log.GetNextGallerySeed( CC.STATUS_UNKNOWN )
+
+        if gallery_seed is None:
+
+            result = True
+
+        else:
+
+            nj = gallery_seed.GetExampleNetworkJob( self._GenerateNetworkJobFactory( query ) )
+
+            nj.engine = HG.client_controller.network_engine
+
+            if nj.NeedsLogin():
+
+                try:
+
+                    nj.CheckCanLogin()
+
+                    result = True
+
+                except Exception as e:
+
+                    result = False
+
+                    if not self._paused:
+
+                        login_fail_reason = str( e )
+
+                        message = 'Query "' + query.GetHumanName() + '" for subscription "' + self._name + '" seemed to have an invalid login. The reason was:'
+                        message += os.linesep * 2
+                        message += login_fail_reason
+                        message += os.linesep * 2
+                        message += 'The subscription has paused. Please see if you can fix the problem and then unpause. Hydrus dev would like feedback on this process.'
+
+                        HydrusData.ShowText( message )
+
+                        self._DelayWork( 300, login_fail_reason )
+
+                        self._paused = True
+
+
+
+            else:
+
+                result = True
+
+
+
+        if HG.subscription_report_mode:
+
+            HydrusData.ShowText( 'Query "' + query.GetHumanName() + '" pre-work sync login test. Login ok: ' + str( result ) + '.' )
+
+
+        return result
+
+
+    def _ShowHitPeriodicFileLimitMessage( self, query_text ):
+
+        message = 'The query "' + query_text + '" for subscription "' + self._name + '" hit its periodic file limit without seeing any already-seen files.'
+
+        HydrusData.ShowText( message )
+
+
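+    # each 'if version == n' block below migrates the serialised tuple one version step and
+    # returns ( n + 1, info ), so an old object is walked forward one hop at a time until it
+    # reaches the current version
+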
+    def _UpdateSerialisableInfo( self, version, old_serialisable_info ):
+
+        if version == 1:
+
+            ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, query, period, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, last_checked, last_error, serialisable_file_seed_cache ) = old_serialisable_info
+
+            check_now = False
+
+            new_serialisable_info = ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, query, period, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, last_checked, check_now, last_error, serialisable_file_seed_cache )
+
+            return ( 2, new_serialisable_info )
+
+
+        if version == 2:
+
+            ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, query, period, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, last_checked, check_now, last_error, serialisable_file_seed_cache ) = old_serialisable_info
+
+            no_work_until = 0
+            no_work_until_reason = ''
+
+            new_serialisable_info = ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, query, period, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, last_checked, check_now, last_error, no_work_until, no_work_until_reason, serialisable_file_seed_cache )
+
+            return ( 3, new_serialisable_info )
+
+
+        if version == 3:
+
+            ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, query, period, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, last_checked, check_now, last_error, no_work_until, no_work_until_reason, serialisable_file_seed_cache ) = old_serialisable_info
+
+            checker_options = ClientImportOptions.CheckerOptions( 5, period // 5, period * 10, ( 1, period * 10 ) )
+
+            file_seed_cache = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_file_seed_cache )
+
+            query = SubscriptionQueryLegacy( query )
+
+            query._file_seed_cache = file_seed_cache
+            query._last_check_time = last_checked
+
+            query.UpdateNextCheckTime( checker_options )
+
+            queries = [ query ]
+
+            serialisable_queries = [ query.GetSerialisableTuple() for query in queries ]
+            serialisable_checker_options = checker_options.GetSerialisableTuple()
+
+            new_serialisable_info = ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason )
+
+            return ( 4, new_serialisable_info )
+
+
+        if version == 4:
+
+            ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason ) = old_serialisable_info
+
+            new_serialisable_info = ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason )
+
+            return ( 5, new_serialisable_info )
+
+
+        if version == 5:
+
+            ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason ) = old_serialisable_info
+
+            publish_files_to_popup_button = True
+            publish_files_to_page = False
+            merge_query_publish_events = True
+
+            new_serialisable_info = ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events )
+
+            return ( 6, new_serialisable_info )
+
+
+        if version == 6:
+
+            ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events ) = old_serialisable_info
+
+            if initial_file_limit is None or initial_file_limit > 1000:
+
+                initial_file_limit = 1000
+
+
+            if periodic_file_limit is None or periodic_file_limit > 1000:
+
+                periodic_file_limit = 1000
+
+
+            new_serialisable_info = ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events )
+
+            return ( 7, new_serialisable_info )
+
+
+        if version == 7:
+
+            ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events ) = old_serialisable_info
+
+            gallery_identifier = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_gallery_identifier )
+
+            ( gug_key, gug_name ) = ClientDownloading.ConvertGalleryIdentifierToGUGKeyAndName( gallery_identifier )
+
+            serialisable_gug_key_and_name = ( gug_key.hex(), gug_name )
+
+            new_serialisable_info = ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events )
+
+            return ( 8, new_serialisable_info )
+
+
+        if version == 8:
+
+            ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events ) = old_serialisable_info
+
+            show_a_popup_while_working = True
+
+            new_serialisable_info = ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, show_a_popup_while_working, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events )
+
+            return ( 9, new_serialisable_info )
+
+
+        if version == 9:
+
+            ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, show_a_popup_while_working, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events ) = old_serialisable_info
+
+            publish_label_override = None
+
+            new_serialisable_info = ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, show_a_popup_while_working, publish_files_to_popup_button, publish_files_to_page, publish_label_override, merge_query_publish_events )
+
+            return ( 10, new_serialisable_info )
+
+
+
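+    # file work: walk each query's unknown file seeds in order, checking the pause/bandwidth/login
+    # gates before every url and bailing out early (with a delay) when any of them fail
+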
+    def _WorkOnFiles( self, job_key ):
+
+        error_count = 0
+
+        queries = self._GetQueriesForProcessing()
+
+        queries = [ query for query in queries if query.HasFileWorkToDo() ]
+
+        num_queries = len( queries )
+
+        for ( i, query ) in enumerate( queries ):
+
+            this_query_has_done_work = False
+
+            query_name = query.GetHumanName()
+            file_seed_cache = query.GetFileSeedCache()
+
+            text_1 = 'downloading files'
+            query_summary_name = self._name
+
+            if query_name != self._name:
+
+                text_1 += ' for "' + query_name + '"'
+                query_summary_name += ': ' + query_name
+
+
+            if num_queries > 1:
+
+                text_1 += ' (' + HydrusData.ConvertValueRangeToPrettyString( i + 1, num_queries ) + ')'
+
+
+            job_key.SetVariable( 'popup_text_1', text_1 )
+
+            presentation_hashes = []
+            presentation_hashes_fast = set()
+
+            starting_num_urls = file_seed_cache.GetFileSeedCount()
+            starting_num_unknown = file_seed_cache.GetFileSeedCount( CC.STATUS_UNKNOWN )
+            starting_num_done = starting_num_urls - starting_num_unknown
+
+            try:
+
+                while True:
+
+                    file_seed = file_seed_cache.GetNextFileSeed( CC.STATUS_UNKNOWN )
+
+                    if file_seed is None:
+
+                        if HG.subscription_report_mode:
+
+                            HydrusData.ShowText( 'Query "' + query_name + '" can do no more file work due to running out of unknown urls.' )
+
+
+                        break
+
+
+                    if job_key.IsCancelled():
+
+                        self._DelayWork( 300, 'recently cancelled' )
+
+                        break
+
+
+                    p1 = not self._CanDoWorkNow()
+                    p3 = not query.DomainOK()
+                    p4 = not query.BandwidthOK( self._name )
+                    p5 = not self._QueryFileLoginOK( query )
+
+                    if p1 or p4 or p5:
+
+                        if p3 and this_query_has_done_work:
+
+                            job_key.SetVariable( 'popup_text_2', 'domain had errors, will try again later' )
+
+                            self._DelayWork( 3600, 'domain errors, will try again later' )
+
+                            time.sleep( 5 )
+
+
+                        if p4 and this_query_has_done_work:
+
+                            job_key.SetVariable( 'popup_text_2', 'no more bandwidth to download files, will do some more later' )
+
+                            time.sleep( 5 )
+
+
+                        break
+
+
+                    try:
+
+                        num_urls = file_seed_cache.GetFileSeedCount()
+                        num_unknown = file_seed_cache.GetFileSeedCount( CC.STATUS_UNKNOWN )
+                        num_done = num_urls - num_unknown
+
+                        # 4001/4003 is not as useful as 1/3
+
+                        human_num_urls = num_urls - starting_num_done
+                        human_num_done = num_done - starting_num_done
+
+                        x_out_of_y = 'file ' + HydrusData.ConvertValueRangeToPrettyString( human_num_done + 1, human_num_urls ) + ': '
+
+                        job_key.SetVariable( 'popup_gauge_2', ( human_num_done, human_num_urls ) )
+
+                        def status_hook( text ):
+
+                            if len( text ) > 0:
+
+                                text = text.splitlines()[0]
+
+
+                            job_key.SetVariable( 'popup_text_2', x_out_of_y + text )
+
+
+                        file_seed.WorkOnURL( file_seed_cache, status_hook, self._GenerateNetworkJobFactory( query ), ClientImporting.GenerateMultiplePopupNetworkJobPresentationContextFactory( job_key ), self._file_import_options, self._tag_import_options )
+
+                        query_tag_import_options = query.GetTagImportOptions()
+
+                        if query_tag_import_options.HasAdditionalTags() and file_seed.status in CC.SUCCESSFUL_IMPORT_STATES:
+
+                            if file_seed.HasHash():
+
+                                hash = file_seed.GetHash()
+
+                                media_result = HG.client_controller.Read( 'media_result', hash )
+
+                                downloaded_tags = []
+
+                                service_keys_to_content_updates = query_tag_import_options.GetServiceKeysToContentUpdates( file_seed.status, media_result, downloaded_tags ) # additional tags
+
+                                if len( service_keys_to_content_updates ) > 0:
+
+                                    HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
+
+
+
+
+                        if file_seed.ShouldPresent( self._file_import_options ):
+
+                            hash = file_seed.GetHash()
+
+                            if hash not in presentation_hashes_fast:
+
+                                presentation_hashes.append( hash )
+
+                                presentation_hashes_fast.add( hash )
+
+
+
+                    except HydrusExceptions.CancelledException as e:
+
+                        self._DelayWork( 300, str( e ) )
+
+                        break
+
+                    except HydrusExceptions.VetoException as e:
+
+                        status = CC.STATUS_VETOED
+
+                        note = str( e )
+
+                        file_seed.SetStatus( status, note = note )
+
+                    except HydrusExceptions.NotFoundException:
+
+                        status = CC.STATUS_VETOED
+
+                        note = '404'
+
+                        file_seed.SetStatus( status, note = note )
+
+                    except Exception as e:
+
+                        status = CC.STATUS_ERROR
+
+                        job_key.SetVariable( 'popup_text_2', x_out_of_y + 'file failed' )
+
+                        file_seed.SetStatus( status, exception = e )
+
+                        if isinstance( e, HydrusExceptions.DataMissing ):
+
+                            # DataMissing is a quick thing to avoid subscription abandons when lots of deleted files in e621 (or any other booru)
+                            # this should be richer in any case in the new system
+
+                            pass
+
+                        else:
+
+                            error_count += 1
+
+                            time.sleep( 5 )
+
+
+                        error_count_threshold = HG.client_controller.new_options.GetNoneableInteger( 'subscription_file_error_cancel_threshold' )
+
+                        if error_count_threshold is not None and error_count >= error_count_threshold:
+
+                            raise Exception( 'The subscription ' + self._name + ' encountered several errors when downloading files, so it abandoned its sync.' )
+
+
+
+                    this_query_has_done_work = True
+
+                    if len( presentation_hashes ) > 0:
+
+                        job_key.SetVariable( 'popup_files', ( list( presentation_hashes ), query_summary_name ) )
+
+
+                    time.sleep( ClientImporting.DID_SUBSTANTIAL_FILE_WORK_MINIMUM_SLEEP_TIME )
+
+                    HG.client_controller.WaitUntilViewFree()
+
+
+            finally:
+
+                if len( presentation_hashes ) > 0:
+
+                    publishing_label = self._GetPublishingLabel( query )
+
+                    ClientImporting.PublishPresentationHashes( publishing_label, presentation_hashes, self._publish_files_to_popup_button, self._publish_files_to_page )
+
+
+
+
+        job_key.DeleteVariable( 'popup_files' )
+        job_key.DeleteVariable( 'popup_text_1' )
+        job_key.DeleteVariable( 'popup_text_2' )
+        job_key.DeleteVariable( 'popup_gauge_2' )
+
+
+    def _WorkOnFilesCanDoWork( self ):
+
+        for query in self._queries:
+
+            if query.HasFileWorkToDo():
+
+                bandwidth_ok = query.BandwidthOK( self._name )
+                domain_ok = query.DomainOK()
+
+                if HG.subscription_report_mode:
+
+                    HydrusData.ShowText( 'Subscription "{}" checking if any file work due: True, bandwidth ok: {}, domain ok: {}'.format( self._name, bandwidth_ok, domain_ok ) )
+
+
+                if bandwidth_ok and domain_ok:
+
+                    return True
+
+
+                if not domain_ok:
+
+                    self._DelayWork( 3600, 'domain errors, will try again later' )
+
+
+
+
+        if HG.subscription_report_mode:
+
+            HydrusData.ShowText( 'Subscription "{}" checking if any file work due: False'.format( self._name ) )
+
+
+        return False
+
+
+    def _SyncQuery( self, job_key ):
+
+        have_made_an_initial_sync_bandwidth_notification = False
+
+        gug = HG.client_controller.network_engine.domain_manager.GetGUG( self._gug_key_and_name )
+
+        if gug is None:
+
+            self._paused = True
+
+            HydrusData.ShowText( 'The subscription "' + self._name + '" could not find a Gallery URL Generator for "' + self._gug_key_and_name[1] + '"! The sub has paused!' )
+
+            return
+
+
+        if not gug.IsFunctional():
+
+            self._paused = True
+
+            HydrusData.ShowText( 'The subscription "' + self._name + '"\'s Gallery URL Generator, "' + self._gug_key_and_name[1] + '" seems not to be functional! Maybe it needs a gallery url class or a gallery parser? The sub has paused!' )
+
+            return
+
+
+        self._gug_key_and_name = gug.GetGUGKeyAndName() # just a refresher, to keep up with any changes
+
+        queries = self._GetQueriesForProcessing()
+
+        queries = [ query for query in queries if query.IsSyncDue() ]
+
+        num_queries = len( queries )
+
+        for ( i, query ) in enumerate( queries ):
+
+            query_text = query.GetQueryText()
+            query_name = query.GetHumanName()
+            file_seed_cache = query.GetFileSeedCache()
+            gallery_seed_log = query.GetGallerySeedLog()
+
+            this_is_initial_sync = query.IsInitialSync()
+            total_new_urls_for_this_sync = 0
+            total_already_in_urls_for_this_sync = 0
+
+            gallery_urls_seen_this_sync = set()
+
+            if this_is_initial_sync:
+
+                file_limit_for_this_sync = self._initial_file_limit
+
+            else:
+
+                file_limit_for_this_sync = self._periodic_file_limit
+
+
+            file_seeds_to_add = set()
+            file_seeds_to_add_ordered = []
+
+            stop_reason = 'unknown stop reason'
+
+            prefix = 'synchronising'
+
+            if query_name != self._name:
+
+                prefix += ' "' + query_name + '"'
+
+
+            if num_queries > 1:
+
+                prefix += ' (' + HydrusData.ConvertValueRangeToPrettyString( i + 1, num_queries ) + ')'
+
+
+            job_key.SetVariable( 'popup_text_1', prefix )
+
+            initial_search_urls = gug.GenerateGalleryURLs( query_text )
+
+            if len( initial_search_urls ) == 0:
+
+                self._paused = True
+
+                HydrusData.ShowText( 'The subscription "' + self._name + '"\'s Gallery URL Generator, "' + self._gug_key_and_name[1] + '" did not generate any URLs! The sub has paused!' )
+
+                return
+
+
+            gallery_seeds = [ ClientImportGallerySeeds.GallerySeed( url, can_generate_more_pages = True ) for url in initial_search_urls ]
+
+            gallery_seed_log.AddGallerySeeds( gallery_seeds )
+
+            try:
+
+                while gallery_seed_log.WorkToDo():
+
+                    p1 = not self._CanDoWorkNow()
+                    p3 = not self._QuerySyncLoginOK( query )
+
+                    if p1 or p3:
+
+                        if p3:
+
+                            stop_reason = 'Login was invalid!'
+
+
+                        return
+
+
+                    if job_key.IsCancelled():
+
+                        stop_reason = 'gallery parsing cancelled, likely by user'
+
+                        self._DelayWork( 600, stop_reason )
+
+                        return
+
+
+                    gallery_seed = gallery_seed_log.GetNextGallerySeed( CC.STATUS_UNKNOWN )
+
+                    if gallery_seed is None:
+
+                        stop_reason = 'thought there was a page to check, but apparently there was not!'
+
+                        break
+
+
+                    def status_hook( text ):
+
+                        if len( text ) > 0:
+
+                            text = text.splitlines()[0]
+
+
+                        job_key.SetVariable( 'popup_text_1', prefix + ': ' + text )
+
+
+                    def title_hook( text ):
+
+                        pass
+
+
+                    def file_seeds_callable( file_seeds ):
+
+                        num_urls_added = 0
+                        num_urls_already_in_file_seed_cache = 0
+                        can_search_for_more_files = True
+                        stop_reason = 'unknown stop reason'
+                        current_contiguous_num_urls_already_in_file_seed_cache = 0
+
+                        for file_seed in file_seeds:
+
+                            if file_seed in file_seeds_to_add:
+
+                                # this catches the occasional overflow when a new file is uploaded while gallery parsing is going on
+                                # we don't want to count these 'seen before this run' urls in the 'caught up to last time' count
+
+                                continue
+
+
+                            # When are we caught up? This is not a trivial problem. Tags are not always added when files are uploaded, so the order we find files is not completely reliable.
+                            # Ideally, we want to search a _bit_ deeper than the first already-seen.
+                            # And since we have a page of urls here and now, there is no point breaking early if there might be some new ones at the end.
+                            # Current rule is "We are caught up if the final X contiguous files are 'already in'". X is 5 for now.
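+                            # so, to illustrate with the threshold of 5: a page parsing as
+                            # new, new, seen, seen, seen, seen, seen adds the two new urls and
+                            # then stops spawning further gallery pages, while a single stale
+                            # url in the middle of fresh ones resets the contiguous count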
+
+                            if file_seed_cache.HasFileSeed( file_seed ):
+
+                                num_urls_already_in_file_seed_cache += 1
+                                current_contiguous_num_urls_already_in_file_seed_cache += 1
+
+                                if current_contiguous_num_urls_already_in_file_seed_cache >= 100:
+
+                                    can_search_for_more_files = False
+                                    stop_reason = 'saw 100 previously seen urls in a row, so assuming this is a large gallery'
+
+                                    break
+
+
+                            else:
+
+                                num_urls_added += 1
+                                current_contiguous_num_urls_already_in_file_seed_cache = 0
+
+                                file_seeds_to_add.add( file_seed )
+                                file_seeds_to_add_ordered.append( file_seed )
+
+
+                            if file_limit_for_this_sync is not None and total_new_urls_for_this_sync + num_urls_added >= file_limit_for_this_sync:
+
+                                # we have found enough new files this sync, so should stop adding files and new gallery pages
+
+                                if this_is_initial_sync:
+
+                                    stop_reason = 'hit initial file limit'
+
+                                else:
+
+                                    if total_already_in_urls_for_this_sync + num_urls_already_in_file_seed_cache > 0:
+
+                                        # this sync produced some knowns, so it is likely we have stepped through a mix of old and tagged-late new files
+                                        # we might also be on the second sync with a periodic limit greater than the initial limit
+                                        # either way, this is no reason to go crying to the user
+
+                                        stop_reason = 'hit periodic file limit after seeing several already-seen files'
+
+                                    else:
+
+                                        # this page had all entirely new files
+
+                                        self._ShowHitPeriodicFileLimitMessage( query_name )
+
+                                        stop_reason = 'hit periodic file limit without seeing any already-seen files!'
+
+
+
+                                can_search_for_more_files = False
+
+                                break
+
+
+
+                        WE_HIT_OLD_GROUND_THRESHOLD = 5
+
+                        if current_contiguous_num_urls_already_in_file_seed_cache >= WE_HIT_OLD_GROUND_THRESHOLD:
+
+                            # this gallery page has caught up to before, so it should not spawn any more gallery pages
+
+                            can_search_for_more_files = False
+
+                            stop_reason = 'saw ' + HydrusData.ToHumanInt( current_contiguous_num_urls_already_in_file_seed_cache ) + ' previously seen urls, so assuming we caught up'
+
+
+                        if num_urls_added == 0:
+
+                            can_search_for_more_files = False
+                            stop_reason = 'no new urls found'
+
+
+                        return ( num_urls_added, num_urls_already_in_file_seed_cache, can_search_for_more_files, stop_reason )
+
+
+                    job_key.SetVariable( 'popup_text_1', prefix + ': found ' + HydrusData.ToHumanInt( total_new_urls_for_this_sync ) + ' new urls, checking next page' )
+
+                    try:
+
+                        ( num_urls_added, num_urls_already_in_file_seed_cache, num_urls_total, result_404, added_new_gallery_pages, stop_reason ) = gallery_seed.WorkOnURL( 'subscription', gallery_seed_log, file_seeds_callable, status_hook, title_hook, self._GenerateNetworkJobFactory( query ), ClientImporting.GenerateMultiplePopupNetworkJobPresentationContextFactory( job_key ), self._file_import_options, gallery_urls_seen_before = gallery_urls_seen_this_sync )
+
+                    except HydrusExceptions.CancelledException as e:
+
+                        stop_reason = 'gallery network job cancelled, likely by user'
+
+                        self._DelayWork( 600, stop_reason )
+
+                        return
+
+                    except Exception as e:
+
+                        stop_reason = str( e )
+
+                        raise
+
+
+                    total_new_urls_for_this_sync += num_urls_added
+                    total_already_in_urls_for_this_sync += num_urls_already_in_file_seed_cache
+
+                    if file_limit_for_this_sync is not None and total_new_urls_for_this_sync >= file_limit_for_this_sync:
+
+                        # we have found enough new files this sync, so stop and cancel any outstanding gallery urls
+
+                        if this_is_initial_sync:
+
+                            stop_reason = 'hit initial file limit'
+
+                        else:
+
+                            stop_reason = 'hit periodic file limit'
+
+
+                        break
+
+
+
+            finally:
+
+                while gallery_seed_log.WorkToDo():
+
+                    gallery_seed = gallery_seed_log.GetNextGallerySeed( CC.STATUS_UNKNOWN )
+
+                    if gallery_seed is None:
+
+                        break
+
+
+                    gallery_seed.SetStatus( CC.STATUS_VETOED, note = stop_reason )
+
+
+
+            file_seeds_to_add_ordered.reverse()
+
+            # 'first' urls are now at the end, so the file_seed_cache should stay roughly in oldest->newest order
+
+            file_seed_cache.AddFileSeeds( file_seeds_to_add_ordered )
+
+            query.RegisterSyncComplete( self._checker_options )
+            query.UpdateNextCheckTime( self._checker_options )
+
+            #
+
+            if query.IsDead():
+
+                if this_is_initial_sync:
+
+                    HydrusData.ShowText( 'The query "' + query_name + '" for subscription "' + self._name + '" did not find any files on its first sync! Could the query text have a typo, like a missing underscore?' )
+
+                else:
+
+                    HydrusData.ShowText( 'The query "' + query_name + '" for subscription "' + self._name + '" appears to be dead!' )
+
+
+            else:
+
+                if this_is_initial_sync:
+
+                    if not query.BandwidthOK( self._name ) and not have_made_an_initial_sync_bandwidth_notification:
+
+                        HydrusData.ShowText( 'FYI: The query "' + query_name + '" for subscription "' + self._name + '" performed its initial sync ok, but that domain is short on bandwidth right now, so no files will be downloaded yet. The subscription will catch up in future as bandwidth becomes available. You can review the estimated time until bandwidth is available under the manage subscriptions dialog. If more queries are performing initial syncs in this run, they may be the same.' )
+
+                        have_made_an_initial_sync_bandwidth_notification = True
+
+
+
+
+
+    def _SyncQueryCanDoWork( self ):
+
+        result = True in ( query.IsSyncDue() for query in self._queries )
+
+        if HG.subscription_report_mode:
+
+            HydrusData.ShowText( 'Subscription "{}" checking if any sync work due: {}'.format( self._name, result ) )
+
+
+        return result
+
+
+    def AllPaused( self ):
+
+        if self._paused:
+
+            return True
+
+
+        for query in self._queries:
+
+            if not query.IsPaused():
+
+                return False
+
+
+
+        return True
+
+
+    def CanCheckNow( self ):
+
+        return True in ( query.CanCheckNow() for query in self._queries )
+
+
+    def CanReset( self ):
+
+        return True in ( not query.IsInitialSync() for query in self._queries )
+
+
+    def CanRetryFailed( self ):
+
+        return True in ( query.CanRetryFailed() for query in self._queries )
+
+
+    def CanRetryIgnored( self ):
+
+        return True in ( query.CanRetryIgnored() for query in self._queries )
+
+
+    def CanScrubDelay( self ):
+
+        return not HydrusData.TimeHasPassed( self._no_work_until )
+
+
+    def CheckNow( self ):
+
+        for query in self._queries:
+
+            query.CheckNow()
+
+
+        self.ScrubDelay()
+
+
+    def GetBandwidthWaitingEstimateMinMax( self ):
+
+        if len( self._queries ) == 0:
+
+            return ( 0, 0 )
+
+
+        estimates = []
+
+        for query in self._queries:
+
+            estimate = query.GetBandwidthWaitingEstimate( self._name )
+
+            estimates.append( estimate )
+
+
+        min_estimate = min( estimates )
+        max_estimate = max( estimates )
+
+        return ( min_estimate, max_estimate )
+
+
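+    # e.g. with queries due at t, t+50s and t+40min, the first two fall inside the fifteen
+    # minute window, so the sub aims to wake once at t+50s and run both in one launch, while
+    # the third waits for its own later launch (unless a check_now makes the earliest urgent)
+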
+    def GetBestEarliestNextWorkTime( self ):
+
+        next_work_times = set()
+
+        for query in self._queries:
+
+            next_work_time = query.GetNextWorkTime( self._name )
+
+            if next_work_time is not None:
+
+                next_work_times.add( next_work_time )
+
+
+        if len( next_work_times ) == 0:
+
+            return None
+
+
+        # if there are three queries due fifty seconds after our first one runs, we should wait that little bit longer
+        LAUNCH_WINDOW = 15 * 60
+
+        earliest_next_work_time = min( next_work_times )
+
+        latest_nearby_next_work_time = max( ( work_time for work_time in next_work_times if work_time < earliest_next_work_time + LAUNCH_WINDOW ) )
+
+        # but if we are expecting to launch it right now (e.g. check_now call), we won't wait
+        if HydrusData.TimeUntil( earliest_next_work_time ) < 60:
+
+            best_next_work_time = earliest_next_work_time
+
+        else:
+
+            best_next_work_time = latest_nearby_next_work_time
+
+
+        if not HydrusData.TimeHasPassed( self._no_work_until ):
+
+            best_next_work_time = max( ( best_next_work_time, self._no_work_until ) )
+
+
+        return best_next_work_time
+
+
+    def GetCheckerOptions( self ):
+
+        return self._checker_options
+
+
+    def GetGUGKeyAndName( self ):
+
+        return self._gug_key_and_name
+
+
+    def GetQueries( self ) -> typing.List[ SubscriptionQueryLegacy ]:
+
+        return self._queries
+
+
+    def GetMergeable( self, potential_mergees ):
+
+        mergeable = []
+        unmergeable = []
+
+        for subscription in potential_mergees:
+
+            if subscription._gug_key_and_name[1] == self._gug_key_and_name[1]:
+
+                mergeable.append( subscription )
+
+            else:
+
+                unmergeable.append( subscription )
+
+
+
+        return ( mergeable, unmergeable )
+
+
+    def GetPresentationOptions( self ):
+
+        return ( self._show_a_popup_while_working, self._publish_files_to_popup_button, self._publish_files_to_page, self._publish_label_override, self._merge_query_publish_events )
+
+
+    def GetTagImportOptions( self ):
+
+        return self._tag_import_options
+
+
+    def HasQuerySearchTextFragment( self, search_text_fragment ):
+
+        for query in self._queries:
+
+            query_text = query.GetQueryText()
+
+            if search_text_fragment in query_text:
+
+                return True
+
+
+
+        return False
+
+
+    def Merge( self, mergees ):
+
+        for subscription in mergees:
+
+            if subscription._gug_key_and_name[1] == self._gug_key_and_name[1]:
+
+                my_new_queries = [ query.Duplicate() for query in subscription._queries ]
+
+                self._queries.extend( my_new_queries )
+
+            else:
+
+                raise Exception( self._name + ' was told to merge an unmergeable subscription, ' + subscription.GetName() + '!' )
+
+
+
+
+    def PauseResume( self ):
+
+        self._paused = not self._paused
+
+
+    def Reset( self ):
+
+        for query in self._queries:
+
+            query.Reset()
+
+
+        self.ScrubDelay()
+
+
+    def RetryFailed( self ):
+
+        for query in self._queries:
+
+            query.RetryFailed()
+
+
+
+    def RetryIgnored( self ):
+
+        for query in self._queries:
+
+            query.RetryIgnored()
+
+
+
+    def Separate( self, base_name, only_these_queries = None ):
+
+        if only_these_queries is None:
+
+            only_these_queries = set( self._queries )
+
+        else:
+
+            only_these_queries = set( only_these_queries )
+
+
+        my_queries = self._queries
+
+        self._queries = []
+
+        base_sub = self.Duplicate()
+
+        self._queries = my_queries
+
+        subscriptions = []
+
+        for query in my_queries:
+
+            if query not in only_these_queries:
+
+                continue
+
+
+            subscription = base_sub.Duplicate()
+
+            subscription._queries = [ query ]
+
+            subscription.SetName( base_name + ': ' + query.GetHumanName() )
+
+            subscriptions.append( subscription )
+
+
+        self._queries = [ query for query in my_queries if query not in only_these_queries ]
+
+        return subscriptions
+
+
+    def SetCheckerOptions( self, checker_options ):
+
+        self._checker_options = checker_options
+
+        for query in self._queries:
+
+            query.UpdateNextCheckTime( self._checker_options )
+
+
+
+    def SetPresentationOptions( self, show_a_popup_while_working, publish_files_to_popup_button, publish_files_to_page, publish_label_override, merge_query_publish_events ):
+
+        self._show_a_popup_while_working = show_a_popup_while_working
+        self._publish_files_to_popup_button = publish_files_to_popup_button
+        self._publish_files_to_page = publish_files_to_page
+        self._publish_label_override = publish_label_override
+        self._merge_query_publish_events = merge_query_publish_events
+
+
+    def SetQueries( self, queries: typing.Iterable[ SubscriptionQueryLegacy ] ):
+
+        self._queries = list( queries )
+
+
+    def SetTagImportOptions( self, tag_import_options ):
+
+        self._tag_import_options = tag_import_options.Duplicate()
+
+
+    def SetTuple( self, gug_key_and_name, checker_options: ClientImportOptions.CheckerOptions, initial_file_limit, periodic_file_limit, paused, file_import_options: ClientImportOptions.FileImportOptions, tag_import_options: ClientImportOptions.TagImportOptions, no_work_until ):
+
+        self._gug_key_and_name = gug_key_and_name
+        self._checker_options = checker_options
+        self._initial_file_limit = initial_file_limit
+        self._periodic_file_limit = periodic_file_limit
+        self._paused = paused
+
+        self._file_import_options = file_import_options
+        self._tag_import_options = tag_import_options
+
+        self._no_work_until = no_work_until
+
+
+    def ScrubDelay( self ):
+
+        self._no_work_until = 0
+        self._no_work_until_reason = ''
+
+
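+    # Sync is the main entry point: it repeatedly syncs any due queries (finding new urls),
+    # then does file work, saves itself back to the db, and on error converts the exception
+    # into a timed delay rather than letting the subscription die
+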
+    def Sync( self ):
+
+        sync_ok = self._SyncQueryCanDoWork()
+        files_ok = self._WorkOnFilesCanDoWork()
+
+        if self._CanDoWorkNow() and ( sync_ok or files_ok ):
+
+            job_key = ClientThreading.JobKey( pausable = False, cancellable = True )
+
+            try:
+
+                job_key.SetVariable( 'popup_title', 'subscriptions - ' + self._name )
+
+                if self._show_a_popup_while_working:
+
+                    HG.client_controller.pub( 'message', job_key )
+
+
+                # it is possible a query becomes due for a check while others are syncing, so we repeat this while watching for a stop signal
+                while self._CanDoWorkNow() and self._SyncQueryCanDoWork():
+
+                    self._SyncQuery( job_key )
+
+
+                self._WorkOnFiles( job_key )
+
+            except HydrusExceptions.NetworkException as e:
+
+                delay = HG.client_controller.new_options.GetInteger( 'subscription_network_error_delay' )
+
+                HydrusData.Print( 'The subscription ' + self._name + ' encountered an exception when trying to sync:' )
+                HydrusData.Print( e )
+
+                job_key.SetVariable( 'popup_text_1', 'Encountered a network error, will retry again later' )
+
+                self._DelayWork( delay, 'network error: ' + str( e ) )
+
+                time.sleep( 5 )
+
+            except Exception as e:
+
+                HydrusData.ShowText( 'The subscription ' + self._name + ' encountered an exception when trying to sync:' )
+                HydrusData.ShowException( e )
+
+                delay = HG.client_controller.new_options.GetInteger( 'subscription_other_error_delay' )
+
+                self._DelayWork( delay, 'error: ' + str( e ) )
+
+            finally:
+
+                job_key.DeleteVariable( 'popup_network_job' )
+
+
+            HG.client_controller.WriteSynchronous( 'serialisable', self )
+
+            if job_key.HasVariable( 'popup_files' ):
+
+                job_key.Finish()
+
+            else:
+
+                job_key.Delete()
+
+
+
+
+    def ToTuple( self ):
+
+        return ( self._name, self._gug_key_and_name, self._queries, self._checker_options, self._initial_file_limit, self._periodic_file_limit, self._paused, self._file_import_options, self._tag_import_options, self._no_work_until, self._no_work_until_reason )
+
+
+HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_LEGACY ] = SubscriptionLegacy
+
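+# the new system splits this legacy monolith in two: a lightweight Subscription holding
+# SubscriptionQueryHeaders (status summaries), plus per-query SubscriptionQueryLogContainers
+# holding the heavy gallery seed logs and file seed caches, loaded from the db on demand
+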
+def ConvertLegacySubscriptionToNew( legacy_subscription: SubscriptionLegacy ):
+
+    (
+        name,
+        gug_key_and_name,
+        queries,
+        checker_options,
+        initial_file_limit,
+        periodic_file_limit,
+        paused,
+        file_import_options,
+        tag_import_options,
+        no_work_until,
+        no_work_until_reason
+    ) = legacy_subscription.ToTuple()
+
+    subscription = ClientImportSubscriptions.Subscription( name )
+
+    subscription.SetTuple(
+        gug_key_and_name,
+        checker_options,
+        initial_file_limit,
+        periodic_file_limit,
+        paused,
+        file_import_options,
+        tag_import_options,
+        no_work_until
+    )
+
+    (
+        show_a_popup_while_working,
+        publish_files_to_popup_button,
+        publish_files_to_page,
+        publish_label_override,
+        merge_query_publish_events
+    ) = legacy_subscription.GetPresentationOptions()
+
+    subscription.SetPresentationOptions(
+        show_a_popup_while_working,
+        publish_files_to_popup_button,
+        publish_files_to_page,
+        publish_label_override,
+        merge_query_publish_events
+    )
+
+    query_headers = []
+    query_log_containers = []
+
+    for query in queries:
+
+        query_header = ClientImportSubscriptionQuery.SubscriptionQueryHeader()
+
+        ( query_text, check_now, last_check_time, next_check_time, query_paused, status ) = query.ToTuple()
+
+        query_header.SetQueryText( query_text )
+        query_header.SetDisplayName( query.GetDisplayName() )
+        query_header.SetCheckNow( check_now )
+        query_header.SetLastCheckTime( last_check_time )
+        query_header.SetNextCheckTime( next_check_time )
+        query_header.SetPaused( query_paused )
+        query_header.SetCheckerStatus( status )
+        query_header.SetTagImportOptions( query.GetTagImportOptions() )
+
+        query_log_container = ClientImportSubscriptionQuery.SubscriptionQueryLogContainer( query_header.GetQueryLogContainerName() )
+
+        query_log_container.SetGallerySeedLog( query.GetGallerySeedLog() )
+        query_log_container.SetFileSeedCache( query.GetFileSeedCache() )
+
+        query_header.SyncToQueryLogContainer( checker_options, query_log_container )
+
+        query_headers.append( query_header )
+        query_log_containers.append( query_log_container )
+
+
+    subscription.SetQueryHeaders( query_headers )
+
+    return ( subscription, query_log_containers )
+
diff --git a/hydrus/client/importing/ClientImportSubscriptionQuery.py b/hydrus/client/importing/ClientImportSubscriptionQuery.py
index 6aedd04e..850cb154 100644
--- a/hydrus/client/importing/ClientImportSubscriptionQuery.py
+++ b/hydrus/client/importing/ClientImportSubscriptionQuery.py
@@ -1,3 +1,5 @@
+import typing
+
 from hydrus.core import HydrusData
 from hydrus.core import HydrusGlobals as HG
 from hydrus.core import HydrusSerialisable
@@ -6,178 +8,44 @@ from hydrus.client.importing import ClientImporting
 from hydrus.client.importing import ClientImportFileSeeds
 from hydrus.client.importing import ClientImportGallerySeeds
 from hydrus.client.importing import ClientImportOptions
+from hydrus.client.networking import ClientNetworking
+from hydrus.client.networking import ClientNetworkingBandwidth
 from hydrus.client.networking import ClientNetworkingContexts
+from hydrus.client.networking import ClientNetworkingDomain
 from hydrus.client.networking import ClientNetworkingJobs
 
-class SubscriptionQuery( HydrusSerialisable.SerialisableBase ):
+def GenerateSubQueryName() -> str:
     
-    SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY
-    SERIALISABLE_NAME = 'Subscription Query'
-    SERIALISABLE_VERSION = 3
+    return HydrusData.GenerateKey().hex()
     
-    def __init__( self, query = 'query text' ):
+class SubscriptionQueryLogContainer( HydrusSerialisable.SerialisableBaseNamed ):
+    
+    SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER
+    SERIALISABLE_NAME = 'Subscription Query Container'
+    SERIALISABLE_VERSION = 1
+    
+    def __init__( self, name ):
         
-        HydrusSerialisable.SerialisableBase.__init__( self )
+        HydrusSerialisable.SerialisableBaseNamed.__init__( self, name )
         
-        self._query = query
-        self._display_name = None
-        self._check_now = False
-        self._last_check_time = 0
-        self._next_check_time = 0
-        self._paused = False
-        self._status = ClientImporting.CHECKER_STATUS_OK
         self._gallery_seed_log = ClientImportGallerySeeds.GallerySeedLog()
         self._file_seed_cache = ClientImportFileSeeds.FileSeedCache()
-        self._tag_import_options = ClientImportOptions.TagImportOptions()
-        
-    
-    def _GetExampleNetworkContexts( self, subscription_name ):
-        
-        file_seed = self._file_seed_cache.GetNextFileSeed( CC.STATUS_UNKNOWN )
-        
-        subscription_key = self.GetNetworkJobSubscriptionKey( subscription_name )
-        
-        if file_seed is None:
-            
-            return [ ClientNetworkingContexts.NetworkContext( CC.NETWORK_CONTEXT_SUBSCRIPTION, subscription_key ), ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT ]
-            
-        
-        url = file_seed.file_seed_data
-        
-        try: # if the url is borked for some reason
-            
-            example_nj = ClientNetworkingJobs.NetworkJobSubscription( subscription_key, 'GET', url )
-            example_network_contexts = example_nj.GetNetworkContexts()
-            
-        except:
-            
-            return [ ClientNetworkingContexts.NetworkContext( CC.NETWORK_CONTEXT_SUBSCRIPTION, subscription_key ), ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT ]
-            
-        
-        return example_network_contexts
         
     
     def _GetSerialisableInfo( self ):
         
         serialisable_gallery_seed_log = self._gallery_seed_log.GetSerialisableTuple()
         serialisable_file_seed_cache = self._file_seed_cache.GetSerialisableTuple()
-        serialisable_tag_import_options = self._tag_import_options.GetSerialisableTuple()
         
-        return ( self._query, self._display_name, self._check_now, self._last_check_time, self._next_check_time, self._paused, self._status, serialisable_gallery_seed_log, serialisable_file_seed_cache, serialisable_tag_import_options )
+        return ( serialisable_gallery_seed_log, serialisable_file_seed_cache )
        
     
     def _InitialiseFromSerialisableInfo( self, serialisable_info ):
        
-        ( self._query, self._display_name, self._check_now, self._last_check_time, self._next_check_time, self._paused, self._status, serialisable_gallery_seed_log, serialisable_file_seed_cache, serialisable_tag_import_options ) = serialisable_info
+        ( serialisable_gallery_seed_log, serialisable_file_seed_cache ) = serialisable_info
         
         self._gallery_seed_log = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_gallery_seed_log )
         self._file_seed_cache = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_file_seed_cache )
-        self._tag_import_options = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_tag_import_options )
-        
-    
-    def _UpdateSerialisableInfo( self, version, old_serialisable_info ):
-        
-        if version == 1:
-            
-            ( query, check_now, last_check_time, next_check_time, paused, status, serialisable_file_seed_cache ) = old_serialisable_info
-            
-            gallery_seed_log = ClientImportGallerySeeds.GallerySeedLog()
-            
-            serialisable_gallery_seed_log = gallery_seed_log.GetSerialisableTuple()
-            
-            new_serialisable_info = ( query, check_now, last_check_time, next_check_time, paused, status, serialisable_gallery_seed_log, serialisable_file_seed_cache )
-            
-            return ( 2, new_serialisable_info )
-            
-        
-        if version == 2:
-            
-            ( query, check_now, last_check_time, next_check_time, paused, status, serialisable_gallery_seed_log, serialisable_file_seed_cache ) = old_serialisable_info
-            
-            display_name = None
-            tag_import_options = ClientImportOptions.TagImportOptions()
-            
-            serialisable_tag_import_options = tag_import_options.GetSerialisableTuple()
-            
-            new_serialisable_info = ( query, display_name, check_now, last_check_time, next_check_time, paused, status, serialisable_gallery_seed_log, serialisable_file_seed_cache, serialisable_tag_import_options )
-            
-            return ( 3, new_serialisable_info )
-            
-        
-    
-    def BandwidthOK( self, subscription_name ):
-        
-        example_network_contexts = self._GetExampleNetworkContexts( subscription_name )
-        
-        threshold = 90
-        
-        bandwidth_ok = HG.client_controller.network_engine.bandwidth_manager.CanDoWork( example_network_contexts, threshold = threshold )
-        
-        if HG.subscription_report_mode:
-            
-            HydrusData.ShowText( 'Query "' + self.GetHumanName() + '" bandwidth/domain test. Bandwidth ok: {}'.format( bandwidth_ok ) )
-            
-        
-        return bandwidth_ok
-        
-    
-    def CanCheckNow( self ):
-        
-        return not self._check_now
-        
-    
-    def CanRetryFailed( self ):
-        
-        return self._file_seed_cache.GetFileSeedCount( CC.STATUS_ERROR ) > 0
-        
-    
-    def CanRetryIgnored( self ):
-        
-        return self._file_seed_cache.GetFileSeedCount( CC.STATUS_VETOED ) > 0
-        
-    
-    def CheckNow( self ):
-        
-        self._check_now = True
-        self._paused = False
-        
-        self._next_check_time = 0
-        self._status = ClientImporting.CHECKER_STATUS_OK
-        
-    
-    def DomainOK( self ):
-        
-        file_seed = self._file_seed_cache.GetNextFileSeed( CC.STATUS_UNKNOWN )
-        
-        if file_seed is None:
-            
-            return True
-            
-        
-        url = file_seed.file_seed_data
-        
-        domain_ok = HG.client_controller.network_engine.domain_manager.DomainOK( url )
-        
-        if HG.subscription_report_mode:
-            
-            HydrusData.ShowText( 'Query "' + self.GetHumanName() + '" domain test. Domain ok: {}'.format( domain_ok ) )
-            
-        
-        return domain_ok
-        
-    
-    def GetBandwidthWaitingEstimate( self, subscription_name ):
-        
-        example_network_contexts = self._GetExampleNetworkContexts( subscription_name )
-        
-        ( estimate, bandwidth_network_context ) = HG.client_controller.network_engine.bandwidth_manager.GetWaitingEstimateAndContext( example_network_contexts )
-        
-        return estimate
-        
-    
-    def GetDisplayName( self ):
-        
-        return self._display_name
        
     
     def GetFileSeedCache( self ):
@@ -190,11 +58,147 @@ class SubscriptionQuery( HydrusSerialisable.SerialisableBase ):
         return self._gallery_seed_log
         
    
-    def GetHumanName( self ):
+    def SetFileSeedCache( self, file_seed_cache: ClientImportFileSeeds.FileSeedCache ):
+        
+        self._file_seed_cache = file_seed_cache
+        
+    
+    def SetGallerySeedLog( self, gallery_seed_log: ClientImportGallerySeeds.GallerySeedLog ):
+        
+        self._gallery_seed_log = gallery_seed_log
+        
+    
+HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER ] = SubscriptionQueryLogContainer
+
+LOG_CONTAINER_SYNCED = 0
+LOG_CONTAINER_UNSYNCED = 1
+LOG_CONTAINER_MISSING = 2
+
+class SubscriptionQueryHeader( HydrusSerialisable.SerialisableBase ):
+    
+    SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_HEADER
+    SERIALISABLE_NAME = 'Subscription Query Summary'
+    SERIALISABLE_VERSION = 1
+    
+    def __init__( self ):
+        
+        HydrusSerialisable.SerialisableBase.__init__( self )
+        
+        self._query_log_container_name = GenerateSubQueryName()
+        self._query_text = 'query'
+        self._display_name = None
+        self._check_now = False
+        self._last_check_time = 0
+        self._next_check_time = 0
+        self._paused = False
+        self._checker_status = ClientImporting.CHECKER_STATUS_OK
+        self._query_log_container_status = LOG_CONTAINER_UNSYNCED
+        self._file_seed_cache_status = ClientImportFileSeeds.FileSeedCacheStatus()
+        self._tag_import_options = ClientImportOptions.TagImportOptions()
+        self._raw_file_velocity = ( 0, 1 )
+        self._pretty_file_velocity = 'unknown'
+        self._example_file_seed = None
+        self._example_gallery_seed = None
+        
+        # a status cache, so we know number complete, unknown, ignored, etc...
+        # prob should have a new serialisable object for this mate
+        
+    
+    def _DomainOK( self, domain_manager: ClientNetworkingDomain.NetworkDomainManager, example_url: typing.Optional[ str ] ):
+        
+        if example_url is None:
+            
+            domain_ok = True
+            
+        else:
+            
+            domain_ok = domain_manager.DomainOK( example_url )
+            
+        
+        if HG.subscription_report_mode:
+            
+            HydrusData.ShowText( 'Query "{}" domain test. Domain ok: {}'.format( self._GetHumanName(), domain_ok ) )
+            
+        
+        return domain_ok
+        
+    
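+    # the header deliberately keeps only cheap summary state (status counts, velocity strings,
+    # example seeds) so a subscription can be listed and scheduled without loading the full
+    # log container back from the database
+    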
+    def _GenerateNetworkJobFactory( self, subscription_name: str ):
+        
+        subscription_key = self._GenerateNetworkJobSubscriptionKey( subscription_name )
+        
+        def network_job_factory( *args, **kwargs ):
+            
+            network_job = ClientNetworkingJobs.NetworkJobSubscription( subscription_key, *args, **kwargs )
+            
+            network_job.OverrideBandwidth( 30 )
+            
+            return network_job
+            
+        
+        return network_job_factory
+        
+    
+    def _GenerateNetworkJobSubscriptionKey( self, subscription_name: str ):
+        
+        return '{}: {}'.format( subscription_name, self._GetHumanName() )
+        
+    
+    def _GetExampleFileURL( self ):
+        
+        if self._example_file_seed is None or self._example_file_seed.file_seed_type == ClientImportFileSeeds.FILE_SEED_TYPE_HDD:
+            
+            example_url = None
+            
+        else:
+            
+            example_url = self._example_file_seed.file_seed_data
+            
+        
+        return example_url
+        
+    
+    def _GetExampleGalleryURL( self ):
+        
+        if self._example_gallery_seed is None:
+            
+            example_url = None
+            
+        else:
+            
+            example_url = self._example_gallery_seed.url
+            
+        
+        return example_url
+        
+    
+    def _GetExampleNetworkContexts( self, example_url: typing.Optional[ str ], subscription_name: str ):
+        
+        subscription_key = self._GenerateNetworkJobSubscriptionKey( subscription_name )
+        
+        if example_url is None:
+            
+            return [ ClientNetworkingContexts.NetworkContext( CC.NETWORK_CONTEXT_SUBSCRIPTION, subscription_key ), ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT ]
+            
+        
+        try: # if the url is borked for some reason
+            
+            example_nj = ClientNetworkingJobs.NetworkJobSubscription( subscription_key, 'GET', example_url )
+            example_network_contexts = example_nj.GetNetworkContexts()
+            
+        except:
+            
+            return [ ClientNetworkingContexts.NetworkContext( CC.NETWORK_CONTEXT_SUBSCRIPTION, subscription_key ), ClientNetworkingContexts.GLOBAL_NETWORK_CONTEXT ]
+            
+        
+        return example_network_contexts
+        
+    
+    def _GetHumanName( self ) -> str:
        
         if self._display_name is None:
             
-            return self._query
+            return self._query_text
             
         else:
@@ -202,14 +206,250 @@ class SubscriptionQuery( HydrusSerialisable.SerialisableBase ):
         
    
-    def GetLastChecked( self ):
+    def _GetSerialisableInfo( self ):
+        
+        serialisable_file_seed_cache_status = self._file_seed_cache_status.GetSerialisableTuple()
+        serialisable_tag_import_options = self._tag_import_options.GetSerialisableTuple()
+        
+        serialisable_example_file_seed = HydrusSerialisable.GetNoneableSerialisableTuple( self._example_file_seed )
+        serialisable_example_gallery_seed = HydrusSerialisable.GetNoneableSerialisableTuple( self._example_gallery_seed )
+        
+        return (
+            self._query_log_container_name,
+            self._query_text,
+            self._display_name,
+            self._check_now,
+            self._last_check_time,
+            self._next_check_time,
+            self._paused,
+            self._checker_status,
+            self._query_log_container_status,
+            serialisable_file_seed_cache_status,
+            serialisable_tag_import_options,
+            self._raw_file_velocity,
+            self._pretty_file_velocity,
+            serialisable_example_file_seed,
+            serialisable_example_gallery_seed
+        )
+        
+    
+    def _InitialiseFromSerialisableInfo( self, serialisable_info ):
+        
+        (
+            self._query_log_container_name,
+            self._query_text,
+            self._display_name,
+            self._check_now,
+            self._last_check_time,
+            self._next_check_time,
+            self._paused,
+            self._checker_status,
+            self._query_log_container_status,
+            serialisable_file_seed_cache_status,
+            serialisable_tag_import_options,
+            self._raw_file_velocity,
+            self._pretty_file_velocity,
+            serialisable_example_file_seed,
+            serialisable_example_gallery_seed
+        ) = serialisable_info
+        
+        self._file_seed_cache_status = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_file_seed_cache_status )
+        self._tag_import_options = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_tag_import_options )
+        
+        self._example_file_seed = HydrusSerialisable.CreateFromNoneableSerialisableTuple( serialisable_example_file_seed )
+        self._example_gallery_seed = HydrusSerialisable.CreateFromNoneableSerialisableTuple( serialisable_example_gallery_seed )
+        
+    
+    def CanCheckNow( self ):
+        
+        return not self._check_now
+        
+    
+    def CanRetryFailed( self ):
+        
+        return self._file_seed_cache_status.GetFileSeedCount( CC.STATUS_ERROR ) > 0
+        
+    
+    def CanRetryIgnored( self ):
+        
+        return self._file_seed_cache_status.GetFileSeedCount( CC.STATUS_VETOED ) > 0
+        
+    
+    def CheckNow( self ):
+        
+        self._check_now = True
+        self._paused = False
+        
+        self._next_check_time = 0
+        self._checker_status = ClientImporting.CHECKER_STATUS_OK
+        
+    
+    def FileBandwidthOK( self, bandwidth_manager: ClientNetworkingBandwidth.NetworkBandwidthManager, subscription_name: str ):
+        
+        example_url = self._GetExampleFileURL()
+        
+        example_network_contexts = self._GetExampleNetworkContexts( example_url, subscription_name )
+        
+        threshold = 90
+        
+        bandwidth_ok = bandwidth_manager.CanDoWork( example_network_contexts, threshold = threshold )
+        
+        if HG.subscription_report_mode:
+            
+            HydrusData.ShowText( 'Query "' + self._GetHumanName() + '" bandwidth/domain test. Bandwidth ok: {}'.format( bandwidth_ok ) )
+            
+        
+        return bandwidth_ok
+        
+    
+    def FileDomainOK( self, domain_manager: ClientNetworkingDomain.NetworkDomainManager ):
+        
+        example_url = self._GetExampleFileURL()
+        
+        return self._DomainOK( domain_manager, example_url )
+        
+    
+    def FileLoginOK( self, network_engine: ClientNetworking.NetworkEngine, subscription_name: str ) -> typing.Tuple[ bool, str ]:
+        
+        reason = 'login looks good!'
+        
+        if self._example_file_seed is None:
+            
+            result = True
+            
+        else:
+            
+            nj = self._example_file_seed.GetExampleNetworkJob( self._GenerateNetworkJobFactory( subscription_name ) )
+            
+            nj.engine = network_engine
+            
+            if nj.NeedsLogin():
+                
+                try:
+                    
+                    nj.CheckCanLogin()
+                    
+                    result = True
+                    
+                except Exception as e:
+                    
+                    result = False
+                    reason = str( e )
+                    
+                
+            else:
+                
+                result = True
+                
+            
+        
+        if HG.subscription_report_mode:
+            
+            HydrusData.ShowText( 'Query "{}" pre-work file login test. Login ok: {}. {}'.format( self._GetHumanName(), str( result ), reason ) )
+            
+        
+        return ( result, reason )
+        
+    
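+    # the gallery-side checks below mirror the file-side ones above, but test against the
+    # example gallery seed; like FileLoginOK they hand back ( ok, reason ) so the caller can
+    # pause the subscription with a human-readable explanation
+    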
+    def GalleryDomainOK( self, domain_manager: ClientNetworkingDomain.NetworkDomainManager ):
+        
+        example_url = self._GetExampleGalleryURL()
+        
+        return self._DomainOK( domain_manager, example_url )
+        
+    
+    def GalleryLoginOK( self, network_engine: ClientNetworking.NetworkEngine, subscription_name: str ) -> typing.Tuple[ bool, str ]:
+        
+        reason = 'login looks good!'
+        
+        if self._example_gallery_seed is None:
+            
+            result = True
+            
+        else:
+            
+            nj = self._example_gallery_seed.GetExampleNetworkJob( self._GenerateNetworkJobFactory( subscription_name ) )
+            
+            nj.engine = network_engine
+            
+            if nj.NeedsLogin():
+                
+                try:
+                    
+                    nj.CheckCanLogin()
+                    
+                    result = True
+                    
+                except Exception as e:
+                    
+                    result = False
+                    reason = str( e )
+                    
+                
+            else:
+                
+                result = True
+                
+            
+        
+        if HG.subscription_report_mode:
+            
+            HydrusData.ShowText( 'Query "{}" pre-work sync login test. Login ok: {}. {}'.format( self._GetHumanName(), str( result ), reason ) )
+            
+        
+        return ( result, reason )
+        
+    
+    def GenerateNetworkJobFactory( self, subscription_name: str ):
+        
+        return self._GenerateNetworkJobFactory( subscription_name )
+        
+    
+    def GetBandwidthWaitingEstimate( self, bandwidth_manager: ClientNetworkingBandwidth.NetworkBandwidthManager, subscription_name: str ):
+        
+        example_url = self._GetExampleFileURL()
+        
+        example_network_contexts = self._GetExampleNetworkContexts( example_url, subscription_name )
+        
+        ( estimate, bandwidth_network_context ) = bandwidth_manager.GetWaitingEstimateAndContext( example_network_contexts )
+        
+        return estimate
+        
+    
+    def GetCheckerStatus( self ):
+        
+        return self._checker_status
+        
+    
+    def GetDisplayName( self ):
+        
+        return self._display_name
+        
+    
+    def GetHumanName( self ):
+        
+        return self._GetHumanName()
+        
+    
+    def GetFileSeedCacheStatus( self ):
+        
+        return self._file_seed_cache_status
+        
+    
+    def GetFileVelocityInfo( self ):
+        
+        return ( self._raw_file_velocity, self._pretty_file_velocity )
+        
+    
+    def GetLastCheckTime( self ):
        
         return self._last_check_time
         
    
     def GetLatestAddedTime( self ):
        
-        return self._file_seed_cache.GetLatestAddedTime()
+        return self._file_seed_cache_status.GetLatestAddedTime()
        
     
     def GetNextCheckStatusString( self ):
@@ -218,7 +458,7 @@
             return 'checking on dialog ok'
             
-        elif self._status == ClientImporting.CHECKER_STATUS_DEAD:
+        elif self._checker_status == ClientImporting.CHECKER_STATUS_DEAD:
             
             return 'dead, so not checking'
@@ -242,20 +482,32 @@
         
    
-    def GetNextWorkTime( self, subscription_name ):
+    def GetNextCheckTime( self ):
        
-        if self.IsPaused():
+        return self._next_check_time
+        
+    
+    def GetNextWorkTime( self, bandwidth_manager: ClientNetworkingBandwidth.NetworkBandwidthManager, subscription_name: str ):
+        
+        if not self.IsExpectingToWorkInFuture():
             
             return None
             
         
         work_times = set()
         
+        if self._query_log_container_status == LOG_CONTAINER_UNSYNCED:
+            
+            work_times.add( 0 )
+            
+        
+        work_times.add( self._next_check_time )
+        
         if self.HasFileWorkToDo():
             
             try:
                 
-                file_bandwidth_estimate = self.GetBandwidthWaitingEstimate( subscription_name )
+                file_bandwidth_estimate = self.GetBandwidthWaitingEstimate( bandwidth_manager, subscription_name )
                
             except:
@@ -276,11 +528,6 @@
            
-        if not self.IsDead():
-            
-            work_times.add( self._next_check_time )
-            
-        
         if len( work_times ) == 0:
             
             return None
@@ -289,19 +536,19 @@
         return min( work_times )
         
    
-    def GetNumURLsAndFailed( self ):
+    def GetQueryLogContainerName( self ):
        
-        return ( self._file_seed_cache.GetFileSeedCount( CC.STATUS_UNKNOWN ), len( self._file_seed_cache ), self._file_seed_cache.GetFileSeedCount( CC.STATUS_ERROR ) )
+        return self._query_log_container_name
        
     
-    def GetNetworkJobSubscriptionKey( self, subscription_name ):
+    def GetQueryLogContainerStatus( self ):
        
-        return subscription_name + ': ' + self.GetHumanName()
+        return self._query_log_container_status
        
     
     def GetQueryText( self ):
        
-        return self._query
+        return self._query_text
        
     
     def GetTagImportOptions( self ):
@@ -311,19 +558,34 @@
     
     def HasFileWorkToDo( self ):
        
-        file_seed = self._file_seed_cache.GetNextFileSeed( CC.STATUS_UNKNOWN )
+        result = self._file_seed_cache_status.HasWorkToDo()
        
         if HG.subscription_report_mode:
             
'" HasFileWorkToDo test. Next import is ' + repr( file_seed ) + '.' ) + HydrusData.ShowText( 'Query "{}" HasFileWorkToDo test. Result is {}.'.format( self._query_text, result ) ) - return file_seed is not None + return result + + + def IsCheckingNow( self ): + + return self._check_now def IsDead( self ): - return self._status == ClientImporting.CHECKER_STATUS_DEAD + return self._checker_status == ClientImporting.CHECKER_STATUS_DEAD + + + def IsExpectingToWorkInFuture( self ): + + if self.IsPaused() or self.IsDead() or not self.IsLogContainerOK(): + + return False + + + return True def IsInitialSync( self ): @@ -331,6 +593,11 @@ class SubscriptionQuery( HydrusSerialisable.SerialisableBase ): return self._last_check_time == 0 + def IsLogContainerOK( self ): + + return self._query_log_container_status != LOG_CONTAINER_MISSING + + def IsPaused( self ): return self._paused @@ -340,10 +607,10 @@ class SubscriptionQuery( HydrusSerialisable.SerialisableBase ): if HG.subscription_report_mode: - HydrusData.ShowText( 'Query "' + self._query + '" IsSyncDue test. Paused/dead status is {}/{}, check time due is {}, and check_now is {}.'.format( self._paused, self.IsDead(), HydrusData.TimeHasPassed( self._next_check_time ), self._check_now ) ) + HydrusData.ShowText( 'Query "' + self._query_text + '" IsSyncDue test. Paused/dead/container status is {}/{}/{}, check time due is {}, and check_now is {}.'.format( self._paused, self.IsDead(), self.IsLogContainerOK(), HydrusData.TimeHasPassed( self._next_check_time ), self._check_now ) ) - if self._paused or self.IsDead(): + if not self.IsExpectingToWorkInFuture(): return False @@ -356,7 +623,7 @@ class SubscriptionQuery( HydrusSerialisable.SerialisableBase ): self._paused = not self._paused - def RegisterSyncComplete( self, checker_options: ClientImportOptions.CheckerOptions ): + def RegisterSyncComplete( self, checker_options: ClientImportOptions.CheckerOptions, query_log_container: SubscriptionQueryLogContainer ): self._last_check_time = HydrusData.GetNow() @@ -366,77 +633,109 @@ class SubscriptionQuery( HydrusSerialisable.SerialisableBase ): compact_before_this_time = self._last_check_time - death_period - if self._gallery_seed_log.CanCompact( compact_before_this_time ): + gallery_seed_log = query_log_container.GetGallerySeedLog() + + if gallery_seed_log.CanCompact( compact_before_this_time ): - self._gallery_seed_log.Compact( compact_before_this_time ) + gallery_seed_log.Compact( compact_before_this_time ) - if self._file_seed_cache.CanCompact( compact_before_this_time ): + file_seed_cache = query_log_container.GetFileSeedCache() + + if file_seed_cache.CanCompact( compact_before_this_time ): - self._file_seed_cache.Compact( compact_before_this_time ) + file_seed_cache.Compact( compact_before_this_time ) + self.SyncToQueryLogContainer( checker_options, query_log_container ) + - def Reset( self ): + def Reset( self, query_log_container: SubscriptionQueryLogContainer ): self._last_check_time = 0 self._next_check_time = 0 - self._status = ClientImporting.CHECKER_STATUS_OK + self._checker_status = ClientImporting.CHECKER_STATUS_OK self._paused = False - self._file_seed_cache = ClientImportFileSeeds.FileSeedCache() + file_seed_cache = ClientImportFileSeeds.FileSeedCache() + + query_log_container.SetFileSeedCache( file_seed_cache ) + + self.UpdateFileStatus( query_log_container ) - def RetryFailures( self ): - - self._file_seed_cache.RetryFailures() - - - def RetryIgnored( self ): - - self._file_seed_cache.RetryIgnored() - - - def SetCheckNow( self, check_now ): + 
def SetCheckNow( self, check_now: bool ): self._check_now = check_now + def SetCheckerStatus( self, checker_status: int ): + + self._checker_status = checker_status + + def SetDisplayName( self, display_name ): self._display_name = display_name - def SetPaused( self, paused ): + def SetLastCheckTime( self, last_check_time: int ): + + self._last_check_time = last_check_time + + + def SetNextCheckTime( self, next_check_time: int ): + + self._next_check_time = next_check_time + + + def SetPaused( self, paused: bool ): self._paused = paused - def SetQueryAndSeeds( self, query, file_seed_cache, gallery_seed_log ): + def SetQueryLogContainerStatus( self, log_container_status: int ): - self._query = query - self._file_seed_cache = file_seed_cache - self._gallery_seed_log = gallery_seed_log + self._query_log_container_status = log_container_status + + if self._query_log_container_status == LOG_CONTAINER_UNSYNCED: + + self._raw_file_velocity = ( 0, 1 ) + self._pretty_file_velocity = 'unknown' + - def SetTagImportOptions( self, tag_import_options ): + def SetQueryText( self, query_text: str ): + + self._query_text = query_text + + + def SetTagImportOptions( self, tag_import_options: ClientImportOptions.TagImportOptions ): self._tag_import_options = tag_import_options - def UpdateNextCheckTime( self, checker_options: ClientImportOptions.CheckerOptions ): + def SyncToQueryLogContainer( self, checker_options: ClientImportOptions.CheckerOptions, query_log_container: SubscriptionQueryLogContainer ): + + gallery_seed_log = query_log_container.GetGallerySeedLog() + + self._example_gallery_seed = gallery_seed_log.GetExampleGallerySeed() + + self.UpdateFileStatus( query_log_container ) + + file_seed_cache = query_log_container.GetFileSeedCache() if self._check_now: self._next_check_time = 0 - self._status = ClientImporting.CHECKER_STATUS_OK + self._checker_status = ClientImporting.CHECKER_STATUS_OK else: - if checker_options.IsDead( self._file_seed_cache, self._last_check_time ): + if checker_options.IsDead( file_seed_cache, self._last_check_time ): - self._status = ClientImporting.CHECKER_STATUS_DEAD + self._checker_status = ClientImporting.CHECKER_STATUS_DEAD if not self.HasFileWorkToDo(): @@ -446,13 +745,38 @@ class SubscriptionQuery( HydrusSerialisable.SerialisableBase ): last_next_check_time = self._next_check_time - self._next_check_time = checker_options.GetNextCheckTime( self._file_seed_cache, self._last_check_time, last_next_check_time ) + self._next_check_time = checker_options.GetNextCheckTime( file_seed_cache, self._last_check_time, last_next_check_time ) - - def ToTuple( self ): + self._raw_file_velocity = checker_options.GetRawCurrentVelocity( file_seed_cache, self._last_check_time ) + self._pretty_file_velocity = checker_options.GetPrettyCurrentVelocity( file_seed_cache, self._last_check_time, no_prefix = True ) - return ( self._query, self._check_now, self._last_check_time, self._next_check_time, self._paused, self._status ) + self._query_log_container_status = LOG_CONTAINER_SYNCED -HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY ] = SubscriptionQuery + def UpdateFileStatus( self, query_log_container: SubscriptionQueryLogContainer ): + + file_seed_cache = query_log_container.GetFileSeedCache() + + self._file_seed_cache_status = file_seed_cache.GetStatus() + self._example_file_seed = file_seed_cache.GetExampleFileSeed() + + + def WantsToResyncWithLogContainer( self ): + + return self._query_log_container_status == 
LOG_CONTAINER_UNSYNCED + + +HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_HEADER ] = SubscriptionQueryHeader + +def GenerateQueryHeadersStatus( query_headers: typing.Iterable[ SubscriptionQueryHeader ] ): + + fscs = ClientImportFileSeeds.FileSeedCacheStatus() + + for query_header in query_headers: + + fscs.Merge( query_header.GetFileSeedCacheStatus() ) + + + return fscs + diff --git a/hydrus/client/importing/ClientImportSubscriptions.py b/hydrus/client/importing/ClientImportSubscriptions.py index ec880eb8..17199b7b 100644 --- a/hydrus/client/importing/ClientImportSubscriptions.py +++ b/hydrus/client/importing/ClientImportSubscriptions.py @@ -13,18 +13,18 @@ from hydrus.core import HydrusSerialisable from hydrus.core import HydrusThreading from hydrus.client import ClientThreading from hydrus.client import ClientConstants as CC -from hydrus.client import ClientDownloading from hydrus.client.importing import ClientImporting from hydrus.client.importing import ClientImportGallerySeeds from hydrus.client.importing import ClientImportOptions from hydrus.client.importing import ClientImportSubscriptionQuery -from hydrus.client.networking import ClientNetworkingJobs +from hydrus.client.networking import ClientNetworkingBandwidth +from hydrus.client.networking import ClientNetworkingDomain class Subscription( HydrusSerialisable.SerialisableBaseNamed ): SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION SERIALISABLE_NAME = 'Subscription' - SERIALISABLE_VERSION = 10 + SERIALISABLE_VERSION = 1 def __init__( self, name, gug_key_and_name = None ): @@ -37,7 +37,7 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): self._gug_key_and_name = gug_key_and_name - self._queries: typing.List[ ClientImportSubscriptionQuery.SubscriptionQuery ] = [] + self._query_headers: typing.List[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ] = [] new_options = HG.client_controller.new_options @@ -67,6 +67,9 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): self._publish_label_override = None self._merge_query_publish_events = True + self._have_made_an_initial_sync_bandwidth_notification = False + self._file_error_count = 0 + def _CanDoWorkNow( self ): @@ -90,13 +93,22 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): return p1 and p2 and p3 + def _DealWithMissingQueryLogContainerError( self, query_header: ClientImportSubscriptionQuery.SubscriptionQueryHeader ): + + query_header.SetQueryLogContainerStatus( ClientImportSubscriptionQuery.LOG_CONTAINER_MISSING ) + + self._paused = True + + HydrusData.ShowText( 'The subscription "{}"\'s "{}" query was missing database data! This could be a serious error! Please go to _manage subscriptions_ to reset the data, and you may want to contact hydrus dev. 
The sub has paused!'.format( self._name, query_header.GetHumanName() ) ) + + def _DelayWork( self, time_delta, reason ): self._no_work_until = HydrusData.GetNow() + time_delta self._no_work_until_reason = reason - def _GetPublishingLabel( self, query ): + def _GetPublishingLabel( self, query_header: ClientImportSubscriptionQuery.SubscriptionQueryHeader ): if self._publish_label_override is None: @@ -109,19 +121,19 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): if not self._merge_query_publish_events: - label += ': ' + query.GetHumanName() + label += ': ' + query_header.GetHumanName() return label - def _GetQueriesForProcessing( self ) -> typing.List[ ClientImportSubscriptionQuery.SubscriptionQuery ]: + def _GetQueryHeadersForProcessing( self ) -> typing.List[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ]: - queries = list( self._queries ) + query_headers = list( self._query_headers ) if HG.client_controller.new_options.GetBoolean( 'process_subs_in_random_order' ): - random.shuffle( queries ) + random.shuffle( query_headers ) else: @@ -130,10 +142,10 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): return q.GetHumanName() - queries.sort( key = key ) + query_headers.sort( key = key ) - return queries + return query_headers def _GetSerialisableInfo( self ): @@ -141,320 +153,84 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): ( gug_key, gug_name ) = self._gug_key_and_name serialisable_gug_key_and_name = ( gug_key.hex(), gug_name ) - serialisable_queries = [ query.GetSerialisableTuple() for query in self._queries ] + serialisable_query_headers = [ query_header.GetSerialisableTuple() for query_header in self._query_headers ] serialisable_checker_options = self._checker_options.GetSerialisableTuple() serialisable_file_import_options = self._file_import_options.GetSerialisableTuple() serialisable_tag_import_options = self._tag_import_options.GetSerialisableTuple() - return ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, self._initial_file_limit, self._periodic_file_limit, self._paused, serialisable_file_import_options, serialisable_tag_import_options, self._no_work_until, self._no_work_until_reason, self._show_a_popup_while_working, self._publish_files_to_popup_button, self._publish_files_to_page, self._publish_label_override, self._merge_query_publish_events ) + return ( + serialisable_gug_key_and_name, + serialisable_query_headers, + serialisable_checker_options, + self._initial_file_limit, + self._periodic_file_limit, + self._paused, + serialisable_file_import_options, + serialisable_tag_import_options, + self._no_work_until, + self._no_work_until_reason, + self._show_a_popup_while_working, + self._publish_files_to_popup_button, + self._publish_files_to_page, + self._publish_label_override, + self._merge_query_publish_events + ) def _InitialiseFromSerialisableInfo( self, serialisable_info ): - ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, self._initial_file_limit, self._periodic_file_limit, self._paused, serialisable_file_import_options, serialisable_tag_import_options, self._no_work_until, self._no_work_until_reason, self._show_a_popup_while_working, self._publish_files_to_popup_button, self._publish_files_to_page, self._publish_label_override, self._merge_query_publish_events ) = serialisable_info + ( + serialisable_gug_key_and_name, + serialisable_query_headers, + serialisable_checker_options, + self._initial_file_limit, + self._periodic_file_limit, + 
self._paused, + serialisable_file_import_options, + serialisable_tag_import_options, + self._no_work_until, + self._no_work_until_reason, + self._show_a_popup_while_working, + self._publish_files_to_popup_button, + self._publish_files_to_page, + self._publish_label_override, + self._merge_query_publish_events + ) = serialisable_info ( serialisable_gug_key, gug_name ) = serialisable_gug_key_and_name self._gug_key_and_name = ( bytes.fromhex( serialisable_gug_key ), gug_name ) - self._queries = [ HydrusSerialisable.CreateFromSerialisableTuple( serialisable_query ) for serialisable_query in serialisable_queries ] + self._query_headers = [ HydrusSerialisable.CreateFromSerialisableTuple( serialisable_query ) for serialisable_query in serialisable_query_headers ] self._checker_options = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_checker_options ) self._file_import_options = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_file_import_options ) self._tag_import_options = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_tag_import_options ) - def _GenerateNetworkJobFactory( self, query ): - - subscription_key = query.GetNetworkJobSubscriptionKey( self._name ) - - def network_job_factory( *args, **kwargs ): - - network_job = ClientNetworkingJobs.NetworkJobSubscription( subscription_key, *args, **kwargs ) - - network_job.OverrideBandwidth( 30 ) - - return network_job - - - return network_job_factory - - def _NoDelays( self ): return HydrusData.TimeHasPassed( self._no_work_until ) - def _QueryFileLoginOK( self, query ): - - file_seed_cache = query.GetFileSeedCache() - - file_seed = file_seed_cache.GetNextFileSeed( CC.STATUS_UNKNOWN ) - - if file_seed is None: - - result = True - - else: - - nj = file_seed.GetExampleNetworkJob( self._GenerateNetworkJobFactory( query ) ) - - nj.engine = HG.client_controller.network_engine - - if nj.NeedsLogin(): - - try: - - nj.CheckCanLogin() - - result = True - - except Exception as e: - - result = False - - if not self._paused: - - login_fail_reason = str( e ) - - message = 'Query "' + query.GetHumanName() + '" for subscription "' + self._name + '" seemed to have an invalid login for one of its file imports. The reason was:' - message += os.linesep * 2 - message += login_fail_reason - message += os.linesep * 2 - message += 'The subscription has paused. Please see if you can fix the problem and then unpause. Hydrus dev would like feedback on this process.' - - HydrusData.ShowText( message ) - - self._DelayWork( 300, login_fail_reason ) - - self._paused = True - - - - else: - - result = True - - - - if HG.subscription_report_mode: - - HydrusData.ShowText( 'Query "' + query.GetHumanName() + '" pre-work file login test. Login ok: ' + str( result ) + '.' ) - - - return result - - - def _QuerySyncLoginOK( self, query ): - - gallery_seed_log = query.GetGallerySeedLog() - - gallery_seed = gallery_seed_log.GetNextGallerySeed( CC.STATUS_UNKNOWN ) - - if gallery_seed is None: - - result = True - - else: - - nj = gallery_seed.GetExampleNetworkJob( self._GenerateNetworkJobFactory( query ) ) - - nj.engine = HG.client_controller.network_engine - - if nj.NeedsLogin(): - - try: - - nj.CheckCanLogin() - - result = True - - except Exception as e: - - result = False - - if not self._paused: - - login_fail_reason = str( e ) - - message = 'Query "' + query.GetHumanName() + '" for subscription "' + self._name + '" seemed to have an invalid login. 
The reason was:' - message += os.linesep * 2 - message += login_fail_reason - message += os.linesep * 2 - message += 'The subscription has paused. Please see if you can fix the problem and then unpause. Hydrus dev would like feedback on this process.' - - HydrusData.ShowText( message ) - - self._DelayWork( 300, login_fail_reason ) - - self._paused = True - - - - else: - - result = True - - - - if HG.subscription_report_mode: - - HydrusData.ShowText( 'Query "' + query.GetHumanName() + '" pre-work sync login test. Login ok: ' + str( result ) + '.' ) - - - return result - - def _ShowHitPeriodicFileLimitMessage( self, query_text ): - message = 'The query "' + query_text + '" for subscription "' + self._name + '" hit its periodic file limit without seeing any already-seen files.' + message = 'The query "{}" for subscription "{}" hit its periodic file limit without seeing any already-seen files.'.format( query_text, self._name ) HydrusData.ShowText( message ) - def _UpdateSerialisableInfo( self, version, old_serialisable_info ): + def _WorkOnQueriesFiles( self, job_key ): - if version == 1: - - ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, query, period, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, last_checked, last_error, serialisable_file_seed_cache ) = old_serialisable_info - - check_now = False - - new_serialisable_info = ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, query, period, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, last_checked, check_now, last_error, serialisable_file_seed_cache ) - - return ( 2, new_serialisable_info ) - + self._file_error_count = 0 - if version == 2: - - ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, query, period, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, last_checked, check_now, last_error, serialisable_file_seed_cache ) = old_serialisable_info - - no_work_until = 0 - no_work_until_reason = '' - - new_serialisable_info = ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, query, period, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, last_checked, check_now, last_error, no_work_until, no_work_until_reason, serialisable_file_seed_cache ) - - return ( 3, new_serialisable_info ) - + query_headers = self._GetQueryHeadersForProcessing() - if version == 3: - - ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, query, period, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, last_checked, check_now, last_error, no_work_until, no_work_until_reason, serialisable_file_seed_cache ) = old_serialisable_info - - checker_options = ClientImportOptions.CheckerOptions( 5, period // 5, period * 10, ( 1, period * 10 ) ) - - file_seed_cache = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_file_seed_cache ) - - query = ClientImportSubscriptionQuery.SubscriptionQuery( query ) - - query._file_seed_cache = file_seed_cache - query._last_check_time = 
last_checked - - query.UpdateNextCheckTime( checker_options ) - - queries = [ query ] - - serialisable_queries = [ query.GetSerialisableTuple() for query in queries ] - serialisable_checker_options = checker_options.GetSerialisableTuple() - - new_serialisable_info = ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason ) - - return ( 4, new_serialisable_info ) - + query_headers = [ query_header for query_header in query_headers if query_header.HasFileWorkToDo() ] - if version == 4: - - ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, get_tags_if_url_recognised_and_file_redundant, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason ) = old_serialisable_info - - new_serialisable_info = ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason ) - - return ( 5, new_serialisable_info ) - + num_queries = len( query_headers ) - if version == 5: + for ( i, query_header ) in enumerate( query_headers ): - ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason ) = old_serialisable_info - - publish_files_to_popup_button = True - publish_files_to_page = False - merge_query_publish_events = True - - new_serialisable_info = ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events ) - - return ( 6, new_serialisable_info ) - - - if version == 6: - - ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events ) = old_serialisable_info - - if initial_file_limit is None or initial_file_limit > 1000: - - initial_file_limit = 1000 - - - if periodic_file_limit is None or periodic_file_limit > 1000: - - periodic_file_limit = 1000 - - - new_serialisable_info = ( serialisable_gallery_identifier, serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events ) - - return ( 7, new_serialisable_info ) - - - if version == 7: - - ( serialisable_gallery_identifier, 
serialisable_gallery_stream_identifiers, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events ) = old_serialisable_info - - gallery_identifier = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_gallery_identifier ) - - ( gug_key, gug_name ) = ClientDownloading.ConvertGalleryIdentifierToGUGKeyAndName( gallery_identifier ) - - serialisable_gug_key_and_name = ( gug_key.hex(), gug_name ) - - new_serialisable_info = ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events ) - - return ( 8, new_serialisable_info ) - - - if version == 8: - - ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events ) = old_serialisable_info - - show_a_popup_while_working = True - - new_serialisable_info = ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, show_a_popup_while_working, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events ) - - return ( 9, new_serialisable_info ) - - - if version == 9: - - ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, show_a_popup_while_working, publish_files_to_popup_button, publish_files_to_page, merge_query_publish_events ) = old_serialisable_info - - publish_label_override = None - - new_serialisable_info = ( serialisable_gug_key_and_name, serialisable_queries, serialisable_checker_options, initial_file_limit, periodic_file_limit, paused, serialisable_file_import_options, serialisable_tag_import_options, no_work_until, no_work_until_reason, show_a_popup_while_working, publish_files_to_popup_button, publish_files_to_page, publish_label_override, merge_query_publish_events ) - - return ( 10, new_serialisable_info ) - - - - def _WorkOnFiles( self, job_key ): - - error_count = 0 - - queries = self._GetQueriesForProcessing() - - queries = [ query for query in queries if query.HasFileWorkToDo() ] - - num_queries = len( queries ) - - for ( i, query ) in enumerate( queries ): - - this_query_has_done_work = False - - query_name = query.GetHumanName() - file_seed_cache = query.GetFileSeedCache() + query_name = query_header.GetHumanName() text_1 = 'downloading files' query_summary_name = self._name @@ -465,201 +241,32 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): query_summary_name += ': ' + query_name - if num_queries > 1: - - text_1 += ' (' + HydrusData.ConvertValueRangeToPrettyString( i + 1, num_queries ) + ')' - + text_1 += ' (' + HydrusData.ConvertValueRangeToPrettyString( i + 1, num_queries ) + ')' 
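
From here on, each unit of query work follows the same lifecycle: read the heavyweight log container out of the database by name, work on it, and always write it back, with a missing container pausing the subscription. A rough sketch of that round trip, where db_read and db_write are hypothetical stand-ins for HG.client_controller.Read and WriteSynchronous, and KeyError stands in for HydrusExceptions.DBException:

LOG_CONTAINER_MISSING = 2 # as in the patch's module-level constants

def work_on_query( query_header, db_read, db_write, do_work ):
    
    try:
        
        query_log_container = db_read( query_header.GetQueryLogContainerName() )
        
    except KeyError:
        
        # missing database data is serious: flag the header and stop this run
        query_header.SetQueryLogContainerStatus( LOG_CONTAINER_MISSING )
        
        raise
        
    
    try:
        
        do_work( query_header, query_log_container )
        
    finally:
        
        # the container is saved back even after a cancelled or partial run
        db_write( query_log_container )
        
    
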
job_key.SetVariable( 'popup_text_1', text_1 ) - presentation_hashes = [] - presentation_hashes_fast = set() - - starting_num_urls = file_seed_cache.GetFileSeedCount() - starting_num_unknown = file_seed_cache.GetFileSeedCount( CC.STATUS_UNKNOWN ) - starting_num_done = starting_num_urls - starting_num_unknown + try: + + query_log_container = HG.client_controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER, query_header.GetQueryLogContainerName() ) + + except HydrusExceptions.DBException: + + self._DealWithMissingQueryLogContainerError( query_header ) + + break + try: - while True: - - file_seed = file_seed_cache.GetNextFileSeed( CC.STATUS_UNKNOWN ) - - if file_seed is None: - - if HG.subscription_report_mode: - - HydrusData.ShowText( 'Query "' + query_name + '" can do no more file work due to running out of unknown urls.' ) - - - break - - - if job_key.IsCancelled(): - - self._DelayWork( 300, 'recently cancelled' ) - - break - - - p1 = not self._CanDoWorkNow() - p3 = not query.DomainOK() - p4 = not query.BandwidthOK( self._name ) - p5 = not self._QueryFileLoginOK( query ) - - if p1 or p4 or p5: - - if p3 and this_query_has_done_work: - - job_key.SetVariable( 'popup_text_2', 'domain had errors, will try again later' ) - - self._DelayWork( 3600, 'domain errors, will try again later' ) - - time.sleep( 5 ) - - - if p4 and this_query_has_done_work: - - job_key.SetVariable( 'popup_text_2', 'no more bandwidth to download files, will do some more later' ) - - time.sleep( 5 ) - - - break - - - try: - - num_urls = file_seed_cache.GetFileSeedCount() - num_unknown = file_seed_cache.GetFileSeedCount( CC.STATUS_UNKNOWN ) - num_done = num_urls - num_unknown - - # 4001/4003 is not as useful as 1/3 - - human_num_urls = num_urls - starting_num_done - human_num_done = num_done - starting_num_done - - x_out_of_y = 'file ' + HydrusData.ConvertValueRangeToPrettyString( human_num_done + 1, human_num_urls ) + ': ' - - job_key.SetVariable( 'popup_gauge_2', ( human_num_done, human_num_urls ) ) - - def status_hook( text ): - - if len( text ) > 0: - - text = text.splitlines()[0] - - - job_key.SetVariable( 'popup_text_2', x_out_of_y + text ) - - - file_seed.WorkOnURL( file_seed_cache, status_hook, self._GenerateNetworkJobFactory( query ), ClientImporting.GenerateMultiplePopupNetworkJobPresentationContextFactory( job_key ), self._file_import_options, self._tag_import_options ) - - query_tag_import_options = query.GetTagImportOptions() - - if query_tag_import_options.HasAdditionalTags() and file_seed.status in CC.SUCCESSFUL_IMPORT_STATES: - - if file_seed.HasHash(): - - hash = file_seed.GetHash() - - media_result = HG.client_controller.Read( 'media_result', hash ) - - downloaded_tags = [] - - service_keys_to_content_updates = query_tag_import_options.GetServiceKeysToContentUpdates( file_seed.status, media_result, downloaded_tags ) # additional tags - - if len( service_keys_to_content_updates ) > 0: - - HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates ) - - - - - if file_seed.ShouldPresent( self._file_import_options ): - - hash = file_seed.GetHash() - - if hash not in presentation_hashes_fast: - - presentation_hashes.append( hash ) - - presentation_hashes_fast.add( hash ) - - - - except HydrusExceptions.CancelledException as e: - - self._DelayWork( 300, str( e ) ) - - break - - except HydrusExceptions.VetoException as e: - - status = CC.STATUS_VETOED - - note = str( e ) - - file_seed.SetStatus( status, note = note ) - - except 
HydrusExceptions.NotFoundException: - - status = CC.STATUS_VETOED - - note = '404' - - file_seed.SetStatus( status, note = note ) - - except Exception as e: - - status = CC.STATUS_ERROR - - job_key.SetVariable( 'popup_text_2', x_out_of_y + 'file failed' ) - - file_seed.SetStatus( status, exception = e ) - - if isinstance( e, HydrusExceptions.DataMissing ): - - # DataMissing is a quick thing to avoid subscription abandons when lots of deleted files in e621 (or any other booru) - # this should be richer in any case in the new system - - pass - - else: - - error_count += 1 - - time.sleep( 5 ) - - - error_count_threshold = HG.client_controller.new_options.GetNoneableInteger( 'subscription_file_error_cancel_threshold' ) - - if error_count_threshold is not None and error_count >= error_count_threshold: - - raise Exception( 'The subscription ' + self._name + ' encountered several errors when downloading files, so it abandoned its sync.' ) - - - - this_query_has_done_work = True - - if len( presentation_hashes ) > 0: - - job_key.SetVariable( 'popup_files', ( list( presentation_hashes ), query_summary_name ) ) - - - time.sleep( ClientImporting.DID_SUBSTANTIAL_FILE_WORK_MINIMUM_SLEEP_TIME ) - - HG.client_controller.WaitUntilViewFree() - + self._WorkOnQueryFiles( job_key, query_header, query_log_container, query_summary_name ) + + except HydrusExceptions.CancelledException: + + break finally: - if len( presentation_hashes ) > 0: - - publishing_label = self._GetPublishingLabel( query ) - - ClientImporting.PublishPresentationHashes( publishing_label, presentation_hashes, self._publish_files_to_popup_button, self._publish_files_to_page ) - + HG.client_controller.WriteSynchronous( 'serialisable', query_log_container ) @@ -669,14 +276,19 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): job_key.DeleteVariable( 'popup_gauge_2' ) - def _WorkOnFilesCanDoWork( self ): + def _WorkOnQueriesFilesCanDoWork( self ): - for query in self._queries: + for query_header in self._query_headers: - if query.HasFileWorkToDo(): + if not query_header.IsExpectingToWorkInFuture(): - bandwidth_ok = query.BandwidthOK( self._name ) - domain_ok = query.DomainOK() + continue + + + if query_header.HasFileWorkToDo(): + + bandwidth_ok = query_header.FileBandwidthOK( HG.client_controller.network_engine.bandwidth_manager, self._name ) + domain_ok = query_header.FileDomainOK( HG.client_controller.network_engine.domain_manager ) if HG.subscription_report_mode: @@ -690,7 +302,7 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): if not domain_ok: - self._DelayWork( 3600, 'domain errors, will try again later' ) + self._DelayWork( 3600, 'recent domain errors, will try again later' ) @@ -703,9 +315,238 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): return False - def _SyncQuery( self, job_key ): + def _WorkOnQueryFiles( + self, + job_key: ClientThreading.JobKey, + query_header: ClientImportSubscriptionQuery.SubscriptionQueryHeader, + query_log_container: ClientImportSubscriptionQuery.SubscriptionQueryLogContainer, + query_summary_name: str + ): - have_made_an_initial_sync_bandwidth_notification = False + this_query_has_done_work = False + + query_name = query_header.GetHumanName() + file_seed_cache = query_log_container.GetFileSeedCache() + + presentation_hashes = [] + presentation_hashes_fast = set() + + starting_num_urls = file_seed_cache.GetFileSeedCount() + starting_num_unknown = file_seed_cache.GetFileSeedCount( CC.STATUS_UNKNOWN ) + starting_num_done = starting_num_urls - 
starting_num_unknown + + try: + + while True: + + file_seed = file_seed_cache.GetNextFileSeed( CC.STATUS_UNKNOWN ) + + if file_seed is None: + + if HG.subscription_report_mode: + + HydrusData.ShowText( 'Query "' + query_name + '" can do no more file work due to running out of unknown urls.' ) + + + break + + + if job_key.IsCancelled(): + + self._DelayWork( 300, 'recently cancelled' ) + + break + + + p1 = not self._CanDoWorkNow() + p3 = not query_header.FileDomainOK( HG.client_controller.network_engine.domain_manager ) + p4 = not query_header.FileBandwidthOK( HG.client_controller.network_engine.bandwidth_manager, self._name ) + ( login_ok, login_reason ) = query_header.FileLoginOK( HG.client_controller.network_engine, self._name ) + + if p1 or p4 or not login_ok: + + if p3 and this_query_has_done_work: + + job_key.SetVariable( 'popup_text_2', 'domain had errors, will try again later' ) + + self._DelayWork( 3600, 'domain errors, will try again later' ) + + time.sleep( 5 ) + + + if p4 and this_query_has_done_work: + + job_key.SetVariable( 'popup_text_2', 'no more bandwidth to download files, will do some more later' ) + + time.sleep( 5 ) + + + if not login_ok: + + if not self._paused: + + message = 'Query "{}" for subscription "{}" seemed to have an invalid login for one of its file imports. The reason was:'.format( query_header.GetHumanName(), self._name ) + message += os.linesep * 2 + message += login_reason + message += os.linesep * 2 + message += 'The subscription has paused. Please see if you can fix the problem and then unpause. Hydrus dev would like feedback on this process.' + + HydrusData.ShowText( message ) + + self._DelayWork( 300, login_reason ) + + self._paused = True + + + + break + + + try: + + num_urls = file_seed_cache.GetFileSeedCount() + num_unknown = file_seed_cache.GetFileSeedCount( CC.STATUS_UNKNOWN ) + num_done = num_urls - num_unknown + + # 4001/4003 is not as useful as 1/3 + + human_num_urls = num_urls - starting_num_done + human_num_done = num_done - starting_num_done + + x_out_of_y = 'file ' + HydrusData.ConvertValueRangeToPrettyString( human_num_done + 1, human_num_urls ) + ': ' + + job_key.SetVariable( 'popup_gauge_2', ( human_num_done, human_num_urls ) ) + + def status_hook( text ): + + if len( text ) > 0: + + text = text.splitlines()[0] + + + job_key.SetVariable( 'popup_text_2', x_out_of_y + text ) + + + file_seed.WorkOnURL( file_seed_cache, status_hook, query_header.GenerateNetworkJobFactory( self._name ), ClientImporting.GenerateMultiplePopupNetworkJobPresentationContextFactory( job_key ), self._file_import_options, self._tag_import_options ) + + query_tag_import_options = query_header.GetTagImportOptions() + + if query_tag_import_options.HasAdditionalTags() and file_seed.status in CC.SUCCESSFUL_IMPORT_STATES: + + if file_seed.HasHash(): + + hash = file_seed.GetHash() + + media_result = HG.client_controller.Read( 'media_result', hash ) + + downloaded_tags = [] + + service_keys_to_content_updates = query_tag_import_options.GetServiceKeysToContentUpdates( file_seed.status, media_result, downloaded_tags ) # additional tags + + if len( service_keys_to_content_updates ) > 0: + + HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates ) + + + + + if file_seed.ShouldPresent( self._file_import_options ): + + hash = file_seed.GetHash() + + if hash not in presentation_hashes_fast: + + presentation_hashes.append( hash ) + + presentation_hashes_fast.add( hash ) + + + + except HydrusExceptions.CancelledException as e: + + self._DelayWork( 
300, str( e ) ) + + break + + except HydrusExceptions.VetoException as e: + + status = CC.STATUS_VETOED + + note = str( e ) + + file_seed.SetStatus( status, note = note ) + + except HydrusExceptions.NotFoundException: + + status = CC.STATUS_VETOED + + note = '404' + + file_seed.SetStatus( status, note = note ) + + except Exception as e: + + status = CC.STATUS_ERROR + + job_key.SetVariable( 'popup_text_2', x_out_of_y + 'file failed' ) + + file_seed.SetStatus( status, exception = e ) + + if isinstance( e, HydrusExceptions.DataMissing ): + + # DataMissing is a quick thing to avoid subscription abandons when lots of deleted files in e621 (or any other booru) + # this should be richer in any case in the new system + + pass + + else: + + self._file_error_count += 1 + + time.sleep( 5 ) + + + error_count_threshold = HG.client_controller.new_options.GetNoneableInteger( 'subscription_file_error_cancel_threshold' ) + + if error_count_threshold is not None and self._file_error_count >= error_count_threshold: + + raise Exception( 'The subscription ' + self._name + ' encountered several errors when downloading files, so it abandoned its sync.' ) + + + + this_query_has_done_work = True + + if len( presentation_hashes ) > 0: + + job_key.SetVariable( 'popup_files', ( list( presentation_hashes ), query_summary_name ) ) + + else: + + # although it is nice to have the file popup linger a little once a query is done, if the next query has 15 'already in db', it has outstayed its welcome + job_key.DeleteVariable( 'popup_files' ) + + + time.sleep( ClientImporting.DID_SUBSTANTIAL_FILE_WORK_MINIMUM_SLEEP_TIME ) + + HG.client_controller.WaitUntilViewFree() + + + finally: + + query_header.UpdateFileStatus( query_log_container ) + + if len( presentation_hashes ) > 0: + + publishing_label = self._GetPublishingLabel( query_header ) + + ClientImporting.PublishPresentationHashes( publishing_label, presentation_hashes, self._publish_files_to_popup_button, self._publish_files_to_page ) + + + + + def _SyncQueries( self, job_key ): + + self._have_made_an_initial_sync_bandwidth_notification = False gug = HG.client_controller.network_engine.domain_manager.GetGUG( self._gug_key_and_name ) @@ -729,313 +570,54 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): self._gug_key_and_name = gug.GetGUGKeyAndName() # just a refresher, to keep up with any changes - queries = self._GetQueriesForProcessing() + query_headers = self._GetQueryHeadersForProcessing() - queries = [ query for query in queries if query.IsSyncDue() ] + query_headers = [ query_header for query_header in query_headers if query_header.IsSyncDue() ] - num_queries = len( queries ) + num_queries = len( query_headers ) - for ( i, query ) in enumerate( queries ): + for ( i, query_header ) in enumerate( query_headers ): - query_text = query.GetQueryText() - query_name = query.GetHumanName() - file_seed_cache = query.GetFileSeedCache() - gallery_seed_log = query.GetGallerySeedLog() + status_prefix = 'synchronising' - this_is_initial_sync = query.IsInitialSync() - total_new_urls_for_this_sync = 0 - total_already_in_urls_for_this_sync = 0 - - gallery_urls_seen_this_sync = set() - - if this_is_initial_sync: - - file_limit_for_this_sync = self._initial_file_limit - - else: - - file_limit_for_this_sync = self._periodic_file_limit - - - file_seeds_to_add = set() - file_seeds_to_add_ordered = [] - - stop_reason = 'unknown stop reason' - - prefix = 'synchronising' + query_name = query_header.GetHumanName() if query_name != self._name: - prefix += ' "' + query_name + 
'"' + status_prefix += ' "' + query_name + '"' - if num_queries > 1: - - prefix += ' (' + HydrusData.ConvertValueRangeToPrettyString( i + 1, num_queries ) + ')' - - - job_key.SetVariable( 'popup_text_1', prefix ) - - initial_search_urls = gug.GenerateGalleryURLs( query_text ) - - if len( initial_search_urls ) == 0: - - self._paused = True - - HydrusData.ShowText( 'The subscription "' + self._name + '"\'s Gallery URL Generator, "' + self._gug_key_and_name[1] + '" did not generate any URLs! The sub has paused!' ) - - return - - - gallery_seeds = [ ClientImportGallerySeeds.GallerySeed( url, can_generate_more_pages = True ) for url in initial_search_urls ] - - gallery_seed_log.AddGallerySeeds( gallery_seeds ) + status_prefix += ' (' + HydrusData.ConvertValueRangeToPrettyString( i + 1, num_queries ) + ')' try: - while gallery_seed_log.WorkToDo(): - - p1 = not self._CanDoWorkNow() - p3 = not self._QuerySyncLoginOK( query ) - - if p1 or p3: - - if p3: - - stop_reason = 'Login was invalid!' - - - return - - - if job_key.IsCancelled(): - - stop_reason = 'gallery parsing cancelled, likely by user' - - self._DelayWork( 600, stop_reason ) - - return - - - gallery_seed = gallery_seed_log.GetNextGallerySeed( CC.STATUS_UNKNOWN ) - - if gallery_seed is None: - - stop_reason = 'thought there was a page to check, but apparently there was not!' - - break - - - def status_hook( text ): - - if len( text ) > 0: - - text = text.splitlines()[0] - - - job_key.SetVariable( 'popup_text_1', prefix + ': ' + text ) - - - def title_hook( text ): - - pass - - - def file_seeds_callable( file_seeds ): - - num_urls_added = 0 - num_urls_already_in_file_seed_cache = 0 - can_search_for_more_files = True - stop_reason = 'unknown stop reason' - current_contiguous_num_urls_already_in_file_seed_cache = 0 - - for file_seed in file_seeds: - - if file_seed in file_seeds_to_add: - - # this catches the occasional overflow when a new file is uploaded while gallery parsing is going on - # we don't want to count these 'seen before this run' urls in the 'caught up to last time' count - - continue - - - # When are we caught up? This is not a trivial problem. Tags are not always added when files are uploaded, so the order we find files is not completely reliable. - # Ideally, we want to search a _bit_ deeper than the first already-seen. - # And since we have a page of urls here and now, there is no point breaking early if there might be some new ones at the end. - # Current rule is "We are caught up if the final X contiguous files are 'already in'". X is 5 for now. 
- - if file_seed_cache.HasFileSeed( file_seed ): - - num_urls_already_in_file_seed_cache += 1 - current_contiguous_num_urls_already_in_file_seed_cache += 1 - - if current_contiguous_num_urls_already_in_file_seed_cache >= 100: - - can_search_for_more_files = False - stop_reason = 'saw 100 previously seen urls in a row, so assuming this is a large gallery' - - break - - - else: - - num_urls_added += 1 - current_contiguous_num_urls_already_in_file_seed_cache = 0 - - file_seeds_to_add.add( file_seed ) - file_seeds_to_add_ordered.append( file_seed ) - - - if file_limit_for_this_sync is not None and total_new_urls_for_this_sync + num_urls_added >= file_limit_for_this_sync: - - # we have found enough new files this sync, so should stop adding files and new gallery pages - - if this_is_initial_sync: - - stop_reason = 'hit initial file limit' - - else: - - if total_already_in_urls_for_this_sync + num_urls_already_in_file_seed_cache > 0: - - # this sync produced some knowns, so it is likely we have stepped through a mix of old and tagged-late new files - # we might also be on the second sync with a periodic limit greater than the initial limit - # either way, this is no reason to go crying to the user - - stop_reason = 'hit periodic file limit after seeing several already-seen files' - - else: - - # this page had all entirely new files - - self._ShowHitPeriodicFileLimitMessage( query_name ) - - stop_reason = 'hit periodic file limit without seeing any already-seen files!' - - - - can_search_for_more_files = False - - break - - - - WE_HIT_OLD_GROUND_THRESHOLD = 5 - - if current_contiguous_num_urls_already_in_file_seed_cache >= WE_HIT_OLD_GROUND_THRESHOLD: - - # this gallery page has caught up to before, so it should not spawn any more gallery pages - - can_search_for_more_files = False - - stop_reason = 'saw ' + HydrusData.ToHumanInt( current_contiguous_num_urls_already_in_file_seed_cache ) + ' previously seen urls, so assuming we caught up' - - - if num_urls_added == 0: - - can_search_for_more_files = False - stop_reason = 'no new urls found' - - - return ( num_urls_added, num_urls_already_in_file_seed_cache, can_search_for_more_files, stop_reason ) - - - job_key.SetVariable( 'popup_text_1', prefix + ': found ' + HydrusData.ToHumanInt( total_new_urls_for_this_sync ) + ' new urls, checking next page' ) - - try: - - ( num_urls_added, num_urls_already_in_file_seed_cache, num_urls_total, result_404, added_new_gallery_pages, stop_reason ) = gallery_seed.WorkOnURL( 'subscription', gallery_seed_log, file_seeds_callable, status_hook, title_hook, self._GenerateNetworkJobFactory( query ), ClientImporting.GenerateMultiplePopupNetworkJobPresentationContextFactory( job_key ), self._file_import_options, gallery_urls_seen_before = gallery_urls_seen_this_sync ) - - except HydrusExceptions.CancelledException as e: - - stop_reason = 'gallery network job cancelled, likely by user' - - self._DelayWork( 600, stop_reason ) - - return - - except Exception as e: - - stop_reason = str( e ) - - raise - - - total_new_urls_for_this_sync += num_urls_added - total_already_in_urls_for_this_sync += num_urls_already_in_file_seed_cache - - if file_limit_for_this_sync is not None and total_new_urls_for_this_sync >= file_limit_for_this_sync: - - # we have found enough new files this sync, so stop and cancel any outstanding gallery urls - - if this_is_initial_sync: - - stop_reason = 'hit initial file limit' - - else: - - stop_reason = 'hit periodic file limit' - - - break - - + query_log_container = HG.client_controller.Read( 
'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER, query_header.GetQueryLogContainerName() ) + + except HydrusExceptions.DBException: + + self._DealWithMissingQueryLogContainerError( query_header ) + + break + + + try: + + self._SyncQuery( job_key, gug, query_header, query_log_container, status_prefix ) + + except HydrusExceptions.CancelledException: + + break finally: - while gallery_seed_log.WorkToDo(): - - gallery_seed = gallery_seed_log.GetNextGallerySeed( CC.STATUS_UNKNOWN ) - - if gallery_seed is None: - - break - - - gallery_seed.SetStatus( CC.STATUS_VETOED, note = stop_reason ) - - - - file_seeds_to_add_ordered.reverse() - - # 'first' urls are now at the end, so the file_seed_cache should stay roughly in oldest->newest order - - file_seed_cache.AddFileSeeds( file_seeds_to_add_ordered ) - - query.RegisterSyncComplete( self._checker_options ) - query.UpdateNextCheckTime( self._checker_options ) - - # - - if query.IsDead(): - - if this_is_initial_sync: - - HydrusData.ShowText( 'The query "' + query_name + '" for subscription "' + self._name + '" did not find any files on its first sync! Could the query text have a typo, like a missing underscore?' ) - - else: - - HydrusData.ShowText( 'The query "' + query_name + '" for subscription "' + self._name + '" appears to be dead!' ) - - - else: - - if this_is_initial_sync: - - if not query.BandwidthOK( self._name ) and not have_made_an_initial_sync_bandwidth_notification: - - HydrusData.ShowText( 'FYI: The query "' + query_name + '" for subscription "' + self._name + '" performed its initial sync ok, but that domain is short on bandwidth right now, so no files will be downloaded yet. The subscription will catch up in future as bandwidth becomes available. You can review the estimated time until bandwidth is available under the manage subscriptions dialog. If more queries are performing initial syncs in this run, they may be the same.' 
) - - have_made_an_initial_sync_bandwidth_notification = True - - + HG.client_controller.WriteSynchronous( 'serialisable', query_log_container ) - def _SyncQueryCanDoWork( self ): + def _SyncQueriesCanDoWork( self ): - result = True in ( query.IsSyncDue() for query in self._queries ) + result = True in ( query_header.IsSyncDue() for query_header in self._query_headers ) if HG.subscription_report_mode: @@ -1045,42 +627,371 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): return result - def AllPaused( self ): + def _SyncQuery( + self, + job_key: ClientThreading.JobKey, + gug: ClientNetworkingDomain.GalleryURLGenerator, # not actually correct for an ngug, but _whatever_ + query_header: ClientImportSubscriptionQuery.SubscriptionQueryHeader, + query_log_container: ClientImportSubscriptionQuery.SubscriptionQueryLogContainer, + status_prefix: str + ): - if self._paused: + query_text = query_header.GetQueryText() + query_name = query_header.GetHumanName() + + file_seed_cache = query_log_container.GetFileSeedCache() + gallery_seed_log = query_log_container.GetGallerySeedLog() + + this_is_initial_sync = query_header.IsInitialSync() + total_new_urls_for_this_sync = 0 + total_already_in_urls_for_this_sync = 0 + + gallery_urls_seen_this_sync = set() + + if this_is_initial_sync: - return True + file_limit_for_this_sync = self._initial_file_limit + + else: + + file_limit_for_this_sync = self._periodic_file_limit - for query in self._queries: + file_seeds_to_add = set() + file_seeds_to_add_ordered = [] + + stop_reason = 'unknown stop reason' + + job_key.SetVariable( 'popup_text_1', status_prefix ) + + initial_search_urls = gug.GenerateGalleryURLs( query_text ) + + if len( initial_search_urls ) == 0: - if not query.IsPaused(): + self._paused = True + + HydrusData.ShowText( 'The subscription "' + self._name + '"\'s Gallery URL Generator, "' + self._gug_key_and_name[1] + '" did not generate any URLs! The sub has paused!' ) + + raise HydrusExceptions.CancelledException( 'Bad GUG.' ) + + + gallery_seeds = [ ClientImportGallerySeeds.GallerySeed( url, can_generate_more_pages = True ) for url in initial_search_urls ] + + gallery_seed_log.AddGallerySeeds( gallery_seeds ) + + try: + + while gallery_seed_log.WorkToDo(): - return False + p1 = not self._CanDoWorkNow() + ( login_ok, login_reason ) = query_header.GalleryLoginOK( HG.client_controller.network_engine, self._name ) + + if p1 or not login_ok: + + if not login_ok: + + if not self._paused: + + message = 'Query "{}" for subscription "{}" seemed to have an invalid login. The reason was:'.format( query_header.GetHumanName(), self._name ) + message += os.linesep * 2 + message += login_reason + message += os.linesep * 2 + message += 'The subscription has paused. Please see if you can fix the problem and then unpause. Hydrus dev would like feedback on this process.' + + HydrusData.ShowText( message ) + + self._DelayWork( 300, login_reason ) + + self._paused = True + + + + raise HydrusExceptions.CancelledException( 'A problem, so stopping.' ) + + + if job_key.IsCancelled(): + + stop_reason = 'gallery parsing cancelled, likely by user' + + self._DelayWork( 600, stop_reason ) + + raise HydrusExceptions.CancelledException( 'User cancelled.' ) + + + gallery_seed = gallery_seed_log.GetNextGallerySeed( CC.STATUS_UNKNOWN ) + + if gallery_seed is None: + + stop_reason = 'thought there was a page to check, but apparently there was not!' 
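
A structural note on the rewrite: wherever the old code returned out of the sync on failure, the new code raises HydrusExceptions.CancelledException instead, so the caller can break its query loop while the finally blocks still veto leftover gallery pages and persist the container. The shape of that contract, sketched with stand-in names (the callables are injected here; in the patch they are HG.client_controller.Read/WriteSynchronous and _SyncQuery):

class CancelledSketch( Exception ):
    
    # stands in for HydrusExceptions.CancelledException
    pass
    

def sync_queries( query_headers, load_container, save_container, sync_one_query ):
    
    for query_header in query_headers:
        
        query_log_container = load_container( query_header )
        
        try:
            
            sync_one_query( query_header, query_log_container )
            
        except CancelledSketch:
            
            break # a cancelled query ends this run; the next cycle resumes later
            
        finally:
            
            save_container( query_log_container )
            
        
    

The same convention covers the bad-GUG, bad-login, and user-cancel paths above, which is why all of them raise rather than return.
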
+ + break + + + def status_hook( text ): + + if len( text ) > 0: + + text = text.splitlines()[0] + + + job_key.SetVariable( 'popup_text_1', status_prefix + ': ' + text ) + + + def title_hook( text ): + + pass + + + def file_seeds_callable( file_seeds ): + + num_urls_added = 0 + num_urls_already_in_file_seed_cache = 0 + can_search_for_more_files = True + stop_reason = 'unknown stop reason' + current_contiguous_num_urls_already_in_file_seed_cache = 0 + + for file_seed in file_seeds: + + if file_seed in file_seeds_to_add: + + # this catches the occasional overflow when a new file is uploaded while gallery parsing is going on + # we don't want to count these 'seen before this run' urls in the 'caught up to last time' count + + continue + + + # When are we caught up? This is not a trivial problem. Tags are not always added when files are uploaded, so the order we find files is not completely reliable. + # Ideally, we want to search a _bit_ deeper than the first already-seen. + # And since we have a page of urls here and now, there is no point breaking early if there might be some new ones at the end. + # Current rule is "We are caught up if the final X contiguous files are 'already in'". X is 5 for now. + + if file_seed_cache.HasFileSeed( file_seed ): + + num_urls_already_in_file_seed_cache += 1 + current_contiguous_num_urls_already_in_file_seed_cache += 1 + + if current_contiguous_num_urls_already_in_file_seed_cache >= 100: + + can_search_for_more_files = False + stop_reason = 'saw 100 previously seen urls in a row, so assuming this is a large gallery' + + break + + + else: + + num_urls_added += 1 + current_contiguous_num_urls_already_in_file_seed_cache = 0 + + file_seeds_to_add.add( file_seed ) + file_seeds_to_add_ordered.append( file_seed ) + + + if file_limit_for_this_sync is not None and total_new_urls_for_this_sync + num_urls_added >= file_limit_for_this_sync: + + # we have found enough new files this sync, so should stop adding files and new gallery pages + + if this_is_initial_sync: + + stop_reason = 'hit initial file limit' + + else: + + if total_already_in_urls_for_this_sync + num_urls_already_in_file_seed_cache > 0: + + # this sync produced some knowns, so it is likely we have stepped through a mix of old and tagged-late new files + # we might also be on the second sync with a periodic limit greater than the initial limit + # either way, this is no reason to go crying to the user + + stop_reason = 'hit periodic file limit after seeing several already-seen files' + + else: + + # this page had all entirely new files + + self._ShowHitPeriodicFileLimitMessage( query_name ) + + stop_reason = 'hit periodic file limit without seeing any already-seen files!' 
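
Only one of the three limit outcomes warrants bothering the user: a periodic sync that filled its whole quota with brand-new URLs, which suggests the check period or file limit cannot keep up with the upload rate. Sketched as a tiny pure function (hypothetical name) returning the stop reason and whether to raise the popup:

def limit_stop_reason( this_is_initial_sync, already_seen_this_sync ):
    
    if this_is_initial_sync:
        
        return ( 'hit initial file limit', False )
        
    
    if already_seen_this_sync > 0:
        
        # a mix of old and new files: ordinary catch-up, nothing to report
        return ( 'hit periodic file limit after seeing several already-seen files', False )
        
    
    # a full quota of brand-new files: the user should probably raise the limit
    return ( 'hit periodic file limit without seeing any already-seen files!', True )
    
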
+ + + + can_search_for_more_files = False + + break + + + + WE_HIT_OLD_GROUND_THRESHOLD = 5 + + if current_contiguous_num_urls_already_in_file_seed_cache >= WE_HIT_OLD_GROUND_THRESHOLD: + + # this gallery page has caught up to before, so it should not spawn any more gallery pages + + can_search_for_more_files = False + + stop_reason = 'saw ' + HydrusData.ToHumanInt( current_contiguous_num_urls_already_in_file_seed_cache ) + ' previously seen urls, so assuming we caught up' + + + if num_urls_added == 0: + + can_search_for_more_files = False + stop_reason = 'no new urls found' + + + return ( num_urls_added, num_urls_already_in_file_seed_cache, can_search_for_more_files, stop_reason ) + + + job_key.SetVariable( 'popup_text_1', status_prefix + ': found ' + HydrusData.ToHumanInt( total_new_urls_for_this_sync ) + ' new urls, checking next page' ) + + try: + + ( num_urls_added, num_urls_already_in_file_seed_cache, num_urls_total, result_404, added_new_gallery_pages, stop_reason ) = gallery_seed.WorkOnURL( 'subscription', gallery_seed_log, file_seeds_callable, status_hook, title_hook, query_header.GenerateNetworkJobFactory( self._name ), ClientImporting.GenerateMultiplePopupNetworkJobPresentationContextFactory( job_key ), self._file_import_options, gallery_urls_seen_before = gallery_urls_seen_this_sync ) + + except HydrusExceptions.CancelledException as e: + + stop_reason = 'gallery network job cancelled, likely by user' + + self._DelayWork( 600, stop_reason ) + + raise HydrusExceptions.CancelledException( 'User cancelled.' ) + + except Exception as e: + + stop_reason = str( e ) + + raise + + + total_new_urls_for_this_sync += num_urls_added + total_already_in_urls_for_this_sync += num_urls_already_in_file_seed_cache + + if file_limit_for_this_sync is not None and total_new_urls_for_this_sync >= file_limit_for_this_sync: + + # we have found enough new files this sync, so stop and cancel any outstanding gallery urls + + if this_is_initial_sync: + + stop_reason = 'hit initial file limit' + + else: + + stop_reason = 'hit periodic file limit' + + + break + + + + finally: + + # now clean up any lingering gallery seeds + + while gallery_seed_log.WorkToDo(): + + gallery_seed = gallery_seed_log.GetNextGallerySeed( CC.STATUS_UNKNOWN ) + + if gallery_seed is None: + + break + + + gallery_seed.SetStatus( CC.STATUS_VETOED, note = stop_reason ) - return True + file_seeds_to_add_ordered.reverse() + + # 'first' urls are now at the end, so the file_seed_cache should stay roughly in oldest->newest order + + file_seed_cache.AddFileSeeds( file_seeds_to_add_ordered ) + + query_header.RegisterSyncComplete( self._checker_options, query_log_container ) + + # + + if query_header.IsDead(): + + if this_is_initial_sync: + + HydrusData.ShowText( 'The query "{}" for subscription "{}" did not find any files on its first sync! Could the query text have a typo, like a missing underscore?'.format( query_name, self._name ) ) + + else: + + HydrusData.ShowText( 'The query "{}" for subscription "{}" appears to be dead!'.format( query_name, self._name ) ) + + + else: + + if this_is_initial_sync: + + if not query_header.FileBandwidthOK( HG.client_controller.network_engine.bandwidth_manager, self._name ) and not self._have_made_an_initial_sync_bandwidth_notification: + + HydrusData.ShowText( 'FYI: The query "{}" for subscription "{}" performed its initial sync ok, but it is short on bandwidth right now, so no files will be downloaded yet. The subscription will catch up in future as bandwidth becomes available. 
You can review the estimated time until bandwidth is available under the manage subscriptions dialog. If more queries are performing initial syncs in this run, they may be the same.'.format( query_name, self._name ) ) + + self._have_made_an_initial_sync_bandwidth_notification = True + + + + + + def _SyncQueryLogContainersCanDoWork( self ): + + result = True in ( query_header.WantsToResyncWithLogContainer() for query_header in self._query_headers ) + + if HG.subscription_report_mode: + + HydrusData.ShowText( 'Subscription "{}" checking if any log containers need to be resynced: {}'.format( self._name, result ) ) + + + return result + + + def _SyncQueryLogContainers( self ): + + query_headers_to_do = [ query_header for query_header in self._query_headers if query_header.WantsToResyncWithLogContainer() ] + + for query_header in self._query_headers: + + if not query_header.WantsToResyncWithLogContainer(): + + continue + + + try: + + query_log_container = HG.client_controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER, query_header.GetQueryLogContainerName() ) + + except HydrusExceptions.DBException: + + self._DealWithMissingQueryLogContainerError( query_header ) + + break + + + query_header.SyncToQueryLogContainer( self._checker_options, query_log_container ) + + # don't need to save the container back, we made no changes + def CanCheckNow( self ): - return True in ( query.CanCheckNow() for query in self._queries ) + return True in ( query_header.CanCheckNow() for query_header in self._query_headers ) def CanReset( self ): - return True in ( not query.IsInitialSync() for query in self._queries ) + return True in ( not query_header.IsInitialSync() for query_header in self._query_headers ) - def CanRetryFailures( self ): + def CanRetryFailed( self ): - return True in ( query.CanRetryFailed() for query in self._queries ) + return True in ( query_header.CanRetryFailed() for query_header in self._query_headers ) def CanRetryIgnored( self ): - return True in ( query.CanRetryIgnored() for query in self._queries ) + return True in ( query_header.CanRetryIgnored() for query_header in self._query_headers ) def CanScrubDelay( self ): @@ -1090,26 +1001,33 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): def CheckNow( self ): - for query in self._queries: + for query_header in self._query_headers: - query.CheckNow() + query_header.CheckNow() self.ScrubDelay() - def GetBandwidthWaitingEstimateMinMax( self ): + def GetAllQueryLogContainerNames( self ) -> typing.Set[ str ]: - if len( self._queries ) == 0: + names = { query_header.GetQueryLogContainerName() for query_header in self._query_headers } + + return names + + + def GetBandwidthWaitingEstimateMinMax( self, bandwidth_manager: ClientNetworkingBandwidth.NetworkBandwidthManager ): + + if len( self._query_headers ) == 0: return ( 0, 0 ) estimates = [] - for query in self._queries: + for query_header in self._query_headers: - estimate = query.GetBandwidthWaitingEstimate( self._name ) + estimate = query_header.GetBandwidthWaitingEstimate( bandwidth_manager, self._name ) estimates.append( estimate ) @@ -1124,9 +1042,9 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): next_work_times = set() - for query in self._queries: + for query_header in self._query_headers: - next_work_time = query.GetNextWorkTime( self._name ) + next_work_time = query_header.GetNextWorkTime( HG.client_controller.network_engine.bandwidth_manager, self._name ) if next_work_time is not None: @@ 
-1139,8 +1057,8 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): return None - # if there are three queries due fifty seconds after our first one runs, we should wait that little bit longer - LAUNCH_WINDOW = 15 * 60 + # if there are three queries due say fifty seconds after our first one runs, we should wait that little bit longer + LAUNCH_WINDOW = 5 * 60 earliest_next_work_time = min( next_work_times ) @@ -1174,9 +1092,9 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): return self._gug_key_and_name - def GetQueries( self ) -> typing.List[ ClientImportSubscriptionQuery.SubscriptionQuery ]: + def GetQueryHeaders( self ) -> typing.List[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ]: - return self._queries + return self._query_headers def GetMergeable( self, potential_mergees ): @@ -1186,7 +1104,7 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): for subscription in potential_mergees: - if subscription._gug_key_and_name[1] == self._gug_key_and_name[1]: + if subscription.GetGUGKeyAndName()[1] == self._gug_key_and_name[1]: mergeable.append( subscription ) @@ -1211,9 +1129,9 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): def HasQuerySearchTextFragment( self, search_text_fragment ): - for query in self._queries: + for query_header in self._query_headers: - query_text = query.GetQueryText() + query_text = query_header.GetQueryText() if search_text_fragment in query_text: @@ -1224,22 +1142,38 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): return False - def Merge( self, mergees ): + def IsExpectingToWorkInFuture( self ): + + if self._paused: + + return False + + + result = True in ( query_header.IsExpectingToWorkInFuture() for query_header in self._query_headers ) + + return result + + + def Merge( self, mergees: typing.Iterable[ "Subscription" ] ): + + unmerged = [] for subscription in mergees: - if subscription._gug_key_and_name[1] == self._gug_key_and_name[1]: + if subscription.GetGUGKeyAndName()[1] == self._gug_key_and_name[1]: - my_new_queries = [ query.Duplicate() for query in subscription._queries ] + self._query_headers.extend( subscription.GetQueryHeaders() ) - self._queries.extend( my_new_queries ) + subscription.SetQueryHeaders( [] ) else: - raise Exception( self._name + ' was told to merge an unmergeable subscription, ' + subscription.GetName() + '!' 
) + unmerged.append( subscription ) + return unmerged + def PauseResume( self ): @@ -1248,79 +1182,84 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): def Reset( self ): - for query in self._queries: + for query_header in self._query_headers: - query.Reset() + query_header.Reset() self.ScrubDelay() - def RetryFailures( self ): + def RetryFailed( self ): - for query in self._queries: + for query_header in self._query_headers: - query.RetryFailures() + query_header.RetryFailed() def RetryIgnored( self ): - for query in self._queries: + for query_header in self._query_headers: - query.RetryIgnored() + query_header.RetryIgnored() - def Separate( self, base_name, only_these_queries = None ): + def Separate( self, base_name, only_these_query_headers = None ): - if only_these_queries is None: + if only_these_query_headers is None: - only_these_queries = set( self._queries ) + only_these_query_headers = set( self._query_headers ) else: - only_these_queries = set( only_these_queries ) + only_these_query_headers = set( only_these_query_headers ) - my_queries = self._queries + my_query_headers = self._query_headers - self._queries = [] + self._query_headers = [] base_sub = self.Duplicate() - self._queries = my_queries + self._query_headers = my_query_headers subscriptions = [] - for query in my_queries: + for query_header in my_query_headers: - if query not in only_these_queries: + if query_header not in only_these_query_headers: continue subscription = base_sub.Duplicate() - subscription._queries = [ query ] + subscription.SetQueryHeaders( [ query_header ] ) - subscription.SetName( base_name + ': ' + query.GetHumanName() ) + subscription.SetName( base_name + ': ' + query_header.GetHumanName() ) subscriptions.append( subscription ) - self._queries = [ query for query in my_queries if query not in only_these_queries ] + self._query_headers = [ query_header for query_header in my_query_headers if query_header not in only_these_query_headers ] return subscriptions - def SetCheckerOptions( self, checker_options ): + def SetCheckerOptions( self, checker_options: ClientImportOptions.CheckerOptions ): + + changes_made = self._checker_options.GetSerialisableTuple() != checker_options.GetSerialisableTuple() self._checker_options = checker_options - for query in self._queries: + if changes_made: - query.UpdateNextCheckTime( self._checker_options ) + for query_header in self._query_headers: + + query_header.SetQueryLogContainerStatus( ClientImportSubscriptionQuery.LOG_CONTAINER_UNSYNCED ) + @@ -1333,9 +1272,9 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): self._merge_query_publish_events = merge_query_publish_events - def SetQueries( self, queries: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQuery ] ): + def SetQueryHeaders( self, query_headers: typing.Iterable[ ClientImportSubscriptionQuery.SubscriptionQueryHeader ] ): - self._queries = list( queries ) + self._query_headers = list( query_headers ) def SetTagImportOptions( self, tag_import_options ): @@ -1365,10 +1304,31 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): def Sync( self ): - sync_ok = self._SyncQueryCanDoWork() - files_ok = self._WorkOnFilesCanDoWork() + log_sync_work_to_do = self._SyncQueryLogContainersCanDoWork() - if self._CanDoWorkNow() and ( sync_ok or files_ok ): + if self._CanDoWorkNow() and log_sync_work_to_do: + + try: + + self._SyncQueryLogContainers() + + except Exception as e: + + HydrusData.ShowText( 'The subscription ' + self._name + ' encountered an exception 
when trying to sync:' ) + HydrusData.ShowException( e ) + + self._paused = True + + self._DelayWork( 300, 'error: {}'.format( str( e ) ) ) + + return + + + + sync_work_to_do = self._SyncQueriesCanDoWork() + files_work_to_do = self._WorkOnQueriesFilesCanDoWork() + + if self._CanDoWorkNow() and ( sync_work_to_do or files_work_to_do ): job_key = ClientThreading.JobKey( pausable = False, cancellable = True ) @@ -1382,12 +1342,12 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): # it is possible a query becomes due for a check while others are syncing, so we repeat this while watching for a stop signal - while self._CanDoWorkNow() and self._SyncQueryCanDoWork(): + while self._CanDoWorkNow() and self._SyncQueriesCanDoWork(): - self._SyncQuery( job_key ) + self._SyncQueries( job_key ) - self._WorkOnFiles( job_key ) + self._WorkOnQueriesFiles( job_key ) except HydrusExceptions.NetworkException as e: @@ -1417,8 +1377,6 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): job_key.DeleteVariable( 'popup_network_job' ) - HG.client_controller.WriteSynchronous( 'serialisable', self ) - if job_key.HasVariable( 'popup_files' ): job_key.Finish() @@ -1432,7 +1390,7 @@ class Subscription( HydrusSerialisable.SerialisableBaseNamed ): def ToTuple( self ): - return ( self._name, self._gug_key_and_name, self._queries, self._checker_options, self._initial_file_limit, self._periodic_file_limit, self._paused, self._file_import_options, self._tag_import_options, self._no_work_until, self._no_work_until_reason ) + return ( self._name, self._gug_key_and_name, self._query_headers, self._checker_options, self._initial_file_limit, self._periodic_file_limit, self._paused, self._file_import_options, self._tag_import_options, self._no_work_until, self._no_work_until_reason ) HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION ] = Subscription @@ -1455,6 +1413,8 @@ class SubscriptionJob( object ): self._subscription.Sync() + self._controller.WriteSynchronous( 'serialisable', self._subscription ) + def IsDone( self ): @@ -1475,16 +1435,14 @@ class SubscriptionJob( object ): class SubscriptionsManager( object ): - def __init__( self, controller ): + def __init__( self, controller, subscriptions: typing.List[ Subscription ] ): self._controller = controller - self._running_subscriptions = {} - self._current_subscription_names = set() - self._names_to_next_work_time = {} + self._names_to_subscriptions = { subscription.GetName() : subscription for subscription in subscriptions } + self._names_to_running_subscription_info = {} self._names_that_cannot_run = set() - - self._loading_sub = False + self._names_to_next_work_time = {} self._lock = threading.Lock() @@ -1493,13 +1451,6 @@ class SubscriptionsManager( object ): self._wake_event = threading.Event() - # cache deals with 'don't need to check, but have more files to do' and delay timings - # no prob if cache is empty of a sub, we'll just repopulate naturally - # also cache deals with pause info - # ideally this lad will launch subs exactly on time, rather than every twenty mins or whatever, but we should have a buffer on natural timings in order to get multiple queries together - - self._ReinitialiseNames() - self._controller.sub( self, 'Shutdown', 'shutdown' ) @@ -1507,22 +1458,51 @@ class SubscriptionsManager( object ): done_some = False - for ( name, ( thread, job, subscription ) ) in list( self._running_subscriptions.items() ): + for ( name, ( thread, job, subscription ) ) in list( 
self._names_to_running_subscription_info.items() ): if job.IsDone(): self._UpdateSubscriptionInfo( subscription, just_finished_work = True ) - del self._running_subscriptions[ name ] + del self._names_to_running_subscription_info[ name ] done_some = True - return done_some + if done_some: + + gc.collect() + - def _GetNameReadyToGo( self ): + def _GetMainLoopWaitTime( self ): + + if self._shutdown: + + return 0.1 + + + if len( self._names_to_running_subscription_info ) > 0: + + return 1 + + else: + + subscription = self._GetSubscriptionReadyToGo() + + if subscription is not None: + + return 1 + + else: + + return 15 + + + + + def _GetSubscriptionReadyToGo( self ): p1 = HG.client_controller.options[ 'pause_subs_sync' ] p2 = HG.client_controller.new_options.GetBoolean( 'pause_all_new_network_traffic' ) @@ -1535,13 +1515,13 @@ class SubscriptionsManager( object ): max_simultaneous_subscriptions = HG.client_controller.new_options.GetInteger( 'max_simultaneous_subscriptions' ) - if len( self._running_subscriptions ) >= max_simultaneous_subscriptions: + if len( self._names_to_running_subscription_info ) >= max_simultaneous_subscriptions: return None - possible_names = set( self._current_subscription_names ) - possible_names.difference_update( set( self._running_subscriptions.keys() ) ) + possible_names = set( self._names_to_subscriptions.keys() ) + possible_names.difference_update( set( self._names_to_running_subscription_info.keys() ) ) possible_names.difference_update( self._names_that_cannot_run ) # just a couple of seconds for calculation and human breathing room @@ -1574,44 +1554,10 @@ class SubscriptionsManager( object ): HydrusData.ShowText( 'Subscription manager selected "{}" to start.'.format( subscription_name ) ) - return subscription_name + return self._names_to_subscriptions[ subscription_name ] - def _GetMainLoopWaitTime( self ): - - if self._shutdown: - - return 0.1 - - - if len( self._running_subscriptions ) > 0: - - return 1 - - else: - - subscription_name = self._GetNameReadyToGo() - - if subscription_name is not None: - - return 1 - - else: - - return 15 - - - - - def _ReinitialiseNames( self ): - - self._current_subscription_names = set( HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION ) ) - - self._names_that_cannot_run = set() - self._names_to_next_work_time = {} - - - def _UpdateSubscriptionInfo( self, subscription, just_finished_work = False ): + def _UpdateSubscriptionInfo( self, subscription: Subscription, just_finished_work = False ): name = subscription.GetName() @@ -1625,7 +1571,7 @@ class SubscriptionsManager( object ): del self._names_to_next_work_time[ name ] - if subscription.AllPaused(): + if not subscription.IsExpectingToWorkInFuture(): self._names_that_cannot_run.add( name ) @@ -1654,13 +1600,11 @@ class SubscriptionsManager( object ): - def ClearCacheAndWake( self ): + def GetSubscriptions( self ) -> typing.List[ Subscription ]: with self._lock: - self._ReinitialiseNames() - - self.Wake() + return list( self._names_to_subscriptions.values() ) @@ -1669,34 +1613,6 @@ class SubscriptionsManager( object ): return self._mainloop_finished - def LoadAndBootSubscription( self, subscription_name ): - - # keep this in its own thing lmao, you don't want a local() 'subscription' variable hanging around eating 400MB in the mainloop, nor the trouble of 'del'-ing it all over the place - - try: - - subscription = self._controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SUBSCRIPTION, subscription_name ) 
- - except Exception as e: - - HydrusData.ShowText( 'Subscription "{}" failed to load! Error information should follow. No more subscriptions will run this boot.'.format( subscription_name ) ) - HydrusData.ShowException( e ) - - return - - - job = SubscriptionJob( self._controller, subscription ) - - thread = threading.Thread( target = job.Work, name = 'subscription thread' ) - - thread.start() - - with self._lock: - - self._running_subscriptions[ subscription_name ] = ( thread, job, subscription ) - - - def MainLoop( self ): try: self._wake_event.wait( 15 ) while not ( HG.view_shutdown or self._shutdown ): with self._lock: - subscription_name = self._GetNameReadyToGo() + subscription = self._GetSubscriptionReadyToGo() - if subscription_name is not None: + if subscription is not None: - self._loading_sub = True + job = SubscriptionJob( self._controller, subscription ) + + thread = threading.Thread( target = job.Work, name = 'subscription thread' ) + + thread.start() + + self._names_to_running_subscription_info[ subscription.GetName() ] = ( thread, job, subscription ) - - if subscription_name is not None: - - try: - - self.LoadAndBootSubscription( subscription_name ) - - finally: - - with self._lock: - - self._loading_sub = False - - - - - with self._lock: - - some_cleared = self._ClearFinishedSubscriptions() - - if some_cleared: - - gc.collect() - + self._ClearFinishedSubscriptions() wait_time = self._GetMainLoopWaitTime() @@ -1751,7 +1650,7 @@ class SubscriptionsManager( object ): with self._lock: - for ( name, ( thread, job, subscription ) ) in self._running_subscriptions.items(): + for ( thread, job, subscription ) in self._names_to_running_subscription_info.values(): HydrusThreading.ShutdownThread( thread ) @@ -1763,7 +1662,7 @@ class SubscriptionsManager( object ): self._ClearFinishedSubscriptions() - if len( self._running_subscriptions ) == 0: + if len( self._names_to_running_subscription_info ) == 0: break @@ -1774,11 +1673,11 @@ class SubscriptionsManager( object ): - def NewSubscriptions( self, subscriptions ): + def SetSubscriptions( self, subscriptions ): with self._lock: - self._current_subscription_names = { subscription.GetName() for subscription in subscriptions } + self._names_to_subscriptions = { subscription.GetName() : subscription for subscription in subscriptions } self._names_that_cannot_run = set() self._names_to_next_work_time = {} @@ -1794,17 +1693,17 @@ class SubscriptionsManager( object ): with self._lock: - subs = sorted( self._current_subscription_names ) + sub_names = sorted( self._names_to_subscriptions.keys() ) - running = sorted( self._running_subscriptions.keys() ) + running = sorted( self._names_to_running_subscription_info.keys() ) cannot_run = sorted( self._names_that_cannot_run ) next_times = sorted( self._names_to_next_work_time.items(), key = lambda name_and_time: name_and_time[ 1 ] ) - message = '{} subs: {}'.format( HydrusData.ToHumanInt( len( self._current_subscription_names ) ), ', '.join( subs ) ) + message = '{} subs: {}'.format( HydrusData.ToHumanInt( len( self._names_to_subscriptions ) ), ', '.join( sub_names ) ) message += os.linesep * 2 - message += '{} running: {}'.format( HydrusData.ToHumanInt( len( self._running_subscriptions ) ), ', '.join( running ) ) + message += '{} running: {}'.format( HydrusData.ToHumanInt( len( self._names_to_running_subscription_info ) ), ', '.join( running ) ) message += os.linesep * 2 message += '{} not runnable: {}'.format( HydrusData.ToHumanInt( len( self._names_that_cannot_run ) ), ', '.join( cannot_run ) ) message += os.linesep * 2
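# editor's note: an illustrative condensation of the manager flow above, not part of the patch.
# SetSubscriptions seeds a name->subscription map, the main loop boots one SubscriptionJob per
# ready subscription on its own thread, and finished jobs are reaped before the next candidate
# starts. SubscriptionJob is the class defined earlier in this file; the simultaneous-sub limit,
# pause checks, and wake/wait logic of the real MainLoop are elided here.

import threading

def boot_and_reap_once( controller, names_to_subscriptions, names_to_running_subscription_info ):
    
    # boot anything not already running
    for ( name, subscription ) in names_to_subscriptions.items():
        
        if name in names_to_running_subscription_info:
            
            continue
            
        
        job = SubscriptionJob( controller, subscription )
        
        thread = threading.Thread( target = job.Work, name = 'subscription thread' )
        
        thread.start()
        
        names_to_running_subscription_info[ name ] = ( thread, job, subscription )
        
    
    # reap finished jobs, as _ClearFinishedSubscriptions does
    for ( name, ( thread, job, subscription ) ) in list( names_to_running_subscription_info.items() ):
        
        if job.IsDone():
            
            del names_to_running_subscription_info[ name ]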
@@ -1830,7 +1729,9 @@ class SubscriptionsManager( object ): with self._lock: - return self._loading_sub or len( self._running_subscriptions ) > 0 + self._ClearFinishedSubscriptions() + + return len( self._names_to_running_subscription_info ) > 0 diff --git a/hydrus/client/importing/ClientImportWatchers.py b/hydrus/client/importing/ClientImportWatchers.py index 60c7f28e..a536a1d9 100644 --- a/hydrus/client/importing/ClientImportWatchers.py +++ b/hydrus/client/importing/ClientImportWatchers.py @@ -45,8 +45,7 @@ class MultipleWatcherImport( HydrusSerialisable.SerialisableBase ): self._watchers_repeating_job = None self._status_dirty = True - self._status_cache = None - self._status_cache_generation_time = 0 + self._status_cache = ClientImportFileSeeds.FileSeedCacheStatus() # @@ -134,7 +133,6 @@ class MultipleWatcherImport( HydrusSerialisable.SerialisableBase ): self._status_cache = ClientImportFileSeeds.GenerateFileSeedCachesStatus( file_seed_caches ) self._status_dirty = False - self._status_cache_generation_time = HydrusData.GetNow() def _RemoveWatcher( self, watcher_key ): @@ -334,7 +332,7 @@ class MultipleWatcherImport( HydrusSerialisable.SerialisableBase ): - def GetTotalStatus( self ): + def GetTotalStatus( self ) -> ClientImportFileSeeds.FileSeedCacheStatus: with self._lock: @@ -492,7 +490,7 @@ class MultipleWatcherImport( HydrusSerialisable.SerialisableBase ): file_seed_cache = watcher.GetFileSeedCache() - if file_seed_cache.GetStatusGenerationTime() > self._status_cache_generation_time: # has there been an update? + if file_seed_cache.GetStatus().GetGenerationTime() > self._status_cache.GetGenerationTime(): # has there been an update? self._SetDirty() @@ -1334,7 +1332,7 @@ class WatcherImport( HydrusSerialisable.SerialisableBase ): with self._lock: - self._file_seed_cache.RetryFailures() + self._file_seed_cache.RetryFailed() diff --git a/hydrus/client/networking/ClientNetworkingDomain.py b/hydrus/client/networking/ClientNetworkingDomain.py index 2650d4ba..f1ee85b4 100644 --- a/hydrus/client/networking/ClientNetworkingDomain.py +++ b/hydrus/client/networking/ClientNetworkingDomain.py @@ -1184,7 +1184,10 @@ class NetworkDomainManager( HydrusSerialisable.SerialisableBase ): def ConvertURLsToMediaViewerTuples( self, urls ): + show_unmatched_urls_in_media_viewer = HG.client_controller.new_options.GetBoolean( 'show_unmatched_urls_in_media_viewer' ) + url_tuples = [] + unmatched_url_tuples = [] with self._lock: @@ -1199,7 +1202,23 @@ class NetworkDomainManager( HydrusSerialisable.SerialisableBase ): continue - if url_class is not None: + if url_class is None: + + if show_unmatched_urls_in_media_viewer: + + try: + + domain = ConvertURLIntoDomain( url ) + + except HydrusExceptions.URLClassException: + + continue + + + unmatched_url_tuples.append( ( domain, url ) ) + + + else: url_class_key = url_class.GetMatchKey() @@ -1220,6 +1239,10 @@ class NetworkDomainManager( HydrusSerialisable.SerialisableBase ): url_tuples.sort() + unmatched_url_tuples.sort() + + url_tuples.extend( unmatched_url_tuples ) + return url_tuples diff --git a/hydrus/core/HydrusConstants.py b/hydrus/core/HydrusConstants.py index cf3db66a..ad53a40c 100644 --- a/hydrus/core/HydrusConstants.py +++ b/hydrus/core/HydrusConstants.py @@ -73,8 +73,8 @@ options = {} # Misc NETWORK_VERSION = 18 -SOFTWARE_VERSION = 399 -CLIENT_API_VERSION = 11 +SOFTWARE_VERSION = 400 +CLIENT_API_VERSION = 12 SERVER_THUMBNAIL_DIMENSIONS = ( 200, 200 ) diff --git a/hydrus/core/HydrusData.py b/hydrus/core/HydrusData.py index 0d5afa75..25034a08 100644 --- 
a/hydrus/core/HydrusData.py +++ b/hydrus/core/HydrusData.py @@ -21,6 +21,8 @@ from hydrus.core import HydrusExceptions from hydrus.core import HydrusGlobals as HG from hydrus.core import HydrusText +ORIGINAL_PATH = None + def default_dict_list(): return collections.defaultdict( list ) def default_dict_set(): return collections.defaultdict( set ) @@ -681,18 +683,23 @@ def GetSubprocessEnv(): ShowText( 'Your unmodified env is: {}'.format( env ) ) + env = os.environ.copy() + + if ORIGINAL_PATH is not None: + + env[ 'PATH' ] = ORIGINAL_PATH + + if HC.RUNNING_FROM_FROZEN_BUILD: # let's make a proper env for subprocess that doesn't have pyinstaller woo woo in it - env = os.environ.copy() - changes_made = False - swaperoo_strings = [ 'LD_LIBRARY_PATH', 'XDG_DATA_DIRS' ] + orig_swaperoo_strings = [ 'LD_LIBRARY_PATH', 'XDG_DATA_DIRS' ] ok_to_remove_absent_orig = [ 'LD_LIBRARY_PATH' ] - for key in swaperoo_strings: + for key in orig_swaperoo_strings: orig_key = '{}_ORIG'.format( key ) @@ -710,6 +717,19 @@ def GetSubprocessEnv(): + remove_if_hydrus_base_dir = [ 'QT_PLUGIN_PATH', 'QML2_IMPORT_PATH', 'SSL_CERT_FILE' ] + hydrus_base_dir = HG.client_controller.GetDBDir() + + for key in remove_if_hydrus_base_dir: + + if key in env and env[ key ].startswith( hydrus_base_dir ): + + del env[ key ] + + changes_made = True + + + if ( HC.PLATFORM_LINUX or HC.PLATFORM_MACOS ): if 'PATH' in env: diff --git a/hydrus/core/HydrusPaths.py b/hydrus/core/HydrusPaths.py index e31f9f3f..9b3256a3 100644 --- a/hydrus/core/HydrusPaths.py +++ b/hydrus/core/HydrusPaths.py @@ -24,6 +24,8 @@ def AddBaseDirToEnvPath(): if 'PATH' in os.environ: + HydrusData.ORIGINAL_PATH = os.environ[ 'PATH' ] + os.environ[ 'PATH' ] = HC.BASE_DIR + os.pathsep + os.environ[ 'PATH' ] diff --git a/hydrus/core/HydrusSerialisable.py b/hydrus/core/HydrusSerialisable.py index d9b15fb4..56041758 100644 --- a/hydrus/core/HydrusSerialisable.py +++ b/hydrus/core/HydrusSerialisable.py @@ -18,7 +18,7 @@ except: # ImportError wasn't enough here as Linux went up the shoot with a __ver SERIALISABLE_TYPE_BASE = 0 SERIALISABLE_TYPE_BASE_NAMED = 1 SERIALISABLE_TYPE_SHORTCUT_SET = 2 -SERIALISABLE_TYPE_SUBSCRIPTION = 3 +SERIALISABLE_TYPE_SUBSCRIPTION_LEGACY = 3 SERIALISABLE_TYPE_PERIODIC = 4 SERIALISABLE_TYPE_GALLERY_IDENTIFIER = 5 SERIALISABLE_TYPE_TAG_IMPORT_OPTIONS = 6 @@ -69,7 +69,7 @@ SERIALISABLE_TYPE_URL_CLASS = 50 SERIALISABLE_TYPE_STRING_MATCH = 51 SERIALISABLE_TYPE_CHECKER_OPTIONS = 52 SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER = 53 -SERIALISABLE_TYPE_SUBSCRIPTION_QUERY = 54 +SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LEGACY = 54 SERIALISABLE_TYPE_STRING_CONVERTER = 55 SERIALISABLE_TYPE_FILENAME_TAGGING_OPTIONS = 56 SERIALISABLE_TYPE_FILE_SEED = 57 @@ -101,6 +101,10 @@ SERIALISABLE_TYPE_NOTE_IMPORT_OPTIONS = 82 SERIALISABLE_TYPE_STRING_SPLITTER = 83 SERIALISABLE_TYPE_STRING_PROCESSOR = 84 SERIALISABLE_TYPE_TAG_AUTOCOMPLETE_OPTIONS = 85 +SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_LOG_CONTAINER = 86 +SERIALISABLE_TYPE_SUBSCRIPTION_QUERY_HEADER = 87 +SERIALISABLE_TYPE_SUBSCRIPTION = 88 +SERIALISABLE_TYPE_FILE_SEED_CACHE_STATUS = 89 SERIALISABLE_TYPES_TO_OBJECT_TYPES = {} @@ -126,6 +130,17 @@ def CreateFromNetworkBytes( network_string ): return CreateFromString( obj_string ) +def CreateFromNoneableSerialisableTuple( obj_tuple_or_none ): + + if obj_tuple_or_none is None: + + return None + + else: + + return CreateFromSerialisableTuple( obj_tuple_or_none ) + + def CreateFromString( obj_string ): obj_tuple = json.loads( obj_string ) @@ -151,6 +166,17 @@ def 
CreateFromSerialisableTuple( obj_tuple ): return obj +def GetNoneableSerialisableTuple( obj_or_none ): + + if obj_or_none is None: + + return None + + else: + + return obj_or_none.GetSerialisableTuple() + +
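# editor's note: a minimal usage sketch for the two noneable helpers added above, not part of
# the patch. GetNoneableSerialisableTuple passes None through and tuple-ifies anything else;
# CreateFromNoneableSerialisableTuple is its inverse, so optional serialisable members can be
# stored and restored without special-casing.

from hydrus.core import HydrusSerialisable

def duplicate_noneable( obj_or_none ):
    
    obj_tuple_or_none = HydrusSerialisable.GetNoneableSerialisableTuple( obj_or_none )
    
    # None comes back as None, a tuple is rehydrated into a fresh object
    return HydrusSerialisable.CreateFromNoneableSerialisableTuple( obj_tuple_or_none )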
def SetNonDupeName( obj, disallowed_names ): non_dupe_name = HydrusData.GetNonDupeName( obj.GetName(), disallowed_names ) diff --git a/hydrus/core/HydrusServerResources.py b/hydrus/core/HydrusServerResources.py index 967ec8cd..1c9bd74c 100644 --- a/hydrus/core/HydrusServerResources.py +++ b/hydrus/core/HydrusServerResources.py @@ -739,7 +739,7 @@ class HydrusResource( Resource ): if self._service.SupportsCORS(): - request.setHeader( 'Access-Control-Allow-Headers', 'Hydrus-Client-API-Access-Key' ) + request.setHeader( 'Access-Control-Allow-Headers', '*' ) request.setHeader( 'Access-Control-Allow-Origin', '*' ) request.setHeader( 'Access-Control-Allow-Methods', allowed_methods_string ) diff --git a/hydrus/test/TestClientAPI.py b/hydrus/test/TestClientAPI.py index c14a56d1..a10b63a1 100644 --- a/hydrus/test/TestClientAPI.py +++ b/hydrus/test/TestClientAPI.py @@ -442,7 +442,7 @@ class TestClientAPI( unittest.TestCase ): self.assertEqual( response.status, 200 ) self.assertEqual( response.getheader( 'Access-Control-Allow-Methods' ), 'GET' ) - self.assertEqual( response.getheader( 'Access-Control-Allow-Headers' ), 'Hydrus-Client-API-Access-Key' ) + self.assertEqual( response.getheader( 'Access-Control-Allow-Headers' ), '*' ) self.assertEqual( response.getheader( 'Access-Control-Allow-Origin' ), '*' ) @@ -1587,7 +1587,7 @@ class TestClientAPI( unittest.TestCase ): tags_manager = ClientMediaManagers.TagsManager( service_keys_to_statuses_to_tags ) - locations_manager = ClientMediaManagers.LocationsManager( set(), set(), set(), set(), urls = urls ) + locations_manager = ClientMediaManagers.LocationsManager( set(), set(), set(), set(), inbox = False, urls = urls ) ratings_manager = ClientMediaManagers.RatingsManager( {} ) notes_manager = ClientMediaManagers.NotesManager( {} ) file_viewing_stats_manager = ClientMediaManagers.FileViewingStatsManager( 0, 0, 0, 0 ) @@ -1621,6 +1621,10 @@ class TestClientAPI( unittest.TestCase ): metadata_row[ 'num_frames' ] = file_info_manager.num_frames metadata_row[ 'num_words' ] = file_info_manager.num_words + metadata_row[ 'is_inbox' ] = False + metadata_row[ 'is_local' ] = False + metadata_row[ 'is_trashed' ] = False + metadata_row[ 'known_urls' ] = list( sorted_urls ) tags_manager = media_result.GetTagsManager()
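# editor's note: an illustrative client-side sketch, not part of the patch, showing the new
# is_inbox/is_local/is_trashed/known_urls fields that /get_files/metadata returns as of Client
# API version 12. the endpoint, default port, and header name follow the Client API; the
# access key and file id arguments are stand-ins.

import json

import requests

def get_file_flags( file_id, access_key ):
    
    response = requests.get(
        'http://127.0.0.1:45869/get_files/metadata',
        params = { 'file_ids' : json.dumps( [ file_id ] ) },
        headers = { 'Hydrus-Client-API-Access-Key' : access_key }
    )
    
    metadata_row = response.json()[ 'metadata' ][ 0 ]
    
    return ( metadata_row[ 'is_inbox' ], metadata_row[ 'is_local' ], metadata_row[ 'is_trashed' ], metadata_row[ 'known_urls' ] )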
diff --git a/hydrus/test/TestDialogs.py b/hydrus/test/TestDialogs.py index 3e9a6d35..a7862dd5 100644 --- a/hydrus/test/TestDialogs.py +++ b/hydrus/test/TestDialogs.py @@ -1,5 +1,6 @@ from hydrus.client.gui import ClientGUIDialogs from hydrus.client.gui import ClientGUIScrolledPanelsEdit +from hydrus.client.gui import ClientGUISubscriptions from hydrus.client.gui import ClientGUITopLevelWindowsPanels import unittest from hydrus.core import HydrusGlobals as HG @@ -61,7 +62,7 @@ class TestDBDialogs( unittest.TestCase ): with ClientGUITopLevelWindowsPanels.DialogEdit( None, title ) as dlg: - panel = ClientGUIScrolledPanelsEdit.EditSubscriptionsPanel( dlg, [] ) + panel = ClientGUISubscriptions.EditSubscriptionsPanel( dlg, [] ) dlg.SetPanel( panel ) diff --git a/hydrus/test/TestHydrusSerialisable.py b/hydrus/test/TestHydrusSerialisable.py index 43c2b436..5e84ef82 100644 --- a/hydrus/test/TestHydrusSerialisable.py +++ b/hydrus/test/TestHydrusSerialisable.py @@ -469,7 +469,7 @@ class TestSerialisables( unittest.TestCase ): self.assertEqual( obj.GetName(), dupe_obj.GetName() ) self.assertEqual( obj._gug_key_and_name, dupe_obj._gug_key_and_name ) - self.assertEqual( len( obj._queries ), len( dupe_obj._queries ) ) + self.assertEqual( len( obj._query_headers ), len( dupe_obj._query_headers ) ) self.assertEqual( obj._initial_file_limit, dupe_obj._initial_file_limit ) self.assertEqual( obj._periodic_file_limit, dupe_obj._periodic_file_limit ) self.assertEqual( obj._paused, dupe_obj._paused ) @@ -485,7 +485,17 @@ class TestSerialisables( unittest.TestCase ): self._dump_and_load_and_test( sub, test ) gug_key_and_name = ( HydrusData.GenerateKey(), 'muh test gug' ) - queries = [ ClientImportSubscriptionQuery.SubscriptionQuery( 'test query' ), ClientImportSubscriptionQuery.SubscriptionQuery( 'test query 2' ) ] + + query_headers = [] + + q = ClientImportSubscriptionQuery.SubscriptionQueryHeader() + q.SetQueryText( 'test query' ) + query_headers.append( q ) + + q = ClientImportSubscriptionQuery.SubscriptionQueryHeader() + q.SetQueryText( 'test query 2' ) + query_headers.append( q ) + checker_options = ClientImportOptions.CheckerOptions() initial_file_limit = 100 periodic_file_limit = 50 @@ -501,11 +511,11 @@ class TestSerialisables( unittest.TestCase ): sub.SetTuple( gug_key_and_name, checker_options, initial_file_limit, periodic_file_limit, paused, file_import_options, tag_import_options, no_work_until ) - sub.SetQueries( queries ) + sub.SetQueryHeaders( query_headers ) self.assertEqual( sub.GetGUGKeyAndName(), gug_key_and_name ) self.assertEqual( sub.GetTagImportOptions(), tag_import_options ) - self.assertEqual( sub.GetQueries(), queries ) + self.assertEqual( sub.GetQueryHeaders(), query_headers ) self.assertEqual( sub._paused, False ) sub.PauseResume() diff --git a/requirementspy3.8.txt b/requirementspy3.8.txt new file mode 100644 index 00000000..7455c5d0 --- /dev/null +++ b/requirementspy3.8.txt @@ -0,0 +1,23 @@ +beautifulsoup4>=4.0.0 +chardet>=3.0.4 +cloudscraper>=1.2.33 +html5lib>=1.0.1 +lxml>=4.5.0 +lz4>=3.0.0 +nose>=1.3.0 +numpy>=1.16.0 +opencv-python-headless>=4.0.0 +Pillow>=6.0.0 +psutil>=5.0.0 +pylzma>=0.5.0 +pyOpenSSL>=19.1.0 +PySide2==5.15.0 +PySocks>=1.7.0 +python-mpv>=0.4.5 +PyYAML>=5.0.0 +QtPy>=1.9.0 +requests>=2.23.0 +Send2Trash>=1.5.0 +service-identity>=18.1.0 +six>=1.14.0 +Twisted>=20.3.0

diff --git a/static/default/gugs/derpibooru tag search - no filter.png b/static/default/gugs/derpibooru tag search - no filter.png
index 8a44feaa53617a7c02e0cc73dd61ac011324778e..43536ce073c059c9485f04bd60afd51919ffdc1b 100644
Binary files a/static/default/gugs/derpibooru tag search - no filter.png and b/static/default/gugs/derpibooru tag search - no filter.png differ

diff --git a/static/default/gugs/derpibooru tag search.png b/static/default/gugs/derpibooru tag search.png
index 140d099a22e737e23e49fd6008de45c9df7a77ff..1aee7899c03d9ce48894b9274f53b80e33fade13 100644
Binary files a/static/default/gugs/derpibooru tag search.png and b/static/default/gugs/derpibooru tag search.png differ

diff --git a/static/default/gugs/nitter media and retweets lookup.png b/static/default/gugs/nitter media and retweets lookup.png
new file mode 100644
index 0000000000000000000000000000000000000000..c6ab7b9db99d5379c422bcac73b0d3251b3484f1
Binary files /dev/null and b/static/default/gugs/nitter media and retweets lookup.png differ

diff --git a/static/default/gugs/nitter media lookup.png b/static/default/gugs/nitter media lookup.png
new file mode 100644
Binary files /dev/null and b/static/default/gugs/nitter media lookup.png differ

diff --git a/static/default/gugs/nitter retweets lookup.png b/static/default/gugs/nitter retweets lookup.png
new file mode 100644
index 0000000000000000000000000000000000000000..2f116778def0af1a3aa422ec2e56b4074204af4d
Binary files /dev/null and b/static/default/gugs/nitter retweets lookup.png differ

diff --git a/static/default/parsers/danbooru file page parser - get webm ugoira.png b/static/default/parsers/danbooru file page parser - get webm ugoira.png
index 6c913b58c4228285eb810bb7a8845f4f2477bef6..3f667cbbc62f51f3d81ffc87f36db2513b83ed3c 100644
Binary files a/static/default/parsers/danbooru file page parser - get webm ugoira.png and b/static/default/parsers/danbooru file page parser - get webm ugoira.png differ

diff --git a/static/default/parsers/danbooru file page parser.png b/static/default/parsers/danbooru file page parser.png
index 6cddd08b9c6ca33f85bf80ba698360db865faf0f..c8494f23fdb2391e2b08173a0319ca010865bf2d 100644
Binary files a/static/default/parsers/danbooru file page parser.png and b/static/default/parsers/danbooru file page parser.png differ

diff --git a/static/default/parsers/derpibooru gallery page api parser.png b/static/default/parsers/derpibooru gallery page api parser.png
index aadcee0359502673559096da56231c9f0c9815f1..172e6ebfea6b907932b6d7d852d529b8930d0590 100644
Binary files a/static/default/parsers/derpibooru gallery page api parser.png and b/static/default/parsers/derpibooru gallery page api parser.png differ

diff --git a/static/default/parsers/nitter media parser.png b/static/default/parsers/nitter media parser.png
new file mode 100644
index 0000000000000000000000000000000000000000..2a38bdf92cd721c8e14ae7bc41ee0ea66588fcea
Binary files /dev/null and b/static/default/parsers/nitter media parser.png differ

diff --git a/static/default/parsers/nitter retweet parser.png b/static/default/parsers/nitter retweet parser.png
new file mode 100644
index 0000000000000000000000000000000000000000..dc4b8833c7fde8510f9d3c52679e76ba19853bc8
Binary files /dev/null and b/static/default/parsers/nitter retweet parser.png differ

diff --git a/static/default/parsers/nitter tweet parser (video from koto.reisen).png b/static/default/parsers/nitter tweet parser (video from koto.reisen).png
new file mode 100644
index 0000000000000000000000000000000000000000..13eca21ca47c90d168cb8231ca0ed2712982656c
Binary files /dev/null and b/static/default/parsers/nitter tweet parser (video from koto.reisen).png differ
z&Hauq-ND*-*58ou0nVaYMRW1i4Y~AI`KGc>E{U>vwfq<{;duPq+KURu5MTVZbqcFF zeakSo_gap&LSujNhCmSZ{U+i01a0zGr?w@WtuG&~QOZPckyzrCD zb39>p*yj6KW^J0MP8rtF?BRj*ZF%JWrq%ZNVT-v&)nRz^QuJytpw%oL)${4pxUZ`t#lhV~uGu*<&0gHN)-!#WbWjWzdgZbkiJ^V8bq+}u9l zEvnsfA-gau9F)J-z zayLf*%x=)$-$*@Z%X=E5Vvo7Hu~5?ZWwj;+9#Wi9*pmIE=hvSPm^lSDrx?f4jSvy} Wo(wlRffq;r9x%Rau3vc3DeQkpm;c-V diff --git a/static/default/parsers/twitter tweet parser.png b/static/default/parsers/twitter tweet parser.png deleted file mode 100644 index af5203856b8aa0d769fee6b71e8dc09958d25168..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2448 zcmaKsS5VW762|`t5kd()g7hkgNIf9s1PP%E62Op9)dK`1N=GTd&;%(eh#H3#JB(efLU3ZIsgFp zmjp1--}&Rik#7LNiL)|AV#234-C!8Zdw3DYz?Rzbl~S4YJT<5a3ob}bk+^3Hixsvz za4tO~T3v~8znbc+s`9ZHN+noUJ+%(Lj0-WS&@TkvYJMlbIK1h}~px#sGi}5L3fX zR+@tmlvZ1lr|L^2z#Q zRud-td~A-5!GB-pEXe>}6V@DoALlkm0`GDhzVj0abg7>~@%<$u-4Wxfom+T$_L+eU zz=vG@dE1u@GAGFfjpQqt?J9zR8T@5m2UWZR(pcKFaHKHV;-J=W$2jd9c@wPT1YB~7 z>6{wNAi5jD1%H>VK-sk44E`|h5=<@zhh|^Y_g`l0Q&QL#{FHx&uqYVa{XY@0u&c zrcUbeYFF)fCV>HC0Pw9!*FLn)E)6YkZ01m2Q%KwpN;SCfJeNAD5Mb<*s9g6KrcNx^ z+h)vTE^H^6LlNJsn|P=)RdVtgq7E@h60V&*v@Xa*tl~I>1bq;CB{)A{d3yVjcZhut z!K<%AZ0?{9fNzBmSfyWd)-DQnOl+p z03*0N>r`fXT~dT&oXr|C>Rp49G4PFlu#|me+HJAQ{%Y#Mp&t^yG~UCL$i?nuZy!f( zIlcojF91IF58w>tE7$UCI=pWjE2aHiG2yV64^vYmYi-wl>lq+SXM=l#rlB5QKzNo7yM zMRW|{W@l5=voPYiMbDKUW$-nk9ILgg$`{@m7ZB)#mMRY&tqHrq9Y|StEV{3v0NM5D z(((2aBcAK=Blo6NP7!!Ug(f-XxKz@;cNP|*95g*?lvsC!r_SkeGGyh4_iXL2TdI25 zgn{|AvL@ScBWRFC?!>2>w{6^P#5-wUIXBn>IpCJ!wCkq%Zb8Z^uk8G6+Ez88tDKQw z$7=J9L49q>d`4}m!FOI`WzyX}8dX|ABIxOzs4WddoemkUNh{fDtI5$j9<8~1O@;!3 z{6f-yAHO(88Bhg)Uu^of^Z#XIqFWS~ww;cg1GeN=mcO&rGqMGWQFnzIi)|HwGBO29 z6!w*fH}557;xVI5v6#e(^b|iTd5fad$HmO#FUJIhs`Jl1Lj}H@-?p@ZB^bjM<@bOAiD8aY%UE1KL)|T2Rrq^UQy@5VQOUYZ)%tNOeKCywXffR@ zBUwT1k8`XL2eRm%pJPL%qWaJkxUKkbrDL|g8|5_wTLYgPRx3ty&TWXo1cN^H_blWYAp|W@OS2d)DCx`vB0o>22#M5Is}f zWOe;kH)#7lu?sS9N4sjD($Zum2kYe~mn@R;YSTE_eWT|z%|JN~#g~N}N6L@Xyf7b& z@H0nZu9J7!bk<8_f+ML^5=A8;OFbWxWWyKY@~FT6yg-j|t>RjmR-lH{{{2t!<5?mF zVOYz(2i(`K(cL+yo@){mYaErSaIYhi0a_JCu4|B0ul2NvZ?8giI1+TP8IF z<=5pQFfT(6GZ_gZ&j;4|mw5f*d0h%NFmy^p4k%Uyh{OtIuHNFOkdgDJK1XfSp`y;6 zw_}&zp!MKQ$)M z&|*ei+X%t>d-i}}f2huT6V2@|8hkUO?1c;VO9+Ybj2o}6J9*eZ=esjOaGABVvX1)B zEQl_&ZOktouO(R!Js;%p8-*ISdLPE2J6Yac)cRHJGZcyF;h#?Ejt?ugCT1A_$)D~X zYZYZu5o^7CM>&&u&sqIM%GO)K%fxG8Ni)>&r*;%;`f0~6KD}m;@|iVvLj-G|nW}B% zbg6M38<0c5&Tr5bQAaigL8Gh&*aT&Emx$Wm9rQuGDZHGoPu$xZhil0#Q-(<5-y~e! 
z;3p*O=8#d_s5bG__Zzl`oumHznT1%{`nhLbG!v~=>P+7q%Lgk(cqBK`Q?^?tW__6p zkHWk9@Z}Lww)?_gj~eGUneN3z2oBfW43&7Ot+K9-Eppm1Ut=w1dxnwpWMu|%on@*e zgubueC_j4K-KZUr{nhQKr7f^$6N|%Z!g_miFa8;|;N4tZD9~Ww|8@RV)3WKEA2#QA zv3s#TKK^yrn2*mPf>rxInHF*R=Nt@eS-pQPZ>avWV@wynzjNb3oq!sjL-ueVG~3(u ze03UYYNtu)AkPGxA1%1@P{B3l!K+|gUtwt)>2=g)965KaSaBf3TDvpnO~u0KS=VEcGa)Q9{OqcrR=lYGEw&IAUS*fEA`h;_a(^5_T!C7ZTax~~wI&mg78YBz+3M(^P K)0ZY!;{F4NJ{%gE?RoOLcEmo8bd-E|OD-KCk z5udX6%Kqy2$M3K2_w{(ZUXSPN^~Y;Hp08Jo5#l~09VZPie;c4V(u zwqO(WL8R7;Bdv2);>WtFn^e#tSD8^p_%JdZ3|ajL$YU{=Mz?K8Q|kB}47TXh%Lx+v zl<6_8GpZ>W8{YpsU66y$FbrzvCLnl$Q1!%3MhxSPKa^?!zznpR)Ba@Lq=vOr>!-V; z`QBd!C^qR^rtg3#eSB!GI~dyJTx%>TmblJ}702wtwezVcHkoSuM6y~C-v*37yx_t` zEg~_;%C=bu--SMOsQ4ze=%*0=%?a)m@Huo|E;2v8OckF8W&hAmR);R12i;Lbg2K4w}E%8s;Wm&G-fqtu2h zXmcUFHSiNonUZH`{wbtbP9bIV<_or$cejeb-wIhzBRIr~#wW$l zw3ttll^w&K6}~6lO({}AcQO(;lTEcMOixg!{Sy4~`!gl$S_g95D4&IXYvDo~j z?kZxw^k!Qcz9Jj7`n73@&YVjC(KU;NGg$>B51Xx2O9y67UUP`5&0~GN^=8bA+73Ss ze=$C()GFosgADjpS@Q7Z*Ppz5%_+Lfs>vz{J++yAJH1h zXIFfpwnqwjX+I{OQ^ER8l^(PAjSB~s!%zakwX$Sn#NdM99j?%tu(8PJ6C$RiubpE6 z4p$j*&UxdQo0!6wvHH2|85q0SxVI%!lCqQ)(T7XB8CY-o4BjgK+F>n{i@V0cUC<|t zQ;Bp2-#SlPD$!@^4!tFT?kdb~Ty&A`$Ev>la;&4%^7Hl%GXt+qaBa#%-S*Lnrl$>k z8g?(E9XmUncRkD&m4tCa>C2f9c%-Xgq~6{Av(P5p0-wWMV;qvH#K<~N*3t+I0TVL% ziXHKriid)EDMQ|i4R3>@E;hA4LG#ED#tt7Yh8AM-OZ~SvP3Q%P)LSjC|74;5p}qIG zLsEc1xJ!w~!j8I8;vWpP3~J5U%*LqM(YSv~AF^+GvHgCCtYO|5gEB}LcF7HL=WD^! zBeP~@s0BakMNwqrpdN+Om-#_~&2{Wa$jAP8pPf7pwvw0IJ=f#(478H=NcZ3ckn+nr zR^A!wYR+tKu|$H)*z$@ohAqKh*d>^&Iz~T^c@Bd$SZr3N$=&bzY>~yPm;NGN;QxdF z8xBn*#y+u%N49{)q4oMRzw-?$5(GZMi+&Vcr5$SDbmzA^-X~!NEZ4w2sI!M_(GSkw z3jWet(o-YqCclzG{F-{FhPx7lE?Jy@Ga?mLJ!5cxnK4MH2YOp4QexwgHRUD>%}P78 zh4zOpJ{MKLH_r#r8yHvIeJZ=~K-Nn1*)B(Mr%amgm+`@-z)Lb9(-toMQ!Y!A21i9H zo|iz!!>iVP>0mJRiy#NL=H9uDP=FGM)!v0QL#|@7`I^XtrfehShG4Idn8Ea|yNV#* zQU2~$E1n`lhU_0{hN%AY(vnc6N_S1Tc;^7*=MjC@pWa&O0<-uWI|qp8<1$#^<|#sc zM>oaT#Cd;MwuR|qmDvEKHq!LQuS6_0KM^0QTG>wqgb)#@VZOp&LV1XD3a10x#V(GQ zHC}~~ORw#y;9W)^DkV=zGnT2k7Tkbhm3rI~dWXHV%JTlaU6=$!a^8zLO7$ntRMz2y zW(U?O(7Ggu+Q5AJIEC{)J==Gw`qc6R(!=>XU1G&v^|YB_{$bISxVj+YtL>7AdjFIa zXn(O3ubPXRw>3#Xq%iQ)61c;#iqHS4e5-m%9K|*de%iJ{(J05Ysf5&vT62|K$yUk1 zh36t_z`222?T;grPSTAOYpyn{<4JHrjYyRx14Ph7`Ea^f7!UnS@QKi6na?&_njKlY zpXl6$`5oKp9N8qtoC|G(-j0pVP4l!5w7tm>E3mDG`Ncce>}m@}>#$$?{Fu|2WXM4H z{+>|~d(>Wqxy`q&=fXmtwAtwR?F{{7;cNM57CSc>X9xvo2%p`C&Xt64!m*YmOqq<~ z*)=Rrrp`HqGyjFJtz2bUw6IyYAnk?zSB~QLd6`*uGn&euY0ky}+F2jlxB0#8a6ryO zPG7&H{d-c`_G`%Za%H_botv>YPd^^49u|%~(L=4#KN_s_H@jbI>YN(vuG%_iQ7>ha zK8hAPNrm{B=Cf`V5@=aS_GR7t9#f(w(=MLs;R*x+^RpI0Z=i8l9z^%-3g zbK;V!|NWe$ET@GL! 
zP#6H6(+$@+pWsbSBP(uMCs=t^1syS|ZY~Brny2ZvJXp1S+t}iAcJF5&b}%o@F=5}( zFPdeF8_QoDA7`OtXQIFQU8mvOIWov`o8WDzx{*|8&|rP?WpcK;BSnA^d6+l18nt#< z&?LNPGeLv>C@1#M>2~^T^&-JLD1otx2UV7}9f>!z#PYJ77M;8jEcKn=4R3Krl5aQZ zn(md(AX}hD7C#j+0d2<_wgunJzK6<_$j$jRY&kD?j^@J@7aqyYUr`nzWOALGCLc%% zd^@&RGxBg7h`syEw#+(nVjgR_Vdyz>ZSQ6I09chovR$eXTTVgSI~G`No?>T|xzFa( z%#D_=9hZlh)Cp>C3)_HI{An$`d%ixK4^-`*P?5JF1}x-A==W}z1RW&@;>m(;`*OWB z--CV!&G^M+l}n$Iss2%sbu9Z~J+fWaNMMXQ?n~YJHNcMe-OODDQspH3?YI+)} zEVMS{SxiV_l~J diff --git a/static/default/simple_downloader_formulae/twitter image tweet (single or multiple images).png b/static/default/simple_downloader_formulae/twitter image tweet (single or multiple images).png deleted file mode 100644 index 72dd05153d6ff40e7df14954471e4eda51728616..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3355 zcmV+$4dn8PP)wZL@T+OKGucv1ZDJH(>__B%{QH|eK^GS_V}M4FezVMTOtlePXp1;d6v`qsY3y&# z9v5t5H7YhF@xZ;1me0TQatkbGr^Fb*x4mp3AJ?#yzwVIIolN!sX!NY+Ph3k?S%*{u zLqZMYuBHLhpj)@Ocn}wMaoC#Euc{`c+@w+Bb^ypm2)Dq0-DUvn@DqpBhFs#$AdTA4 z=cBfaE|C=&36qvtzER^N8Yn6UpuFyv(!wGrPDPh%170H8ZYU?fZo zGuyI3^T=qjLf&!jQLtr{*l|mWR0TxV76J$IVOKaXUx?8<=IR81j8H*RLxR0Ayza63 zRPh0QiSz?NbP(fyX0D;BLO%f7X(DV#V%evrfF%Nf6pW3mt_E2IVqgSD!o)DME!hBQ zCrTZ%ZW$ZR3`jBygmCD=k@%oEJ)?EZjV1tOzuLp;9=lH+D&BVGI%E)NAAtje4@aLY zZ5smJ9$U7-IHDD7!Bz9E=Z0XBvqXm{qYMiKMMY>H^PoHp7KHWaqL+&JrcFm+cuWj) zxR!mSC;}TaD{L9ncoKZ~h+7Lp*FK;I464j}M(dj!k#aWSVv$)^6waV)_bKax(}uHv z`PQ}}|AA$e^4?z1N7U(%|8r0&Ae!pwLm&(E0wsMGDQBuGov?)@BI6cgfnhN*%wbys zqT@-&V4lzvip_Lu2oPg93ZG?~p3!>dMo&~`Bv)L!Pt`uKn2o1_rn{j}Pu>LzM%?}b zIL_ipuDvu*$3$bmgx+vSbue32Ces3S0(`3Sf%i(704N_k+TC9U-@0c1Ny8ohzB*Db zGLRcRR{w#Ft`_aX@t=$&P>YFS4%@OoSxFNeBO8apMhONI&OXWqyaNT<@qUcm9!X}5 zzK!0eY9ACDF9a1X>87JKiX*ilZ~kgBY-ww9pMzxsGgxqR?x*rwtV8A`RH@9A7iOMtOJeM}6qwq@Ip zx@SEl*LXUl4ph4^#sDFt2dC0{VmcZ}Irq|mTST&!jx2#KTXRLztC&?&L9vZS5#$defdnZe?vwAZL2O#5w;K3QNS zOe}NgjcO$jA(NI$YP8ts+=v09wpS01qDaSRjk(lb;8JU)2xTO@l*J`u-DCEt$_Lad z+G$|^LAl?=QDQ9x>Sc7PS}reOiN%*<`mUuEp|$srWcMV(m(jJxSfC*$hMB!N$fj+S z@=ULou@{WKhGl_-fpeKGy85EFxl~W`L6&Dd@<~qrn((^E>Ql83JRPboOiVbD)tGT2 zx{RE7Aci`XRoMyisY6wn1m6aBdkp&KxPSGTNFYh%(=+>2Uy&z10Q^4(%W}t0{LZ}H z$pIvJd|E&HD*%$93mCqYu4j*L;$^;ENFtE9plQm3Mb71fbN>48R)bl*$_+j@Gh;Ab%a*%9p?H z)FkWzPOExGXN3!O2QHld{14Q+q(n$^AiIy-v=#ld-$gnQ+^^zJ8eTb3LphUE4kfKR zrAjK&RdPg7WfRLZg_l&JJJudfFU$v3oyWnM#%fY^?1N1cmyIUu7x45%CeeY;t3iD!KB}sr zcyQE)PcNF@g{n|Nk3GVb1jCl<+Lq;iC<_=kF1O^tRKQ9O)fhLU=MC zYzBR{N85%zoS#b?7LapPWDcC4?*pLwGTq#+h*XYsIFTbvUDpC6UQ(4`d5@B%sAyS= zG6)>VRp}xkS#z|V#eFofKcJdHbP)B>DPlM3o}8=yBaSvz(DCVsI88*8Z5^PlVMD|L zJ$YN0QLuM#mU3f1clB_Y<>J)?s|}}E^@Gy3Y(tUgLDq)w)`^R9;yZU&D?krhnn5_1 zDqEH7s&>Qd|9UnYmUbSe<5O?% zGjftSx-TEjMn_;+a?S#DY?hu#i6mYn1l*nMUQ$_#t}f%@Zm)p?nif91lq@G0PuMiD zVhHV7)Zen$+C+y!Xa)q6guhyq0nb=UYU#&_$8_5-znyI=|Lru*Sy3_vdOBye0AO9P zaR_wm)f34)NyJMv)iK8>VUnfj>eB7%%Ihx3Em#GzGq%(7=6st`Z%|)N)c7}Bis-n| zQZ(r@Ku-?rh>Kfv>$Wus=_XoLw;@kR&&-y)YGLlXsHX1KJks1$-cts^qEACk>5(|{ z_f!h2VOMKvnUN#xZP1EjpCsj_v{Mk4e#AaiMT(v-U3zSBp4>vCKqh>A+q4U&Gq70h zfnj}cQ4aJ0T7fztT}%um!PYQ;MR6@iZgSOslno{)@oxj83DE<#$@=_x07QEBm2lp& z|GlzFYasu(aS{BpG!M=rJ-L(CK)yXZ#ff(TJODWH0N}&}fD;b@PCNiO@c`h&1Ar3` z08S}Hm#h0;EMKvrW8MwHq9qSxo!)x*RMycukL}2~-m@%c+wLEp-}{@qt}k0=1)nQ? 
zC&N7d_3kgWt@N&P*O?aQ%_*MxkNz&OcF*j0kDo8Bnl)#0;fm=;TvuPXbIvn!{^)(^ zyPbzOz4zb6&-PTjecIVkGH`##6VIO5pJBE+`u2aP#VdGLO&Rca^#xoD#7&3a-22i~ z15bSKpUtzYPo1~TwSIi|`WahR{dr{B+`sJb&Y$;IUr+vR_wL@e@^a7nTYqq6<;(3i z4AdS9Jht_r!P)1g>`t?Hzqq#Z(?@^uyC1!^>+Gh7fBNXZkG|CD-S~p_(D26Rw)8%7 lNB;efS2sJq;166){|6SXvNOmx6|(>U002ovPDHLkV1h_pK@0!@ diff --git a/static/default/url_classes/derpibooru gallery page api.png b/static/default/url_classes/derpibooru gallery page api.png index e42e62cb4404940ea7adb0bbc6b86f2ec8405109..2663bd249d5cf45de7843a126818021ec4a7ebf1 100644 GIT binary patch delta 1897 zcmah}={wX51O3gILADtqOD6lsmS&VeO36Ch7)wRQ*v6i{#TGNTWaqkO>{~)8%g7)K zH`|c1CQFvck}QerT+jOly!U)MpU!!nbIx<#^6c;=qsZ3{bZ*=V7+bLp@i?t}IOJ|x>knoP z;Z(S-2{}q-S~#Q+f&(B3pjBb->8bkGu$21w4P>`;CFv}Jf=*49gLLixefzmZJkTT| znq;0LYm@o;aRJO8l?a)JMe3&U9U@}FkBG!{7N6+wLvp;>-?%w3cadG(Sv=@K!c^V6 z#P18X`FvQXrf3bto;sE@#kLwF1yX|;p=8NTlAwALaC;48{qZ9S2k^AUDrlq7Ne34h z>CcRWb*f;MQF<_qza8<>)3o8O`U2iChpmefp zjwS@SNQ#!s$7$k#(BzV9#|`o+Z&7UT{(N@G^&@`7cCf$a^GMtEIU;3(D3~w|n zhBHGXontw{9axP#}jGmM~+-rF@ax;3l3@bu}|>v~f{hDZgo7=SRmmFY~XN z0C=4SFr=RdX53|ypNLbHpj0Q;$}<=EqjLZ+VcB4c?W_gekOr3}_>9_$6kdMtEe&{d zZxaKWR$T$#WFvb$4WqNMw-KGhQNudQs05k@q^pdSFXa>t_qzL+sETrMk?RJIDa^Hr z_4fG9Ye6+*HxQT|GwHfe=)$@;|I#~0Vq2{|oB#1|c6f@yzz0z3%0%_=?8};TR(C@& z<3u3VP?J5CyB)QhqAXoeM}hOsQF(vM)7X1DQGrsPWLQjJT*_Fij?ph&D#wkt&#pp+ zR%tMw8CJVlin`@c<;tK0$+`#aCH#CrrSXyxl>(PV#wyWaYA1DA^HG*cf`$=_UY99R z0zLRUx@G&odLLQ~8K<|1UA-Rt2wd-$f8Mn@2PkGWVNd36Z)p^97~o0<=*y4+c%LHg z7MaO;XR}Tl3~{W9v4?9)*MyhqGEJ2egx#x9gosl|=~n!%;#w`$&=aA06?Yj*5dsc& zVo_pcnRZQ-a}v0FbUi%iYgpidxUZX4FY=B*p~$Q216>tgA0#w4I%0rxlrA+Y8MHAQ zA<=8>V}70nZR(@lpn%!bgv}T?^)*@XN{=L9fsSPSDagD~R&HE^C)8MunbVcP;utnf zaS;4}&kzRrPr6f?zjz<%H&4aP4xj73C3q=ilffswbYL!8gRKo>g0V*!l%~qN_T7%g z^e>?-iNY@v#H0eq7%ji=y6T|}fc9x%*lM}lmQ%iAcMpa3hK@ItJn`R`Ry>8H*x*^a zf20w1C*CVz*cKmhPGXzJd){L?|740qi1^70&q;z7U(%lEcAJwTT)fzz$p=b>NizYo zD_mXu<}F4dj%ulH`Yg_ewX{OP&!T>wGYflil>Q<9mIU(6X15kuf2QpRUd`GS&+?=$13@M zL;CAiKA)gIqgQRijkKE|z{Cef5EOn9Ebv=_vFa}5saltQ#-6v}!Tq;C_My#qjcXTz z7G4eBV-wrVy=tbszx;Dd(MM$@ZH2=v#*Wp4=?a6+ zyvdN&ccFzGYg5}hJw~v)oH83MV{`7=;Q$|u=#_1T@pvEmxwJpYhIb=kS*C4Ikd{N7 bAiOz7f3Bijb2aSYyX}Q5wSXYcRziXKl1TuvBr74 z)KLaWuN5?_Y9^};lClOMP4c&X5r`}o3~gwAx~5Hu*++v8`k3k^0L7`^vpoPJfWHna zmEolpb^HiG`eSk-0!cIKbq4jt4(+`jauFPvrSexgGpbEP@_74h`C&^UOduKkPMkXZ zKA6J^G7{P0ipv|?bnmA8)!=~vsH_%y(%*8>9wl&XyriJA7nBJRNY$qjG9W?^<3cX< zIst{;XX)wvU&H!`3iahd+&qAE^#X1?%41?XFr3`7 z6n)OF;U@|WI8tU^`oTjn&fKiO$~FaT=YA|cp6ER^i_HG}-tkc|=IfQ}ejW*$>?*GV zSu?yWR|sHxa7MzB1)x^ZE&nVnikd$}plA(X@KI4!*;C)RKF1zvPY>;2Y~u7aLC^agl86C`N-1$OG`s8s}*P zXYT>>DTa?s2};EuZ3QE-91gLba?+h?10JpIA>baQt)EUc;!oj}5Sq@wuNo>2B^oM> zee-$;KZDugoLp2W4ORZTQyrFIZm(A=QB)gphgCeIfgo%UGD2`g0OOj9w27Ln5ZzBY zHcl3hs6pFi(124453^aRYBYl_b^}8bvuv(BGU@HKQV#f&`t$0reVHXL`gclo8yU}h zHg6BR6CQ~@`sms%-dojv=CQ6~9>sD-TN`f9aJCfJ4gSdtD^k2)k@a$JAk@}d77y@c z@Sf30bdNYzUwa~4i22^ViP5J*IS&=EDdk`kt~=VLtO+|Nd02ZR zmti!}i+@@9iaF=0{hF$RXv& zQT9J;$W89X_9@Qp<(7%e^G4V0?Xb#_;mm-MB~iuEv)D#xPo7ifjqKXlQ+TWCy`t;% zPsV`J=iG_GDrEBLp=4DN;GC zD^TR^H8u;tuRe)&-7{euzaVKfi}k#+docMH_uL{TM11`vALY7N7M2HyErOM4fEWf4 zf|Bt<|96kv96$5GT}jxH)F;_`qtEl+c6>0mG5JwUmHA#tl@9pg1R-omLmjKOBD=n7 z-B?~GJ9(*JK0&$#&-F-8rWaHqdSG79LhLO4W4HhNzNn)^ZgZ=Ge&SwP21XeqVuVzU z@ku6L^-cnyI*unhg7qRxJ&1(*s^u2SX(&F$t7_1-qCC4I=`=$1=0#vFarLyw!Dqw- zmDc$5W$O55_e?-ibr!LWVth$hr@UK;OGg^Qfc9Ou(zlr$bH+7|0tpAr-jN_y_L&yV57zh=*m@J9kKu0|Z_wR6H# zc4tFzzKY+*1dUGBm0cIQ+BVD9iNu3WBc0tZ$k57rgoFpH;|I-^GPwiy(F>=vyq4!D z^f{dC+~mE_b!#(;t2bW-mdeItDcvp1N@%#B#!Bfzc+E9$^bKKUTk;!}+Tz%y 
zB!|n1H+G(`{kiP+M<{MaqdtA5LG%ot^c(+p^4*m;g`dLshQ>Onmiwn=yWG5EJ6PVSJ R%SqthY%Cni8%_N({sH?WOhNzv diff --git a/static/default/url_classes/derpibooru gallery page.png b/static/default/url_classes/derpibooru gallery page.png index 8997fb370199ca4cc2365f532ec93b4d36b58ce4..f780604cae71c653ad08aa78af945640bbc75278 100644 GIT binary patch delta 1764 zcmV0w<9_ekyS;1ISh()KU((KWZs*P0dB2(W-tJr}d3rBFcz+j^;T(7xqi_n400<-i z!mAjKQ{Wk#C`qX%zabaCOLLI`-a<*4OaR|+tXwH^2k;tFsT3t*(MIv>-cf{+6cWID zxUHa1EEL7Dz6iMPBonS zwjXx@PwoK!JkU1%&x_@))BAA;@MH+U`iq~5iYs~t2t@*TaR;F6Myf|lrKoWb2npaF z_>}iQ13VcRZTywL1fYcd&lfr!d^iN(Qe0Bs|4bmA3V$6g-K% z>kg3?d1n1Ww=%;)sV&LLbF33U&>957NmYR(0<40r=>&j-sc0d7NB|oRx(O$J$dPV9 z08=GXNVXe60^~+Wn?`xwLfS0L^QqfND+PHqnSTqFv4qqRl+7)F7KGLGu7Ch=Fa|C% ze%WcjlY}%{kXgHEDlwqE{f!_2a)Sfb;((({vm!IvK!bfX2LbeTtTqXuAV;hOu+u>H z0b!M2nJSq=plmLGJ#OABt&dg$|5A^jbIjFf#$q5*BQ6pXXRWWWZePcQiJ0dKz8S8 z`$T#Skg0(Z(kC)^09hp<1j>eD&;{g1oN&?-N(AM3OGx>Y4=KwJ2=i#o$>&`=e{=i!aV5fn+pu<)I7#ct!KsFSE1Q-lqj`*Y6bSFd7 zg5|?qkN{C>X88ozt_7okDODk@YD|0FZ%Q@?ko(UN#WGST9BPWrS*_|oB$jXMAWW0Q~s7L$+!7JmR| zplEXoTyrE$KiWi@Vq!)zPCHTJ=)Ve4?P|5uc%uq;(?2J2@0|AR$8}Ev@ynqe2fhtN0w9n82qXXk34lNXAb*em2qXXk z34lNXAdmnEBmlzQkY9_=;#ReO|H0Ohedl^6z5UmL)qh+}OzL_4aO%g-&zqjB^(O}Q zk80+3zrT7?qd0xtnY-4^KOcK#!=AGH>#r=F`NXU_Er&NRN^F^Sp|iST)}5cMD5#EA zZhms<(%60Fcbw^~t@^!r!heMsjcb>8$1c|u)t7x1-@WOv;|=Zi^j2=GUv;Ri=JfTM z4X3UydbF$m=kb*!xNggp4PxE#1y%38STV1ycHK{hrcNtvZC?4!t}h z$Hs4eYD&Q0Sbg#EJ1-rnJ@UBt!)x^?<_-K>xMcf|Lz@b>4%F@+e|2L=U*GY!pQppW&cltI$7asVG6PRzNL~@GuAlv>L(U zBKQbK(5Ou~Fvch%5BE$e_y-;sgO7L(CJK6piYXT!A-RgFdWbx>Ca6~-$!mu*E_ zl-qG#U0s+1cn}5O{Wy7yEcg5Ao6!@JxC`(eKz@ ztr(g_0(cNqmVYTMkpP}!09Bs>{N2yTkN_S8D;yOgJ|FpZY$3s2fEV%C@;R%|Bv&8V z+Jzp#lOCY+mc1idPL?;0=t2+R$rONfT|F-mNC5A^ zr@sHWAd(Ax&A;-O0FbmrEgP86?BEY z%mD%oYEzxk!RTziz@|ZfHghFuz)Bf`N>B)p4aDSK9L+aGfo76c6O}3qEpl>l%s4H4pp1aQ!xjR?wz0%-#T zuvD^yWV#U~Kz4-mY2@=3(r1~^r(q+#l=y107Jn*Zait;1&CNdx!fI+)Kma%x0~eXU zoHP(gTp2CM>|Jz~7*O8%Mvws6ApmO$AW)@Pk(q6v!MU1+0LD7jnxs%rAa(*cX~1JZ zSmRf^N|q2PTgcytn|B)7E!V>YfS59cX< zA~gp{*Fa6_PJbGx`#yWrGF1XXplm1x3D6DnUA;Qp5vLqL zL&UO^)sX2q0NnJ8(Cdjt@Gzb~h;{-vX}}kB*iHab11JQ@hGLKa-61Rxe^j6DL`Ye% zVz?C&Agau)7zgfJG8>p$l~Staw8#9GWNR=X+@>}~{X~eSga9WExM>r10$3^mAzn~6 z5R+%xtLi z+uzyv+=`B=56<{}^0;*?&JSr>`N@}4`mL>Mh`s#A4b`J7W?n-pZtOq&!}0e&`RKTP zBex#!yQp~8y$|@df3d1+Q~BxOyx`n|e>qh@cT_$6(T0i|X?kVZi%;D?;oLp)rWuDO zK7Vl3`Kc>+MHZH~Y}+1bJkoin>-2)k#j%=$O(&A??pe@L`*qR6`>vZh;E#i6J{>gj z@b@>rHR)LOkli0|Sh{W9t3QaSSrM@V0RVs^&IRKI05U%m z@PTizx8Q>i05)~tFle6y?m|#hq&HsOE<$4~?DOU4T{#@N%Lqhvn0xifP~=wXpEPZQ zmev;a@cvf#mQ(J6klcOl?&6c7JAb0k?-vgZh|!-v7`{Gkb}l_6)3C40eZ z^pwasx`(~MjWeXP1&OI&*qWlmp4pVLmC}__-UYXJndUNC3LPJ1SX|Zj8!XIy@=UqO4-;0 zfX8fKF>^gUM@YH(@)2gH{+WU{kl?VcQKnH8E1$Bj)^SHcb`oi>pr!BgTvB}Lz`eeC8 zh}vVNVx&<|lGP-b9+|IbaS7R<8c^+pl5??@*;m$l#-iW;(1Z>;s=C|qoKd^H#~)Sk zaoq-W??sCSu~9`jV6ShmHakyfK@s2crY^@(B)v(`_m8zd6G5U$F zf@fUS;ZJ6khk~`cjANX(g88#s!Tzci2XG?d#X+Gf2E0Xnbk^*YP&tge<4h^D%ja9} zQ$9 z$P^3W(EXB7Qsebk>q7AyE|QpuXqWNokP-VgIuluYlY8s(3>C)XSji)VDhcAS zU(yc{k>&CyG6PT066KECOqyXh_-(XO`)V}caWnCu%bhajT%9gH^_o13q7M&wWZ_G# z*-X#^tu~6(`d{EMD6a*3hh`4N=9O7^M@{V3JvhHxA4jO#{gE0|btbG=Qx?*s8 zu^RS3*)`VEuOozZ=X;D_i8fd@828aHV|G4;RUX>2N3G(8{=k&2Eu$FgibR3gda~iC z4^k&MCR;$DsBdC&3v?!m2I~KMD&Cex;ev@_s+zo%cTQtW=nqtSYmW2VzZ65W5vH@u0}zs?(?t^hnP-!2 zM-I$eVxKCLWrgCJfJ!|2c572;0zPk6v=*wOl2*Y9?dBd(;3(+G!AhZ&A07M~!+!y? 
z@7nNpjD8z<(EL%alX#IQ&1|y|NP1Hsy=@U~_nZ{oTG~g68{5LRl#oY_{YJ#RAq^>v z$#;~j?Bop}UC8|Y@R6G!=oK;c*!uFRZ%7FrpEOF79mR@%@8OGHt-M{H%&|)ud;C?e zyE8gFEq)QzuMibLwBzIX4WWO>$zET$K~0o)z9dVQF5Zo23f5{Lh*x7eXO|kHs%mmb z@yP5H)%NI8QZRd(#ul8FxpB#=a!gIK9^5pO4%6f=eMZLfbnBo3zp__fE0vH8;}p^0 z$zTE%c_{TQ#XID*e`H#m*w0;W~S1 Z$~vHhSXfDZF^SoDcO2FOQ|k~y`3Kls&yWBB literal 0 HcmV?d00001 diff --git a/static/default/url_classes/nitter timeline.png b/static/default/url_classes/nitter timeline.png new file mode 100644 index 0000000000000000000000000000000000000000..7662c1eb2b85646112aa2772ec218ae306c02bbb GIT binary patch literal 1328 zcmZ|N`!~~n90&0C$J}iZ`zFz$v{LeQNy%4iX=K7&R-z=iFPCpQQut<6uGuWJ$t{;! zjHwiwdkGyQ`f`xl7&VtB-^47fef0@9U4ttz%eN!zW#QyDUehFgWV>{Ac#$bN2Pj#W{{8tv}y*F1WZ^(_4E`E)|DlB@S^x-5sj zM4l*YocPe(IGJ;sX|*Cnf3B|=^@d#&Snzu4>!WB*H?uGc>1N}wM>)670iXv!1xxFt zTz9#DU1xrLk4ej`V2I?M=R*Wji-9Xn8H&Yu=FVkU<8gLE!oC!fQV6Wlq;GwiF~ciC zotJW92IWmr0^hTxi=6k%r3VmRq}YE2P5%r&ZEf*j=_3eNwwc82O89Ce#UX%!0c(-) z0$?m<%Ej=ho7EHu$aN@d7NKl(EoY4Pyp$b@gIJyxf; zo{)9Ds@!(TZqa2Sa#Cv-5|nR;473N5qn*o!GB5zMQ{2N#uulpINM2O}0bBC1^!iWo z;xFiq*}Sy&DUW!9Yh#5O!_<1Vq|GP*>h4oPAwFnrgw+*#Prjol0tkSqRT3T-XT;8Y zBg#y*jtG67)$(LN;xi@Ta{N<$D^b~hjxs^01G+p50tKP7|SWa39AbN?}DXCtqN2!vx&x=1gD}B2TUH zWJcN#?BfpX;yi%ec9@-}#r3B{ZG`eMGh?b?EJaBxbr3(ru+?C2J0Y(&S`frP*nG$L z4-58MV1se3os;}TG>41g)H+_3=Ej5VM%piWa$I{~hI6QJ5aR$NHJ-1@7n01?Uh8aP zR|BL?;=f!$KoMYm`Tu>zJcA5h$}&IIuKvWrqva`u`Y@V8b0QwstZk}j?lx&5WV;y+G#z7CGp?>2r9gIB)VgBi;aVRz@ADL$Be(0Rxew4TS{?KhEelRzB z@td%?X`cVtAvamCHuGQ-6Rjt|wtfO;zt1uqkmg5cuVuMtiFPI+50Dk^k3RT|jFU}q zV({V8LCGLlb(!`!`k>#yqr@|QL_w0%Q8>_pL%-jyhc{>tzAovDy^Lot%n69hUlVQAm)|V}%9H_#Neg#Vo~T_7|pz17}TS5#%=@k7LKj*DPK zSNk4Y%nZT%tYiEJKB9S*H^P^g8a}ImlrP_fm7YA|xVvf1p1+&w8^jbZiCkG(hgF7@ hYh8CS80CFLMWUK(*0<7lrGg4J4`=6st+u_G^gmJK( literal 0 HcmV?d00001 diff --git a/static/default/url_classes/nitter tweet media.png b/static/default/url_classes/nitter tweet media.png new file mode 100644 index 0000000000000000000000000000000000000000..5f34a770016e33f04a3a99ea9b9dbe299342d351 GIT binary patch literal 1606 zcmZ{j`#%$U9LB#hMs5w&Tuv?>w^KwcT@ zt+7hfnagn!&KPn@NvlM1o1;IV=ZEL@yq=$)*XQ$kr@1)UE6A$L0ss^oj-p)wkl2X8 zP3D*PX8j`q0H)x8KH?T9_*`)Q!WQ=}9X&K-PxVS9=Yw>5_LVIf-d8QkP6nj>TxLs! zRqC9mC_d|5qU**<^Hz8KvlrvgXLvJ{qs!2-y?}jYB8N_StosV9a$HVz0L^#R;qRK? zOL{mU8u5A(em*oPM!a513TayG?Cf3=CW!9e=VlbHWooaa6MUfF=hb&Z02zQlkDzho zK|6yvJ>gVI=-oXo5HL$yZQKdersG&xs&q}hu7kgB&+v{fTrGVo^ky7ibIcC!(Yqw~ z(*7o_7dl9k;Yo*Z$MViI<&~fyW)`BVm9v|l+2(H|0Rbz9s06Y3B=u3};5KkCB`L>f z?zE6BUNy)Q_eH9D7TZht%)j1`;Je&041`$ZVNd|Eayk2Pgl*UN(Yhdhiw{MU_kQD9 zJTs`ZU8=e#Dv!mC=|A?GDz(;<#i}&PaDGdPqcgGRaER=M@E>*Tr#$teNO%~9`|`FI zZTl%>#rsvRMQj1_hQ9%)7PBYp;}cJ0UB(R`ftqw(beIm>PqXiM-jB=ntD(!%zOsg& zG>JR;+VPF&aO}PL>Kz%|PnXXin2#cDC#Sb%(m>=+#jon&<1CtQ%B#2h_%@UkH@GdZ z!swzqy~;|KBr5Ukwc$*aStuRWKajA1%4Z-;Z3zImglq%6WC*z_LuSBRl80*IzvUMV zR;$%L3|&#E^o_}SZNoVssr|^*YiUAD7K@#6H>4m%Y=@(UYaHl>BnyH1GNQZ zfEV~u#o6!!!BPgSngEw$pf73k-Kq4WM{lb*5PxQadkkeDV`;RHIev&zC{@~oAZaa} zq^IsIPd!JVP8dv>Hd_jJ1tZ!uIXe^zT;t1kVJNl-zm?RIWecQ}PC?-Eu0#5k8Trb} zE&2ja>^W{sz(+VdV=MRAGA*O$ImCkvFMdnPdO_d(MmW4N$ZpqdC5e0K6&{2BL-40qnTsT#31wyA^V<=b@ZARdB<7Apy!9+_ZEp447^O<=w=W z>32FRvPU8_(T>${3$rMLYaFw1ZKhr9K^X9NufAH4^e$!9iTKkv7v_IU`;goQAd)~E z00@u)Hosto|6=C%)u*&))B3b7=NHuKedf%@dv8vd7w@5_b!{!}5o6j#ZDEQaJV=j_ z6eXg6Yn7_*cN0@#aBBbiQfu)AU%QNM^Ti! 
z77tlj8fnFvTrnEk7DA@1ZGOfYZouEU5U1730>Ef;uDm%s=C7NvV!L3I_N#8+{D%By zlkt+6gNa*7ixI=hj`Ckaog&cRk{zKX?F2e~`k{>@{4D7mUi-I~DCQveiM0`a?qL%{ zQ>4;_n=OeUM^N^QsN?-iZrsYpl50{ghOEydnz17jMF&SH0COBu(Ejpx5wjNR=+{9L{4p*2_^75!z^ zJpaj=wZ^HhO34%7p+)QSHB}1~Q5Dnth1JL29|iTNEB(9^7Y3ScE!CMnUs;(Bx(=O6 zNG)oNS{|lG*Y0C9UeYs(WUn#{S7)Y38YJIC|25R%V}%_dU&OOh<6VX63GC(AVQV)m xhf$L>^lVt5qnBCn6cwRx9$j3J7J%*9GKgkgpV$s^>Vz#hPCZ$XQ_+*o^4xLylWcIdX%}o^viC5AvtGI3htx&w39glkVK**#N;t2MeU%dhSw3seDBcrEt z{V4jbm|GB9UanU~!1R`$MZkOfElm3*WjN*?|3h3paoYdObqR3xtc#d&DbP}fJ`aMA zlTyOC08iuj#PaxN9duDUsvU~yEo0;RkQ?(vd!9H5tsU$a&b92hIK$kQ$*xF>#5Ok&>B%GRyDGwmE=MW)=<%rY zYSg`_RGevt(uxJ}Om!KA)J@*6tw^dg(iy-YLLm0^#Ci7(75lJ}a%!2I^&s)cTfkBOKkTy`BIEco#^tVW3F2vcx9UpS2b6Ug63GqrdG$s*tgZI~_U?62-Y( z7XJ%RY`EMKSZSOL6Uv}SY5L@?St>-9l+MuQ0e9F`4ZuBpqv2>o@Ll+zUeC!h9xN`O ze>-r2`G(^z+j_F8!nUa4toLCn&SHjB$`RxvFS=hKe@j?|iH!#j@2g`=QPx`g0LRlN zsS*n5DBY;{<`2lNf2-N?%`QuO#6&&3V_8r7F!S(}z=P7s0B{4;uL1G_tzR$sofS)R z3=g1XZ@XD-t++I?g@{!Xd-_oX5(ipBs{XjVypOBX2g*94GFub=8e===%NDm5Vw$B8 zCQ^L`WxdCsTk9sb3uIhZd|*$>m=h-(KBI<>Jp=%1+5vZUc=TU_NT6~rkoB)?mVjlB zTBBCfIBrfRfy#h-iKAq`+wLt6uH{$I zSD#qxmJ8eeum-LXmV6FVOEQAr-`?6uAH{kO z|BOZs%rNkD`X`EL&EmjX$jS*%LP(|xM?Gsb*W6?CZhp&4X<`Lb!2-tp}@1|W+&5fl)Ammd_0*xgTAA*ki0qkO3s7ACj6|x z5i({%Ls^2fQjC0Sup6`CS@Wt1y%K%@y2p^}`cglaYY`TCZCY-4?IyE72$9(D-HA*x zN-7jcV9{UG@H9Ghrr^g?YP{wL+-%WU3^GRYQblrPgg@YQybVM9f9&OHsa@xrd77ETCR=GkVFAGSeHG%j|kHvh%TGX QTYv)?-Z0TEx{iDNKM(?=>Hq)$ literal 0 HcmV?d00001 diff --git a/static/default/url_classes/twitter tweet.png b/static/default/url_classes/twitter tweet.png index 437590eaef76f29c1d02d3432bd7d456575e359f..67e3d51cf40db4f3dca9faf5a5e44f4aa5ed9c60 100644 GIT binary patch delta 1414 zcmZ{i`9ITt0LR}$SuL!z%GJns-*TUA${BM#gne^|oJ%1*9LX^sN6yMoBQ{rwGQQ+n z2{|Jql>2JFG)Kcsg|U6{tv{gGkIx@oKRh4L@4~r=Oj&53n>qf1?X5{xzUxIvyQ5tn zmC&_>J0h+roxa8$o>W7H%<=Juqt!XK{v)*11C*uq&Gwh6CwyH*QPI-R%&4EL%7iFs z_WtqF!vX_X)OEi^Oj+s6K|ZydeH8|pt_QWp3i71tERq#@wwu#?`W_!wKA*#R^s@`2 zR(n@eBO9P3>9BWN{c(eo1RDUP0Vt`d1O&*kv0`JO^NXhzAP{t9O0qpYzb;k^yp=aF zcKx@he2&&K$C1_Z1{jUjSH#mcJi$S$C)`XJSrJyp9z9NZwP<3J3Iiw&z+mip^s#+J zMfhMK?5Nl0J+ZKXYbzZ%P@K8HkEf6LYR#YA7S-!?VL>iqt;0*p163UD3Z2<;6sb+L z6g)hSxPBj@4$1%=;P(z0+^zZBU-~({ZHd2#-@l?c@Nu8|Fz0}mHR-yR8p9%2Pe5=0 zH%>h?w%4xBh>7Lq@ z(Mq90_D}y(X_SR?_c7C%JH{In%jZ=fi(Xyq`Lg=YUe#D@E`yar5D!HC&MIsCB`iqskBz%a_T%^Ei=PG4EenP9dNtQ%GaPELH9e_L17tDFn* zJtid~PbUx(#0?70f66~(*mt)FQIUcdLfY|9-&BXw7c^FlYsC#rfMcCZPi0!6wKz4= zP_-9}ipA@flS5HRy;2GCiJHX+Y|^|&|0&0Py}|p(*R?ycBpa`_I_2iMT{*d6b}U%o zA})z+`6m>Tkox0-x7}^LYWI5e?J@|$ZqN$OwP^$dINod^7SHQoU}oVMq)H0GrA`9= zTLos3YNi8;fjt>5_KjP$9K_|Dn?*Dy9jLERC`$R>yIu0me^vQ!UxZd^gg9}sR=Fii zpLD6HJ3b^UhX8c__>jw+z2J$p`;;I?`v(#iZUFtQFo}CoGI&yh-{E}%OCn6gK9q2-zuVI&B zdwgl!t%R-b1<6;o*Q0sy=}kpM@(8rmA*JMu-93A0a^0BO(y~s{B-pM+UUi6a-rZn* zphtvQpmlF85Ce%}BTH#%*}CQs&wk|%r2)6wx+S7*a$h;4OIfa*{>W^7Pq0^BRqR0} QrXBp8xv>?#%FsRjesENTb!5=teQ^ydrsGX95xH)V` zv2XkCz4tA(Z=q7NWwd>t&p+3@_wIS)d*6HRxqG=+0=<0<0e}33px_zsnj!EAKmY&; z0N^VEcm%xRvcHk-auO#V59A;Ke&HX81>m8k#xW^)0KQ=~8VO|b-eCH+YY3u{5d^?L zhKh71FO$hrtk**fz(YLievO)HG6|c`zw)XPRssCO6p(cir>9m1nqV2o$0#5qjNSPS z9c2Uo@DRdeB7dw?5CHEKKrnj%2B8N90^lK(ci2Vb;ejqp^G2`=;3H4+k~TgxZoG9| z4<3M*9-w>DCF$=S<7%XO@Bq9F0Vw^j)GE`$t{y~y0Ql$u2#PfRWilEG%O3){s{r&9 z+Rw?M$2oW~?|*teMLhyb&gEkOeMr6|mj|fkaGihOWPcEO(2i;I^=)H*Ja7vSa9GoS zx#%wEvb}1KnEG#<3?eojaSW^i01yB`xiZD1ylsK!Fglm~3k?IciX}7w}rj{m$ zduy*obhQCYaT-}?tj)SC?y(AZU2+X4)qfgvVw|MdUmP&AwxXF0-c;KiF~w!~GSBkCM5#c9OJSj@XH?hylsk_~qd z($sz?X{N{Mu~4y{dY3B0&(YVU6{r+vo3_gxFvV$PV*!FdCn2%tB<-&a8PSzGRIiD* zCxCLahPf0Od{;*&K5sf=iqpu(0tmoKhzzdOk$)%V1}$i8L@h(-+o?s~L<$1qrD?(4 
z*RQ9rHN|OUYXJn{BnYO`opW^x3hhyoCuA}wF$Dtn{{*l=TUFmapB}!Y*;=S{`AERUm$CGMoZX8nMnmh^JhOLa-UsNt z%t>i*UGrw&fTlQ&`pGC^A7Bun7vwCVNI04qz9^-i%a*hw9w*@s<ep+RxkWy< z$)+Fx=K>$~s8JlTE#AtW-`O2zRi=s<@KDc&(q3!KDU(zzvpD)!K6@wiu6z)H6Mr%z zzS*W^-s47QKv(uWpQTN5grZlMBGl$@K5tsPPyO^vs5Jc=2*4%qif8_#l*5x|o89@a z4l-eBgAeTkut&$PK!pnjwp5;Dbtag+IRymZgdF%j)3zu}MRzWFDi{Agf109}uMkue zhik8hRdebn!(4>cYp8C&75&Bm$A3}~YH9ExM3-@9YyvA7%%#3J0s#y%bVm`Iz9;vk zDOGfTwK89OSWNr%#53$@*RL;;BSC=}CFUcMvcHeM=qE(bQ-1~>0qmKMgMs%yp$sq0 z(lK|6$q4`fIA0K0T+q+a??gHbEN8#&gS~x>xw!rh^HFl_%R||ZzK{vM0e?T9^S#oK zn}25DU;yRPg88bgk-3whAwmZNa5^%wKIq#mGo+2J*Am#J-EJ*a^NjX_3Uk1ZWBxgH zPXm1!_7M1s5ka16wkD$l5Wrv~WBfTYS2Jwv+|svKvkR(U6SITJbp(;CX0oy%R|UyH0E3T6P13V=^*QlVMa1clHjuMbC(3=D zjo74H6+{Z2;%B@O7@^y?GmsF9#CElX_xtGOAvTt+p}SpLpG646kAJh_rciK2?D9_! zJ`llk;AId900031AOHXa0Du4h5C8xI06+i$2mk;903ZMW1OT`(1b!iyn^E}Nx`X~@ zcREM@l;+dyJ1=&JeE*L}cS-^ zitUcOUY5P=;sB@VC#2S!Zv|@=cQ0dSs6Y4M|(pl)fxz!Onvpe^;as zlr8PrJb9r~n^?Z`P|e6q@6A4QzdJ47PrqgJlFW%OA4?&fW>-P8W{Qh~*bKXCm^FGge&gbq~drN*kDLw!IzqOT_BLL7n37mNU zn}cZs%K-2MSev1pZhTzIiSQRQm+ADm3HvA_*Y*C+Qz7hf489;k4R*Zq;n4+ zWLzZPSIx^V0>EABSUhy%<*CFWF-34Kv2*H>3SXVF0ILhl{RRR!dWe;WW<*8pz(AZt z55#b&HCUqCTUmrlGdI&wB;g#0wn=uwo_;Ut*Qrd9d^@F{|J=D@W2fCS2#`)snnA18 zK&vdoxrryxhm-(-Orn@r0=4s2U!OYaIp2iC3j@v3@3_cMGoG$Okoshhdu9Z)^{8Tg zGfwjkxM&ZtVo+uZ+N)f~dAYzz3t+e0@kw;eP~V*!3bv@0QJnW0h07O%1;LF0v^Z4` zvHmLr8^O^PW79te1~=*-*<{haBryua`}-_bh{4XVK>AqoyH4H+P0y0Ir3|l(ZS-ab zXS4X28R&!UeazSdDxVw#3&7S%uLjLj^Wz3QXOAnP89xY(^Z6He%Zw!bk+2U{S;xM$ z=D{~keBVlc)C?cLdDXnCxCTe}a)~>-NXZ(y;D_$*Dt;!OsefoEUMg}#>ZuyWWXujj zaMv1>p1*Qs7=G4y^t;ltb+t#p*gsEK1aE8pE$(5{2R;1_0z1c! z-8O4`SF*0%(F5^Dt}?*JHyYA=fA)yS+K42nPSsR+ z=s*b7ll4LdIxlW6`INwHVgMiNcJ|eca^?~!w4+xOuigOFJSb^l%s}~+%Rv=`G zvA`1C!75{}y*|F4WkjXCQ!4U8(WakNsGbn=;UPT=8x|blM1lNd1<3T=KA`d^&pcQ< z$d&x1BA65i2mZgq`XFV_?spLM;sV$tlh!|$@VZ9*5OuD25p~5zAn&)Nz+D?&)ee=< zwl>3`RugVGrhWo4LAa&XN*mU(4LK-mZ{G!5Y`pWh0d+pXL#$T-6w{C*g~`TcQ6=Y4 zd_*f(-c^7?NlU(7T*usHecZ+S!Pz-9?)UwWoU2IK1B@s8s%GQ2WjcLAqna%Lj{v9> zAH3A>%UUo&@d4sDZb2{*FZ7gA!<;W-_xZz@a`MdBT|JlsH7Dxd4DPi(?xbtE8(TMK zuy0jS!K+?gSuqszPzCsFK+t#}q~_(X+1jKL#S%%98QW;{`Gjfg&$kV{ORt+6COw2Y zPmtZ>N@7m>X&r$9!}G6kpXZs99D)E?;UWPb^cyPx>=6$FIO8F~I z?H!}Pe&n}xo8G?gvs{i@VSQ5!2_{Y*LRP9~KG8z`Cc7`dr-#-5Xv zusS@}fsQc{T>eNFFAw;N(Av6zo1yGFuP$tdXUy66=F)HtwLe#)&dFXIGRWx9i-)JG zOGaP*yt>tV+SH&|GP0q5t+N-K)Fl|&bh2t`eD++L+m{E-&7s!T_|kV%{k1Q~P@$h# z^yUjDA-^(zl&Du6e;Ke6lVrr$hTabD>z^NMBo2(&um)v^6D*z1?0QUoX&`2YUtv*m z@JWL{EG^UZ#Vr-n4EFlM^Zj|@jAqv% z#1?f{^#0$LR&(UZUvf;R7wn|EkBi+KHq5p+5)v}moRNBdm6G_3obhG%+Mo{B#zyXR6v@OwB1*5>wR^izb`e*vKreb@j1 diff --git a/static/default/url_classes/twitter tweets api.png b/static/default/url_classes/twitter tweets api.png deleted file mode 100644 index 1cd88411e647538e75724f68e81d0a4b97ebc851..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2138 zcmV-g2&MOlP)9LMk3^1ry@2AI0ljcp~4MK(lqn?{`OpBu)aY@?e?MCTtg{z0-oZl*@#f(p2q zxMiC-Hx1|@N~rPA9Ge^UPZ=6DK?E(4I#F=YsBLKJpSQbvucfcj*=1;G-|rLT*L!#O z>&v_E{odW-9fM|8Awaw^EIb2IGXx$12mk;90AfW7k3clk$2++ucMv1FkpTh_7x9Ti z01^ZHB()F$#D>%9WRNR*t?Q>-LlBFcAOP_Z5ovKnxm@8fRx6AFL`b;){0pfrS6I`r zr^h;B79c)c35yR9dvHmj3#Nf$90iOV#jX4*A9aENLOm-bpA0*EpK^gMGi>3ZjrbxBr;0HPcMu-4ez zTG!jRY={8?h%o{%tiyTJ<#aMkh`a) z02>69akr`|LmHAa#HE?3QL#M=S%+6PXFXNQ-K7fAFSiBW1z7h5T9@2O zkHVcz?<*(>s%c{;bBd_?Gp!-z2&E?$%eQv?XAF+k)j`(=cTx^;257h0>7F@DOO)TaG` z^O9;%B}q1wW(43$P|3#wM(Tg1Xu^S4+5BBu5IX;hP!X2oTg1GOECzJ(X*C%itSsdRgRl-Tkl+b9w-#51 z$fTR3RbDG@?red@kh1(@-(6h^D)p1lkVON}N+YF?rqi~NOZZMc=km*0SO*wz@PwR0 
ziD(B@Oh7sPWP5BUZLGAejQ`+DP)V4eRit0UQAPBv4Nf2mP@Qr|{JA74>&OkN002kC z3=r1jZbJgh0ss&I00IC&000O8fLQ2@+}-X?c;(wuW3fZ&0);+^50 zSRB)Nb0_V+PawwyatuLcdn5MpmU_I$;J8vhg=CUNW#%g$ejtdqC0 z>>{`6M>fl!&p_?B$wzs(0ehA9h5eJi<%hz!uY!O=X_ z${F9*&(@-H^4joh2Hw8R;QbW|5CI15Ah4|HE+;kPar{L59#@Yek)Jo*D4(A}kOQDt zwt<`mV&?~rx@DfdfA}lxr^i@80E3mh#fyZkBM(0ym=SI@)2-blY^E&RWB68Q`*!!1 zEH3hl21af)Ct}&0mht-dqP-CWFlb0QLT2pND)*N0^?z>?_@f{$_Q$j-lY)C*)@1Q4v)Y0EPiZnBKeuu{U zG!L9tprXfmr5%!Wkb^%W^ek`sambzWL29~>jH2G@sbH$mcyKIi4HG%=*FX>e00IC& z000O80096X000C4fB*mx00062KmY&;00?0i{FC}x-gkx#o8?uPyM|w?-a9v<`bz!u zi}RK|BpIJSbLp0}{7mQWI26U?D4Cp^-t-0MH@>7C+A=ftYV&)sN9-Hor|NeeuiLFn zd0<4ripW=LOlx;`uSWZOFEzc}R#foF#I~=mjej9I%6YVD+nFT0{F8N)mt1*pVWqjd ztT|!RsE&>EXD!`P-gEuu*yN9{^@gFt@6K*H_Qt7s<32Dy_RVvq=!#Q0P3wL+tjy6} zN>^+?^1Qinm%3)snH}FximfPFa3Ev&i$$OB+;y|{kKsL6Q|IouHD6&e#{XD)K{Dst z_zk5xQ*ZdUHXZuaaK$*&_P`Gfl?k?-S)Xld zt2+F;C4EF^g$X^r^siT~(JK-*-+X6J%xMcMj#*WxES~bvwh=d{hL8aM0g}V_>?DMG QVE_OC07*qoM6N<$f