Version 141
This commit is contained in:
parent
5127f7ba6c
commit
b7edfb9719
|
@ -8,6 +8,29 @@
|
|||
<div class="content">
|
||||
<h3>changelog</h3>
|
||||
<ul>
|
||||
<li><h3>version 141</h3></li>
|
||||
<ul>
|
||||
<li>combined mappings are no longer calculated and stored</li>
|
||||
<li>recalculate combined mappings obviously removed from database menu</li>
|
||||
<li>combined mappings no longer have to be recalculated on a service deletion; the accompanying 'this could take a long time' warning dialog is gone as well</li>
|
||||
<li>combined mappings autocomplete counts are now calculated on-the-fly</li>
|
||||
<li>combined mappings tag queries are now performed on-the-fly</li>
|
||||
<li>combined mappings namespace queries are now performed on-the-fly</li>
|
||||
<li>combined mappings zero/non-zero tag count queries are now performed on-the-fly</li>
|
||||
<li>combined mappings regular tag count queries are now performed on-the-fly</li>
|
||||
<li>corrected some logic with regular tag count queries, I think</li>
|
||||
<li>autocomplete tag cache dirties and culls itself more efficiently on tag update</li>
|
||||
<li>autocomplete tag cache dirties and culls itself more efficiently on file import/delete</li>
|
||||
<li>removed a couple of non-useful AMP tests that were breaking due to a previous change in connection code</li>
|
||||
<li>improved how popup messages give the gui permission to update; hopefully the gui will lock up less when big jobs with popups are happening</li>
|
||||
<li>improved some misc timing logic</li>
|
||||
<li>improved some repo sync timing logic</li>
|
||||
<li>added simple emergency throttling to the repo sync daemon when the CPU/HDD is getting hammered</li>
|
||||
<li>improved some repo sync text number-grammar and timing</li>
|
||||
<li>added sankaku channel booru, including flash</li>
|
||||
<li>the booru downloading code that discovers image urls is more flexible</li>
|
||||
<li>improved my job pause/cancel logic so paused jobs, when cancelled/completed, will report themselves as no longer paused (this was affecting the downloader page, with paused jobs not dismissing themselves on a subsequent cancel)</li>
|
||||
</ul>
|
||||
<li><h3>version 140</h3></li>
|
||||
<ul>
|
||||
<li>if a repository or subscription popup message has nothing to report, it will dismiss itself</li>
|
||||
|
|
|
@ -1051,6 +1051,17 @@ tag_classnames_to_namespaces = { 'tag-type-general' : '', 'tag-type-character' :
|
|||
|
||||
DEFAULT_BOORUS[ 'tbib' ] = Booru( name, search_url, search_separator, advance_by_page_num, thumb_classname, image_id, image_data, tag_classnames_to_namespaces )
|
||||
|
||||
name = 'sankaku chan'
|
||||
search_url = 'https://chan.sankakucomplex.com/?tags=%tags%&page=%index%'
|
||||
search_separator = '+'
|
||||
advance_by_page_num = True
|
||||
thumb_classname = 'thumb'
|
||||
image_id = 'highres'
|
||||
image_data = None
|
||||
tag_classnames_to_namespaces = { 'tag-type-general' : '', 'tag-type-character' : 'character', 'tag-type-copyright' : 'series', 'tag-type-artist' : 'creator' }
|
||||
|
||||
DEFAULT_BOORUS[ 'sankaku chan' ] = Booru( name, search_url, search_separator, advance_by_page_num, thumb_classname, image_id, image_data, tag_classnames_to_namespaces )
|
||||
|
||||
class LocalBooruCache( object ):
|
||||
|
||||
def __init__( self ):
|
||||
|
@ -2708,7 +2719,7 @@ class ThumbnailCache( object ):
|
|||
|
||||
if HC.GetNowPrecise() - last_paused > 0.005:
|
||||
|
||||
time.sleep( 0.0001 )
|
||||
time.sleep( 0.00001 )
|
||||
|
||||
last_paused = HC.GetNowPrecise()
|
||||
|
||||
|
|
|
@ -633,7 +633,7 @@ class Controller( wx.App ):
|
|||
|
||||
if HC.shutdown: raise Exception( 'Client shutting down!' )
|
||||
elif HC.pubsub.NoJobsQueued() and not HC.currently_doing_pubsub: return
|
||||
else: time.sleep( 0.0001 )
|
||||
else: time.sleep( 0.00001 )
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -1515,6 +1515,8 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
job_key.Finish()
|
||||
|
||||
|
||||
def _ClearCombinedAutocompleteTags( self ): self._c.execute( 'DELETE FROM autocomplete_tags_cache WHERE tag_service_id = ?;', ( self._combined_tag_service_id, ) )
|
||||
|
||||
def _DeleteFiles( self, service_id, hash_ids ):
|
||||
|
||||
splayed_hash_ids = HC.SplayListForDB( hash_ids )
|
||||
|
@ -1827,17 +1829,6 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
tag_service_id = self._GetServiceId( tag_service_key )
|
||||
file_service_id = self._GetServiceId( file_service_key )
|
||||
|
||||
if file_service_key == HC.COMBINED_FILE_SERVICE_KEY:
|
||||
|
||||
current_tables_phrase = 'mappings WHERE service_id = ' + HC.u( tag_service_id ) + ' AND status = ' + HC.u( HC.CURRENT ) + ' AND '
|
||||
pending_tables_phrase = 'mappings WHERE service_id = ' + HC.u( tag_service_id ) + ' AND status = ' + HC.u( HC.PENDING ) + ' AND '
|
||||
|
||||
else:
|
||||
|
||||
current_tables_phrase = 'mappings, files_info USING ( hash_id ) WHERE mappings.service_id = ' + HC.u( tag_service_id ) + ' AND mappings.status = ' + HC.u( HC.CURRENT ) + ' AND files_info.service_id = ' + HC.u( file_service_id ) + ' AND '
|
||||
pending_tables_phrase = 'mappings, files_info USING ( hash_id ) WHERE mappings.service_id = ' + HC.u( tag_service_id ) + ' AND mappings.status = ' + HC.u( HC.PENDING ) + ' AND files_info.service_id = ' + HC.u( file_service_id ) + ' AND '
|
||||
|
||||
|
||||
# precache search
|
||||
|
||||
there_was_a_namespace = False
|
||||
|
@ -1951,28 +1942,50 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
zero = lambda: 0
|
||||
|
||||
predicates = [ 'status = ?', 'namespace_id = ?']
|
||||
|
||||
if tag_service_key == HC.COMBINED_TAG_SERVICE_KEY:
|
||||
|
||||
count_phrase = 'SELECT tag_id, COUNT( DISTINCT hash_id ) FROM '
|
||||
|
||||
else:
|
||||
|
||||
count_phrase = 'SELECT tag_id, COUNT( * ) FROM '
|
||||
|
||||
predicates.append( 'mappings.service_id = ' + HC.u( tag_service_id ) )
|
||||
|
||||
|
||||
if file_service_key == HC.COMBINED_FILE_SERVICE_KEY:
|
||||
|
||||
table_phrase = 'mappings '
|
||||
|
||||
else:
|
||||
|
||||
table_phrase = 'mappings, files_info USING ( hash_id ) '
|
||||
|
||||
predicates.append( 'files_info.service_id = ' + HC.u( file_service_id ) )
|
||||
|
||||
|
||||
predicates_phrase = 'WHERE ' + ' AND '.join( predicates ) + ' AND '
|
||||
|
||||
for ( namespace_id, tag_ids ) in HC.BuildKeyToListDict( results_missed ).items():
|
||||
|
||||
current_counts = collections.defaultdict( zero )
|
||||
pending_counts = collections.defaultdict( zero )
|
||||
|
||||
current_counts.update( { tag_id : count for ( tag_id, count ) in self._c.execute( 'SELECT tag_id, COUNT( * ) FROM ' + current_tables_phrase + 'namespace_id = ? AND tag_id IN ' + HC.SplayListForDB( tag_ids ) + ' GROUP BY tag_id;', ( namespace_id, ) ) } )
|
||||
pending_counts.update( { tag_id : count for ( tag_id, count ) in self._c.execute( 'SELECT tag_id, COUNT( * ) FROM ' + pending_tables_phrase + 'namespace_id = ? AND tag_id IN ' + HC.SplayListForDB( tag_ids ) + ' GROUP BY tag_id;', ( namespace_id, ) ) } )
|
||||
current_counts.update( { tag_id : count for ( tag_id, count ) in self._c.execute( count_phrase + table_phrase + predicates_phrase + 'tag_id IN ' + HC.SplayListForDB( tag_ids ) + ' GROUP BY tag_id;', ( HC.CURRENT, namespace_id ) ) } )
|
||||
pending_counts.update( { tag_id : count for ( tag_id, count ) in self._c.execute( count_phrase + table_phrase + predicates_phrase + 'tag_id IN ' + HC.SplayListForDB( tag_ids ) + ' GROUP BY tag_id;', ( HC.PENDING, namespace_id ) ) } )
|
||||
|
||||
self._c.executemany( 'INSERT OR IGNORE INTO autocomplete_tags_cache ( file_service_id, tag_service_id, namespace_id, tag_id, current_count, pending_count ) VALUES ( ?, ?, ?, ?, ?, ? );', [ ( file_service_id, tag_service_id, namespace_id, tag_id, current_counts[ tag_id ], pending_counts[ tag_id ] ) for tag_id in tag_ids ] )
|
||||
|
||||
cache_results.extend( [ ( namespace_id, tag_id, current_counts[ tag_id ], pending_counts[ tag_id ] ) for tag_id in tag_ids ] )
|
||||
|
||||
|
||||
ids = set()
|
||||
|
||||
current_ids_to_count = collections.Counter()
|
||||
pending_ids_to_count = collections.Counter()
|
||||
|
||||
for ( namespace_id, tag_id, current_count, pending_count ) in cache_results:
|
||||
|
||||
ids.add( ( namespace_id, tag_id ) )
|
||||
|
||||
current_ids_to_count[ ( namespace_id, tag_id ) ] += current_count
|
||||
pending_ids_to_count[ ( namespace_id, tag_id ) ] += pending_count
|
||||
|
||||
|
@ -2182,7 +2195,10 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
if num_tags_zero or num_tags_nonzero:
|
||||
|
||||
nonzero_tag_query_hash_ids = { id for ( id, ) in self._c.execute( 'SELECT hash_id FROM mappings WHERE service_id = ? AND hash_id IN ' + HC.SplayListForDB( query_hash_ids ) + ' AND status IN ' + HC.SplayListForDB( statuses ) + namespace_predicate + ';', ( tag_service_id, ) ) }
|
||||
if tag_service_key == HC.COMBINED_TAG_SERVICE_KEY: service_phrase = ''
|
||||
else: service_phrase = 'service_id = ' + HC.u( tag_service_id ) + ' AND '
|
||||
|
||||
nonzero_tag_query_hash_ids = { id for ( id, ) in self._c.execute( 'SELECT DISTINCT hash_id FROM mappings WHERE ' + service_phrase + 'hash_id IN ' + HC.SplayListForDB( query_hash_ids ) + ' AND status IN ' + HC.SplayListForDB( statuses ) + namespace_predicate + ';' ) }
|
||||
|
||||
if num_tags_zero: query_hash_ids.difference_update( nonzero_tag_query_hash_ids )
|
||||
elif num_tags_nonzero: query_hash_ids = nonzero_tag_query_hash_ids
|
||||
|
@ -2190,7 +2206,10 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
if len( tag_predicates ) > 0:
|
||||
|
||||
query_hash_ids = { id for ( id, count ) in self._c.execute( 'SELECT hash_id, COUNT( * ) as num_tags FROM mappings WHERE service_id = ? AND hash_id IN ' + HC.SplayListForDB( query_hash_ids ) + ' AND status IN ' + HC.SplayListForDB( statuses ) + namespace_predicate + ' GROUP BY hash_id;', ( tag_service_id, ) ) if False not in ( pred( count ) for pred in tag_predicates ) }
|
||||
if tag_service_key == HC.COMBINED_TAG_SERVICE_KEY: service_phrase = ''
|
||||
else: service_phrase = 'service_id = ' + HC.u( tag_service_id ) + ' AND '
|
||||
|
||||
query_hash_ids = { id for ( id, count ) in self._c.execute( 'SELECT hash_id, COUNT( DISTINCT tag_id ) FROM mappings WHERE ' + service_phrase + 'hash_id IN ' + HC.SplayListForDB( query_hash_ids ) + ' AND status IN ' + HC.SplayListForDB( statuses ) + namespace_predicate + ' GROUP BY hash_id;' ) if False not in ( pred( count ) for pred in tag_predicates ) }
|
||||
|
||||
|
||||
#
|
||||
|
@ -2392,17 +2411,38 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
if include_current_tags: statuses.append( HC.CURRENT )
|
||||
if include_pending_tags: statuses.append( HC.PENDING )
|
||||
|
||||
if len( statuses ) > 0: status_phrase = 'mappings.status IN ' + HC.SplayListForDB( statuses ) + ' AND '
|
||||
else: status_phrase = ''
|
||||
|
||||
tag_service_id = self._GetServiceId( tag_service_key )
|
||||
|
||||
file_service_id = self._GetServiceId( file_service_key )
|
||||
if len( statuses ) == 0: return {}
|
||||
|
||||
namespace_id = self._GetNamespaceId( namespace )
|
||||
|
||||
if file_service_key == HC.COMBINED_FILE_SERVICE_KEY: hash_ids = { id for ( id, ) in self._c.execute( 'SELECT hash_id FROM mappings WHERE service_id = ? AND ' + status_phrase + 'namespace_id = ?;', ( tag_service_id, namespace_id ) ) }
|
||||
else: hash_ids = { id for ( id, ) in self._c.execute( 'SELECT hash_id FROM mappings, files_info USING ( hash_id ) WHERE mappings.service_id = ? AND files_info.service_id = ? AND ' + status_phrase + 'namespace_id = ?;', ( tag_service_id, file_service_id, namespace_id ) ) }
|
||||
predicates = []
|
||||
|
||||
if len( statuses ) > 0: predicates.append( 'mappings.status IN ' + HC.SplayListForDB( statuses ) )
|
||||
|
||||
if file_service_key == HC.COMBINED_FILE_SERVICE_KEY:
|
||||
|
||||
table_phrase = 'mappings'
|
||||
|
||||
else:
|
||||
|
||||
table_phrase = 'mappings, files_info USING ( hash_id )'
|
||||
|
||||
file_service_id = self._GetServiceId( file_service_key )
|
||||
|
||||
predicates.append( 'files_info.service_id = ' + HC.u( file_service_id ) )
|
||||
|
||||
|
||||
if tag_service_key != HC.COMBINED_TAG_SERVICE_KEY:
|
||||
|
||||
tag_service_id = self._GetServiceId( tag_service_key )
|
||||
|
||||
predicates.append( 'mappings.service_id = ' + HC.u( tag_service_id ) )
|
||||
|
||||
|
||||
if len( predicates ) > 0: predicates_phrase = ' AND '.join( predicates ) + ' AND '
|
||||
else: predicates_phrase = ''
|
||||
|
||||
hash_ids = { id for ( id, ) in self._c.execute( 'SELECT hash_id FROM ' + table_phrase + ' WHERE ' + predicates_phrase + 'namespace_id = ?;', ( namespace_id, ) ) }
|
||||
|
||||
return hash_ids
|
||||
|
||||
|
@ -2416,12 +2456,7 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
if include_current_tags: statuses.append( HC.CURRENT )
|
||||
if include_pending_tags: statuses.append( HC.PENDING )
|
||||
|
||||
if len( statuses ) > 0: status_phrase = 'mappings.status IN ' + HC.SplayListForDB( statuses ) + ' AND '
|
||||
else: status_phrase = ''
|
||||
|
||||
tag_service_id = self._GetServiceId( tag_service_key )
|
||||
|
||||
file_service_id = self._GetServiceId( file_service_key )
|
||||
if len( statuses ) == 0: return {}
|
||||
|
||||
siblings_manager = HC.app.GetManager( 'tag_siblings' )
|
||||
|
||||
|
@ -2433,6 +2468,33 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
hash_ids = set()
|
||||
|
||||
predicates = []
|
||||
|
||||
if len( statuses ) > 0: predicates.append( 'mappings.status IN ' + HC.SplayListForDB( statuses ) )
|
||||
|
||||
if file_service_key == HC.COMBINED_FILE_SERVICE_KEY:
|
||||
|
||||
table_phrase = 'mappings'
|
||||
|
||||
else:
|
||||
|
||||
table_phrase = 'mappings, files_info USING ( hash_id )'
|
||||
|
||||
file_service_id = self._GetServiceId( file_service_key )
|
||||
|
||||
predicates.append( 'files_info.service_id = ' + HC.u( file_service_id ) )
|
||||
|
||||
|
||||
if tag_service_key != HC.COMBINED_TAG_SERVICE_KEY:
|
||||
|
||||
tag_service_id = self._GetServiceId( tag_service_key )
|
||||
|
||||
predicates.append( 'mappings.service_id = ' + HC.u( tag_service_id ) )
|
||||
|
||||
|
||||
if len( predicates ) > 0: predicates_phrase = ' AND '.join( predicates ) + ' AND '
|
||||
else: predicates_phrase = ''
|
||||
|
||||
for tag in tags:
|
||||
|
||||
try: ( namespace_id, tag_id ) = self._GetNamespaceIdTagId( tag )
|
||||
|
@ -2440,13 +2502,11 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
if ':' in tag:
|
||||
|
||||
if file_service_key == HC.COMBINED_FILE_SERVICE_KEY: hash_ids.update( ( id for ( id, ) in self._c.execute( 'SELECT hash_id FROM mappings WHERE service_id = ? AND ' + status_phrase + 'namespace_id = ? AND tag_id = ?;', ( tag_service_id, namespace_id, tag_id ) ) ) )
|
||||
else: hash_ids.update( ( id for ( id, ) in self._c.execute( 'SELECT hash_id FROM mappings, files_info USING ( hash_id ) WHERE mappings.service_id = ? AND files_info.service_id = ? AND ' + status_phrase + 'namespace_id = ? AND tag_id = ?;', ( tag_service_id, file_service_id, namespace_id, tag_id ) ) ) )
|
||||
hash_ids.update( ( id for ( id, ) in self._c.execute( 'SELECT hash_id FROM ' + table_phrase + ' WHERE ' + predicates_phrase + 'namespace_id = ? AND tag_id = ?;', ( namespace_id, tag_id ) ) ) )
|
||||
|
||||
else:
|
||||
|
||||
if file_service_key == HC.COMBINED_FILE_SERVICE_KEY: hash_ids.update( ( id for ( id, ) in self._c.execute( 'SELECT hash_id FROM mappings WHERE service_id = ? AND ' + status_phrase + 'tag_id = ?;', ( tag_service_id, tag_id ) ) ) )
|
||||
else: hash_ids.update( ( id for ( id, ) in self._c.execute( 'SELECT hash_id FROM mappings, files_info USING ( hash_id ) WHERE mappings.service_id = ? AND files_info.service_id = ? AND ' + status_phrase + 'tag_id = ?;', ( tag_service_id, file_service_id, tag_id ) ) ) )
|
||||
hash_ids.update( ( id for ( id, ) in self._c.execute( 'SELECT hash_id FROM ' + table_phrase + ' WHERE ' + predicates_phrase + 'tag_id = ?;', ( tag_id, ) ) ) )
|
||||
|
||||
|
||||
|
||||
|
@ -4056,7 +4116,7 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
self._c.execute( 'DELETE FROM services WHERE service_id = ?;', ( service_id, ) )
|
||||
|
||||
if service_type == HC.TAG_REPOSITORY: self._RecalcCombinedMappings()
|
||||
if service_type == HC.TAG_REPOSITORY: self._ClearCombinedAutocompleteTags()
|
||||
|
||||
if service_type in HC.REPOSITORIES:
|
||||
|
||||
|
@ -4191,6 +4251,10 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
self._c.executemany( 'UPDATE autocomplete_tags_cache SET current_count = current_count + ? WHERE file_service_id = ? AND tag_service_id = ? AND namespace_id = ? AND tag_id = ?;', [ ( count * direction, file_service_id, tag_service_id, namespace_id, tag_id ) for ( tag_service_id, namespace_id, tag_id, count ) in current_tags ] )
|
||||
self._c.executemany( 'UPDATE autocomplete_tags_cache SET pending_count = pending_count + ? WHERE file_service_id = ? AND tag_service_id = ? AND namespace_id = ? AND tag_id = ?;', [ ( count * direction, file_service_id, tag_service_id, namespace_id, tag_id ) for ( tag_service_id, namespace_id, tag_id, count ) in pending_tags ] )
|
||||
|
||||
dirty_tags = { ( namespace_id, tag_id ) for ( tag_service_id, namespace_id, tag_id, count ) in current_tags + pending_tags }
|
||||
|
||||
self._c.executemany( 'DELETE FROM autocomplete_tags_cache WHERE tag_service_id = ? AND namespace_id = ? AND tag_id = ?;', ( ( self._combined_tag_service_id, namespace_id, tag_id ) for ( namespace_id, tag_id ) in dirty_tags ) )
|
||||
|
||||
|
||||
def _UpdateMappings( self, tag_service_id, mappings_ids = [], deleted_mappings_ids = [], pending_mappings_ids = [], pending_rescinded_mappings_ids = [], petitioned_mappings_ids = [], petitioned_rescinded_mappings_ids = [] ):
|
||||
|
||||
|
@ -4221,68 +4285,18 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
num_old_made_new = self._GetRowCount()
|
||||
|
||||
if old_status != HC.PENDING and new_status == HC.PENDING:
|
||||
|
||||
UpdateAutocompleteTagCache( tag_service_id, namespace_id, tag_id, pertinent_hash_ids, HC.PENDING, 1 )
|
||||
|
||||
UpdateCombinedMappings( namespace_id, tag_id, pertinent_hash_ids, HC.PENDING, 1 )
|
||||
|
||||
|
||||
if old_status == HC.PENDING and new_status != HC.PENDING:
|
||||
|
||||
UpdateAutocompleteTagCache( tag_service_id, namespace_id, tag_id, pertinent_hash_ids, HC.PENDING, -1 )
|
||||
|
||||
UpdateCombinedMappings( namespace_id, tag_id, pertinent_hash_ids, HC.PENDING, -1 )
|
||||
|
||||
|
||||
if old_status != HC.CURRENT and new_status == HC.CURRENT:
|
||||
|
||||
UpdateAutocompleteTagCache( tag_service_id, namespace_id, tag_id, pertinent_hash_ids, HC.CURRENT, 1 )
|
||||
|
||||
UpdateCombinedMappings( namespace_id, tag_id, pertinent_hash_ids, HC.CURRENT, 1 )
|
||||
|
||||
|
||||
if old_status == HC.CURRENT and new_status != HC.CURRENT:
|
||||
|
||||
UpdateAutocompleteTagCache( tag_service_id, namespace_id, tag_id, pertinent_hash_ids, HC.CURRENT, -1 )
|
||||
|
||||
UpdateCombinedMappings( namespace_id, tag_id, pertinent_hash_ids, HC.CURRENT, -1 )
|
||||
|
||||
ClearAutocompleteTagCache( tag_service_id, namespace_id, tag_id )
|
||||
|
||||
return ( num_old_deleted + num_old_made_new, num_old_made_new )
|
||||
|
||||
|
||||
def UpdateCombinedMappings( namespace_id, tag_id, hash_ids, status, direction ):
|
||||
|
||||
if direction == -1:
|
||||
|
||||
existing_other_service_hash_ids = { hash_id for ( hash_id, ) in self._c.execute( 'SELECT hash_id FROM mappings WHERE service_id IN ' + splayed_other_service_ids + ' AND namespace_id = ? AND tag_id = ? AND hash_id IN ' + HC.SplayListForDB( hash_ids ) + ' AND status = ?;', ( namespace_id, tag_id, status ) ) }
|
||||
|
||||
pertinent_hash_ids = set( hash_ids ).difference( existing_other_service_hash_ids )
|
||||
|
||||
self._c.execute( 'DELETE FROM mappings WHERE service_id = ? AND namespace_id = ? AND tag_id = ? AND hash_id IN ' + HC.SplayListForDB( pertinent_hash_ids ) + ' AND status = ?;', ( self._combined_tag_service_id, namespace_id, tag_id, status ) )
|
||||
|
||||
elif direction == 1:
|
||||
|
||||
existing_combined_hash_ids = { hash_id for ( hash_id, ) in self._c.execute( 'SELECT hash_id FROM mappings WHERE service_id = ? AND namespace_id = ? AND tag_id = ? AND hash_id IN ' + HC.SplayListForDB( hash_ids ) + ' AND status = ?;', ( self._combined_tag_service_id, namespace_id, tag_id, status ) ) }
|
||||
|
||||
pertinent_hash_ids = set( hash_ids ).difference( existing_combined_hash_ids )
|
||||
|
||||
self._c.executemany( 'INSERT OR IGNORE INTO mappings VALUES ( ?, ?, ?, ?, ? );', [ ( self._combined_tag_service_id, namespace_id, tag_id, hash_id, status ) for hash_id in pertinent_hash_ids ] )
|
||||
|
||||
|
||||
if len( pertinent_hash_ids ) > 0: UpdateAutocompleteTagCache( self._combined_tag_service_id, namespace_id, tag_id, pertinent_hash_ids, status, direction )
|
||||
|
||||
|
||||
def DeletePending( namespace_id, tag_id, hash_ids ):
|
||||
|
||||
self._c.execute( 'DELETE FROM mappings WHERE service_id = ? AND namespace_id = ? AND tag_id = ? AND hash_id IN ' + HC.SplayListForDB( hash_ids ) + ' AND status = ?;', ( tag_service_id, namespace_id, tag_id, HC.PENDING ) )
|
||||
|
||||
num_deleted = self._GetRowCount()
|
||||
|
||||
UpdateAutocompleteTagCache( tag_service_id, namespace_id, tag_id, hash_ids, HC.PENDING, -1 )
|
||||
|
||||
UpdateCombinedMappings( namespace_id, tag_id, hash_ids, HC.PENDING, -1 )
|
||||
ClearAutocompleteTagCache( tag_service_id, namespace_id, tag_id )
|
||||
|
||||
return num_deleted
|
||||
|
||||
|
@ -4307,18 +4321,7 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
num_rows_added = self._GetRowCount()
|
||||
|
||||
if status == HC.CURRENT:
|
||||
|
||||
UpdateAutocompleteTagCache( tag_service_id, namespace_id, tag_id, new_hash_ids, HC.CURRENT, 1 )
|
||||
|
||||
UpdateCombinedMappings( namespace_id, tag_id, new_hash_ids, HC.CURRENT, 1 )
|
||||
|
||||
elif status == HC.PENDING:
|
||||
|
||||
UpdateAutocompleteTagCache( tag_service_id, namespace_id, tag_id, new_hash_ids, HC.PENDING, 1 )
|
||||
|
||||
UpdateCombinedMappings( namespace_id, tag_id, new_hash_ids, HC.PENDING, 1 )
|
||||
|
||||
ClearAutocompleteTagCache( tag_service_id, namespace_id, tag_id )
|
||||
|
||||
return num_rows_added
|
||||
|
||||
|
@ -4332,18 +4335,9 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
return num_rows_added
|
||||
|
||||
|
||||
def UpdateAutocompleteTagCache( tag_service_id, namespace_id, tag_id, hash_ids, status, direction ):
|
||||
def ClearAutocompleteTagCache( tag_service_id, namespace_id, tag_id ):
|
||||
|
||||
#file_service_info = self._c.execute( 'SELECT service_id, COUNT( * ) FROM files_info WHERE hash_id IN ' + HC.SplayListForDB( hash_ids ) + ' GROUP BY service_id;' ).fetchall()
|
||||
|
||||
#file_service_info.append( ( self._combined_file_service_id, len( hash_ids ) ) )
|
||||
|
||||
#if status == HC.CURRENT: critical_phrase = 'current_count = current_count + ?'
|
||||
#elif status == HC.PENDING: critical_phrase = 'pending_count = pending_count + ?'
|
||||
|
||||
#self._c.executemany( 'UPDATE autocomplete_tags_cache SET ' + critical_phrase + ' WHERE file_service_id = ? AND tag_service_id = ? AND namespace_id = ? AND tag_id = ?;', [ ( count * direction, file_service_id, tag_service_id, namespace_id, tag_id ) for ( file_service_id, count ) in file_service_info ] )
|
||||
|
||||
self._c.execute( 'DELETE FROM autocomplete_tags_cache WHERE namespace_id = ? AND tag_id = ?;', ( namespace_id, tag_id ) )
|
||||
self._c.execute( 'DELETE FROM autocomplete_tags_cache WHERE tag_service_id IN ( ?, ? ) AND namespace_id = ? AND tag_id = ?;', ( tag_service_id, self._combined_tag_service_id, namespace_id, tag_id ) )
|
||||
|
||||
|
||||
change_in_num_mappings = 0
|
||||
|
@ -4500,7 +4494,7 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
#
|
||||
|
||||
recalc_combined_mappings = False
|
||||
clear_combined_autocomplete = False
|
||||
|
||||
for ( action, data ) in edit_log:
|
||||
|
||||
|
@ -4548,7 +4542,7 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
|
||||
|
||||
if service_type == HC.TAG_REPOSITORY: recalc_combined_mappings = True
|
||||
if service_type == HC.TAG_REPOSITORY: clear_combined_autocomplete = True
|
||||
|
||||
|
||||
elif action == HC.EDIT:
|
||||
|
@ -4568,7 +4562,7 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
|
||||
|
||||
if recalc_combined_mappings: self._RecalcCombinedMappings()
|
||||
if clear_combined_autocomplete: self._ClearCombinedAutocompleteTags()
|
||||
|
||||
self.pub_after_commit( 'notify_new_pending' )
|
||||
|
||||
|
@ -4580,8 +4574,7 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
HC.repos_changed = True
|
||||
|
||||
recalc_combined_mappings = False
|
||||
message = None
|
||||
clear_combined_autocomplete = False
|
||||
|
||||
for entry in edit_log:
|
||||
|
||||
|
@ -4601,19 +4594,7 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
service = self._GetService( service_id )
|
||||
|
||||
if service.GetServiceType() == HC.TAG_REPOSITORY:
|
||||
|
||||
recalc_combined_mappings = True
|
||||
|
||||
if message is None:
|
||||
|
||||
job_key = HC.JobKey()
|
||||
|
||||
job_key.SetVariable( 'popup_message_text_1', 'updating services: deleting tag data' )
|
||||
|
||||
HC.pubsub.pub( 'message', job_key )
|
||||
|
||||
|
||||
if service.GetServiceType() == HC.TAG_REPOSITORY: clear_combined_autocomplete = True
|
||||
|
||||
self._c.execute( 'DELETE FROM services WHERE service_id = ?;', ( service_id, ) )
|
||||
|
||||
|
@ -4635,8 +4616,6 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
|
||||
|
||||
if service.GetServiceType() == HC.TAG_REPOSITORY: recalc_combined_mappings = True
|
||||
|
||||
elif action == HC.EDIT:
|
||||
|
||||
( service_key, service_type, new_name, info_update ) = entry.GetData()
|
||||
|
@ -4690,14 +4669,7 @@ class ServiceDB( FileDB, MessageDB, TagDB, RatingDB ):
|
|||
|
||||
|
||||
|
||||
if recalc_combined_mappings:
|
||||
|
||||
job_key.SetVariable( 'popup_message_text_1', 'updating services: recalculating combined tag data' )
|
||||
|
||||
self._RecalcCombinedMappings()
|
||||
|
||||
job_key.SetVariable( 'popup_message_text_1', 'updating services: done!' )
|
||||
|
||||
if clear_combined_autocomplete: self._ClearCombinedAutocompleteTags()
|
||||
|
||||
self.pub_after_commit( 'notify_new_pending' )
|
||||
|
||||
|
@ -4792,6 +4764,8 @@ class DB( ServiceDB ):
|
|||
self._combined_file_service_id = self._GetServiceId( HC.COMBINED_FILE_SERVICE_KEY )
|
||||
self._combined_tag_service_id = self._GetServiceId( HC.COMBINED_TAG_SERVICE_KEY )
|
||||
|
||||
self._null_namespace_id = self._c.execute( 'SELECT namespace_id FROM namespaces WHERE namespace = ?;', ( '', ) )
|
||||
|
||||
options = self._GetOptions()
|
||||
|
||||
HC.options = options
|
||||
|
@ -5204,20 +5178,6 @@ class DB( ServiceDB ):
|
|||
|
||||
def _UpdateDB( self, version ):
|
||||
|
||||
if version == 90:
|
||||
|
||||
( HC.options, ) = self._c.execute( 'SELECT options FROM options;' ).fetchone()
|
||||
|
||||
shortcuts = HC.options[ 'shortcuts' ]
|
||||
|
||||
shortcuts[ wx.ACCEL_CTRL ][ ord( 'Z' ) ] = 'undo'
|
||||
shortcuts[ wx.ACCEL_CTRL ][ ord( 'Y' ) ] = 'redo'
|
||||
|
||||
HC.options[ 'shortcuts' ] = shortcuts
|
||||
|
||||
self._c.execute( 'UPDATE options SET options = ?;', ( HC.options, ) )
|
||||
|
||||
|
||||
if version == 91:
|
||||
|
||||
( HC.options, ) = self._c.execute( 'SELECT options FROM options;' ).fetchone()
|
||||
|
@ -5859,6 +5819,17 @@ class DB( ServiceDB ):
|
|||
self._c.execute( 'DELETE FROM autocomplete_tags_cache WHERE current_count < ?;', ( 5, ) )
|
||||
|
||||
|
||||
if version == 140:
|
||||
|
||||
self._combined_tag_service_id = self._GetServiceId( HC.COMBINED_TAG_SERVICE_KEY )
|
||||
|
||||
self._c.execute( 'DELETE FROM mappings WHERE service_id = ?;', ( self._combined_tag_service_id, ) )
|
||||
|
||||
#
|
||||
|
||||
self._c.execute( 'REPLACE INTO yaml_dumps VALUES ( ?, ?, ? );', ( YAML_DUMP_ID_REMOTE_BOORU, 'sankaku chan', CC.DEFAULT_BOORUS[ 'sankaku chan' ] ) )
|
||||
|
||||
|
||||
self._c.execute( 'UPDATE version SET version = ?;', ( version + 1, ) )
|
||||
|
||||
HC.is_db_updated = True
|
||||
|
@ -6015,7 +5986,6 @@ class DB( ServiceDB ):
|
|||
elif action == 'message_info_since': result = self._AddMessageInfoSince( *args, **kwargs )
|
||||
elif action == 'message_statuses': result = self._UpdateMessageStatuses( *args, **kwargs )
|
||||
elif action == 'pixiv_account': result = self._SetYAMLDump( YAML_DUMP_ID_SINGLE, 'pixiv_account', *args, **kwargs )
|
||||
elif action == 'regenerate_combined_mappings': result = self._RecalcCombinedMappings()
|
||||
elif action == 'remote_booru': result = self._SetYAMLDump( YAML_DUMP_ID_REMOTE_BOORU, *args, **kwargs )
|
||||
elif action == 'reset_service': result = self._ResetService( *args, **kwargs )
|
||||
elif action == 'save_options': result = self._SaveOptions( *args, **kwargs )
|
||||
|
@ -6178,7 +6148,7 @@ class DB( ServiceDB ):
|
|||
|
||||
if HC.shutdown: raise Exception( 'Client shutting down!' )
|
||||
elif self._jobs.empty() and not self._currently_doing_job: return
|
||||
else: time.sleep( 0.0001 )
|
||||
else: time.sleep( 0.00001 )
|
||||
|
||||
|
||||
|
||||
|
@ -6889,8 +6859,6 @@ def DAEMONSynchroniseRepositories():
|
|||
|
||||
HC.app.WriteSynchronous( 'service_updates', service_keys_to_service_updates )
|
||||
|
||||
time.sleep( 0.10 )
|
||||
|
||||
# this waits for pubsubs to flush, so service updates are processed
|
||||
HC.app.WaitUntilGoodTimeToUseGUIThread()
|
||||
|
||||
|
@ -7022,8 +6990,6 @@ def DAEMONSynchroniseRepositories():
|
|||
|
||||
HC.app.WaitUntilGoodTimeToUseGUIThread()
|
||||
|
||||
time.sleep( 0.0001 )
|
||||
|
||||
before_precise = HC.GetNowPrecise()
|
||||
|
||||
HC.app.WriteSynchronous( 'content_updates', { service_key : content_updates } )
|
||||
|
@ -7039,9 +7005,20 @@ def DAEMONSynchroniseRepositories():
|
|||
current_weight = 0
|
||||
|
||||
|
||||
if WEIGHT_THRESHOLD < 1:
|
||||
|
||||
job_key.SetVariable( 'popup_message_text_2', 'taking a break' )
|
||||
|
||||
time.sleep( 5 )
|
||||
|
||||
WEIGHT_THRESHOLD = 1
|
||||
|
||||
|
||||
|
||||
if len( content_updates ) > 0:
|
||||
|
||||
content_update_index_string = 'content part ' + HC.ConvertIntToPrettyString( num_content_updates ) + '/' + HC.ConvertIntToPrettyString( num_content_updates ) + ': '
|
||||
|
||||
job_key.SetVariable( 'popup_message_text_2', content_update_index_string + 'committing' )
|
||||
|
||||
HC.app.WriteSynchronous( 'content_updates', { service_key : content_updates } )
|
||||
|
@ -7065,11 +7042,12 @@ def DAEMONSynchroniseRepositories():
|
|||
|
||||
HC.pubsub.pub( 'notify_new_pending' )
|
||||
|
||||
time.sleep( 0.10 )
|
||||
|
||||
# this waits for pubsubs to flush, so service updates are processed
|
||||
HC.app.WaitUntilGoodTimeToUseGUIThread()
|
||||
|
||||
job_key.SetVariable( 'popup_message_gauge_2', ( 0, 1 ) )
|
||||
job_key.SetVariable( 'popup_message_text_2', '' )
|
||||
|
||||
num_updates_processed += 1
|
||||
|
||||
|
||||
|
|
|
@ -845,7 +845,6 @@ class FrameGUI( ClientGUICommon.FrameThatResizes ):
|
|||
menu.Append( CC.MENU_EVENT_ID_TO_ACTION_CACHE.GetId( 'vacuum_db' ), p( '&Vacuum' ), p( 'Rebuild the Database.' ) )
|
||||
menu.Append( CC.MENU_EVENT_ID_TO_ACTION_CACHE.GetId( 'delete_orphans' ), p( '&Delete Orphan Files' ), p( 'Go through the client\'s file store, deleting any files that are no longer needed.' ) )
|
||||
menu.Append( CC.MENU_EVENT_ID_TO_ACTION_CACHE.GetId( 'delete_service_info' ), p( '&Clear Service Info Cache' ), p( 'Delete all cache service info, in case it has become desynchronised.' ) )
|
||||
menu.Append( CC.MENU_EVENT_ID_TO_ACTION_CACHE.GetId( 'regenerate_combined_mappings' ), p( '&Regenerate Combined Mappings' ), p( 'Delete and rebuild the combined cache of all your services\' mappings.' ) )
|
||||
menu.Append( CC.MENU_EVENT_ID_TO_ACTION_CACHE.GetId( 'regenerate_thumbnails' ), p( '&Regenerate All Thumbnails' ), p( 'Delete all thumbnails and regenerate from original files.' ) )
|
||||
menu.Append( CC.MENU_EVENT_ID_TO_ACTION_CACHE.GetId( 'file_integrity' ), p( '&Check File Integrity' ), p( 'Review and fix all local file records.' ) )
|
||||
menu.AppendSeparator()
|
||||
|
@ -1453,14 +1452,6 @@ class FrameGUI( ClientGUICommon.FrameThatResizes ):
|
|||
self._statusbar.SetStatusText( self._statusbar_db_locked, number = 3 )
|
||||
|
||||
|
||||
def _RegenerateCombinedMappings( self ):
|
||||
|
||||
with ClientGUIDialogs.DialogYesNo( self, 'Are you sure you want to regenerate the combined mappings? This can take a long time.' ) as dlg:
|
||||
|
||||
if dlg.ShowModal() == wx.ID_YES: HC.app.Write( 'regenerate_combined_mappings' )
|
||||
|
||||
|
||||
|
||||
def _RegenerateThumbnails( self ):
|
||||
|
||||
text = 'This will rebuild all your thumbnails from the original files. You probably only want to do this if you experience thumbnail errors. If you have a lot of files, it will take some time. A popup message will show its progress.'
|
||||
|
@ -1909,7 +1900,6 @@ The password is cleartext here but obscured in the entry dialog. Enter a blank p
|
|||
|
||||
if page is not None: page.RefreshQuery()
|
||||
|
||||
elif command == 'regenerate_combined_mappings': self._RegenerateCombinedMappings()
|
||||
elif command == 'regenerate_thumbnails': self._RegenerateThumbnails()
|
||||
elif command == 'restore_database': HC.app.RestoreDatabase()
|
||||
elif command == 'review_services': self._ReviewServices()
|
||||
|
|
|
@ -5116,20 +5116,6 @@ class DialogManageServices( ClientGUIDialogs.Dialog ):
|
|||
|
||||
( service_key, service_type, name, info ) = service_panel.GetInfo()
|
||||
|
||||
if service_type == HC.TAG_REPOSITORY:
|
||||
|
||||
text = 'Deleting a tag service is a potentially very expensive operation.'
|
||||
text += os.linesep * 2
|
||||
text += 'If you have millions of tags, it could take twenty minutes or more, during which time your database will be locked.'
|
||||
text += os.linesep * 2
|
||||
text += 'Are you sure you want to delete ' + name + '?'
|
||||
|
||||
with ClientGUIDialogs.DialogYesNo( self, text ) as dlg:
|
||||
|
||||
if dlg.ShowModal() != wx.ID_YES: return
|
||||
|
||||
|
||||
|
||||
self._edit_log.append( HC.EditLogActionDelete( service_key ) )
|
||||
|
||||
services_listbook.DeleteCurrentPage()
|
||||
|
|
|
@ -65,7 +65,7 @@ options = {}
|
|||
# Misc
|
||||
|
||||
NETWORK_VERSION = 15
|
||||
SOFTWARE_VERSION = 140
|
||||
SOFTWARE_VERSION = 141
|
||||
|
||||
UNSCALED_THUMBNAIL_DIMENSIONS = ( 200, 200 )
|
||||
|
||||
|
@ -1989,6 +1989,8 @@ class JobKey( object ):
|
|||
if name in self._variables: del self._variables[ name ]
|
||||
|
||||
|
||||
time.sleep( 0.00001 )
|
||||
|
||||
|
||||
def Finish( self ): self._done.set()
|
||||
|
||||
|
@ -2016,7 +2018,7 @@ class JobKey( object ):
|
|||
|
||||
def IsPausable( self ): return self._pausable and not self.IsDone()
|
||||
|
||||
def IsPaused( self ): return self._paused.is_set()
|
||||
def IsPaused( self ): return self._paused.is_set() and not self.IsDone()
|
||||
|
||||
def IsWorking( self ): return self.IsBegun() and not self.IsDone()
|
||||
|
||||
|
@ -2038,6 +2040,8 @@ class JobKey( object ):
|
|||
|
||||
with self._variable_lock: self._variables[ name ] = value
|
||||
|
||||
time.sleep( 0.00001 )
|
||||
|
||||
|
||||
def WaitOnPause( self ):
|
||||
|
||||
|
|
|
@ -8,6 +8,7 @@ import HydrusThreading
|
|||
import json
|
||||
import os
|
||||
import pafy
|
||||
import re
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
|
@ -198,6 +199,16 @@ class DownloaderBooru( Downloader ):
|
|||
|
||||
thumbnails = soup.find_all( class_ = starts_with_classname )
|
||||
|
||||
# this is a sankaku thing
|
||||
popular_thumbnail_parent = soup.find( id = 'popular-preview' )
|
||||
|
||||
if popular_thumbnail_parent is not None:
|
||||
|
||||
popular_thumbnails = popular_thumbnail_parent.find_all( class_ = starts_with_classname )
|
||||
|
||||
thumbnails = thumbnails[ len( popular_thumbnails ) : ]
|
||||
|
||||
|
||||
if self._gallery_advance_num is None:
|
||||
|
||||
if len( thumbnails ) == 0: self._we_are_done = True
|
||||
|
@ -241,16 +252,34 @@ class DownloaderBooru( Downloader ):
|
|||
|
||||
image = soup.find( id = image_id )
|
||||
|
||||
image_url = image[ 'src' ]
|
||||
|
||||
if 'sample/sample-' in image_url:
|
||||
if image is None:
|
||||
|
||||
# danbooru resized image
|
||||
image_string = soup.find( text = re.compile( 'Save this file' ) )
|
||||
|
||||
image = soup.find( id = 'image-resize-link' )
|
||||
image = image_string.parent
|
||||
|
||||
image_url = image[ 'href' ]
|
||||
|
||||
else:
|
||||
|
||||
if image.name == 'img':
|
||||
|
||||
image_url = image[ 'src' ]
|
||||
|
||||
if 'sample/sample-' in image_url:
|
||||
|
||||
# danbooru resized image
|
||||
|
||||
image = soup.find( id = 'image-resize-link' )
|
||||
|
||||
image_url = image[ 'href' ]
|
||||
|
||||
|
||||
elif image.name == 'a':
|
||||
|
||||
image_url = image[ 'href' ]
|
||||
|
||||
|
||||
|
||||
|
||||
if image_data is not None:
|
||||
|
@ -1436,15 +1465,19 @@ class ImportController( object ):
|
|||
|
||||
while not self._controller_job_key.IsDone():
|
||||
|
||||
if self._controller_job_key.IsPaused():
|
||||
while self._controller_job_key.IsPaused():
|
||||
|
||||
time.sleep( 0.1 )
|
||||
|
||||
self._import_job_key.Pause()
|
||||
self._import_queue_position_job_key.Pause()
|
||||
self._import_queue_job_key.Pause()
|
||||
|
||||
self._controller_job_key.WaitOnPause()
|
||||
if HC.shutdown or self._controller_job_key.IsDone(): break
|
||||
|
||||
|
||||
if HC.shutdown or self._controller_job_key.IsDone(): break
|
||||
|
||||
with self._lock:
|
||||
|
||||
queue_position = self._import_queue_position_job_key.GetVariable( 'queue_position' )
|
||||
|
@ -1586,14 +1619,16 @@ class ImportQueueGeneratorGallery( ImportQueueGenerator ):
|
|||
|
||||
for downloader in downloaders:
|
||||
|
||||
if self._job_key.IsPaused():
|
||||
while self._job_key.IsPaused():
|
||||
|
||||
time.sleep( 0.1 )
|
||||
|
||||
self._job_key.SetVariable( 'status', 'paused after ' + HC.u( total_urls_found ) + ' urls' )
|
||||
|
||||
self._job_key.WaitOnPause()
|
||||
if HC.shutdown or self._job_key.IsDone(): break
|
||||
|
||||
|
||||
if self._job_key.IsCancelled(): break
|
||||
if HC.shutdown or self._job_key.IsDone(): break
|
||||
|
||||
self._job_key.SetVariable( 'status', 'found ' + HC.u( total_urls_found ) + ' urls' )
|
||||
|
||||
|
@ -1618,14 +1653,16 @@ class ImportQueueGeneratorGallery( ImportQueueGenerator ):
|
|||
|
||||
if len( downloaders ) == 0: break
|
||||
|
||||
if self._job_key.IsPaused():
|
||||
while self._job_key.IsPaused():
|
||||
|
||||
time.sleep( 0.1 )
|
||||
|
||||
self._job_key.SetVariable( 'status', 'paused after ' + HC.u( total_urls_found ) + ' urls' )
|
||||
|
||||
self._job_key.WaitOnPause()
|
||||
if HC.shutdown or self._job_key.IsDone(): break
|
||||
|
||||
|
||||
if self._job_key.IsCancelled(): break
|
||||
if HC.shutdown or self._job_key.IsDone(): break
|
||||
|
||||
|
||||
self._job_key.SetVariable( 'status', '' )
|
||||
|
@ -1705,14 +1742,16 @@ class ImportQueueGeneratorThread( ImportQueueGenerator ):
|
|||
|
||||
|
||||
|
||||
if self._job_key.IsPaused():
|
||||
while self._job_key.IsPaused():
|
||||
|
||||
time.sleep( 0.1 )
|
||||
|
||||
self._job_key.SetVariable( 'status', 'paused' )
|
||||
|
||||
self._job_key.WaitOnPause()
|
||||
if HC.shutdown or self._job_key.IsDone(): break
|
||||
|
||||
|
||||
if self._job_key.IsCancelled(): break
|
||||
if HC.shutdown or self._ob_key.IsDone(): break
|
||||
|
||||
thread_time = self._job_key.GetVariable( 'thread_time' )
|
||||
|
||||
|
|
|
@ -2215,16 +2215,6 @@ class DB( ServiceDB ):
|
|||
|
||||
def _UpdateDB( self, version ):
|
||||
|
||||
if version == 90:
|
||||
|
||||
for ( service_id, options ) in self._c.execute( 'SELECT service_id, options FROM services;' ).fetchall():
|
||||
|
||||
options[ 'upnp' ] = None
|
||||
|
||||
self._c.execute( 'UPDATE services SET options = ? WHERE service_id = ?;', ( options, service_id ) )
|
||||
|
||||
|
||||
|
||||
if version == 93:
|
||||
|
||||
self._c.execute( 'CREATE TABLE messaging_sessions ( service_id INTEGER REFERENCES services ON DELETE CASCADE, session_key BLOB_BYTES, account_id INTEGER, identifier BLOB_BYTES, name TEXT, expiry INTEGER );' )
|
||||
|
|
|
@ -142,6 +142,56 @@ class TestDownloaders( unittest.TestCase ):
|
|||
self.assertEqual( info, expected_info )
|
||||
|
||||
|
||||
def test_sankaku( self ):
|
||||
|
||||
with open( HC.STATIC_DIR + os.path.sep + 'testing' + os.path.sep + 'sankaku_gallery.html' ) as f: sankaku_gallery = f.read()
|
||||
with open( HC.STATIC_DIR + os.path.sep + 'testing' + os.path.sep + 'sankaku_page.html' ) as f: sankaku_page = f.read()
|
||||
|
||||
HC.http.SetResponse( HC.GET, 'https://chan.sankakucomplex.com/?tags=animal_ears&page=1', sankaku_gallery )
|
||||
HC.http.SetResponse( HC.GET, 'https://chan.sankakucomplex.com/post/show/4324703', sankaku_page )
|
||||
|
||||
#
|
||||
|
||||
downloader = HydrusDownloading.DownloaderBooru( CC.DEFAULT_BOORUS[ 'sankaku chan' ], [ 'animal_ears' ] )
|
||||
|
||||
#
|
||||
|
||||
gallery_urls = downloader.GetAnotherPage()
|
||||
|
||||
expected_gallery_urls = [(u'https://chan.sankakucomplex.com/post/show/4324703',), (u'https://chan.sankakucomplex.com/post/show/4324435',), (u'https://chan.sankakucomplex.com/post/show/4324426',), (u'https://chan.sankakucomplex.com/post/show/4324365',), (u'https://chan.sankakucomplex.com/post/show/4324343',), (u'https://chan.sankakucomplex.com/post/show/4324309',), (u'https://chan.sankakucomplex.com/post/show/4324134',), (u'https://chan.sankakucomplex.com/post/show/4324107',), (u'https://chan.sankakucomplex.com/post/show/4324095',), (u'https://chan.sankakucomplex.com/post/show/4324086',), (u'https://chan.sankakucomplex.com/post/show/4323969',), (u'https://chan.sankakucomplex.com/post/show/4323967',), (u'https://chan.sankakucomplex.com/post/show/4323665',), (u'https://chan.sankakucomplex.com/post/show/4323620',), (u'https://chan.sankakucomplex.com/post/show/4323586',), (u'https://chan.sankakucomplex.com/post/show/4323581',), (u'https://chan.sankakucomplex.com/post/show/4323580',), (u'https://chan.sankakucomplex.com/post/show/4323520',), (u'https://chan.sankakucomplex.com/post/show/4323512',), (u'https://chan.sankakucomplex.com/post/show/4323498',)]
|
||||
|
||||
self.assertEqual( gallery_urls, expected_gallery_urls )
|
||||
|
||||
#
|
||||
|
||||
HC.http.SetResponse( HC.GET, 'https://cs.sankakucomplex.com/data/c5/c3/c5c3c91ca68bd7662f546cc44fe0d378.jpg?4324703', 'image file' )
|
||||
|
||||
info = downloader.GetFileAndTags( 'https://chan.sankakucomplex.com/post/show/4324703' )
|
||||
|
||||
( temp_path, tags ) = info
|
||||
|
||||
with open( temp_path, 'rb' ) as f: data = f.read()
|
||||
|
||||
info = ( data, tags )
|
||||
|
||||
expected_info = ('image file', [u'character:heinrike prinzessin zu sayn-wittgenstein', u'character:rosalie de hemricourt de grunne', u'2girls', u'alternative costume', u'anal beads', u'animal ears', u'anus', u'ass', u'ass cutout', u'backless panties', u'blonde', u'blue eyes', u'blush', u'braid', u'butt plug', u'butt plug tail', u'cameltoe', u'cat tail', u'cheerleader', u'dildo', u'fake animal ears', u'fang', u'green eyes', u'hands', u'happy', u'heart cutout', u'kneepits', u'long hair', u'looking at viewer', u'multiple girls', u'nekomimi', u'open mouth', u'pantsu', u'spread anus', u'sweat', u'tail', u'tape', u'underwear', u'white panties', u'creator:null (nyanpyoun)', u'series:strike witches'])
|
||||
|
||||
self.assertEqual( info, expected_info )
|
||||
|
||||
# flash is tricky for sankaku
|
||||
|
||||
with open( HC.STATIC_DIR + os.path.sep + 'testing' + os.path.sep + 'sankaku_flash.html' ) as f: sankaku_flash = f.read()
|
||||
|
||||
HC.http.SetResponse( HC.GET, 'https://chan.sankakucomplex.com/post/show/4318061', sankaku_flash )
|
||||
HC.http.SetResponse( HC.GET, 'https://cs.sankakucomplex.com/data/48/ce/48cecd707d8a562d47db74d934505f51.swf?4318061', 'swf file' )
|
||||
|
||||
temp_path = downloader.GetFile( 'https://chan.sankakucomplex.com/post/show/4318061' )
|
||||
|
||||
with open( temp_path, 'rb' ) as f: data = f.read()
|
||||
|
||||
self.assertEqual( data, 'swf file' )
|
||||
|
||||
|
||||
def test_booru_e621( self ):
|
||||
|
||||
with open( HC.STATIC_DIR + os.path.sep + 'testing' + os.path.sep + 'e621_gallery.html' ) as f: e621_gallery = f.read()
|
||||
|
|
|
@ -577,7 +577,7 @@ class TestServer( unittest.TestCase ):
|
|||
self._test_basics( host, port )
|
||||
self._test_local_booru( host, port )
|
||||
|
||||
|
||||
'''
|
||||
class TestAMP( unittest.TestCase ):
|
||||
|
||||
@classmethod
|
||||
|
@ -777,4 +777,4 @@ class TestAMP( unittest.TestCase ):
|
|||
[ ( args, kwargs ) ] = result
|
||||
|
||||
self.assertEqual( args, ( temp_identifier, temp_name, persistent_identifier, persistent_name, message ) )
|
||||
|
||||
'''
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
Loading…
Reference in New Issue