Version 205

commit ca18d65577 (parent 79ad271710)
@@ -0,0 +1,3 @@
+"[[], [[0, 342, [null, ""manage_tags""]], [0, 378, [null, ""next""]], [0, 312, [null, ""last""]], [0, 127, [null, ""delete""]], [2, 78, [null, ""frame_next""]], [0, 317, [null, ""next""]], [0, 343, [null, ""manage_ratings""]], [0, 379, [null, ""next""]], [4, 316, [null, ""pan_right""]], [0, 346, [null, ""archive""]], [0, 70, [null, ""fullscreen_switch""]], [0, 313, [null, ""first""]], [0, 366, [null, ""previous""]], [4, 346, [null, ""inbox""]], [0, 380, [null, ""previous""]], [4, 317, [null, ""pan_down""]], [0, 314, [null, ""previous""]], [0, 367, [null, ""next""]], [0, 376, [null, ""previous""]], [4, 314, [null, ""pan_left""]], [2, 69, [null, ""open_externally""]], [0, 381, [null, ""next""]], [0, 315, [null, ""previous""]], [0, 377, [null, ""previous""]], [4, 315, [null, ""pan_up""]], [2, 66, [null, ""frame_back""]], [0, 382, [null, ""last""]], [0, 375, [null, ""first""]], [0, 316, [null, ""next""]]]]"
+"[[], [[0, 342, [null, ""manage_tags""]], [0, 378, [null, ""next""]], [0, 312, [null, ""last""]], [0, 127, [null, ""delete""]], [2, 78, [null, ""frame_next""]], [0, 317, [null, ""next""]], [0, 343, [null, ""manage_ratings""]], [0, 379, [null, ""next""]], [4, 316, [null, ""pan_right""]], [0, 346, [null, ""archive""]], [0, 70, [null, ""fullscreen_switch""]], [0, 313, [null, ""first""]], [0, 366, [null, ""previous""]], [4, 346, [null, ""inbox""]], [0, 380, [null, ""previous""]], [4, 317, [null, ""pan_down""]], [0, 314, [null, ""previous""]], [0, 367, [null, ""next""]], [0, 376, [null, ""previous""]], [4, 314, [null, ""pan_left""]], [2, 69, [null, ""open_externally""]], [0, 381, [null, ""next""]], [0, 315, [null, ""previous""]], [0, 377, [null, ""previous""]], [4, 315, [null, ""pan_up""]], [2, 66, [null, ""frame_back""]], [0, 382, [null, ""last""]], [0, 375, [null, ""first""]], [0, 316, [null, ""next""]]]]"
+"[[], [[0, 342, [null, ""manage_tags""]], [0, 378, [null, ""next""]], [0, 312, [null, ""last""]], [0, 127, [null, ""delete""]], [2, 78, [null, ""frame_next""]], [0, 317, [null, ""next""]], [0, 343, [null, ""manage_ratings""]], [0, 379, [null, ""next""]], [4, 316, [null, ""pan_right""]], [0, 346, [null, ""archive""]], [0, 70, [null, ""fullscreen_switch""]], [0, 313, [null, ""first""]], [0, 366, [null, ""previous""]], [4, 346, [null, ""inbox""]], [0, 380, [null, ""previous""]], [4, 317, [null, ""pan_down""]], [0, 314, [null, ""previous""]], [0, 367, [null, ""next""]], [0, 376, [null, ""previous""]], [4, 314, [null, ""pan_left""]], [2, 69, [null, ""open_externally""]], [0, 381, [null, ""next""]], [0, 315, [null, ""previous""]], [0, 377, [null, ""previous""]], [4, 315, [null, ""pan_up""]], [2, 66, [null, ""frame_back""]], [0, 382, [null, ""last""]], [0, 375, [null, ""first""]], [0, 316, [null, ""next""]]]]"
@@ -8,6 +8,25 @@
 <div class="content">
   <h3>changelog</h3>
   <ul>
+    <li><h3>version 205</h3></li>
+    <ul>
+      <li>fixed v201->v202 update code, which v204 retroactively broke</li>
+      <li>wrote a new maintenance routine to regenerate the ac cache if it ends up miscounting or not generating correctly</li>
+      <li>fixed specific ac_cache generation code, which was double-counting because of recent file-add optimisation</li>
+      <li>client db maintenance now vacuums attached databases according to their own vacuum clocks</li>
+      <li>client will more aggressively vacuum and analyze if the client has been idle for an hour</li>
+      <li>client vacuum now prints success statements, with duration, to the log</li>
+      <li>analyze now produces an auto-dismissing vacuum-like popup message</li>
+      <li>the 'is maintenance due' check on shutdown now accounts for the new vacuum logic and includes analyze needs as well</li>
+      <li>shutdown maintenance will use more of its allotted time if needed</li>
+      <li>tag archive sync now always filters to local files on initial sync</li>
+      <li>if you import a tag archive with sha256 hash, you are now given the option to filter to local files or import everything</li>
+      <li>the occasionally crashtastic opencv.imread call is now completely replaced with PIL, let's see if it's too slow</li>
+      <li>the new-but-now-redundant 'disable opencv for static images' option is removed as a result</li>
+      <li>fixed a redirection location parsing bug that was affecting sankaku subs with >1000 initial files</li>
+      <li>fixed some bad analyze timestamp tracking code</li>
+      <li>fixed advanced content update code, which still had some mapping service_ids floating around</li>
+    </ul>
     <li><h3>version 204</h3></li>
     <ul>
       <li>current, deleted, pending, and petitioned mappings are now stored on service-separated dynamic tables</li>
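The 'vacuum clocks' item above boils down to keeping one timestamp per attached database file and only vacuuming the files whose timestamp has gone stale. A minimal sketch of the idea in plain sqlite3 (the table name and staleness constant here are illustrative, not hydrus's own):

import sqlite3
import time

STALE_SECONDS = 30 * 86400 # e.g. vacuum each database file at most once a month

def get_due_names( c ):
    
    # one timestamp row per attached database file
    c.execute( 'CREATE TABLE IF NOT EXISTS vacuum_timestamps ( name TEXT, timestamp INTEGER );' )
    
    names_to_timestamps = dict( c.execute( 'SELECT name, timestamp FROM vacuum_timestamps;' ).fetchall() )
    
    # every attached database shows up in database_list; 'temp' is never vacuumable
    db_names = [ name for ( index, name, path ) in c.execute( 'PRAGMA database_list;' ) if name != 'temp' ]
    
    now = time.time()
    
    # never-vacuumed files default to 0, so they are always due
    return [ name for name in db_names if names_to_timestamps.get( name, 0 ) + STALE_SECONDS < now ]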
@@ -52,6 +52,7 @@ class Controller( HydrusController.HydrusController ):
         self._last_mouse_position = None
         self._menu_open = False
         self._previously_idle = False
+        self._idle_started = None
         
     
     def _InitDB( self ):
@@ -302,6 +303,8 @@ class Controller( HydrusController.HydrusController ):
         
         if HydrusGlobals.force_idle_mode:
             
+            self._idle_started = 0
+            
             return True
             
         
@@ -339,18 +342,35 @@ class Controller( HydrusController.HydrusController ):
             currently_idle = False
             
         
-        turning_idle = not self._previously_idle and currently_idle
+        turning_idle = currently_idle and not self._previously_idle
         
         self._previously_idle = currently_idle
         
         if turning_idle:
             
+            self._idle_started = HydrusData.GetNow()
+            
             self.pub( 'wake_daemons' )
             
         
+        if not currently_idle:
+            
+            self._idle_started = None
+            
+        
         return currently_idle
         
     
+    def CurrentlyVeryIdle( self ):
+        
+        if self._idle_started is not None and HydrusData.TimeHasPassed( self._idle_started + 3600 ):
+            
+            return True
+            
+        
+        return False
+        
+    
     def DoHTTP( self, *args, **kwargs ): return self._http.Request( *args, **kwargs )
     
     def DoIdleShutdownWork( self ):
@@ -359,7 +379,7 @@ class Controller( HydrusController.HydrusController ):
         
         self._client_files_manager.Rebalance( partial = False, stop_time = stop_time )
         
-        self.MaintainDB()
+        self.MaintainDB( stop_time = stop_time )
         
         if not self._options[ 'pause_repo_sync' ]:
             
@@ -586,48 +606,38 @@ class Controller( HydrusController.HydrusController ):
         if HydrusGlobals.is_db_updated: wx.CallLater( 1, HydrusData.ShowText, 'The client has updated to version ' + str( HC.SOFTWARE_VERSION ) + '!' )
         
     
-    def MaintainDB( self ):
-        
-        now = HydrusData.GetNow()
-        
-        shutdown_timestamps = self.Read( 'shutdown_timestamps' )
-        
-        maintenance_vacuum_period = self._options[ 'maintenance_vacuum_period' ]
-        
-        if maintenance_vacuum_period is not None and maintenance_vacuum_period > 0:
-            
-            if HydrusData.TimeHasPassed( shutdown_timestamps[ CC.SHUTDOWN_TIMESTAMP_VACUUM ] + maintenance_vacuum_period ):
-                
-                self.WriteInterruptable( 'vacuum' )
-                
-            
-        
-        stop_time = HydrusData.GetNow() + 120
-        
-        self.pub( 'splash_set_status_text', 'analyzing' )
-        
-        self.WriteInterruptable( 'analyze', stop_time = stop_time, only_when_idle = True )
-        
-        if self._timestamps[ 'last_service_info_cache_fatten' ] == 0:
-            
-            self._timestamps[ 'last_service_info_cache_fatten' ] = HydrusData.GetNow()
-            
-        
-        if HydrusData.TimeHasPassed( self._timestamps[ 'last_service_info_cache_fatten' ] + ( 60 * 20 ) ):
-            
-            self.pub( 'splash_set_status_text', 'fattening service info' )
-            
-            services = self.GetServicesManager().GetServices()
-            
-            for service in services:
-                
-                try: self.Read( 'service_info', service.GetServiceKey() )
-                except: pass # sometimes this breaks when a service has just been removed and the client is closing, so ignore the error
-                
-            
-            self._timestamps[ 'last_service_info_cache_fatten' ] = HydrusData.GetNow()
+    def MaintainDB( self, stop_time = None ):
+        
+        if stop_time is None:
+            
+            if not self.CurrentlyVeryIdle():
+                
+                stop_time = HydrusData.GetNow() + 30
+                
+            
+        
+        self.WriteInterruptable( 'vacuum', stop_time = stop_time )
+        
+        only_when_idle = self.CurrentlyIdle()
+        
+        self.WriteInterruptable( 'analyze', stop_time = stop_time, only_when_idle = only_when_idle )
+        
+        if stop_time is None or not HydrusData.TimeHasPassed( stop_time ):
+            
+            if self._timestamps[ 'last_service_info_cache_fatten' ] == 0:
+                
+                self._timestamps[ 'last_service_info_cache_fatten' ] = HydrusData.GetNow()
+                
+            
+            if HydrusData.TimeHasPassed( self._timestamps[ 'last_service_info_cache_fatten' ] + ( 60 * 20 ) ):
+                
+                self.pub( 'splash_set_status_text', 'fattening service info' )
+                
+                services = self.GetServicesManager().GetServices()
+                
+                for service in services:
+                    
+                    try: self.Read( 'service_info', service.GetServiceKey() )
+                    except: pass # sometimes this breaks when a service has just been removed and the client is closing, so ignore the error
+                    
+                
+                self._timestamps[ 'last_service_info_cache_fatten' ] = HydrusData.GetNow()
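The rework above threads a single stop_time deadline through every maintenance job, which is how shutdown maintenance can now "use more of its allotted time if needed": each job checks the shared deadline between units of work instead of getting a fixed private budget. A small sketch of that pattern, with illustrative names:

import time

def run_jobs_with_budget( jobs, budget_seconds ):
    
    stop_time = time.time() + budget_seconds
    
    for job in jobs:
        
        if time.time() >= stop_time:
            
            break # out of time; remaining jobs wait for the next maintenance cycle
            
        
        job( stop_time = stop_time ) # the job can also check the same deadline internally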
@@ -975,18 +985,11 @@ class Controller( HydrusController.HydrusController ):
     
     def ThereIsIdleShutdownWorkDue( self ):
         
-        now = HydrusData.GetNow()
-        
-        shutdown_timestamps = self.Read( 'shutdown_timestamps' )
-        
-        maintenance_vacuum_period = self._options[ 'maintenance_vacuum_period' ]
-        
-        if maintenance_vacuum_period is not None and maintenance_vacuum_period > 0:
-            
-            if HydrusData.TimeHasPassed( shutdown_timestamps[ CC.SHUTDOWN_TIMESTAMP_VACUUM ] + maintenance_vacuum_period ):
-                
-                return True
-                
-            
+        maintenance_due = self.Read( 'maintenance_due' )
+        
+        if maintenance_due:
+            
+            return True
+            
         
         if not self._options[ 'pause_repo_sync' ]:
             
@ -1086,12 +1089,13 @@ class Controller( HydrusController.HydrusController ):
|
|||
except HydrusExceptions.ShutdownException: pass
|
||||
except:
|
||||
|
||||
traceback.print_exc()
|
||||
|
||||
text = 'A serious error occured while trying to exit the program. Its traceback may be shown next. It should have also been written to client.log. You may need to quit the program from task manager.'
|
||||
|
||||
HydrusData.DebugPrint( text )
|
||||
|
||||
traceback.print_exc()
|
||||
|
||||
wx.CallAfter( wx.MessageBox, traceback.format_exc() )
|
||||
wx.CallAfter( wx.MessageBox, text )
|
||||
|
||||
finally:
|
||||
|
|
|
@@ -1310,33 +1310,53 @@ class DB( HydrusDB.HydrusDB ):
         names_to_analyze = [ name for name in all_names if name not in existing_names_to_timestamps or HydrusData.TimeHasPassed( existing_names_to_timestamps[ name ] + stale_time_delta ) ]
         
-        random.shuffle( names_to_analyze )
-        
-        for name in names_to_analyze:
-            
-            started = HydrusData.GetNowPrecise()
-            
-            self._c.execute( 'ANALYZE ' + name + ';' )
-            
-            self._c.execute( 'REPLACE INTO analyze_timestamps ( name, timestamp ) VALUES ( ?, ? );', ( name, HydrusData.GetNow() ) )
-            
-            time_took = HydrusData.GetNowPrecise() - started
-            
-            if time_took > 1:
-                
-                HydrusData.Print( 'Analyzed ' + name + ' in ' + HydrusData.ConvertTimeDeltaToPrettyString( time_took ) )
-                
-            
-            p1 = stop_time is not None and HydrusData.TimeHasPassed( stop_time )
-            p2 = only_when_idle and not self._controller.CurrentlyIdle()
-            
-            if p1 or p2:
-                
-                break
-                
-            
-        
-        self._c.execute( 'ANALYZE sqlite_master;' ) # this reloads the current stats into the query planner
+        if len( names_to_analyze ) > 0:
+            
+            job_key = ClientThreading.JobKey()
+            
+            job_key.SetVariable( 'popup_title', 'database maintenance - analyzing' )
+            
+            self._controller.pub( 'message', job_key )
+            
+            random.shuffle( names_to_analyze )
+            
+            for name in names_to_analyze:
+                
+                self._controller.pub( 'splash_set_status_text', 'analyzing ' + name )
+                job_key.SetVariable( 'popup_text_1', 'analyzing ' + name )
+                
+                started = HydrusData.GetNowPrecise()
+                
+                self._c.execute( 'ANALYZE ' + name + ';' )
+                
+                self._c.execute( 'DELETE FROM analyze_timestamps WHERE name = ?;', ( name, ) )
+                
+                self._c.execute( 'INSERT OR IGNORE INTO analyze_timestamps ( name, timestamp ) VALUES ( ?, ? );', ( name, HydrusData.GetNow() ) )
+                
+                time_took = HydrusData.GetNowPrecise() - started
+                
+                if time_took > 1:
+                    
+                    HydrusData.Print( 'Analyzed ' + name + ' in ' + HydrusData.ConvertTimeDeltaToPrettyString( time_took ) )
+                    
+                
+                p1 = stop_time is not None and HydrusData.TimeHasPassed( stop_time )
+                p2 = only_when_idle and not self._controller.CurrentlyIdle()
+                
+                if p1 or p2:
+                    
+                    break
+                    
+                
+            
+            self._c.execute( 'ANALYZE sqlite_master;' ) # this reloads the current stats into the query planner
+            
+            job_key.SetVariable( 'popup_text_1', 'done!' )
+            
+            HydrusData.Print( job_key.ToString() )
+            
+            wx.CallLater( 1000 * 30, job_key.Delete )
         
     
     def _ArchiveFiles( self, hash_ids ):
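The trailing 'ANALYZE sqlite_master;' is a real sqlite idiom: per-table ANALYZE calls write fresh rows into sqlite_stat1, but an open connection only reloads those statistics into the query planner after analyzing sqlite_master (or reconnecting). A tiny standalone demonstration:

import sqlite3

db = sqlite3.connect( ':memory:' )
c = db.cursor()

c.execute( 'CREATE TABLE t ( x INTEGER );' )
c.executemany( 'INSERT INTO t VALUES ( ? );', ( ( i, ) for i in range( 1000 ) ) )

c.execute( 'ANALYZE t;' ) # writes statistics for t into sqlite_stat1
c.execute( 'ANALYZE sqlite_master;' ) # prompts the planner to reload those statistics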
@@ -1733,29 +1753,8 @@ class DB( HydrusDB.HydrusDB ):
         
         if len( hash_ids ) > 0:
             
-            ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( tag_service_id )
-            
-            all_known_ids = self._c.execute( 'SELECT namespace_id, tag_id FROM existing_tags;' ).fetchall()
-            
-            for group_of_ids in HydrusData.SplitListIntoChunks( all_known_ids, 10000 ):
-                
-                current_mappings_ids = [ ( namespace_id, tag_id, [ hash_id for ( hash_id, ) in self._c.execute( 'SELECT hash_id FROM ' + current_mappings_table_name + ', current_files USING ( hash_id ) WHERE current_files.service_id = ? AND namespace_id = ? AND tag_id = ?;', ( file_service_id, namespace_id, tag_id ) ) ] ) for ( namespace_id, tag_id ) in group_of_ids ]
-                
-                if len( current_mappings_ids ) > 0:
-                    
-                    self._CacheSpecificMappingsAddMappings( file_service_id, tag_service_id, current_mappings_ids )
-                    
-                
-                pending_mappings_ids = [ ( namespace_id, tag_id, [ hash_id for ( hash_id, ) in self._c.execute( 'SELECT hash_id FROM ' + pending_mappings_table_name + ', current_files USING ( hash_id ) WHERE current_files.service_id = ? AND namespace_id = ? AND tag_id = ?;', ( file_service_id, namespace_id, tag_id ) ) ] ) for ( namespace_id, tag_id ) in group_of_ids ]
-                
-                if len( pending_mappings_ids ) > 0:
-                    
-                    self._CacheSpecificMappingsPendMappings( file_service_id, tag_service_id, pending_mappings_ids )
-                    
-                
-            
+            self._CacheSpecificMappingsAddFiles( file_service_id, tag_service_id, hash_ids )
+            
         
     
     def _CacheSpecificMappingsGetAutocompleteCounts( self, file_service_id, tag_service_id, namespace_ids_to_tag_ids ):
@@ -2386,8 +2385,6 @@ class DB( HydrusDB.HydrusDB ):
         
         self._c.execute( 'CREATE TABLE service_info ( service_id INTEGER REFERENCES services ON DELETE CASCADE, info_type INTEGER, info INTEGER, PRIMARY KEY ( service_id, info_type ) );' )
         
-        self._c.execute( 'CREATE TABLE shutdown_timestamps ( shutdown_type INTEGER PRIMARY KEY, timestamp INTEGER );' )
-        
         self._c.execute( 'CREATE TABLE statuses ( status_id INTEGER PRIMARY KEY, status TEXT );' )
         self._c.execute( 'CREATE UNIQUE INDEX statuses_status_index ON statuses ( status );' )
         
@@ -2408,6 +2405,8 @@ class DB( HydrusDB.HydrusDB ):
         self._c.execute( 'CREATE TABLE urls ( url TEXT PRIMARY KEY, hash_id INTEGER );' )
         self._c.execute( 'CREATE INDEX urls_hash_id ON urls ( hash_id );' )
         
+        self._c.execute( 'CREATE TABLE vacuum_timestamps ( name TEXT, timestamp INTEGER );' )
+        
         self._c.execute( 'CREATE TABLE version ( version INTEGER );' )
         
         self._c.execute( 'CREATE TABLE web_sessions ( name TEXT PRIMARY KEY, cookies TEXT_YAML, expiry INTEGER );' )
@@ -2865,6 +2864,37 @@ class DB( HydrusDB.HydrusDB ):
         job_key.Finish()
         
     
+    def _FilterHashes( self, hashes, file_service_key ):
+        
+        if file_service_key == CC.COMBINED_FILE_SERVICE_KEY:
+            
+            return hashes
+            
+        
+        service_id = self._GetServiceId( file_service_key )
+        
+        hashes_result = []
+        
+        for hash in hashes:
+            
+            if not self._HashExists( hash ):
+                
+                continue
+                
+            
+            hash_id = self._GetHashId( hash )
+            
+            result = self._c.execute( 'SELECT 1 FROM current_files WHERE service_id = ? AND hash_id = ?;', ( service_id, hash_id ) ).fetchone()
+            
+            if result is not None:
+                
+                hashes_result.append( hash )
+                
+            
+        
+        return hashes_result
+        
+    
     def _GetAutocompleteCounts( self, tag_service_id, file_service_id, namespace_id_tag_ids, there_was_a_namespace, add_namespaceless ):
         
         namespace_ids_to_tag_ids = HydrusData.BuildKeyToListDict( namespace_id_tag_ids )
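The per-hash loop above issues one SELECT per hash, which is simple but chatty for big batches. The same filter can be phrased as one set-membership query; a sketch under the same assumed current_files schema (not the client's code):

def filter_hash_ids_batch( c, service_id, hash_ids ):
    
    # note sqlite caps bound parameters (999 by default), so chunk very large batches
    placeholders = ','.join( '?' for _ in hash_ids )
    
    query = 'SELECT hash_id FROM current_files WHERE service_id = ? AND hash_id IN ( ' + placeholders + ' );'
    
    present = { hash_id for ( hash_id, ) in c.execute( query, [ service_id ] + list( hash_ids ) ) }
    
    return [ hash_id for hash_id in hash_ids if hash_id in present ]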
@@ -4894,15 +4924,6 @@ class DB( HydrusDB.HydrusDB ):
         return results
         
     
-    def _GetShutdownTimestamps( self ):
-        
-        shutdown_timestamps = collections.defaultdict( lambda: 0 )
-        
-        shutdown_timestamps.update( self._c.execute( 'SELECT shutdown_type, timestamp FROM shutdown_timestamps;' ).fetchall() )
-        
-        return shutdown_timestamps
-        
-    
     def _GetSiteId( self, name ):
         
         result = self._c.execute( 'SELECT site_id FROM imageboard_sites WHERE name = ?;', ( name, ) ).fetchone()
@ -5287,9 +5308,10 @@ class DB( HydrusDB.HydrusDB ):
|
|||
if archive_name in self._tag_archives:
|
||||
|
||||
( hta_path, hta ) = self._tag_archives[ archive_name ]
|
||||
|
||||
adding = True
|
||||
|
||||
try: self._SyncHashesToTagArchive( [ hash ], hta_path, adding, namespaces, service_key )
|
||||
try: self._SyncHashesToTagArchive( [ hash ], hta_path, service_key, adding, namespaces )
|
||||
except: pass
|
||||
|
||||
|
||||
|
@ -5363,9 +5385,54 @@ class DB( HydrusDB.HydrusDB ):
|
|||
|
||||
def _InitExternalDatabases( self ):
|
||||
|
||||
self._db_filenames[ 'caches' ] = 'client.caches.db'
|
||||
self._db_filenames[ 'mappings' ] = 'client.mappings.db'
|
||||
self._db_filenames[ 'master' ] = 'client.master.db'
|
||||
self._db_filenames[ 'external_caches' ] = 'client.caches.db'
|
||||
self._db_filenames[ 'external_mappings' ] = 'client.mappings.db'
|
||||
self._db_filenames[ 'external_master' ] = 'client.master.db'
|
||||
|
||||
|
||||
def _MaintenanceDue( self ):
|
||||
|
||||
# vacuum
|
||||
|
||||
stale_time_delta = HC.options[ 'maintenance_vacuum_period' ]
|
||||
|
||||
if stale_time_delta is not None:
|
||||
|
||||
existing_names_to_timestamps = dict( self._c.execute( 'SELECT name, timestamp FROM vacuum_timestamps;' ).fetchall() )
|
||||
|
||||
db_names = [ name for ( index, name, path ) in self._c.execute( 'PRAGMA database_list;' ) if name not in ( 'mem', 'temp' ) ]
|
||||
|
||||
due_names = [ name for name in db_names if name not in existing_names_to_timestamps or HydrusData.TimeHasPassed( existing_names_to_timestamps[ name ] + stale_time_delta ) ]
|
||||
|
||||
if len( due_names ) > 0:
|
||||
|
||||
return True
|
||||
|
||||
|
||||
|
||||
# analyze
|
||||
|
||||
stale_time_delta = 14 * 86400
|
||||
|
||||
existing_names_to_timestamps = dict( self._c.execute( 'SELECT name, timestamp FROM analyze_timestamps;' ).fetchall() )
|
||||
|
||||
db_names = [ name for ( index, name, path ) in self._c.execute( 'PRAGMA database_list;' ) if name not in ( 'mem', 'temp' ) ]
|
||||
|
||||
all_names = set()
|
||||
|
||||
for db_name in db_names:
|
||||
|
||||
all_names.update( ( name for ( name, ) in self._c.execute( 'SELECT name FROM ' + db_name + '.sqlite_master;' ) ) )
|
||||
|
||||
|
||||
names_to_analyze = [ name for name in all_names if name not in existing_names_to_timestamps or HydrusData.TimeHasPassed( existing_names_to_timestamps[ name ] + stale_time_delta ) ]
|
||||
|
||||
if len( names_to_analyze ) > 0:
|
||||
|
||||
return True
|
||||
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def _ManageDBError( self, job, e ):
|
||||
|
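_MaintenanceDue leans on PRAGMA database_list, which yields one ( index, name, path ) row per attached database; 'main' is always present and attached databases are listed under their attach names. A quick illustration:

import sqlite3

db = sqlite3.connect( ':memory:' )
c = db.cursor()

c.execute( "ATTACH ':memory:' AS external_caches;" )

for ( index, name, path ) in c.execute( 'PRAGMA database_list;' ):
    
    print( index, name, path ) # e.g. ( 0, 'main', '' ) and ( 2, 'external_caches', '' )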
@@ -5637,16 +5704,12 @@ class DB( HydrusDB.HydrusDB ):
         
         self._c.execute( 'CREATE TEMPORARY TABLE temp_operation ( job_id INTEGER PRIMARY KEY AUTOINCREMENT, namespace_id INTEGER, tag_id INTEGER, hash_id INTEGER );' )
         
-        predicates = [ 'service_id = ' + str( service_id ) ]
+        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( service_id )
         
         if sub_action == 'copy':
             
             ( tag, hashes, service_key_target ) = sub_row
             
-            service_id_target = self._GetServiceId( service_key_target )
+            source_table_name = current_mappings_table_name
             
         elif sub_action == 'delete':
             
@ -5662,6 +5725,8 @@ class DB( HydrusDB.HydrusDB ):
|
|||
source_table_name = deleted_mappings_table_name
|
||||
|
||||
|
||||
predicates = []
|
||||
|
||||
if tag is not None:
|
||||
|
||||
( tag_type, tag ) = tag
|
||||
|
@ -5689,7 +5754,14 @@ class DB( HydrusDB.HydrusDB ):
|
|||
predicates.append( 'hash_id IN ' + HydrusData.SplayListForDB( hash_ids ) )
|
||||
|
||||
|
||||
self._c.execute( 'INSERT INTO temp_operation ( namespace_id, tag_id, hash_id ) SELECT namespace_id, tag_id, hash_id FROM ' + source_table_name + ' WHERE ' + ' AND '.join( predicates ) + ';' )
|
||||
if len( predicates ) == 0:
|
||||
|
||||
self._c.execute( 'INSERT INTO temp_operation ( namespace_id, tag_id, hash_id ) SELECT namespace_id, tag_id, hash_id FROM ' + source_table_name + ';' )
|
||||
|
||||
else:
|
||||
|
||||
self._c.execute( 'INSERT INTO temp_operation ( namespace_id, tag_id, hash_id ) SELECT namespace_id, tag_id, hash_id FROM ' + source_table_name + ' WHERE ' + ' AND '.join( predicates ) + ';' )
|
||||
|
||||
|
||||
num_to_do = self._GetRowCount()
|
||||
|
||||
|
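The new empty-predicates branch exists because joining zero predicates yields an empty string, and a WHERE clause with nothing after it is a syntax error. Two lines make the point:

predicates = []

print( repr( ' AND '.join( predicates ) ) ) # '' -- so 'SELECT ... WHERE ;' would not parse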
@ -5707,6 +5779,8 @@ class DB( HydrusDB.HydrusDB ):
|
|||
|
||||
if sub_action == 'copy':
|
||||
|
||||
service_id_target = self._GetServiceId( service_key_target )
|
||||
|
||||
service_target = self._GetService( service_id_target )
|
||||
|
||||
if service_target.GetServiceType() == HC.LOCAL_TAG: kwarg = 'mappings_ids'
|
||||
|
@ -6132,6 +6206,7 @@ class DB( HydrusDB.HydrusDB ):
|
|||
elif action == 'file_hashes': result = self._GetFileHashes( *args, **kwargs )
|
||||
elif action == 'file_query_ids': result = self._GetHashIdsFromQuery( *args, **kwargs )
|
||||
elif action == 'file_system_predicates': result = self._GetFileSystemPredicates( *args, **kwargs )
|
||||
elif action == 'filter_hashes': result = self._FilterHashes( *args, **kwargs )
|
||||
elif action == 'hydrus_sessions': result = self._GetHydrusSessions( *args, **kwargs )
|
||||
elif action == 'imageboards': result = self._GetYAMLDump( YAML_DUMP_ID_IMAGEBOARD, *args, **kwargs )
|
||||
elif action == 'known_urls': result = self._GetKnownURLs( *args, **kwargs )
|
||||
|
@ -6143,6 +6218,7 @@ class DB( HydrusDB.HydrusDB ):
|
|||
elif action == 'local_booru_share_keys': result = self._GetYAMLDumpNames( YAML_DUMP_ID_LOCAL_BOORU )
|
||||
elif action == 'local_booru_share': result = self._GetYAMLDump( YAML_DUMP_ID_LOCAL_BOORU, *args, **kwargs )
|
||||
elif action == 'local_booru_shares': result = self._GetYAMLDump( YAML_DUMP_ID_LOCAL_BOORU )
|
||||
elif action == 'maintenance_due': result = self._MaintenanceDue( *args, **kwargs )
|
||||
elif action == 'md5_status': result = self._GetMD5Status( *args, **kwargs )
|
||||
elif action == 'media_results': result = self._GetMediaResultsFromHashes( *args, **kwargs )
|
||||
elif action == 'media_results_from_ids': result = self._GetMediaResults( *args, **kwargs )
|
||||
|
@ -6155,7 +6231,6 @@ class DB( HydrusDB.HydrusDB ):
|
|||
elif action == 'remote_boorus': result = self._GetYAMLDump( YAML_DUMP_ID_REMOTE_BOORU )
|
||||
elif action == 'service_info': result = self._GetServiceInfo( *args, **kwargs )
|
||||
elif action == 'services': result = self._GetServices( *args, **kwargs )
|
||||
elif action == 'shutdown_timestamps': result = self._GetShutdownTimestamps( *args, **kwargs )
|
||||
elif action == 'tag_censorship': result = self._GetTagCensorship( *args, **kwargs )
|
||||
elif action == 'tag_parents': result = self._GetTagParents( *args, **kwargs )
|
||||
elif action == 'tag_siblings': result = self._GetTagSiblings( *args, **kwargs )
|
||||
|
@@ -6192,6 +6267,52 @@ class DB( HydrusDB.HydrusDB ):
         
     
+    def _RegenerateACCache( self ):
+        
+        job_key = ClientThreading.JobKey()
+        
+        job_key.SetVariable( 'popup_title', 'regenerating autocomplete cache' )
+        
+        self._controller.pub( 'message', job_key )
+        
+        tag_service_ids = self._GetServiceIds( HC.TAG_SERVICES )
+        file_service_ids = self._GetServiceIds( ( HC.LOCAL_FILE, HC.FILE_REPOSITORY ) )
+        
+        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
+            
+            job_key.SetVariable( 'popup_text_1', 'generating specific ac_cache ' + str( file_service_id ) + '_' + str( tag_service_id ) )
+            
+            try:
+                
+                self._CacheSpecificMappingsDrop( file_service_id, tag_service_id )
+                
+            except:
+                
+                pass
+                
+            
+            self._CacheSpecificMappingsGenerate( file_service_id, tag_service_id )
+            
+        
+        for tag_service_id in tag_service_ids:
+            
+            job_key.SetVariable( 'popup_text_1', 'generating combined files ac_cache ' + str( tag_service_id ) )
+            
+            try:
+                
+                self._CacheCombinedFilesMappingsDrop( tag_service_id )
+                
+            except:
+                
+                pass
+                
+            
+            self._CacheCombinedFilesMappingsGenerate( tag_service_id )
+            
+        
+        job_key.SetVariable( 'popup_text_1', 'done!' )
+        
+    
     def _ResetService( self, service_key, delete_updates = False ):
         
         self._c.execute( 'COMMIT;' )
@@ -6391,7 +6512,7 @@ class DB( HydrusDB.HydrusDB ):
         
     
-    def _SyncHashesToTagArchive( self, hashes, hta_path, adding, namespaces, service_key ):
+    def _SyncHashesToTagArchive( self, hashes, hta_path, tag_service_key, adding, namespaces ):
         
         hta = None
         
@ -6433,7 +6554,7 @@ class DB( HydrusDB.HydrusDB ):
|
|||
|
||||
if len( desired_tags ) > 0:
|
||||
|
||||
if service_key != CC.LOCAL_TAG_SERVICE_KEY and not adding:
|
||||
if tag_service_key != CC.LOCAL_TAG_SERVICE_KEY and not adding:
|
||||
|
||||
action = HC.CONTENT_UPDATE_PETITION
|
||||
|
||||
|
@ -6443,7 +6564,7 @@ class DB( HydrusDB.HydrusDB ):
|
|||
|
||||
if adding:
|
||||
|
||||
if service_key == CC.LOCAL_TAG_SERVICE_KEY: action = HC.CONTENT_UPDATE_ADD
|
||||
if tag_service_key == CC.LOCAL_TAG_SERVICE_KEY: action = HC.CONTENT_UPDATE_ADD
|
||||
else: action = HC.CONTENT_UPDATE_PEND
|
||||
|
||||
else: action = HC.CONTENT_UPDATE_DELETE
|
||||
|
@ -6457,7 +6578,7 @@ class DB( HydrusDB.HydrusDB ):
|
|||
|
||||
if len( content_updates ) > 0:
|
||||
|
||||
service_keys_to_content_updates = { service_key : content_updates }
|
||||
service_keys_to_content_updates = { tag_service_key : content_updates }
|
||||
|
||||
self._ProcessContentUpdates( service_keys_to_content_updates )
|
||||
|
||||
|
@@ -7638,14 +7759,167 @@ class DB( HydrusDB.HydrusDB ):
         
             self._controller.pub( 'splash_set_status_text', 'generating specific ac_cache ' + str( file_service_id ) + '_' + str( tag_service_id ) )
             
-            self._CacheSpecificMappingsGenerate( file_service_id, tag_service_id )
+            # this is a direct copy of the old code, as v204 ditched current_mappings, breaking the call
+            # I've flattened all the other subcalls as well, just in case they soon change
+            
+            suffix = str( file_service_id ) + '_' + str( tag_service_id )
+            
+            files_table_name = 'external_caches.specific_files_cache_' + suffix
+            
+            current_mappings_table_name = 'external_caches.specific_current_mappings_cache_' + suffix
+            
+            pending_mappings_table_name = 'external_caches.specific_pending_mappings_cache_' + suffix
+            
+            ac_cache_table_name = 'external_caches.specific_ac_cache_' + suffix
+            
+            self._c.execute( 'CREATE TABLE ' + files_table_name + ' ( hash_id INTEGER PRIMARY KEY );' )
+            
+            self._c.execute( 'CREATE TABLE ' + current_mappings_table_name + ' ( hash_id INTEGER, namespace_id INTEGER, tag_id INTEGER, PRIMARY KEY( hash_id, namespace_id, tag_id ) ) WITHOUT ROWID;' )
+            
+            self._c.execute( 'CREATE TABLE ' + pending_mappings_table_name + ' ( hash_id INTEGER, namespace_id INTEGER, tag_id INTEGER, PRIMARY KEY( hash_id, namespace_id, tag_id ) ) WITHOUT ROWID;' )
+            
+            self._c.execute( 'CREATE TABLE ' + ac_cache_table_name + ' ( namespace_id INTEGER, tag_id INTEGER, current_count INTEGER, pending_count INTEGER, PRIMARY KEY( namespace_id, tag_id ) ) WITHOUT ROWID;' )
+            
+            #
+            
+            hash_ids = [ hash_id for ( hash_id, ) in self._c.execute( 'SELECT hash_id FROM current_files WHERE service_id = ?;', ( file_service_id, ) ) ]
+            
+            if len( hash_ids ) > 0:
+                
+                self._c.executemany( 'INSERT OR IGNORE INTO ' + files_table_name + ' VALUES ( ? );', ( ( hash_id, ) for hash_id in hash_ids ) )
+                
+                ac_cache_changes = []
+                
+                for group_of_hash_ids in HydrusData.SplitListIntoChunks( hash_ids, 100 ):
+                    
+                    splayed_group_of_hash_ids = HydrusData.SplayListForDB( group_of_hash_ids )
+                    
+                    current_mapping_ids_raw = self._c.execute( 'SELECT namespace_id, tag_id, hash_id FROM current_mappings WHERE service_id = ? AND hash_id IN ' + splayed_group_of_hash_ids + ';', ( tag_service_id, ) ).fetchall()
+                    
+                    current_mapping_ids_dict = HydrusData.BuildKeyToSetDict( [ ( ( namespace_id, tag_id ), hash_id ) for ( namespace_id, tag_id, hash_id ) in current_mapping_ids_raw ] )
+                    
+                    pending_mapping_ids_raw = self._c.execute( 'SELECT namespace_id, tag_id, hash_id FROM pending_mappings WHERE service_id = ? AND hash_id IN ' + splayed_group_of_hash_ids + ';', ( tag_service_id, ) ).fetchall()
+                    
+                    pending_mapping_ids_dict = HydrusData.BuildKeyToSetDict( [ ( ( namespace_id, tag_id ), hash_id ) for ( namespace_id, tag_id, hash_id ) in pending_mapping_ids_raw ] )
+                    
+                    all_ids_seen = set( current_mapping_ids_dict.keys() )
+                    all_ids_seen.update( pending_mapping_ids_dict.keys() )
+                    
+                    for ( namespace_id, tag_id ) in all_ids_seen:
+                        
+                        current_hash_ids = current_mapping_ids_dict[ ( namespace_id, tag_id ) ]
+                        
+                        num_current = len( current_hash_ids )
+                        
+                        if num_current > 0:
+                            
+                            self._c.executemany( 'INSERT OR IGNORE INTO ' + current_mappings_table_name + ' ( hash_id, namespace_id, tag_id ) VALUES ( ?, ?, ? );', ( ( hash_id, namespace_id, tag_id ) for hash_id in current_hash_ids ) )
+                            
+                        
+                        pending_hash_ids = pending_mapping_ids_dict[ ( namespace_id, tag_id ) ]
+                        
+                        num_pending = len( pending_hash_ids )
+                        
+                        if num_pending > 0:
+                            
+                            self._c.executemany( 'INSERT OR IGNORE INTO ' + pending_mappings_table_name + ' ( hash_id, namespace_id, tag_id ) VALUES ( ?, ?, ? );', ( ( hash_id, namespace_id, tag_id ) for hash_id in pending_hash_ids ) )
+                            
+                        
+                        if num_current > 0 or num_pending > 0:
+                            
+                            ac_cache_changes.append( ( namespace_id, tag_id, num_current, num_pending ) )
+                            
+                        
+                    
+                
+                if len( ac_cache_changes ) > 0:
+                    
+                    self._c.executemany( 'INSERT OR IGNORE INTO ' + ac_cache_table_name + ' ( namespace_id, tag_id, current_count, pending_count ) VALUES ( ?, ?, ?, ? );', ( ( namespace_id, tag_id, 0, 0 ) for ( namespace_id, tag_id, num_current, num_pending ) in ac_cache_changes ) )
+                    
+                    self._c.executemany( 'UPDATE ' + ac_cache_table_name + ' SET current_count = current_count + ?, pending_count = pending_count + ? WHERE namespace_id = ? AND tag_id = ?;', ( ( num_current, num_pending, namespace_id, tag_id ) for ( namespace_id, tag_id, num_current, num_pending ) in ac_cache_changes ) )
+                    
+                
             
         
         for tag_service_id in tag_service_ids:
             
             self._controller.pub( 'splash_set_status_text', 'generating combined files ac_cache ' + str( tag_service_id ) )
             
-            self._CacheCombinedFilesMappingsGenerate( tag_service_id )
+            # this is a direct copy of the old code, as v204 ditched current_mappings, breaking the call
+            # I've flattened all the other subcalls as well, just in case they soon change
+            
+            ac_cache_table_name = 'external_caches.combined_files_ac_cache_' + str( tag_service_id )
+            
+            self._c.execute( 'CREATE TABLE ' + ac_cache_table_name + ' ( namespace_id INTEGER, tag_id INTEGER, current_count INTEGER, pending_count INTEGER, PRIMARY KEY( namespace_id, tag_id ) ) WITHOUT ROWID;' )
+            
+            #
+            
+            current_mappings_exist = self._c.execute( 'SELECT 1 FROM current_mappings WHERE service_id = ? LIMIT 1;', ( tag_service_id, ) ).fetchone() is not None
+            pending_mappings_exist = self._c.execute( 'SELECT 1 FROM pending_mappings WHERE service_id = ? LIMIT 1;', ( tag_service_id, ) ).fetchone() is not None
+            
+            if current_mappings_exist or pending_mappings_exist:
+                
+                all_known_ids = self._c.execute( 'SELECT namespace_id, tag_id FROM existing_tags;' ).fetchall()
+                
+                for group_of_ids in HydrusData.SplitListIntoChunks( all_known_ids, 10000 ):
+                    
+                    current_counter = collections.Counter()
+                    
+                    if current_mappings_exist:
+                        
+                        for ( namespace_id, tag_id ) in group_of_ids:
+                            
+                            result = self._c.execute( 'SELECT COUNT( * ) FROM current_mappings WHERE service_id = ? AND namespace_id = ? AND tag_id = ?;', ( tag_service_id, namespace_id, tag_id ) ).fetchone()
+                            
+                            if result is not None:
+                                
+                                ( count, ) = result
+                                
+                                if count > 0:
+                                    
+                                    current_counter[ ( namespace_id, tag_id ) ] = count
+                                    
+                                
+                            
+                        
+                    
+                    #
+                    
+                    pending_counter = collections.Counter()
+                    
+                    if pending_mappings_exist:
+                        
+                        for ( namespace_id, tag_id ) in group_of_ids:
+                            
+                            result = self._c.execute( 'SELECT COUNT( * ) FROM pending_mappings WHERE service_id = ? AND namespace_id = ? AND tag_id = ?;', ( tag_service_id, namespace_id, tag_id ) ).fetchone()
+                            
+                            if result is not None:
+                                
+                                ( count, ) = result
+                                
+                                if count > 0:
+                                    
+                                    pending_counter[ ( namespace_id, tag_id ) ] = count
+                                    
+                                
+                            
+                        
+                    
+                    all_ids_seen = set( current_counter.keys() )
+                    all_ids_seen.update( pending_counter.keys() )
+                    
+                    count_ids = [ ( namespace_id, tag_id, current_counter[ ( namespace_id, tag_id ) ], pending_counter[ ( namespace_id, tag_id ) ] ) for ( namespace_id, tag_id ) in all_ids_seen ]
+                    
+                    if len( count_ids ) > 0:
+                        
+                        self._c.executemany( 'INSERT OR IGNORE INTO ' + ac_cache_table_name + ' ( namespace_id, tag_id, current_count, pending_count ) VALUES ( ?, ?, ?, ? );', ( ( namespace_id, tag_id, 0, 0 ) for ( namespace_id, tag_id, current_delta, pending_delta ) in count_ids ) )
+                        
+                        self._c.executemany( 'UPDATE ' + ac_cache_table_name + ' SET current_count = current_count + ?, pending_count = pending_count + ? WHERE namespace_id = ? AND tag_id = ?;', ( ( current_delta, pending_delta, namespace_id, tag_id ) for ( namespace_id, tag_id, current_delta, pending_delta ) in count_ids ) )
+                        
+                        self._c.executemany( 'DELETE FROM ' + ac_cache_table_name + ' WHERE namespace_id = ? AND tag_id = ? AND current_count = ? AND pending_count = ?;', ( ( namespace_id, tag_id, 0, 0 ) for ( namespace_id, tag_id, current_delta, pending_delta ) in count_ids ) )
+                        
+                    
+                
             
         
         cache_dir = os.path.join( HC.DB_DIR, 'client_cache' )
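The flattened update code above repeats one pattern: walk the ( namespace_id, tag_id ) id space in chunks, tally non-zero counts per key, then write the tallies in bulk. A condensed sketch of that pattern, with illustrative names:

import collections

def count_in_chunks( pairs, fetch_count, chunk_size = 10000 ):
    
    counter = collections.Counter()
    
    for i in range( 0, len( pairs ), chunk_size ):
        
        for key in pairs[ i : i + chunk_size ]:
            
            count = fetch_count( key ) # e.g. a SELECT COUNT( * ) per ( namespace_id, tag_id )
            
            if count > 0:
                
                counter[ key ] = count # only non-zero rows ever get written out
                
            
        
    
    return counter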
@@ -7753,6 +8027,13 @@ class DB( HydrusDB.HydrusDB ):
         self._c.execute( 'BEGIN IMMEDIATE;' )
         
     
+        if version == 204:
+            
+            self._c.execute( 'DROP TABLE shutdown_timestamps;' )
+            
+            self._c.execute( 'CREATE TABLE vacuum_timestamps ( name TEXT, timestamp INTEGER );' )
+            
+        
         self._controller.pub( 'splash_set_title_text', 'updated db to v' + str( version + 1 ) )
         
         self._c.execute( 'UPDATE version SET version = ?;', ( version + 1, ) )
@@ -8214,16 +8495,11 @@ class DB( HydrusDB.HydrusDB ):
         
                 ( hta_path, hta ) = self._tag_archives[ archive_name ]
                 
-                if hta.GetHashType() == HydrusTagArchive.HASH_TYPE_SHA256:
-                    
-                    HydrusData.ShowText( 'The Hydrus Tag Archive at ' + hta_path + ' uses SHA256 for its hash, which means all of its metadata can be imported in one go. This data will be imported now, but as further syncing is pointless, the tag archive sync has not been saved to service ' + new_name + '.' )
-                    
-                    del new_tag_archive_sync[ archive_name ]
-                    
-                
+                file_service_key = CC.LOCAL_FILE_SERVICE_KEY
+                
                 adding = True
                 
-                self._controller.pub( 'sync_to_tag_archive', hta_path, adding, namespaces, service_key )
+                self._controller.pub( 'sync_to_tag_archive', hta_path, service_key, file_service_key, adding, namespaces )
                 
             
@ -8257,73 +8533,108 @@ class DB( HydrusDB.HydrusDB ):
|
|||
self._c.execute( 'UPDATE services SET info = ? WHERE service_id = ?;', ( info, service_id ) )
|
||||
|
||||
|
||||
def _Vacuum( self, name = 'main', stop_time = None ):
|
||||
def _Vacuum( self, stop_time = None, force_vacuum = False ):
|
||||
|
||||
self._c.execute( 'COMMIT;' )
|
||||
stale_time_delta = HC.options[ 'maintenance_vacuum_period' ]
|
||||
|
||||
self._controller.pub( 'splash_set_status_text', 'vacuuming db' )
|
||||
|
||||
prefix = 'database maintenance - vacuum: '
|
||||
|
||||
job_key = ClientThreading.JobKey()
|
||||
|
||||
job_key.SetVariable( 'popup_text_1', prefix + 'vacuuming' )
|
||||
|
||||
self._controller.pub( 'message', job_key )
|
||||
|
||||
self._CloseDBCursor()
|
||||
|
||||
time.sleep( 1 )
|
||||
|
||||
try:
|
||||
if stale_time_delta is None:
|
||||
|
||||
db_path = os.path.join( self._db_dir, self._db_filenames[ name ] )
|
||||
return
|
||||
|
||||
if HydrusDB.CanVacuum( db_path, stop_time = stop_time ):
|
||||
|
||||
existing_names_to_timestamps = dict( self._c.execute( 'SELECT name, timestamp FROM vacuum_timestamps;' ).fetchall() )
|
||||
|
||||
db_names = [ name for ( index, name, path ) in self._c.execute( 'PRAGMA database_list;' ) if name not in ( 'mem', 'temp' ) ]
|
||||
|
||||
if force_vacuum:
|
||||
|
||||
due_names = db_names
|
||||
|
||||
else:
|
||||
|
||||
due_names = [ name for name in db_names if name not in existing_names_to_timestamps or HydrusData.TimeHasPassed( existing_names_to_timestamps[ name ] + stale_time_delta ) ]
|
||||
|
||||
|
||||
if len( due_names ) > 0:
|
||||
|
||||
self._c.execute( 'COMMIT;' )
|
||||
|
||||
job_key = ClientThreading.JobKey()
|
||||
|
||||
job_key.SetVariable( 'popup_title', 'database maintenance - vacuum' )
|
||||
|
||||
self._controller.pub( 'message', job_key )
|
||||
|
||||
self._CloseDBCursor()
|
||||
|
||||
time.sleep( 1 )
|
||||
|
||||
names_done = []
|
||||
|
||||
for name in due_names:
|
||||
|
||||
HydrusDB.VacuumDB( db_path )
|
||||
self._controller.pub( 'splash_set_status_text', 'vacuuming ' + name )
|
||||
job_key.SetVariable( 'popup_text_1', 'vacuuming ' + name )
|
||||
|
||||
try:
|
||||
|
||||
db_path = os.path.join( self._db_dir, self._db_filenames[ name ] )
|
||||
|
||||
if HydrusDB.CanVacuum( db_path, stop_time = stop_time ):
|
||||
|
||||
started = HydrusData.GetNowPrecise()
|
||||
|
||||
HydrusDB.VacuumDB( db_path )
|
||||
|
||||
time_took = HydrusData.GetNowPrecise() - started
|
||||
|
||||
HydrusData.Print( 'Vacuumed ' + db_path + ' in ' + HydrusData.ConvertTimeDeltaToPrettyString( time_took ) )
|
||||
|
||||
names_done.append( name )
|
||||
|
||||
|
||||
except Exception as e:
|
||||
|
||||
HydrusData.Print( 'vacuum failed:' )
|
||||
|
||||
HydrusData.ShowException( e )
|
||||
|
||||
size = os.path.getsize( db_path )
|
||||
|
||||
pretty_size = HydrusData.ConvertIntToBytes( size )
|
||||
|
||||
text = 'An attempt to vacuum the database failed.'
|
||||
text += os.linesep * 2
|
||||
text += 'For now, automatic vacuuming has been disabled. If the error is not obvious, please contact the hydrus developer.'
|
||||
|
||||
HydrusData.ShowText( text )
|
||||
|
||||
self._InitDBCursor()
|
||||
|
||||
self._c.execute( 'BEGIN IMMEDIATE;' )
|
||||
|
||||
HC.options[ 'maintenance_vacuum_period' ] = None
|
||||
|
||||
self._SaveOptions( HC.options )
|
||||
|
||||
return
|
||||
|
||||
|
||||
|
||||
except Exception as e:
|
||||
|
||||
HydrusData.Print( 'vacuum failed:' )
|
||||
|
||||
HydrusData.ShowException( e )
|
||||
|
||||
size = os.path.getsize( db_path )
|
||||
|
||||
pretty_size = HydrusData.ConvertIntToBytes( size )
|
||||
|
||||
text = 'An attempt to vacuum the database failed.'
|
||||
text += os.linesep * 2
|
||||
text += 'For now, automatic vacuuming has been disabled. If the error is not obvious, please contact the hydrus developer.'
|
||||
|
||||
HydrusData.ShowText( text )
|
||||
job_key.SetVariable( 'popup_text_1', 'cleaning up' )
|
||||
|
||||
self._InitDBCursor()
|
||||
|
||||
self._c.execute( 'BEGIN IMMEDIATE;' )
|
||||
|
||||
HC.options[ 'maintenance_vacuum_period' ] = None
|
||||
self._c.executemany( 'DELETE FROM vacuum_timestamps WHERE name = ?;', ( ( name, ) for name in names_done ) )
|
||||
|
||||
self._SaveOptions( HC.options )
|
||||
self._c.executemany( 'INSERT OR IGNORE INTO vacuum_timestamps ( name, timestamp ) VALUES ( ?, ? );', ( ( name, HydrusData.GetNow() ) for name in names_done ) )
|
||||
|
||||
return
|
||||
job_key.SetVariable( 'popup_text_1', 'done!' )
|
||||
|
||||
wx.CallLater( 1000 * 30, job_key.Delete )
|
||||
|
||||
|
||||
job_key.SetVariable( 'popup_text_1', prefix + 'cleaning up' )
|
||||
|
||||
self._InitDBCursor()
|
||||
|
||||
self._c.execute( 'REPLACE INTO shutdown_timestamps ( shutdown_type, timestamp ) VALUES ( ?, ? );', ( CC.SHUTDOWN_TIMESTAMP_VACUUM, HydrusData.GetNow() ) )
|
||||
|
||||
self._c.execute( 'BEGIN IMMEDIATE;' )
|
||||
|
||||
job_key.SetVariable( 'popup_text_1', prefix + 'done!' )
|
||||
|
||||
HydrusData.Print( job_key.ToString() )
|
||||
|
||||
wx.CallLater( 1000 * 30, job_key.Delete )
|
||||
|
||||
|
||||
def _Write( self, action, *args, **kwargs ):
|
||||
|
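HydrusDB.CanVacuum is called but not shown in this diff. VACUUM rewrites the whole database file beside the original, so a plausible guard (an assumption here, not the real implementation) checks disk headroom and whether the rewrite fits the remaining time budget:

import os
import time

APPROX_VACUUM_BYTES_PER_SECOND = 50 * 1024 * 1024 # rough, assumed throughput figure

def can_vacuum( db_path, stop_time = None ):
    
    size = os.path.getsize( db_path )
    
    if stop_time is not None:
        
        approx_seconds = max( 1, size // APPROX_VACUUM_BYTES_PER_SECOND )
        
        if time.time() + approx_seconds > stop_time:
            
            return False # the rewrite probably will not fit the remaining budget
            
        
    
    # VACUUM needs roughly the file's size in free space next to it
    # ( os.statvfs is POSIX-only; shutil.disk_usage is the portable route on Python 3 )
    stat = os.statvfs( os.path.dirname( db_path ) )
    
    free = stat.f_bavail * stat.f_frsize
    
    return free > size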
@@ -8348,6 +8659,7 @@ class DB( HydrusDB.HydrusDB ):
         elif action == 'imageboard': result = self._SetYAMLDump( YAML_DUMP_ID_IMAGEBOARD, *args, **kwargs )
         elif action == 'import_file': result = self._ImportFile( *args, **kwargs )
         elif action == 'local_booru_share': result = self._SetYAMLDump( YAML_DUMP_ID_LOCAL_BOORU, *args, **kwargs )
+        elif action == 'regenerate_ac_cache': result = self._RegenerateACCache( *args, **kwargs )
         elif action == 'relocate_client_files': result = self._RelocateClientFiles( *args, **kwargs )
         elif action == 'remote_booru': result = self._SetYAMLDump( YAML_DUMP_ID_REMOTE_BOORU, *args, **kwargs )
         elif action == 'reset_service': result = self._ResetService( *args, **kwargs )
@@ -8361,7 +8673,7 @@ class DB( HydrusDB.HydrusDB ):
         elif action == 'thumbnails': result = self._AddThumbnails( *args, **kwargs )
         elif action == 'update_server_services': result = self._UpdateServerServices( *args, **kwargs )
         elif action == 'update_services': result = self._UpdateServices( *args, **kwargs )
-        elif action == 'vacuum': result = self._Vacuum()
+        elif action == 'vacuum': result = self._Vacuum( *args, **kwargs )
         elif action == 'web_session': result = self._AddWebSession( *args, **kwargs )
         else: raise Exception( 'db received an unknown write command: ' + action )
         
@@ -451,7 +451,6 @@ class ClientOptions( HydrusSerialisable.SerialisableBase ):
         self._dictionary[ 'booleans' ][ 'show_thumbnail_title_banner' ] = True
         self._dictionary[ 'booleans' ][ 'show_thumbnail_page' ] = True
         
-        self._dictionary[ 'booleans' ][ 'disable_cv_for_static_images' ] = False
         self._dictionary[ 'booleans' ][ 'disable_cv_for_gifs' ] = False
         
         self._dictionary[ 'noneable_integers' ] = {}
@@ -2012,6 +2011,51 @@ class ServiceIPFS( ServiceRemote ):
         return multihash
         
     
+    def PinDirectory( self, hashes ):
+        
+        # this needs a popup message
+        # it needs some good error handling as well
+        
+        file_info = []
+        
+        for hash in hashes:
+            
+            # get the multihash from the db
+            
+            # if missing, then pin it with PinFile obviously (how does this update the db? maybe it doesn't! maybe it should!)
+            
+            pass
+            
+        
+        api_base_url = self._GetAPIBaseURL()
+        
+        url = api_base_url + 'object/new?arg=unixfs-dir'
+        
+        response = ClientNetworking.RequestsGet( url )
+        
+        for ( hash, mime, multihash ) in file_info:
+            
+            object_multihash = response.json()[ 'Hash' ]
+            
+            filename = hash.encode( 'hex' ) + HC.mime_ext_lookup[ mime ]
+            
+            url = api_base_url + 'object/patch/add-link?arg=' + object_multihash + '&arg=' + filename + '&arg=' + multihash
+            
+            response = ClientNetworking.RequestsGet( url )
+            
+        
+        directory_multihash = response.json()[ 'Hash' ]
+        
+        url = api_base_url + 'pin/add?arg=' + directory_multihash
+        
+        response = ClientNetworking.RequestsGet( url )
+        
+        if response.ok:
+            
+            return directory_multihash
+            
+        
+    
     def UnpinFile( self, multihash ):
         
         api_base_url = self._GetAPIBaseURL()
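PinDirectory's add-link chain works because each object/patch/add-link call returns a fresh directory object whose hash seeds the next link. A standalone sketch of the same flow against the legacy IPFS HTTP API, using the requests library directly (ClientNetworking.RequestsGet above is assumed to be a thin wrapper over it):

import requests

def build_pinned_directory( api_base_url, files ):
    
    # files: iterable of ( filename, multihash ) pairs already added to ipfs
    
    r = requests.get( api_base_url + 'object/new?arg=unixfs-dir' )
    
    directory_multihash = r.json()[ 'Hash' ] # an empty unixfs directory object
    
    for ( filename, multihash ) in files:
        
        url = api_base_url + 'object/patch/add-link?arg=' + directory_multihash + '&arg=' + filename + '&arg=' + multihash
        
        directory_multihash = requests.get( url ).json()[ 'Hash' ] # each link yields a new directory hash
        
    
    requests.get( api_base_url + 'pin/add?arg=' + directory_multihash )
    
    return directory_multihash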
@@ -187,9 +187,9 @@ class FrameGUI( ClientGUICommon.FrameThatResizes ):
     
     def _AnalyzeDatabase( self ):
         
-        message = 'This will gather statistical information on the database\'s indices, helping the query planner design efficient queries.'
+        message = 'This will gather statistical information on the database\'s indices, helping the query planner design efficient queries. It typically happens automatically every few days, but you can force it here. If you have a large database, it will take a few minutes, during which your gui may hang. A popup message will show its status.'
         message += os.linesep * 2
-        message += 'A \'soft\' analyze will only reanalyze those indices that are due for a check in the normal db maintenance cycle. This will typically take less than a second, but if it needs to do work, it will attempt not to take more that a few minutes, during which time your database will be locked and your gui may hang.'
+        message += 'A \'soft\' analyze will only reanalyze those indices that are due for a check in the normal db maintenance cycle. If nothing is due, it will return immediately.'
         message += os.linesep * 2
         message += 'A \'full\' analyze will force a run over every index in the database. This can take substantially longer. If you do not have a specific reason to select this, it is probably pointless.'
         
@@ -923,6 +923,7 @@ class FrameGUI( ClientGUICommon.FrameThatResizes ):
         submenu.Append( ClientCaches.MENU_EVENT_ID_TO_ACTION_CACHE.GetPermanentId( 'vacuum_db' ), p( '&Vacuum' ), p( 'Rebuild the Database.' ) )
         submenu.Append( ClientCaches.MENU_EVENT_ID_TO_ACTION_CACHE.GetPermanentId( 'analyze_db' ), p( '&Analyze' ), p( 'Reanalyze the Database.' ) )
         submenu.Append( ClientCaches.MENU_EVENT_ID_TO_ACTION_CACHE.GetPermanentId( 'rebalance_client_files' ), p( '&Rebalance File Storage' ), p( 'Move your files around your chosen storage directories until they satisfy the weights you have set in the options.' ) )
+        submenu.Append( ClientCaches.MENU_EVENT_ID_TO_ACTION_CACHE.GetPermanentId( 'regenerate_ac_cache' ), p( '&Regenerate Autocomplete Cache' ), p( 'Delete and recreate the tag autocomplete cache.' ) )
         submenu.Append( ClientCaches.MENU_EVENT_ID_TO_ACTION_CACHE.GetPermanentId( 'regenerate_thumbnails' ), p( '&Regenerate All Thumbnails' ), p( 'Delete all thumbnails and regenerate from original files.' ) )
         submenu.Append( ClientCaches.MENU_EVENT_ID_TO_ACTION_CACHE.GetPermanentId( 'file_integrity' ), p( '&Check File Integrity' ), p( 'Review and fix all local file records.' ) )
         submenu.Append( ClientCaches.MENU_EVENT_ID_TO_ACTION_CACHE.GetPermanentId( 'clear_orphans' ), p( '&Clear Orphans' ), p( 'Clear out surplus files that have found their way into the database.' ) )
@@ -1651,6 +1652,25 @@ class FrameGUI( ClientGUICommon.FrameThatResizes ):
         self._statusbar.SetStatusText( db_status, number = 3 )
         
     
+    def _RegenerateACCache( self ):
+        
+        message = 'This will delete and then recreate the entire autocomplete cache. This is useful if miscounting has somehow occurred.'
+        message += os.linesep * 2
+        message += 'If you have a lot of tags and files, it can take a long time, during which the gui may hang.'
+        message += os.linesep * 2
+        message += 'If you do not have a specific reason to run this, it is pointless.'
+        
+        with ClientGUIDialogs.DialogYesNo( self, message, yes_label = 'do it', no_label = 'forget it' ) as dlg:
+            
+            result = dlg.ShowModal()
+            
+            if result == wx.ID_YES:
+                
+                self._controller.Write( 'regenerate_ac_cache' )
+                
+            
+        
+    
     def _RegenerateThumbnails( self ):
         
         text = 'This will rebuild all your thumbnails from the original files. You probably only want to do this if you experience thumbnail errors. If you have a lot of files, it will take some time. A popup message will show its progress.'
@@ -2001,15 +2021,28 @@ The password is cleartext here but obscured in the entry dialog. Enter a blank p
     
     def _VacuumDatabase( self ):
         
-        text = 'This will rebuild the database, rewriting all indices and tables to be contiguous and optimising most operations. It happens automatically every few days, but you can force it here. If you have a large database, it will take a few minutes. A popup message will show its status'
+        text = 'This will rebuild the database, rewriting all indices and tables to be contiguous and optimising most operations. It typically happens automatically every few days, but you can force it here. If you have a large database, it will take a few minutes, during which your gui may hang. A popup message will show its status.'
+        text += os.linesep * 2
+        text += 'A \'soft\' vacuum will only vacuum those databases that are due for a check in the normal db maintenance cycle. If nothing is due, it will return immediately.'
+        text += os.linesep * 2
+        text += 'A \'full\' vacuum will immediately force a vacuum for the entire database. This can take substantially longer.'
         
-        with ClientGUIDialogs.DialogYesNo( self, text ) as dlg:
+        with ClientGUIDialogs.DialogYesNo( self, text, title = 'Choose how thorough your vacuum will be.', yes_label = 'soft', no_label = 'full' ) as dlg:
             
-            if dlg.ShowModal() == wx.ID_YES: self._controller.Write( 'vacuum' )
+            result = dlg.ShowModal()
+            
+            if result == wx.ID_YES:
+                
+                self._controller.Write( 'vacuum' )
+                
+            elif result == wx.ID_NO:
+                
+                self._controller.Write( 'vacuum', force_vacuum = True )
+                
+            
         
     
-    def _THREADSyncToTagArchive( self, hta_path, adding, namespaces, service_key ):
+    def _THREADSyncToTagArchive( self, hta_path, tag_service_key, file_service_key, adding, namespaces ):
         
         job_key = ClientThreading.JobKey( pausable = True, cancellable = True )
         
@@ -2057,6 +2090,11 @@ The password is cleartext here but obscured in the entry dialog. Enter a blank p
                 chunk_of_hydrus_hashes = self._controller.Read( 'file_hashes', chunk_of_hta_hashes, given_hash_type, 'sha256' )
                 
+                if file_service_key != CC.COMBINED_FILE_SERVICE_KEY:
+                    
+                    chunk_of_hydrus_hashes = self._controller.Read( 'filter_hashes', chunk_of_hydrus_hashes, file_service_key )
+                    
+                
                 hydrus_hashes.extend( chunk_of_hydrus_hashes )
                 
                 total_num_hta_hashes += len( chunk_of_hta_hashes )
@@ -2086,7 +2124,7 @@ The password is cleartext here but obscured in the entry dialog. Enter a blank p
         
-                self._controller.WriteSynchronous( 'sync_hashes_to_tag_archive', chunk_of_hydrus_hashes, hta_path, adding, namespaces, service_key )
+                self._controller.WriteSynchronous( 'sync_hashes_to_tag_archive', chunk_of_hydrus_hashes, hta_path, tag_service_key, adding, namespaces )
                 
                 total_num_processed += len( chunk_of_hydrus_hashes )
                 
@@ -2457,6 +2495,7 @@ The password is cleartext here but obscured in the entry dialog. Enter a blank p
         
             if page is not None: page.RefreshQuery()
             
+        elif command == 'regenerate_ac_cache': self._RegenerateACCache()
         elif command == 'regenerate_thumbnails': self._RegenerateThumbnails()
         elif command == 'restart': self.Exit( restart = True )
         elif command == 'restore_database': self._controller.RestoreDatabase()
@@ -2796,9 +2835,9 @@ The password is cleartext here but obscured in the entry dialog. Enter a blank p
         FrameSeedCache( self._controller, seed_cache )
         
     
-    def SyncToTagArchive( self, hta, adding, namespaces, service_key ):
+    def SyncToTagArchive( self, hta_path, tag_service_key, file_service_key, adding, namespaces ):
         
-        self._controller.CallToThread( self._THREADSyncToTagArchive, hta, adding, namespaces, service_key )
+        self._controller.CallToThread( self._THREADSyncToTagArchive, hta_path, tag_service_key, file_service_key, adding, namespaces )
         
     
     '''
@@ -105,17 +105,17 @@ def ExportToHTA( parent, service_key, hashes ):
     HydrusGlobals.client_controller.Write( 'export_mappings', path, service_key, hash_type, hashes )
     
 
-def ImportFromHTA( parent, path, service_key ):
+def ImportFromHTA( parent, path, tag_service_key ):
     
     hta = HydrusTagArchive.HydrusTagArchive( path )
     
     potential_namespaces = hta.GetNamespaces()
     
-    hta.GetHashType() # this tests if the hta can produce a hashtype
+    hash_type = hta.GetHashType() # this tests if the hta can produce a hashtype
     
     del hta
     
-    service = HydrusGlobals.client_controller.GetServicesManager().GetService( service_key )
+    service = HydrusGlobals.client_controller.GetServicesManager().GetService( tag_service_key )
     
     service_type = service.GetServiceType()
     
@@ -161,6 +161,35 @@ def ImportFromHTA( parent, path, service_key ):
     
     namespaces = HydrusData.ConvertPrettyStringsToUglyNamespaces( dlg_namespaces.GetChecked() )
     
+    if hash_type == HydrusTagArchive.HASH_TYPE_SHA256:
+        
+        text = 'This tag archive can be fully merged into your database, but this may be more than you want.'
+        text += os.linesep * 2
+        text += 'Would you like to import the tags only for files you actually have, or do you want absolutely everything?'
+        
+        with DialogYesNo( parent, text, title = 'How much do you want?', yes_label = 'just for my local files', no_label = 'everything' ) as dlg_add:
+            
+            result = dlg_add.ShowModal()
+            
+            if result == wx.ID_YES:
+                
+                file_service_key = CC.LOCAL_FILE_SERVICE_KEY
+                
+            elif result == wx.ID_NO:
+                
+                file_service_key = CC.COMBINED_FILE_SERVICE_KEY
+                
+            else:
+                
+                return
+                
+            
+        
+    else:
+        
+        file_service_key = CC.LOCAL_FILE_SERVICE_KEY
+        
+    
     text = 'Are you absolutely sure you want to '
     
     if adding: text += 'add'
@@ -171,8 +200,12 @@ def ImportFromHTA( parent, path, service_key ):
     text += os.linesep.join( HydrusData.ConvertUglyNamespacesToPrettyStrings( namespaces ) )
     text += os.linesep * 2
     
-    if adding: text += 'To '
-    else: text += 'From '
+    file_service = HydrusGlobals.client_controller.GetServicesManager().GetService( file_service_key )
+    
+    text += 'For ' + file_service.GetName()
+    
+    if adding: text += ' to '
+    else: text += ' from '
     
     text += service.GetName() + ' ?'
     
@@ -180,7 +213,7 @@ def ImportFromHTA( parent, path, service_key ):

if dlg_final.ShowModal() == wx.ID_YES:
    
    HydrusGlobals.client_controller.pub( 'sync_to_tag_archive', path, adding, namespaces, service_key )
    HydrusGlobals.client_controller.pub( 'sync_to_tag_archive', path, tag_service_key, file_service_key, adding, namespaces )
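note: the new block above picks the file domain from the archive's hash type: presumably only sha256 archives can be matched against files the client has never seen, so only they get the 'everything' option. a condensed sketch of that decision, reusing the constants from the diff above:

def ChooseFileServiceKey( hash_type, user_wants_everything ):
    
    # non-sha256 archives can only be resolved against files the client actually has,
    # so they are always limited to the local file domain
    if hash_type != HydrusTagArchive.HASH_TYPE_SHA256:
        
        return CC.LOCAL_FILE_SERVICE_KEY
        
    
    if user_wants_everything:
        
        return CC.COMBINED_FILE_SERVICE_KEY
        
    
    return CC.LOCAL_FILE_SERVICE_KEY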
@@ -4362,7 +4362,6 @@ class DialogManageOptions( ClientGUIDialogs.Dialog ):

self._fit_to_canvas = wx.CheckBox( self, label = '' )
self._animation_start_position = wx.SpinCtrl( self, min = 0, max = 100 )

self._disable_cv_for_static_images = wx.CheckBox( self, label = '' )
self._disable_cv_for_gifs = wx.CheckBox( self, label = '' )

self._mime_media_viewer_panel = ClientGUICommon.StaticBox( self, 'media viewer mime handling' )
@@ -4391,7 +4390,6 @@ class DialogManageOptions( ClientGUIDialogs.Dialog ):

self._fit_to_canvas.SetValue( HC.options[ 'fit_to_canvas' ] )
self._animation_start_position.SetValue( int( HC.options[ 'animation_start_position' ] * 100.0 ) )
self._disable_cv_for_static_images.SetValue( self._new_options.GetBoolean( 'disable_cv_for_static_images' ) )
self._disable_cv_for_gifs.SetValue( self._new_options.GetBoolean( 'disable_cv_for_gifs' ) )

gridbox = wx.FlexGridSizer( 0, 2 )
@@ -4420,9 +4418,6 @@ class DialogManageOptions( ClientGUIDialogs.Dialog ):

gridbox.AddF( wx.StaticText( self, label = 'Start animations this % in: ' ), CC.FLAGS_MIXED )
gridbox.AddF( self._animation_start_position, CC.FLAGS_MIXED )

gridbox.AddF( wx.StaticText( self, label = 'Disable OpenCV for static images: ' ), CC.FLAGS_MIXED )
gridbox.AddF( self._disable_cv_for_static_images, CC.FLAGS_MIXED )

gridbox.AddF( wx.StaticText( self, label = 'Disable OpenCV for gifs: ' ), CC.FLAGS_MIXED )
gridbox.AddF( self._disable_cv_for_gifs, CC.FLAGS_MIXED )
@@ -4446,7 +4441,6 @@ class DialogManageOptions( ClientGUIDialogs.Dialog ):

HC.options[ 'mime_media_viewer_actions' ] = mime_media_viewer_actions

self._new_options.SetBoolean( 'disable_cv_for_static_images', self._disable_cv_for_static_images.GetValue() )
self._new_options.SetBoolean( 'disable_cv_for_gifs', self._disable_cv_for_gifs.GetValue() )
@@ -6769,7 +6763,7 @@ class DialogManageServices( ClientGUIDialogs.Dialog ):

if self._archive_sync.GetCount() == 0:
    
    wx.MessageBox( 'Be careful with this tool! Synching a lot of files to a large archive can take a very long time to initialise.' )
    wx.MessageBox( 'Be careful with this tool! Syncing a lot of files to a large archive can take a very long time to initialise.' )
    

potential_archives = self._GetPotentialArchives()
@@ -34,35 +34,12 @@ def EfficientlyThumbnailNumpyImage( numpy_image, ( target_x, target_y ) ):

def GenerateNumpyImage( path ):
    
    new_options = HydrusGlobals.client_controller.GetNewOptions()
    # this used to be a regular cv.imread call, but it was crashing the whole process on random thumbs, hooray
    # it was just the read that was the problem, so this seems to work fine, even if pil is only about half as fast
    
    if new_options.GetBoolean( 'disable_cv_for_static_images' ):
        
        raise Exception( 'Cannot read image--OpenCV for images is currently disabled.' )
        
    
    pil_image = HydrusImageHandling.GeneratePILImage( path )
    
    numpy_image = cv2.imread( path, flags = -1 ) # flags = -1 loads alpha channel, if present
    
    if numpy_image is None:
        
        raise Exception( 'CV could not understand this image!' )
        
    
    ( width, height, depth ) = numpy_image.shape
    
    if width * height * depth != len( numpy_image.data ):
        
        raise Exception( 'CV could not understand this image; it was probably an unusual png!' )
        
    
    if depth == 4:
        
        raise Exception( 'CV is bad at alpha!' )
        
    else:
        
        numpy_image = cv2.cvtColor( numpy_image, cv2.COLOR_BGR2RGB )
        
    
    numpy_image = GenerateNumPyImageFromPILImage( pil_image )
    
    return numpy_image
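note: the new read path decodes with pil and converts to a numpy array, sidestepping the cv2.imread crashes the comment describes. GenerateNumPyImageFromPILImage's real body lives elsewhere in this file, so this is only a plausible minimal version of that conversion:

import numpy

def GenerateNumPyImageFromPILImage( pil_image ):
    
    # normalise palette/greyscale modes first so the buffer layout is predictable
    if pil_image.mode not in ( 'RGB', 'RGBA' ):
        
        pil_image = pil_image.convert( 'RGB' )
        
    
    ( w, h ) = pil_image.size
    
    depth = len( pil_image.getbands() ) # 3 for RGB, 4 for RGBA
    
    return numpy.frombuffer( pil_image.tobytes(), dtype = 'uint8' ).reshape( ( h, w, depth ) )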
@@ -85,14 +62,7 @@ def GenerateNumPyImageFromPILImage( pil_image ):

def GeneratePerceptualHash( path ):
    
    new_options = HydrusGlobals.client_controller.GetNewOptions()
    
    if new_options.GetBoolean( 'disable_cv_for_static_images' ):
        
        raise Exception( 'Cannot generate perceptual hash--OpenCV for images is currently disabled.' )
        
    
    numpy_image = cv2.imread( path, IMREAD_UNCHANGED )
    numpy_image = GenerateNumpyImage( path )
    
    ( y, x, depth ) = numpy_image.shape
@@ -666,7 +666,11 @@ class HTTPConnection( object ):

url = location

if ' ' in url:
if ', ' in url:
    
    url = url.split( ', ' )[0]
    
elif ' ' in url:
    
    # some booru is giving daft redirect responses
    HydrusData.Print( url )
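note: this copes with a malformed Location header that packs several urls into one comma-separated value. a standalone sketch of the same parse (function name hypothetical):

def ParseRedirectLocation( location ):
    
    # some servers pack several urls into one header; take the first candidate
    if ', ' in location:
        
        return location.split( ', ' )[0]
        
    
    # a bare space is also malformed; log it and pass the value through untouched
    if ' ' in location:
        
        print( 'odd redirect location: ' + location )
        
    
    return location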
@@ -18,41 +18,22 @@ def GenerateHydrusBitmap( path, compressed = True ):

    new_options = HydrusGlobals.client_controller.GetNewOptions()
    
    if new_options.GetBoolean( 'disable_cv_for_static_images' ):
        
        pil_image = HydrusImageHandling.GeneratePILImage( path )
        
        return GenerateHydrusBitmapFromPILImage( pil_image, compressed = compressed )
        
    else:
        
        numpy_image = None
        
        try:
            
            numpy_image = ClientImageHandling.GenerateNumpyImage( path )
            
            return GenerateHydrusBitmapFromNumPyImage( numpy_image, compressed = compressed )
            
        except:
            
            if numpy_image is not None:
                
                del numpy_image
                
            
            pil_image = HydrusImageHandling.GeneratePILImage( path )
            
            return GenerateHydrusBitmapFromPILImage( pil_image, compressed = compressed )
            
        
    
    numpy_image = ClientImageHandling.GenerateNumpyImage( path )
    
    return GenerateHydrusBitmapFromNumPyImage( numpy_image, compressed = compressed )
    

def GenerateHydrusBitmapFromNumPyImage( numpy_image, compressed = True ):
    
    ( y, x, depth ) = numpy_image.shape
    
    if depth == 4: buffer_format = wx.BitmapBufferFormat_RGBA
    else: buffer_format = wx.BitmapBufferFormat_RGB
    
    if depth == 4:
        
        buffer_format = wx.BitmapBufferFormat_RGBA
        
    else:
        
        buffer_format = wx.BitmapBufferFormat_RGB
        
    
    return HydrusBitmap( numpy_image.data, buffer_format, ( x, y ), compressed = compressed )
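note: once the channel depth picks a buffer format, the raw numpy buffer can be copied straight into a wx bitmap. a sketch of that final step, assuming classic wxPython's EmptyBitmap/CopyFromBuffer api, which is what hydrus targeted at this time; HydrusBitmap wraps this up differently in practice:

import wx

def NumPyImageToWxBitmap( numpy_image ):
    
    ( y, x, depth ) = numpy_image.shape
    
    bitmap = wx.EmptyBitmap( x, y, depth * 8 ) # 24bpp for RGB, 32bpp for RGBA
    
    if depth == 4:
        
        bitmap.CopyFromBuffer( numpy_image.data, format = wx.BitmapBufferFormat_RGBA )
        
    else:
        
        bitmap.CopyFromBuffer( numpy_image.data, format = wx.BitmapBufferFormat_RGB )
        
    
    return bitmap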
@@ -62,16 +43,16 @@ def GenerateHydrusBitmapFromPILImage( pil_image, compressed = True ):

    if pil_image.mode == 'P': pil_image = pil_image.convert( 'RGBA' )
    
    format = wx.BitmapBufferFormat_RGBA
    buffer_format = wx.BitmapBufferFormat_RGBA
    
    else:
        
        if pil_image.mode != 'RGB': pil_image = pil_image.convert( 'RGB' )
        
        format = wx.BitmapBufferFormat_RGB
        buffer_format = wx.BitmapBufferFormat_RGB
        
    
    return HydrusBitmap( pil_image.tobytes(), format, pil_image.size, compressed = compressed )
    return HydrusBitmap( pil_image.tobytes(), buffer_format, pil_image.size, compressed = compressed )
    

class RasterContainer( object ):
@@ -130,35 +111,11 @@ class RasterContainerImage( RasterContainer ):

    time.sleep( 0.00001 )
    
    new_options = HydrusGlobals.client_controller.GetNewOptions()
    numpy_image = ClientImageHandling.GenerateNumpyImage( self._path )
    
    if new_options.GetBoolean( 'disable_cv_for_static_images' ):
        
        pil_image = HydrusImageHandling.GeneratePILImage( self._path )
        
        resized_pil_image = HydrusImageHandling.EfficientlyResizePILImage( pil_image, self._target_resolution )
        
        hydrus_bitmap = GenerateHydrusBitmapFromPILImage( resized_pil_image )
        
    else:
        
        try:
            
            numpy_image = ClientImageHandling.GenerateNumpyImage( self._path )
            
            resized_numpy_image = ClientImageHandling.EfficientlyResizeNumpyImage( numpy_image, self._target_resolution )
            
            hydrus_bitmap = GenerateHydrusBitmapFromNumPyImage( resized_numpy_image )
            
        except:
            
            pil_image = HydrusImageHandling.GeneratePILImage( self._path )
            
            resized_pil_image = HydrusImageHandling.EfficientlyResizePILImage( pil_image, self._target_resolution )
            
            hydrus_bitmap = GenerateHydrusBitmapFromPILImage( resized_pil_image )
            
        
    
    resized_numpy_image = ClientImageHandling.EfficientlyResizeNumpyImage( numpy_image, self._target_resolution )
    
    hydrus_bitmap = GenerateHydrusBitmapFromNumPyImage( resized_numpy_image )
    
    self._hydrus_bitmap = hydrus_bitmap
@@ -53,7 +53,7 @@ options = {}

# Misc

NETWORK_VERSION = 17
SOFTWARE_VERSION = 204
SOFTWARE_VERSION = 205

UNSCALED_THUMBNAIL_DIMENSIONS = ( 200, 200 )
@@ -195,7 +195,7 @@ class HydrusController( object ):

    def MaintainDB( self ):
    def MaintainDB( self, stop_time = None ):
        
        pass
@@ -35,7 +35,7 @@ def CanVacuum( db_path, stop_time = None ):

    if stop_time is not None:
        
        approx_vacuum_speed_mb_per_s = 1048576 * 3
        approx_vacuum_speed_mb_per_s = 1048576 * 1
        
        approx_vacuum_duration = db_size / approx_vacuum_speed_mb_per_s
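note: the vacuum speed estimate drops from 3MB/s to 1MB/s, so the gate errs toward skipping a vacuum rather than overrunning stop_time. a standalone sketch of the whole check; the real function checks more than this, and the names here are assumptions:

import os
import time

def CanVacuum( db_path, stop_time = None ):
    
    db_size = os.path.getsize( db_path )
    
    if stop_time is not None:
        
        # budget a conservative 1MB/s so a big vacuum is skipped rather than overrunning
        approx_vacuum_speed_bytes_per_s = 1048576
        
        approx_vacuum_duration = db_size / approx_vacuum_speed_bytes_per_s
        
        if time.time() + approx_vacuum_duration > stop_time:
            
            return False
            
        
    
    return True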
@@ -281,7 +281,7 @@ class HydrusDB( object ):

    del db
    
    self._c.execute( 'ATTACH ? AS external_' + name + ';', ( db_path, ) )
    self._c.execute( 'ATTACH ? AS ' + name + ';', ( db_path, ) )
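note: the attached schema name now comes straight from the filenames dict, which carries the external_ prefix itself (see the _InitExternalDatabases hunk below). a standalone sqlite3 sketch of attaching a second database file and querying it by schema name; the paths and table here are illustrative, not hydrus's actual layout:

import sqlite3

db = sqlite3.connect( 'client.db', isolation_level = None )
c = db.cursor()

c.execute( 'ATTACH ? AS external_mappings;', ( 'client.mappings.db', ) )

# tables in the attached file are addressed as schema.table
c.execute( 'SELECT COUNT( * ) FROM external_mappings.sqlite_master;' )

print( c.fetchone() )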
@@ -342,7 +342,7 @@ class Controller( HydrusController.HydrusController ):

    def JustWokeFromSleep( self ): return False
    
    def MaintainDB( self ):
    def MaintainDB( self, stop_time = None ):
        
        stop_time = HydrusData.GetNow() + 10
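note: per this version's changelog, maintenance is more aggressive when the client has been idle; MaintainDB now accepts an optional stop_time deadline, and this override budgets ten seconds when none is given. a minimal sketch of deadline-bounded maintenance; the job callables are hypothetical stand-ins:

import time

def analyze_stale_tables( stop_time ): pass # hypothetical stand-ins for real maintenance work
def vacuum_due_files( stop_time ): pass

def MaintainDB( stop_time = None ):
    
    if stop_time is None:
        
        stop_time = time.time() + 10 # default to a short budget, as the override above does
        
    
    for job in ( analyze_stale_tables, vacuum_due_files ):
        
        if time.time() >= stop_time:
            
            break # out of budget; remaining work waits for the next maintenance call
            
        
        job( stop_time )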
@@ -338,7 +338,9 @@ class DB( HydrusDB.HydrusDB ):

    self._c.execute( 'ANALYZE ' + name + ';' )
    
    self._c.execute( 'REPLACE INTO analyze_timestamps ( name, timestamp ) VALUES ( ?, ? );', ( name, HydrusData.GetNow() ) )
    self._c.execute( 'DELETE FROM analyze_timestamps WHERE name = ?;', ( name, ) )
    
    self._c.execute( 'INSERT OR IGNORE INTO analyze_timestamps ( name, timestamp ) VALUES ( ?, ? );', ( name, HydrusData.GetNow() ) )
    
    time_took = HydrusData.GetNowPrecise() - started
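note: REPLACE INTO only collapses duplicates when a unique constraint exists on name; the explicit delete-then-insert keeps one row per name regardless of the table's schema. a standalone sqlite3 sketch showing the difference:

import sqlite3

db = sqlite3.connect( ':memory:' )
c = db.cursor()

# no unique constraint on name, so REPLACE INTO happily stacks duplicate rows
c.execute( 'CREATE TABLE analyze_timestamps ( name TEXT, timestamp INTEGER );' )

c.execute( 'REPLACE INTO analyze_timestamps VALUES ( ?, ? );', ( 'mappings', 1 ) )
c.execute( 'REPLACE INTO analyze_timestamps VALUES ( ?, ? );', ( 'mappings', 2 ) )

print( c.execute( 'SELECT COUNT( * ) FROM analyze_timestamps;' ).fetchone() ) # (2,)

# the delete-then-insert form keeps exactly one row per name whatever the schema says
c.execute( 'DELETE FROM analyze_timestamps WHERE name = ?;', ( 'mappings', ) )
c.execute( 'INSERT OR IGNORE INTO analyze_timestamps VALUES ( ?, ? );', ( 'mappings', 3 ) )

print( c.execute( 'SELECT COUNT( * ) FROM analyze_timestamps;' ).fetchone() ) # (1,)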
@@ -1718,8 +1720,8 @@ class DB( HydrusDB.HydrusDB ):

    def _InitExternalDatabases( self ):
        
        self._db_filenames[ 'mappings' ] = 'server.mappings.db'
        self._db_filenames[ 'master' ] = 'server.master.db'
        self._db_filenames[ 'external_mappings' ] = 'server.mappings.db'
        self._db_filenames[ 'external_master' ] = 'server.master.db'
        
    
    def _IterateFileUpdateContentData( self, service_id, begin, end ):
@@ -45,7 +45,7 @@ class TestClientDB( unittest.TestCase ):

    del c
    del db
    
    mappings_db_path = os.path.join( self._db._db_dir, self._db._db_filenames[ 'mappings' ] )
    mappings_db_path = os.path.join( self._db._db_dir, self._db._db_filenames[ 'external_mappings' ] )
    
    db = sqlite3.connect( mappings_db_path, isolation_level = None, detect_types = sqlite3.PARSE_DECLTYPES )
@@ -1179,19 +1179,6 @@ class TestClientDB( unittest.TestCase ):

    self.assertEqual( result, [] )
    
    def test_shutdown_timestamps( self ):
        
        result = self._read( 'shutdown_timestamps' )
        
        self.assertEqual( type( result ), collections.defaultdict )
        
        for ( k, v ) in result.items():
            
            self.assertEqual( type( k ), int )
            self.assertEqual( type( v ), int )
            
        
    

class TestServerDB( unittest.TestCase ):
    
    def _read( self, action, *args, **kwargs ): return self._db.Read( action, HC.HIGH_PRIORITY, *args, **kwargs )