4322: Upgrading a large database to new version runs out of memory; increase locktable size

svn: r17385
This commit is contained in:
Michiel Nauta 2011-05-02 20:10:37 +00:00
parent a8a8b41210
commit bdf97fabd0
2 changed files with 7 additions and 2 deletions

View File

@ -59,8 +59,8 @@ DBMODE_W = "w" # Full Read/Write access
DBPAGE = 16384 # Size of the pages used to hold items in the database
DBMODE = 0666 # Unix mode for database creation
DBCACHE = 0x4000000 # Size of the shared memory buffer pool
DBLOCKS = 25000 # Maximum number of locks supported
DBOBJECTS = 25000 # Maximum number of simultaneously locked objects
DBLOCKS = 100000 # Maximum number of locks supported
DBOBJECTS = 100000 # Maximum number of simultaneously locked objects
DBUNDO = 1000 # Maximum size of undo buffer
from bsddb.db import DB_CREATE, DB_AUTO_COMMIT, DB_DUP, DB_DUPSORT, DB_RDONLY

View File

@ -1016,6 +1016,11 @@ class DbBsddb(DbBsddbRead, DbWriteBase, UpdateCallback):
return
self.env.txn_checkpoint()
lockstats = self.env.lock_stat()
_LOG.debug("lock occupancy: %d%%, locked object occupancy: %d%%" % (
round(lockstats['maxnlocks']*100/lockstats['maxlocks']),
round(lockstats['maxnobjects']*100/lockstats['maxobjects'])))
self.__close_metadata()
self.name_group.close()
self.surnames.close()