perf(db): move some table fields to stored_only_fields to reduce memory usage of big databases #152
parent 83a78c4e6c
commit d65f214d4d
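The change in one line: string columns that were RAM-resident attributes (`rt_attr_string`) become full-text fields whose text lives in Sphinx's on-disk document storage (`stored_fields` / `stored_only_fields`), which also makes the duplicated `pathIndex` column redundant. A minimal sketch of the read path this relies on, reusing the callback-style `sphinx.query` client from the tests in this commit (the SELECT itself is illustrative, not part of the diff):

	sphinx.query("SELECT id, path, size FROM files WHERE MATCH('bashaa')", (err, rows) => {
		if(err)
			throw new Error(err)
		// path and size are now read back from the on-disk docstore at
		// SELECT time instead of being held in RAM as string attributes
		console.log(rows)
	})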
@@ -251,7 +251,6 @@ module.exports = async (callback, mainWindow, sphinxApp) => {
 			if(patchWindow)
 				patchWindow.webContents.send('reindex', {field: file.path, index: i++, all: files})
 
-			file.pathIndex = file.path
 			await sphinx.query(`DELETE FROM files WHERE id = ${file.id}`)
 			await sphinx.insertValues('files', file)
 		})
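Why this hunk: `path` is now itself the indexed field, so the patcher that rewrites every existing row no longer mirrors it into a search-only twin column before re-inserting. A before/after sketch of the write path (column names from this repo; the "before" line is the one removed above):

	// before: path (rt_attr_string, retrievable) had to be copied into
	// pathIndex (rt_field, searchable) on every write
	file.pathIndex = file.path
	await sphinx.insertValues('files', file)

	// after: path alone is both searchable (rt_field) and retrievable (stored_fields)
	await sphinx.insertValues('files', file)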
@@ -389,7 +388,6 @@ module.exports = async (callback, mainWindow, sphinxApp) => {
 				id: newId++,
 				hash,
 				path: filesMap[hash][0].path,
-				pathIndex: filesMap[hash][0].path,
 				size_new: filesMap[hash][0].size.toString()
 			});
 			logT('patcher', 'patched file', fileIndex, 'from', count, 'hash', hash, 'cIndex', ++hashCount);
@@ -437,7 +435,7 @@ module.exports = async (callback, mainWindow, sphinxApp) => {
 				return
 			file.size = file.size_new.toString();
 			delete file.size_new;
-			await sphinx.replaceValues('files', file, {particial: false, sphinxIndex: {pathIndex: 'path'}});
+			await sphinx.replaceValues('files', file, {particial: false});
 			if(patchWindow)
 				patchWindow.webContents.send('reindex', {field: file.id, index: fileIndex, all: count, longTime: false, canBreak: true})
 			logT('patcher', 'restore patched file', fileIndex++, 'from', count, 'hash', file.hash);
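The dropped `sphinxIndex: {pathIndex: 'path'}` option told `replaceValues` to populate the `pathIndex` column from `path` on each write; with the column removed from the schema, the plain options object is enough. An inferred equivalence (the remap behaviour is deduced from the first hunk, where the same mirroring was done by hand):

	// old call, effectively:
	file.pathIndex = file.path  // what the sphinxIndex remap produced
	await sphinx.replaceValues('files', file, {particial: false})

	// new call, after the schema change:
	await sphinx.replaceValues('files', file, {particial: false})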
@@ -48,10 +48,10 @@ const writeSphinxConfig = async (rootPath, dbPath, params = {}) => {
 	rt_attr_uint = files
 	rt_attr_uint = piecelength
 	rt_attr_timestamp = added
-	rt_attr_string = ipv4
+	rt_field = ipv4
 	rt_attr_uint = port
-	rt_attr_string = contentType
-	rt_attr_string = contentCategory
+	rt_field = contentType
+	rt_field = contentCategory
 	rt_attr_uint = seeders
 	rt_attr_uint = leechers
 	rt_attr_uint = completed
@@ -60,19 +60,23 @@ const writeSphinxConfig = async (rootPath, dbPath, params = {}) => {
 	rt_attr_uint = bad
 	rt_attr_json = info
 
+	stored_only_fields = contentType, contentCategory, ipv4
+
 	ngram_len = 1
 	ngram_chars = U+3000..U+2FA1F
 }
 
 index files
 {
 	type = rt
 	path = ${dbPath}/database/files
 
-	rt_attr_string = path
-	rt_field = pathIndex
+	rt_field = path
 	rt_attr_string = hash
-	rt_attr_string = size
+	rt_field = size
+
+	stored_fields = path
+	stored_only_fields = size
 }
 
 index version
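How the new `files` schema splits duties, assuming standard Sphinx 3 docstore semantics (an assumption about the project's Sphinx build, not something this diff states): `rt_field = path` plus `stored_fields = path` keeps `path` full-text searchable and retrievable; `rt_field = size` plus `stored_only_fields = size` keeps `size` retrievable from disk without indexing it; neither is an attribute any more, so attribute-style filtering and sorting on them goes away. A sketch under those assumptions:

	// still works: MATCH() searches path, and both columns come back from the docstore
	sphinx.query("SELECT path, size FROM files WHERE MATCH('biotu')", (err, rows) => {
		if(err)
			throw new Error(err)
		console.log(rows)
	})
	// presumably no longer possible: size is not an attribute, so filters or
	// sorts such as WHERE size = '30' / ORDER BY size would stop working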
@@ -606,7 +606,6 @@ module.exports = function (send, recive, dataDirectory, version, env)
 				id: torrent.id,
 				hash: torrent.hash,
 				path,
-				pathIndex: path,
 				size
 			}, function(err, result) {
 				if(!result) {
@@ -17,11 +17,11 @@ describe("sphinx", () => {
 	})
 
 	it("insert", function(done) {
-		sphinx.query("INSERT INTO files(id, hash, path, pathIndex, size) VALUES(50001, 'a', 'bashaa', 'bashaa', '50')", (err) => {
+		sphinx.query("INSERT INTO files(id, hash, path, size) VALUES(50001, 'a', 'bashaa', '50')", (err) => {
 			if(err)
 				throw new Error(err)
 
-			sphinx.query("INSERT INTO files(id, hash, path, pathIndex, size) VALUES(50002, 'b', 'biotu', 'biotu', '30')", (err) => {
+			sphinx.query("INSERT INTO files(id, hash, path, size) VALUES(50002, 'b', 'biotu', '30')", (err) => {
 				if(err)
 					throw new Error(err)
 
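The tests simply drop `pathIndex` from the INSERT column lists. A natural follow-up assertion (purely illustrative, not in this commit) would be that the stored fields round-trip, using the values the test inserts:

	sphinx.query("SELECT path, size FROM files WHERE MATCH('bashaa')", (err, result) => {
		if(err)
			throw new Error(err)
		// with stored fields working, expect result[0].path === 'bashaa'
		// and result[0].size === '50'
		done()
	})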