Merge branch 'main' of https://github.com/vrtmrz/obsidian-livesync

manifest.json
@@ -1,7 +1,7 @@
 {
     "id": "obsidian-livesync",
     "name": "Self-hosted LiveSync",
-    "version": "0.24.27",
+    "version": "0.24.28",
     "minAppVersion": "0.9.12",
     "description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
     "author": "vorotamoroz",

package-lock.json (generated)
@@ -1,12 +1,12 @@
 {
     "name": "obsidian-livesync",
-    "version": "0.24.26",
+    "version": "0.24.28",
     "lockfileVersion": 2,
     "requires": true,
     "packages": {
         "": {
             "name": "obsidian-livesync",
-            "version": "0.24.26",
+            "version": "0.24.28",
             "license": "MIT",
             "dependencies": {
                 "@aws-sdk/client-s3": "^3.808.0",

package.json
@@ -1,6 +1,6 @@
 {
     "name": "obsidian-livesync",
-    "version": "0.24.27",
+    "version": "0.24.28",
     "description": "Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
     "main": "main.js",
     "type": "module",

Submodule src/lib updated: 68434acfdd...3f3cf7d61d

@@ -244,34 +244,49 @@ Even if you choose to clean up, you will see this option again if you exit Obsidian
     async loadQueuedFiles() {
         if (this.settings.suspendParseReplicationResult) return;
         if (!this.settings.isConfigured) return;
-        const kvDBKey = "queued-files";
-        // const ids = [...new Set(JSON.parse(localStorage.getItem(lsKey) || "[]"))] as string[];
-        const ids = [...new Set((await this.core.kvDB.get<string[]>(kvDBKey)) ?? [])];
-        const batchSize = 100;
-        const chunkedIds = arrayToChunkedArray(ids, batchSize);
-
-        // suspendParseReplicationResult is true, so we have to resume it if it is suspended.
-        if (this.replicationResultProcessor.isSuspended) {
-            this.replicationResultProcessor.resume();
-        }
-        for await (const idsBatch of chunkedIds) {
-            const ret = await this.localDatabase.allDocsRaw<EntryDoc>({
-                keys: idsBatch,
-                include_docs: true,
-                limit: 100,
-            });
-            const docs = ret.rows.filter((e) => e.doc).map((e) => e.doc) as PouchDB.Core.ExistingDocument<EntryDoc>[];
-            const errors = ret.rows.filter((e) => !e.doc && !e.value.deleted);
-            if (errors.length > 0) {
-                Logger("Some queued processes were not resurrected");
-                Logger(JSON.stringify(errors), LOG_LEVEL_VERBOSE);
-            }
-            this.replicationResultProcessor.enqueueAll(docs);
-        }
-        if (this.replicationResultProcessor.isSuspended) {
-            this.replicationResultProcessor.resume();
-        }
-        await this.replicationResultProcessor.waitForAllProcessed();
+        try {
+            const kvDBKey = "queued-files";
+            // const ids = [...new Set(JSON.parse(localStorage.getItem(lsKey) || "[]"))] as string[];
+            const ids = [...new Set((await this.core.kvDB.get<string[]>(kvDBKey)) ?? [])];
+            const batchSize = 100;
+            const chunkedIds = arrayToChunkedArray(ids, batchSize);
+
+            // suspendParseReplicationResult is true, so we have to resume it if it is suspended.
+            if (this.replicationResultProcessor.isSuspended) {
+                this.replicationResultProcessor.resume();
+            }
+            for await (const idsBatch of chunkedIds) {
+                const ret = await this.localDatabase.allDocsRaw<EntryDoc>({
+                    keys: idsBatch,
+                    include_docs: true,
+                    limit: 100,
+                });
+                const docs = ret.rows
+                    .filter((e) => e.doc)
+                    .map((e) => e.doc) as PouchDB.Core.ExistingDocument<EntryDoc>[];
+                const errors = ret.rows.filter((e) => !e.doc && !e.value.deleted);
+                if (errors.length > 0) {
+                    Logger("Some queued processes were not resurrected");
+                    Logger(JSON.stringify(errors), LOG_LEVEL_VERBOSE);
+                }
+                this.replicationResultProcessor.enqueueAll(docs);
+            }
+        } catch (e) {
+            Logger(`Failed to load queued files.`, LOG_LEVEL_NOTICE);
+            Logger(e, LOG_LEVEL_VERBOSE);
+        } finally {
+            // Check again before awaiting,
+            if (this.replicationResultProcessor.isSuspended) {
+                this.replicationResultProcessor.resume();
+            }
+            // Wait for all queued files to be processed.
+            try {
+                await this.replicationResultProcessor.waitForAllProcessed();
+            } catch (e) {
+                Logger(`Failed to wait for all queued files to be processed.`, LOG_LEVEL_NOTICE);
+                Logger(e, LOG_LEVEL_VERBOSE);
+            }
+        }
     }
 
     replicationResultProcessor = new QueueProcessor(

@@ -2090,7 +2090,12 @@ The pane also can be launched by \`P2P Replicator\` command from the Command Palette
                 .autoWireToggle("syncAfterMerge", { onUpdate: onlyOnNonLiveSync });
         });
 
-        void addPanel(paneEl, $msg("obsidianLiveSyncSettingTab.titleUpdateThinning")).then((paneEl) => {
+        void addPanel(
+            paneEl,
+            $msg("obsidianLiveSyncSettingTab.titleUpdateThinning"),
+            undefined,
+            visibleOnly(() => !this.isConfiguredAs("syncMode", "LIVESYNC"))
+        ).then((paneEl) => {
             paneEl.addClass("wizardHidden");
             new Setting(paneEl).setClass("wizardHidden").autoWireToggle("batchSave");
             new Setting(paneEl).setClass("wizardHidden").autoWireNumeric("batchSaveMinimumDelay", {
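
The settings hunk above threads a visibility predicate into addPanel, so the Update Thinning pane (batchSave and its related options) is shown only while the sync mode is not LIVESYNC. This matches the 0.24.28 note below that Batch Update is no longer available in LiveSync mode (#653). As a rough illustration of the idea, a hypothetical predicate-driven visibility helper might look like the TypeScript below; visibleOnlySketch is an assumption for this sketch and is not the plugin's actual visibleOnly implementation, which is wired into its settings framework.

// Hypothetical sketch: returns a callback that shows or hides a pane element based on a
// predicate. The caller re-applies it whenever the relevant setting changes.
function visibleOnlySketch(predicate: () => boolean): (el: HTMLElement) => void {
    return (el) => {
        // Hide with CSS rather than removing the element, so the pane can reappear
        // once the predicate becomes true again (e.g. when leaving LiveSync mode).
        el.style.display = predicate() ? "" : "none";
    };
}

// Usage (illustrative): hide the pane while a hypothetical settings.syncMode is "LIVESYNC".
// const applyVisibility = visibleOnlySketch(() => settings.syncMode !== "LIVESYNC");
// applyVisibility(paneEl);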

@@ -1,3 +1,12 @@
+## 0.24.28
+
+### Fixed
+
+- Batch Update is no longer available in LiveSync mode to avoid unexpected behaviour. (#653)
+- Now compatible with Cloudflare R2 again for bucket synchronisation.
+  - @edo-bari-ikutsu, thank you for [your contribution](https://github.com/vrtmrz/livesync-commonlib/pull/12)!
+- Prevention of broken behaviour due to database connection failures added (#649).
+
 ## 0.24.27
 
 ### Improved