mirror of https://github.com/vrtmrz/obsidian-livesync.git synced 2025-08-10 22:11:45 +02:00
This commit is contained in:
vorotamoroz
2022-09-06 14:32:09 +09:00
parent f8c1474700
commit 21362adb5b
9 changed files with 162 additions and 161 deletions

.gitignore vendored

@@ -12,3 +12,4 @@ main.js
# obsidian
data.json
.vscode


@@ -43,7 +43,7 @@ Note: More information about alternative hosting methods needed! Currently, [usi
### First device
1. Install the plugin on your device.
2. Configure remote database infomation.
2. Configure remote database information.
1. Fill your server's information into the `Remote Database configuration` pane.
2. Enabling `End to End Encryption` is recommended. After entering a passphrase, click `Apply`.
3. Click `Test Database Connection` and make sure that the plugin says `Connected to (your-database-name)`.
@@ -53,7 +53,7 @@ Note: More information about alternative hosting methods needed! Currently, [usi
2. Or, set up the synchronization as you like. By default, none of the settings are enabled, meaning you would need to manually trigger the synchronization process.
3. Additional configurations are also here. I recommend enabling `Use Trash for deleted files`, but you can also leave all configurations as-is.
4. Configure miscellaneous features.
1. Enabling `Show staus inside editor` shows status at the top-right corner of the editor while in editing mode. (Recommended)
1. Enabling `Show status inside editor` shows status at the top-right corner of the editor while in editing mode. (Recommended)
5. Go back to the editor. Wait for the initial scan to complete.
6. When the status no longer changes and shows a ⏹️ for COMPLETED (No ⏳ and 🧩 icons), you are ready to synchronize with the server.
7. Press the replicate icon on the Ribbon or run `Replicate now` from the command palette. This will send all your data to the server.
@@ -115,7 +115,7 @@ If you have deleted or renamed files, please wait until ⏳ icon disappeared.
- While synchronizing, files are compared by their modification time, and the older ones are overwritten by the newer ones. The plugin then checks for conflicts, and if a merge is needed, a dialog will open.
- Rarely, a file in the database could be corrupted. The plugin will not write to local storage when a file looks corrupted. If a local version of the file is on your device, the corruption can be fixed by editing the local file and synchronizing it. But if the file does not exist on any of your devices, it cannot be rescued. In this case, you can delete these items from the settings dialog.
- If your database looks corrupted, try "Drop History". Usually, it is the easiest way.
- To stop the bootup sequence (eg. for fixing problems on databases), you can put a `redflag.md` file at the root of your vault.
- To stop the boot up sequence (eg. for fixing problems on databases), you can put a `redflag.md` file at the root of your vault.
- Q: Database is growing, how can I shrink it down?
A: Each document is saved with its past 100 revisions, which are used for detecting and resolving conflicts. Picture one device that has been offline for a while and comes online again: it has to compare its notes with the remotely saved ones. If there is a historic revision in which the note was identical, it can be updated safely (like a git fast-forward). Even if there is not, we only have to check the differences after the newest revision that both devices have in common. This is like git's conflict-resolution method (a sketch of this logic follows the list). So, to solve the root of the problem, we have to recreate the database, just as with an over-enlarged git repo.
- More technical information is in the [Technical Information](docs/tech_info.md).
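
A minimal TypeScript sketch of the revision-based resolution described in the Q&A above (illustrative only; the names are hypothetical and this is not the plugin's actual code):

```typescript
// Illustrative sketch only; revision IDs and helper names are hypothetical,
// and histories are assumed to be ordered newest-first, as PouchDB returns them.
type RevId = string;

type Resolution =
    | { kind: "fast-forward" }              // remote simply moved ahead; safe to update
    | { kind: "merge-from"; rev: RevId }    // diff both sides from this shared revision
    | { kind: "no-common-rev" };            // no shared history; full conflict handling

function findResolutionPoint(localRevs: RevId[], remoteRevs: RevId[]): Resolution {
    // If the local head already appears in the remote history, the remote
    // side only added newer revisions: a git-like fast-forward is safe.
    if (remoteRevs.includes(localRevs[0])) return { kind: "fast-forward" };
    // Otherwise, only the changes made after the newest revision that both
    // devices have in common need to be compared.
    const remoteSet = new Set(remoteRevs);
    const common = localRevs.find((rev) => remoteSet.has(rev));
    return common ? { kind: "merge-from", rev: common } : { kind: "no-common-rev" };
}
```

Because only around 100 revisions are retained per document, this lookup stays cheap, which is also why the history cannot simply be discarded to shrink the database.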


@@ -165,7 +165,7 @@ export class DocumentHistoryModal extends Modal {
const leaf = app.workspace.getLeaf(false);
await leaf.openFile(targetFile);
} else {
Logger("The file cound not view on the editor", LOG_LEVEL.NOTICE)
Logger("The file could not view on the editor", LOG_LEVEL.NOTICE)
}
}
buttons.createEl("button", { text: "Back to this revision" }, (e) => {
@@ -173,7 +173,7 @@ export class DocumentHistoryModal extends Modal {
e.addEventListener("click", async () => {
const pathToWrite = this.file.startsWith("i:") ? this.file.substring("i:".length) : this.file;
if (!isValidPath(pathToWrite)) {
Logger("Path is not vaild to write content.", LOG_LEVEL.INFO);
Logger("Path is not valid to write content.", LOG_LEVEL.INFO);
}
if (this.currentDoc?.datatype == "plain") {
await this.app.vault.adapter.write(pathToWrite, this.currentDoc.data);


@@ -31,7 +31,7 @@ import { KeyValueDatabase, OpenKeyValueDatabase } from "./KeyValueDB";
import { LRUCache } from "./lib/src/LRUCache";
// when replicated, LiveSync checks chunk versions that every node used.
// If all minumum version of every devices were up, that means we can convert database automatically.
// If the minimum version on every device has been raised, we can convert the database automatically.
const currentVersionRange: ChunkVersionRange = {
min: 0,
@@ -163,7 +163,7 @@ export class LocalPouchDB {
this.nodeid = nodeinfo.nodeid;
await putDesignDocuments(this.localDatabase);
// Traceing the leaf id
// Tracing the leaf id
const changes = this.localDatabase
.changes({
since: "now",
@@ -188,7 +188,7 @@ export class LocalPouchDB {
const oi = await old.info();
if (oi.doc_count == 0) {
Logger("Old database is empty, proceed to next step", LOG_LEVEL.VERBOSE);
// aleady converted.
// already converted.
return nextSeq();
}
//
@@ -294,7 +294,7 @@ export class LocalPouchDB {
throw new Error(`Chunk was not found: ${id}`);
}
} else {
Logger(`Something went wrong on retriving chunk`);
Logger(`Something went wrong while retrieving chunks`);
throw ex;
}
}
@@ -611,15 +611,15 @@ export class LocalPouchDB {
}
// let leftData = note.data;
const savenNotes = [];
const savedNotes = [];
let processed = 0;
let made = 0;
let skiped = 0;
let skipped = 0;
const maxChunkSize = MAX_DOC_SIZE_BIN * Math.max(this.settings.customChunkSize, 1);
let pieceSize = maxChunkSize;
let plainSplit = false;
let cacheUsed = 0;
const userpasswordHash = this.h32Raw(new TextEncoder().encode(this.settings.passphrase));
const userPasswordHash = this.h32Raw(new TextEncoder().encode(this.settings.passphrase));
if (!saveAsBigChunk && shouldSplitAsPlainText(note._id)) {
pieceSize = MAX_DOC_SIZE;
plainSplit = true;
@@ -632,7 +632,7 @@ export class LocalPouchDB {
const pieces = splitPieces2(note.data, pieceSize, plainSplit, minimumChunkSize, 0);
for (const piece of pieces()) {
processed++;
let leafid = "";
let leafId = "";
// Get hash of piece.
let hashedPiece = "";
let hashQ = 0; // if hash collided, **IF**, count it up.
@@ -641,40 +641,40 @@ export class LocalPouchDB {
const cache = this.hashCaches.get(piece);
if (cache) {
hashedPiece = "";
leafid = cache;
leafId = cache;
needMake = false;
skiped++;
skipped++;
cacheUsed++;
} else {
if (this.settings.encrypt) {
// When encryption has been enabled, make hash to be different between each passphrase to avoid inferring password.
hashedPiece = "+" + (this.h32Raw(new TextEncoder().encode(piece)) ^ userpasswordHash).toString(16);
hashedPiece = "+" + (this.h32Raw(new TextEncoder().encode(piece)) ^ userPasswordHash).toString(16);
} else {
hashedPiece = this.h32(piece);
}
leafid = "h:" + hashedPiece;
leafId = "h:" + hashedPiece;
do {
let nleafid = leafid;
let newLeafId = leafId;
try {
nleafid = `${leafid}${hashQ}`;
const pieceData = await this.localDatabase.get<EntryLeaf>(nleafid);
newLeafId = `${leafId}${hashQ}`;
const pieceData = await this.localDatabase.get<EntryLeaf>(newLeafId);
if (pieceData.type == "leaf" && pieceData.data == piece) {
leafid = nleafid;
leafId = newLeafId;
needMake = false;
tryNextHash = false;
this.hashCaches.set(piece, leafid);
this.hashCaches.set(piece, leafId);
} else if (pieceData.type == "leaf") {
Logger("hash:collision!!");
hashQ++;
tryNextHash = true;
} else {
leafid = nleafid;
leafId = newLeafId;
tryNextHash = false;
}
} catch (ex) {
if (ex.status && ex.status == 404) {
//not found, we can use it.
leafid = nleafid;
leafId = newLeafId;
needMake = true;
tryNextHash = false;
} else {
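
The hunk above deduplicates content chunks by hash: with encryption enabled, the content hash is XOR-ed with a hash of the passphrase, so identical plaintext yields different chunk IDs under different passphrases and the passphrase cannot be inferred from known-content hashes. A self-contained sketch of that ID derivation, with FNV-1a standing in for the xxhash (`h32Raw`) the plugin actually uses:

```typescript
// Illustrative only: FNV-1a stands in for the plugin's xxhash (h32Raw).
function fnv1a32(data: Uint8Array): number {
    let h = 0x811c9dc5;
    for (const b of data) {
        h ^= b;
        h = Math.imul(h, 0x01000193) >>> 0;
    }
    return h;
}

const enc = new TextEncoder();

// Without encryption the chunk ID is just the content hash. With encryption
// it is XOR-ed with a hash of the passphrase (and marked with "+"), so the
// same content maps to different IDs under different passphrases.
function chunkId(piece: string, passphrase?: string): string {
    const contentHash = fnv1a32(enc.encode(piece));
    if (passphrase === undefined) return "h:" + contentHash.toString(16);
    const passphraseHash = fnv1a32(enc.encode(passphrase));
    return "h:+" + ((contentHash ^ passphraseHash) >>> 0).toString(16);
}
```

The surrounding do/while then handles the rare hash collision: a counter (`hashQ`) is appended to the ID until either an identical chunk or a free ID is found.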
@@ -689,18 +689,18 @@ export class LocalPouchDB {
const savePiece = piece;
const d: EntryLeaf = {
_id: leafid,
_id: leafId,
data: savePiece,
type: "leaf",
};
newLeafs.push(d);
this.hashCaches.set(piece, leafid);
this.hashCaches.set(piece, leafId);
made++;
} else {
skiped++;
skipped++;
}
}
savenNotes.push(leafid);
savedNotes.push(leafId);
}
let saved = true;
if (newLeafs.length > 0) {
@@ -709,7 +709,7 @@ export class LocalPouchDB {
for (const item of result) {
if (!(item as any).ok) {
if ((item as any).status && (item as any).status == 409) {
// conflicted, but it would be ok in childrens.
// conflicted, but it would be ok in children.
} else {
Logger(`Save failed:id:${item.id} rev:${item.rev}`, LOG_LEVEL.NOTICE);
Logger(item);
@@ -724,9 +724,9 @@ export class LocalPouchDB {
}
}
if (saved) {
Logger(`Content saved:${note._id} ,pieces:${processed} (new:${made}, skip:${skiped}, cache:${cacheUsed})`);
Logger(`Content saved:${note._id} ,pieces:${processed} (new:${made}, skip:${skipped}, cache:${cacheUsed})`);
const newDoc: PlainEntry | NewEntry = {
children: savenNotes,
children: savedNotes,
_id: note._id,
ctime: note.ctime,
mtime: note.mtime,
@@ -768,7 +768,7 @@ export class LocalPouchDB {
}
updateInfo: () => void = () => {
console.log("default updinfo");
console.log("Update Info default implement");
};
// eslint-disable-next-line require-await
async migrate(from: number, to: number): Promise<boolean> {
@@ -808,15 +808,15 @@ export class LocalPouchDB {
return false;
}
const dbret = await connectRemoteCouchDBWithSetting(setting, this.isMobile);
if (typeof dbret === "string") {
Logger(`could not connect to ${uri}: ${dbret}`, showResult ? LOG_LEVEL.NOTICE : LOG_LEVEL.INFO);
const dbRet = await connectRemoteCouchDBWithSetting(setting, this.isMobile);
if (typeof dbRet === "string") {
Logger(`could not connect to ${uri}: ${dbRet}`, showResult ? LOG_LEVEL.NOTICE : LOG_LEVEL.INFO);
return false;
}
if (!skipCheck) {
await putDesignDocuments(dbret.db);
if (!(await checkRemoteVersion(dbret.db, this.migrate.bind(this), VER))) {
await putDesignDocuments(dbRet.db);
if (!(await checkRemoteVersion(dbRet.db, this.migrate.bind(this), VER))) {
Logger("Remote database is newer or corrupted, make sure to latest version of self-hosted-livesync installed", LOG_LEVEL.NOTICE);
return false;
}
@@ -830,7 +830,7 @@ export class LocalPouchDB {
node_chunk_info: { [this.nodeid]: currentVersionRange }
};
const remoteMilestone: EntryMilestoneInfo = { ...defMilestonePoint, ...(await resolveWithIgnoreKnownError(dbret.db.get(MILSTONE_DOCID), defMilestonePoint)) };
const remoteMilestone: EntryMilestoneInfo = { ...defMilestonePoint, ...(await resolveWithIgnoreKnownError(dbRet.db.get(MILSTONE_DOCID), defMilestonePoint)) };
remoteMilestone.node_chunk_info = { ...defMilestonePoint.node_chunk_info, ...remoteMilestone.node_chunk_info };
this.remoteLocked = remoteMilestone.locked;
this.remoteLockedAndDeviceNotAccepted = remoteMilestone.locked && remoteMilestone.accepted_nodes.indexOf(this.nodeid) == -1;
@@ -844,7 +844,7 @@ export class LocalPouchDB {
if (writeMilestone) {
remoteMilestone.node_chunk_info[this.nodeid].min = currentVersionRange.min;
remoteMilestone.node_chunk_info[this.nodeid].max = currentVersionRange.max;
await dbret.db.put(remoteMilestone);
await dbRet.db.put(remoteMilestone);
}
// Check compatibility and make sure available version
@@ -893,7 +893,7 @@ export class LocalPouchDB {
}
const syncOption: PouchDB.Replication.SyncOptions = keepAlive ? { live: true, retry: true, heartbeat: 30000, ...syncOptionBase } : { ...syncOptionBase };
return { db: dbret.db, info: dbret.info, syncOptionBase, syncOption };
return { db: dbRet.db, info: dbRet.info, syncOptionBase, syncOption };
}
openReplication(setting: RemoteDBSettings, keepAlive: boolean, showResult: boolean, callback: (e: PouchDB.Core.ExistingDocument<EntryDoc>[]) => Promise<void>) {
@@ -932,7 +932,7 @@ export class LocalPouchDB {
Logger("Replication completed", showResult ? LOG_LEVEL.NOTICE : LOG_LEVEL.INFO, showResult ? "sync" : "");
this.syncHandler = this.cancelHandler(this.syncHandler);
}
replicationDeniend(e: any) {
replicationDenied(e: any) {
this.syncStatus = "ERRORED";
this.updateInfo();
this.syncHandler = this.cancelHandler(this.syncHandler);
@@ -958,13 +958,13 @@ export class LocalPouchDB {
callback: (e: PouchDB.Core.ExistingDocument<EntryDoc>[]) => Promise<void>,
retrying: boolean,
callbackDone: (e: boolean | any) => void,
syncmode: "sync" | "pullOnly" | "pushOnly"
syncMode: "sync" | "pullOnly" | "pushOnly"
): Promise<boolean> {
if (this.syncHandler != null) {
Logger("Replication is already in progress.", showResult ? LOG_LEVEL.NOTICE : LOG_LEVEL.INFO, "sync");
return;
}
Logger(`Oneshot Sync begin... (${syncmode})`);
Logger(`Oneshot Sync begin... (${syncMode})`);
let thisCallback = callbackDone;
const ret = await this.checkReplicationConnectivity(setting, true, retrying, showResult);
if (ret === false) {
@@ -984,17 +984,17 @@ export class LocalPouchDB {
this.originalSetting = setting;
}
this.syncHandler = this.cancelHandler(this.syncHandler);
if (syncmode == "sync") {
if (syncMode == "sync") {
this.syncHandler = this.localDatabase.sync(db, { checkpoint: "target", ...syncOptionBase });
this.syncHandler
.on("change", async (e) => {
await this.replicationChangeDetected(e, showResult, docSentOnStart, docArrivedOnStart, callback);
if (retrying) {
if (this.docSent - docSentOnStart + (this.docArrived - docArrivedOnStart) > this.originalSetting.batch_size * 2) {
// restore configration.
// restore configuration.
Logger("Back into original settings once.");
this.syncHandler = this.cancelHandler(this.syncHandler);
this.openOneshotReplication(this.originalSetting, showResult, callback, false, callbackDone, syncmode);
this.openOneshotReplication(this.originalSetting, showResult, callback, false, callbackDone, syncMode);
}
}
})
@@ -1004,17 +1004,17 @@ export class LocalPouchDB {
thisCallback(true);
}
});
} else if (syncmode == "pullOnly") {
} else if (syncMode == "pullOnly") {
this.syncHandler = this.localDatabase.replicate.from(db, { checkpoint: "target", ...syncOptionBase, ...(this.settings.readChunksOnline ? { filter: "replicate/pull" } : {}) });
this.syncHandler
.on("change", async (e) => {
await this.replicationChangeDetected({ direction: "pull", change: e }, showResult, docSentOnStart, docArrivedOnStart, callback);
if (retrying) {
if (this.docSent - docSentOnStart + (this.docArrived - docArrivedOnStart) > this.originalSetting.batch_size * 2) {
// restore configration.
// restore configuration.
Logger("Back into original settings once.");
this.syncHandler = this.cancelHandler(this.syncHandler);
this.openOneshotReplication(this.originalSetting, showResult, callback, false, callbackDone, syncmode);
this.openOneshotReplication(this.originalSetting, showResult, callback, false, callbackDone, syncMode);
}
}
})
@@ -1024,16 +1024,16 @@ export class LocalPouchDB {
thisCallback(true);
}
});
} else if (syncmode == "pushOnly") {
} else if (syncMode == "pushOnly") {
this.syncHandler = this.localDatabase.replicate.to(db, { checkpoint: "target", ...syncOptionBase, ...(this.settings.readChunksOnline ? { filter: "replicate/push" } : {}) });
this.syncHandler.on("change", async (e) => {
await this.replicationChangeDetected({ direction: "push", change: e }, showResult, docSentOnStart, docArrivedOnStart, callback);
if (retrying) {
if (this.docSent - docSentOnStart + (this.docArrived - docArrivedOnStart) > this.originalSetting.batch_size * 2) {
// restore configration.
// restore configuration.
Logger("Back into original settings once.");
this.syncHandler = this.cancelHandler(this.syncHandler);
this.openOneshotReplication(this.originalSetting, showResult, callback, false, callbackDone, syncmode);
this.openOneshotReplication(this.originalSetting, showResult, callback, false, callbackDone, syncMode);
}
}
})
@@ -1048,7 +1048,7 @@ export class LocalPouchDB {
this.syncHandler
.on("active", () => this.replicationActivated(showResult))
.on("denied", (e) => {
this.replicationDeniend(e);
this.replicationDenied(e);
if (thisCallback != null) {
thisCallback(e);
}
@@ -1058,15 +1058,15 @@ export class LocalPouchDB {
Logger("Replication stopped.", showResult ? LOG_LEVEL.NOTICE : LOG_LEVEL.INFO, "sync");
if (getLastPostFailedBySize()) {
// Duplicate settings for smaller batch.
const xsetting: RemoteDBSettings = JSON.parse(JSON.stringify(setting));
xsetting.batch_size = Math.ceil(xsetting.batch_size / 2) + 2;
xsetting.batches_limit = Math.ceil(xsetting.batches_limit / 2) + 2;
if (xsetting.batch_size <= 5 && xsetting.batches_limit <= 5) {
const tempSetting: RemoteDBSettings = JSON.parse(JSON.stringify(setting));
tempSetting.batch_size = Math.ceil(tempSetting.batch_size / 2) + 2;
tempSetting.batches_limit = Math.ceil(tempSetting.batches_limit / 2) + 2;
if (tempSetting.batch_size <= 5 && tempSetting.batches_limit <= 5) {
Logger("We can't replicate more lower value.", showResult ? LOG_LEVEL.NOTICE : LOG_LEVEL.INFO);
} else {
Logger(`Retry with lower batch size:${xsetting.batch_size}/${xsetting.batches_limit}`, showResult ? LOG_LEVEL.NOTICE : LOG_LEVEL.INFO);
Logger(`Retry with lower batch size:${tempSetting.batch_size}/${tempSetting.batches_limit}`, showResult ? LOG_LEVEL.NOTICE : LOG_LEVEL.INFO);
thisCallback = null;
this.openOneshotReplication(xsetting, showResult, callback, true, callbackDone, syncmode);
this.openOneshotReplication(tempSetting, showResult, callback, true, callbackDone, syncMode);
}
} else {
Logger("Replication error", LOG_LEVEL.NOTICE, "sync");
@@ -1108,7 +1108,7 @@ export class LocalPouchDB {
const docArrivedOnStart = this.docArrived;
const docSentOnStart = this.docSent;
if (!retrying) {
//TODO if successfly saven, roll back org setting.
//TODO if successfully saved, roll back org setting.
this.originalSetting = setting;
}
this.syncHandler = this.cancelHandler(this.syncHandler);
@@ -1135,7 +1135,7 @@ export class LocalPouchDB {
}
})
.on("complete", (e) => this.replicationCompleted(showResult))
.on("denied", (e) => this.replicationDeniend(e))
.on("denied", (e) => this.replicationDenied(e))
.on("error", (e) => {
this.replicationErrored(e);
Logger("Replication stopped.", LOG_LEVEL.NOTICE, "sync");
@@ -1184,7 +1184,7 @@ export class LocalPouchDB {
Logger("Remote Database Destroyed", LOG_LEVEL.NOTICE);
await this.tryCreateRemoteDatabase(setting);
} catch (ex) {
Logger("Something happened on Remote Database Destory:", LOG_LEVEL.NOTICE);
Logger("Something happened on Remote Database Destroy:", LOG_LEVEL.NOTICE);
Logger(ex, LOG_LEVEL.NOTICE);
}
}
@@ -1197,13 +1197,13 @@ export class LocalPouchDB {
}
async markRemoteLocked(setting: RemoteDBSettings, locked: boolean) {
const uri = setting.couchDB_URI + (setting.couchDB_DBNAME == "" ? "" : "/" + setting.couchDB_DBNAME);
const dbret = await connectRemoteCouchDBWithSetting(setting, this.isMobile);
if (typeof dbret === "string") {
Logger(`could not connect to ${uri}:${dbret}`, LOG_LEVEL.NOTICE);
const dbRet = await connectRemoteCouchDBWithSetting(setting, this.isMobile);
if (typeof dbRet === "string") {
Logger(`could not connect to ${uri}:${dbRet}`, LOG_LEVEL.NOTICE);
return;
}
if (!(await checkRemoteVersion(dbret.db, this.migrate.bind(this), VER))) {
if (!(await checkRemoteVersion(dbRet.db, this.migrate.bind(this), VER))) {
Logger("Remote database is newer or corrupted, make sure to latest version of self-hosted-livesync installed", LOG_LEVEL.NOTICE);
return;
}
@@ -1216,7 +1216,7 @@ export class LocalPouchDB {
node_chunk_info: { [this.nodeid]: currentVersionRange }
};
const remoteMilestone: EntryMilestoneInfo = { ...defInitPoint, ...await resolveWithIgnoreKnownError(dbret.db.get(MILSTONE_DOCID), defInitPoint) };
const remoteMilestone: EntryMilestoneInfo = { ...defInitPoint, ...await resolveWithIgnoreKnownError(dbRet.db.get(MILSTONE_DOCID), defInitPoint) };
remoteMilestone.node_chunk_info = { ...defInitPoint.node_chunk_info, ...remoteMilestone.node_chunk_info };
remoteMilestone.accepted_nodes = [this.nodeid];
remoteMilestone.locked = locked;
@@ -1225,17 +1225,17 @@ export class LocalPouchDB {
} else {
Logger("Unlock remote database to prevent data corruption", LOG_LEVEL.NOTICE);
}
await dbret.db.put(remoteMilestone);
await dbRet.db.put(remoteMilestone);
}
async markRemoteResolved(setting: RemoteDBSettings) {
const uri = setting.couchDB_URI + (setting.couchDB_DBNAME == "" ? "" : "/" + setting.couchDB_DBNAME);
const dbret = await connectRemoteCouchDBWithSetting(setting, this.isMobile);
if (typeof dbret === "string") {
Logger(`could not connect to ${uri}:${dbret}`, LOG_LEVEL.NOTICE);
const dbRet = await connectRemoteCouchDBWithSetting(setting, this.isMobile);
if (typeof dbRet === "string") {
Logger(`could not connect to ${uri}:${dbRet}`, LOG_LEVEL.NOTICE);
return;
}
if (!(await checkRemoteVersion(dbret.db, this.migrate.bind(this), VER))) {
if (!(await checkRemoteVersion(dbRet.db, this.migrate.bind(this), VER))) {
Logger("Remote database is newer or corrupted, make sure to latest version of self-hosted-livesync installed", LOG_LEVEL.NOTICE);
return;
}
@@ -1248,11 +1248,11 @@ export class LocalPouchDB {
node_chunk_info: { [this.nodeid]: currentVersionRange }
};
// check local database hash status and remote replicate hash status
const remoteMilestone: EntryMilestoneInfo = { ...defInitPoint, ...await resolveWithIgnoreKnownError(dbret.db.get(MILSTONE_DOCID), defInitPoint) };
const remoteMilestone: EntryMilestoneInfo = { ...defInitPoint, ...await resolveWithIgnoreKnownError(dbRet.db.get(MILSTONE_DOCID), defInitPoint) };
remoteMilestone.node_chunk_info = { ...defInitPoint.node_chunk_info, ...remoteMilestone.node_chunk_info };
remoteMilestone.accepted_nodes = Array.from(new Set([...remoteMilestone.accepted_nodes, this.nodeid]));
Logger("Mark this device as 'resolved'.", LOG_LEVEL.NOTICE);
await dbret.db.put(remoteMilestone);
await dbRet.db.put(remoteMilestone);
}
async sanCheck(entry: EntryDoc): Promise<boolean> {
if (entry.type == "plain" || entry.type == "newnote") {
@@ -1315,16 +1315,16 @@ export class LocalPouchDB {
// console.dir(chunks);
let alive = 0;
let nonref = 0;
let unreachable = 0;
for (const chunk of chunks) {
const items = chunk[1];
if (items.size == 0) {
nonref++;
unreachable++;
} else {
alive++;
}
}
Logger(`Garbage checking completed, documents:${docNum}. Used chunks:${alive}, Retained chunks:${nonref}. Retained chunks will be reused, but you can rebuild database if you feel there are too much.`, LOG_LEVEL.NOTICE, "gc");
Logger(`Garbage checking completed, documents:${docNum}. Used chunks:${alive}, Retained chunks:${unreachable}. Retained chunks will be reused, but you can rebuild the database if you feel there are too many.`, LOG_LEVEL.NOTICE, "gc");
});
return;
}


@@ -49,7 +49,7 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
<label class='sls-setting-label'><input type='radio' name='disp' value='60' class='sls-setting-tab' ><div class='sls-setting-menu-btn'>🔌</div></label>
<label class='sls-setting-label'><input type='radio' name='disp' value='70' class='sls-setting-tab' ><div class='sls-setting-menu-btn'>🚑</div></label>
`;
const menutabs = w.querySelectorAll(".sls-setting-label");
const menuTabs = w.querySelectorAll(".sls-setting-label");
const changeDisplay = (screen: string) => {
for (const k in screenElements) {
if (k == screen) {
@@ -59,11 +59,11 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
}
}
};
menutabs.forEach((element) => {
menuTabs.forEach((element) => {
const e = element.querySelector(".sls-setting-tab");
if (!e) return;
e.addEventListener("change", (event) => {
menutabs.forEach((element) => element.removeClass("selected"));
menuTabs.forEach((element) => element.removeClass("selected"));
changeDisplay((event.currentTarget as HTMLInputElement).value);
element.addClass("selected");
});
@@ -201,11 +201,11 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
.addToggle((toggle) =>
toggle.setValue(this.plugin.settings.workingEncrypt).onChange(async (value) => {
this.plugin.settings.workingEncrypt = value;
phasspharase.setDisabled(!value);
passphrase.setDisabled(!value);
await this.plugin.saveSettings();
})
);
const phasspharase = new Setting(containerRemoteDatabaseEl)
const passphrase = new Setting(containerRemoteDatabaseEl)
.setName("Passphrase")
.setDesc("Encrypting passphrase. If you change the passphrase of a existing database, overwriting the remote database is strongly recommended.")
.addText((text) => {
@@ -217,7 +217,7 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
});
text.inputEl.setAttribute("type", "password");
});
phasspharase.setDisabled(!this.plugin.settings.workingEncrypt);
passphrase.setDisabled(!this.plugin.settings.workingEncrypt);
const checkWorkingPassphrase = async (): Promise<boolean> => {
const settingForCheck: RemoteDBSettings = {
...this.plugin.settings,
@@ -417,7 +417,7 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
const res = await requestToCouchDB(this.plugin.settings.couchDB_URI, this.plugin.settings.couchDB_USER, this.plugin.settings.couchDB_PASSWORD, undefined, key, value);
console.dir(res);
if (res.status == 200) {
Logger(`${title} successfly updated`, LOG_LEVEL.NOTICE);
Logger(`${title} successfully updated`, LOG_LEVEL.NOTICE);
checkResultDiv.removeChild(x);
checkConfig();
} else {
@@ -531,10 +531,10 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
addResult("✔ CORS origin OK");
}
}
addResult("--Done--", ["ob-btn-config-haed"]);
addResult("--Done--", ["ob-btn-config-head"]);
addResult("If you have some trouble with Connection-check even though all Config-check has been passed, Please check your reverse proxy's configuration.", ["ob-btn-config-info"]);
} catch (ex) {
Logger(`Checking configration failed`);
Logger(`Checking configuration failed`);
Logger(ex);
}
};
@@ -1113,7 +1113,7 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
c.addClass("op-warn");
}
}
const hatchWarn = containerHatchEl.createEl("div", { text: `To stop the bootup sequence for fixing problems on databases, you can put redflag.md on top of your vault (Rebooting obsidian is required).` });
const hatchWarn = containerHatchEl.createEl("div", { text: `To stop the boot up sequence for fixing problems on databases, you can put redflag.md on top of your vault (Rebooting obsidian is required).` });
hatchWarn.addClass("op-warn-info");
new Setting(containerHatchEl)
@@ -1228,7 +1228,7 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
new Setting(containerHatchEl)
.setName("Drop old encrypted database")
.setDesc("WARNING: Please use this button only when you have failed on converting old-style localdatabase at v0.10.0.")
.setDesc("WARNING: Please use this button only when you have failed on converting old-style local database at v0.10.0.")
.addButton((button) =>
button
.setButtonText("Drop")
@@ -1242,7 +1242,7 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
addScreenElement("50", containerHatchEl);
// With great respect, thank you TfTHacker!
// refered: https://github.com/TfTHacker/obsidian42-brat/blob/main/src/features/BetaPlugins.ts
// Refer: https://github.com/TfTHacker/obsidian42-brat/blob/main/src/features/BetaPlugins.ts
const containerPluginSettings = containerEl.createDiv();
containerPluginSettings.createEl("h3", { text: "Plugins and settings (beta)" });

Submodule src/lib updated: 5c01ce3262...aacfa353a9


@@ -29,7 +29,7 @@ import { DocumentHistoryModal } from "./DocumentHistoryModal";
import { clearAllPeriodic, clearAllTriggers, clearTrigger, disposeMemoObject, id2path, memoIfNotExist, memoObject, path2id, retriveMemoObject, setTrigger } from "./utils";
import { clearAllPeriodic, clearAllTriggers, clearTrigger, disposeMemoObject, id2path, memoIfNotExist, memoObject, path2id, retrieveMemoObject, setTrigger } from "./utils";
import { decrypt, encrypt } from "./lib/src/e2ee_v2";
const isDebug = false;
@@ -224,8 +224,8 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
Logger(`Self-hosted LiveSync v${manifestVersion} ${packageVersion} `);
const lsname = "obsidian-live-sync-ver" + this.getVaultName();
const last_version = localStorage.getItem(lsname);
const lsKey = "obsidian-live-sync-ver" + this.getVaultName();
const last_version = localStorage.getItem(lsKey);
await this.loadSettings();
const lastVersion = ~~(versionNumberString2Number(manifestVersion) / 1000);
if (lastVersion > this.settings.lastReadUpdates) {
@@ -245,7 +245,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
this.settings.versionUpFlash = "Self-hosted LiveSync has been upgraded and some behaviors have changed incompatibly. All automatic synchronization is now disabled temporary. Ensure that other devices are also upgraded, and enable synchronization again.";
this.saveSettings();
}
localStorage.setItem(lsname, `${VER}`);
localStorage.setItem(lsKey, `${VER}`);
await this.openDatabase();
addIcon(
@@ -630,15 +630,15 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
// So, use history is always enabled.
this.settings.useHistory = true;
const lsname = "obsidian-live-sync-vaultanddevicename-" + this.getVaultName();
const lsKey = "obsidian-live-sync-vaultanddevicename-" + this.getVaultName();
if (this.settings.deviceAndVaultName != "") {
if (!localStorage.getItem(lsname)) {
if (!localStorage.getItem(lsKey)) {
this.deviceAndVaultName = this.settings.deviceAndVaultName;
localStorage.setItem(lsname, this.deviceAndVaultName);
localStorage.setItem(lsKey, this.deviceAndVaultName);
this.settings.deviceAndVaultName = "";
}
}
this.deviceAndVaultName = localStorage.getItem(lsname) || "";
this.deviceAndVaultName = localStorage.getItem(lsKey) || "";
}
triggerRealizeSettingSyncMode() {
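
Besides renaming `lsname` to `lsKey`, the hunk above shows the pattern: the device-and-vault name is kept in localStorage under a key suffixed with the vault name, because it is a per-device value that must not replicate; a value previously stored in the synchronized settings is migrated into localStorage once and then blanked out. A sketch of that migration, with illustrative names:

```typescript
// Illustrative sketch of the per-vault localStorage migration shown above.
function loadDeviceName(vaultName: string, settings: { deviceAndVaultName: string }): string {
    const lsKey = "obsidian-live-sync-vaultanddevicename-" + vaultName;
    // A name that was previously kept in the (replicated) settings is moved
    // into localStorage once, then blanked so it never syncs to other devices.
    if (settings.deviceAndVaultName !== "" && !localStorage.getItem(lsKey)) {
        localStorage.setItem(lsKey, settings.deviceAndVaultName);
        settings.deviceAndVaultName = "";
    }
    return localStorage.getItem(lsKey) || "";
}
```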
@@ -646,9 +646,9 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
}
async saveSettings() {
const lsname = "obsidian-live-sync-vaultanddevicename-" + this.getVaultName();
const lsKey = "obsidian-live-sync-vaultanddevicename-" + this.getVaultName();
localStorage.setItem(lsname, this.deviceAndVaultName || "");
localStorage.setItem(lsKey, this.deviceAndVaultName || "");
await this.saveData(this.settings);
this.localDatabase.settings = this.settings;
this.triggerRealizeSettingSyncMode();
@@ -758,7 +758,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
}
if (this.settings.suspendFileWatching) return;
// If batchsave is enabled, queue all changes and do nothing.
// If batchSave is enabled, queue all changes and do nothing.
if (this.settings.batchSave) {
~(async () => {
const meta = await this.localDatabase.getDBEntryMeta(file.path);
@@ -1211,12 +1211,12 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
saveQueuedFiles() {
const saveData = JSON.stringify(this.queuedFiles.filter((e) => !e.done).map((e) => e.entry._id));
const lsname = "obsidian-livesync-queuefiles-" + this.getVaultName();
localStorage.setItem(lsname, saveData);
const lsKey = "obsidian-livesync-queuefiles-" + this.getVaultName();
localStorage.setItem(lsKey, saveData);
}
async loadQueuedFiles() {
const lsname = "obsidian-livesync-queuefiles-" + this.getVaultName();
const ids = JSON.parse(localStorage.getItem(lsname) || "[]") as string[];
const lsKey = "obsidian-livesync-queuefiles-" + this.getVaultName();
const ids = JSON.parse(localStorage.getItem(lsKey) || "[]") as string[];
const ret = await this.localDatabase.localDatabase.allDocs({ keys: ids, include_docs: true });
for (const doc of ret.rows) {
if (doc.doc && !this.queuedFiles.some((e) => e.entry._id == doc.doc._id)) {
@@ -1494,9 +1494,9 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
const pieces = queue.map((e) => e[1].missingChildren).reduce((prev, cur) => prev + cur.length, 0);
queued = ` 🧩 ${queuedCount} (${pieces})`;
}
const procs = getProcessingCounts();
const procsDisp = procs == 0 ? "" : `${procs}`;
const message = `Sync: ${w}${sent}${arrived}${waiting}${procsDisp}${queued}`;
const processes = getProcessingCounts();
const processesDisp = processes == 0 ? "" : `${processes}`;
const message = `Sync: ${w}${sent}${arrived}${waiting}${processesDisp}${queued}`;
const locks = getLocks();
const pendingTask = locks.pending.length
? "\nPending: " +
@@ -1612,22 +1612,22 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
Logger("Updating database by new files");
this.setStatusBarText(`UPDATE DATABASE`);
const runAll = async<T>(procedurename: string, objects: T[], callback: (arg: T) => Promise<void>) => {
const runAll = async<T>(procedureName: string, objects: T[], callback: (arg: T) => Promise<void>) => {
const count = objects.length;
Logger(procedurename);
Logger(procedureName);
let i = 0;
const semaphore = Semaphore(10);
Logger(`${procedurename} exec.`);
Logger(`${procedureName} exec.`);
if (!this.localDatabase.isReady) throw Error("Database is not ready!");
const procs = objects.map(e => (async (v) => {
const releaser = await semaphore.acquire(1, procedurename);
const processes = objects.map(e => (async (v) => {
const releaser = await semaphore.acquire(1, procedureName);
try {
await callback(v);
i++;
if (i % 50 == 0) {
const notify = `${procedurename} : ${i}/${count}`;
const notify = `${procedureName} : ${i}/${count}`;
if (showingNotice) {
Logger(notify, LOG_LEVEL.NOTICE, "syncAll");
} else {
@@ -1636,16 +1636,16 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
this.setStatusBarText(notify);
}
} catch (ex) {
Logger(`Error while ${procedurename}`, LOG_LEVEL.NOTICE);
Logger(`Error while ${procedureName}`, LOG_LEVEL.NOTICE);
Logger(ex);
} finally {
releaser();
}
}
)(e));
await Promise.all(procs);
await Promise.all(processes);
Logger(`${procedurename} done.`);
Logger(`${procedureName} done.`);
};
await runAll("UPDATE DATABASE", onlyInStorage, async (e) => {
@@ -1794,7 +1794,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
// Conflicted item could not load, delete this.
await this.localDatabase.deleteDBEntry(path, { rev: test._conflicts[0] });
await this.pullFile(path, null, true);
Logger(`could not get old revisions, automaticaly used newer one:${path}`, LOG_LEVEL.NOTICE);
Logger(`could not get old revisions, automatically used newer one:${path}`, LOG_LEVEL.NOTICE);
return true;
}
// first,check for same contents
@@ -1805,19 +1805,19 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
}
await this.localDatabase.deleteDBEntry(path, { rev: leaf.rev });
await this.pullFile(path, null, true);
Logger(`automaticaly merged:${path}`);
Logger(`automatically merged:${path}`);
return true;
}
if (this.settings.resolveConflictsByNewerFile) {
const lmtime = ~~(leftLeaf.mtime / 1000);
const rmtime = ~~(rightLeaf.mtime / 1000);
const lMtime = ~~(leftLeaf.mtime / 1000);
const rMtime = ~~(rightLeaf.mtime / 1000);
let loser = leftLeaf;
if (lmtime > rmtime) {
if (lMtime > rMtime) {
loser = rightLeaf;
}
await this.localDatabase.deleteDBEntry(path, { rev: loser.rev });
await this.pullFile(path, null, true);
Logger(`Automaticaly merged (newerFileResolve) :${path}`, LOG_LEVEL.NOTICE);
Logger(`Automatically merged (newerFileResolve) :${path}`, LOG_LEVEL.NOTICE);
return true;
}
// make diff.
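
Note that `resolveConflictsByNewerFile` above compares modification times truncated to whole seconds (`~~(mtime / 1000)`), so sub-second filesystem differences do not decide the winner; the losing revision is deleted and the file is pulled again. The tiebreak in isolation (a sketch with illustrative types):

```typescript
// Sketch of the newer-file tiebreak above. `~~x` truncates toward zero, so
// both modification times are compared at whole-second precision.
function pickLoserRev(left: { rev: string; mtime: number }, right: { rev: string; mtime: number }): string {
    const lMtime = ~~(left.mtime / 1000);
    const rMtime = ~~(right.mtime / 1000);
    // The side with the older content loses; on a tie the left (local) side
    // loses, matching the plugin's default of `loser = leftLeaf`.
    return lMtime > rMtime ? right.rev : left.rev;
}
```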
@@ -1909,7 +1909,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
await runWithLock("conflicted", false, async () => {
const conflictCheckResult = await this.getConflictedStatus(file.path);
if (conflictCheckResult === false) {
//nothign to do.
//nothing to do.
return;
}
if (conflictCheckResult === true) {
@@ -2017,9 +2017,9 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
content = await this.app.vault.read(file);
datatype = "plain";
}
const fullpath = path2id(file.path);
const fullPath = path2id(file.path);
const d: LoadedEntry = {
_id: fullpath,
_id: fullPath,
data: content,
ctime: file.stat.ctime,
mtime: file.stat.mtime,
@@ -2030,16 +2030,16 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
};
//upsert should locked
const msg = `DB <- STORAGE (${datatype}) `;
const isNotChanged = await runWithLock("file:" + fullpath, false, async () => {
const isNotChanged = await runWithLock("file:" + fullPath, false, async () => {
if (recentlyTouched(file)) {
return true;
}
const old = await this.localDatabase.getDBEntry(fullpath, null, false, false);
const old = await this.localDatabase.getDBEntry(fullPath, null, false, false);
if (old !== false) {
const oldData = { data: old.data, deleted: old._deleted || old.deleted, };
const newData = { data: d.data, deleted: d._deleted || d.deleted };
if (JSON.stringify(oldData) == JSON.stringify(newData)) {
Logger(msg + "Skipped (not changed) " + fullpath + ((d._deleted || d.deleted) ? " (deleted)" : ""), LOG_LEVEL.VERBOSE);
Logger(msg + "Skipped (not changed) " + fullPath + ((d._deleted || d.deleted) ? " (deleted)" : ""), LOG_LEVEL.VERBOSE);
return true;
}
// d._rev = old._rev;
@@ -2051,7 +2051,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
this.queuedFiles = this.queuedFiles.map((e) => ({ ...e, ...(e.entry._id == d._id ? { done: true } : {}) }));
Logger(msg + fullpath);
Logger(msg + fullPath);
if (this.settings.syncOnSave && !this.suspended) {
await this.replicate();
}
@@ -2059,16 +2059,16 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
async deleteFromDB(file: TFile) {
if (!this.isTargetFile(file)) return;
const fullpath = file.path;
Logger(`deleteDB By path:${fullpath}`);
await this.deleteFromDBbyPath(fullpath);
const fullPath = file.path;
Logger(`deleteDB By path:${fullPath}`);
await this.deleteFromDBbyPath(fullPath);
if (this.settings.syncOnSave && !this.suspended) {
await this.replicate();
}
}
async deleteFromDBbyPath(fullpath: string) {
await this.localDatabase.deleteDBEntry(fullpath);
async deleteFromDBbyPath(fullPath: string) {
await this.localDatabase.deleteDBEntry(fullPath);
if (this.settings.syncOnSave && !this.suspended) {
await this.replicate();
}
@@ -2406,8 +2406,8 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
});
}
async ensureDirectoryEx(fullpath: string) {
const pathElements = fullpath.split("/");
async ensureDirectoryEx(fullPath: string) {
const pathElements = fullPath.split("/");
pathElements.pop();
let c = "";
for (const v of pathElements) {
@@ -2417,7 +2417,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
} catch (ex) {
// basically skip exceptions.
if (ex.message && ex.message == "Folder already exists.") {
// especialy this message is.
// especially this message is.
} else {
Logger("Folder Create Error");
Logger(ex);
@@ -2500,15 +2500,15 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
// If there is no conflict, return with false.
if (!("_conflicts" in doc)) return false;
if (doc._conflicts.length == 0) return false;
Logger(`Hidden file conflicetd:${id2filenameInternalChunk(id)}`);
Logger(`Hidden file conflicted:${id2filenameInternalChunk(id)}`);
const revA = doc._rev;
const revB = doc._conflicts[0];
const revBdoc = await this.localDatabase.localDatabase.get(id, { rev: revB });
// determine which revision sould been deleted.
const revBDoc = await this.localDatabase.localDatabase.get(id, { rev: revB });
// determine which revision should be deleted.
// simply check modified time
const mtimeA = ("mtime" in doc && doc.mtime) || 0;
const mtimeB = ("mtime" in revBdoc && revBdoc.mtime) || 0;
const mtimeB = ("mtime" in revBDoc && revBDoc.mtime) || 0;
// Logger(`Revisions:${new Date(mtimeA).toLocaleString} and ${new Date(mtimeB).toLocaleString}`);
// console.log(`mtime:${mtimeA} - ${mtimeB}`);
const delRev = mtimeA < mtimeB ? revA : revB;
@@ -2603,7 +2603,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
// skip if not extraction performed.
if (!await this.extractInternalFileFromDatabase(filename)) return;
}
// If process successfly updated or file contents are same, update cache.
// If the process updated successfully or the file contents are the same, update the cache.
cache.docMtime = fileOnDatabase.mtime;
cache.storageMtime = fileOnStorage.mtime;
caches[filename] = cache;
@@ -2633,7 +2633,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
await Promise.all(p);
await this.localDatabase.kvDB.set("diff-caches-internal", caches);
// When files has been retreived from the database. they must be reloaded.
// When files have been retrieved from the database, they must be reloaded.
if (direction == "pull" && filesChanged != 0) {
const configDir = normalizePath(this.app.vault.configDir);
// Show notification to restart obsidian when something has been changed in configDir.
@@ -2658,12 +2658,12 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
a.appendChild(a.createEl("a", null, (anchor) => {
anchor.text = "HERE";
anchor.addEventListener("click", async () => {
Logger(`Unloading plugin: ${updatePluginName}`, LOG_LEVEL.NOTICE, "pluin-reload-" + updatePluginId);
Logger(`Unloading plugin: ${updatePluginName}`, LOG_LEVEL.NOTICE, "plugin-reload-" + updatePluginId);
// @ts-ignore
await this.app.plugins.unloadPlugin(updatePluginId);
// @ts-ignore
await this.app.plugins.loadPlugin(updatePluginId);
Logger(`Plugin reloaded: ${updatePluginName}`, LOG_LEVEL.NOTICE, "pluin-reload-" + updatePluginId);
Logger(`Plugin reloaded: ${updatePluginName}`, LOG_LEVEL.NOTICE, "plugin-reload-" + updatePluginId);
});
}))
@@ -2680,7 +2680,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
memoObject(updatedPluginKey, new Notice(fragment, 0))
}
setTrigger(updatedPluginKey + "-close", 20000, () => {
const popup = retriveMemoObject<Notice>(updatedPluginKey)
const popup = retrieveMemoObject<Notice>(updatedPluginKey)
if (!popup) return;
//@ts-ignore
if (popup?.noticeEl?.isShown()) {


@@ -3,7 +3,7 @@ import { normalizePath } from "obsidian";
import { path2id_base, id2path_base } from "./lib/src/utils";
// For backward compatibility, using the path for determining id.
// Only CouchDB nonacceptable ID (that starts with an underscore) has been prefixed with "/".
// Only IDs that CouchDB cannot accept (those starting with an underscore) are prefixed with "/".
// The first slash will be deleted when the path is normalized.
export function path2id(filename: string): string {
const x = normalizePath(filename);
@@ -63,7 +63,7 @@ export async function memoIfNotExist<T>(key: string, func: () => T | Promise<T>)
}
return memos[key] as T;
}
export function retriveMemoObject<T>(key: string): T | false {
export function retrieveMemoObject<T>(key: string): T | false {
if (key in memos) {
return memos[key];
} else {
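
As the comment above notes, CouchDB reserves document IDs beginning with an underscore, so such paths receive a "/" prefix before being used as IDs, and the first slash is dropped again when the path is normalized. An illustrative reimplementation of the round trip (the real `path2id_base`/`id2path_base` live in `lib/src/utils` and also handle normalization):

```typescript
// Illustrative sketch; the real helpers also normalize the path.
function path2idSketch(path: string): string {
    // CouchDB rejects IDs starting with "_", so hide the underscore
    // behind a leading slash.
    return path.startsWith("_") ? "/" + path : path;
}

function id2pathSketch(id: string): string {
    // Dropping the first slash restores the original path.
    return id.startsWith("/") ? id.substring(1) : id;
}

// Round trip: "_underscored.md" -> "/_underscored.md" -> "_underscored.md"
```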


@@ -9,9 +9,9 @@ export const isValidRemoteCouchDBURI = (uri: string): boolean => {
if (uri.startsWith("http://")) return true;
return false;
};
let last_post_successed = false;
let last_successful_post = false;
export const getLastPostFailedBySize = () => {
return !last_post_successed;
return !last_successful_post;
};
const fetchByAPI = async (request: RequestUrlParam): Promise<RequestUrlResponse> => {
const ret = await requestUrl(request);
@@ -62,7 +62,7 @@ const connectRemoteCouchDB = async (uri: string, auth: { username: string; passw
if (opts_length > 1024 * 1024 * 10) {
// over 10MB
if (uri.contains(".cloudantnosqldb.")) {
last_post_successed = false;
last_successful_post = false;
Logger("This request should fail on IBM Cloudant.", LOG_LEVEL.VERBOSE);
throw new Error("This request should fail on IBM Cloudant.");
}
@@ -91,9 +91,9 @@ const connectRemoteCouchDB = async (uri: string, auth: { username: string; passw
try {
const r = await fetchByAPI(requestParam);
if (method == "POST" || method == "PUT") {
last_post_successed = r.status - (r.status % 100) == 200;
last_successful_post = r.status - (r.status % 100) == 200;
} else {
last_post_successed = true;
last_successful_post = true;
}
Logger(`HTTP:${method}${size} to:${localURL} -> ${r.status}`, LOG_LEVEL.DEBUG);
@@ -106,7 +106,7 @@ const connectRemoteCouchDB = async (uri: string, auth: { username: string; passw
Logger(`HTTP:${method}${size} to:${localURL} -> failed`, LOG_LEVEL.VERBOSE);
// limit only in bulk_docs.
if (url.toString().indexOf("_bulk_docs") !== -1) {
last_post_successed = false;
last_successful_post = false;
}
Logger(ex);
throw ex;
@@ -116,19 +116,19 @@ const connectRemoteCouchDB = async (uri: string, auth: { username: string; passw
// -old implementation
try {
const responce: Response = await fetch(url, opts);
const response: Response = await fetch(url, opts);
if (method == "POST" || method == "PUT") {
last_post_successed = responce.ok;
last_successful_post = response.ok;
} else {
last_post_successed = true;
last_successful_post = true;
}
Logger(`HTTP:${method}${size} to:${localURL} -> ${responce.status}`, LOG_LEVEL.DEBUG);
return responce;
Logger(`HTTP:${method}${size} to:${localURL} -> ${response.status}`, LOG_LEVEL.DEBUG);
return response;
} catch (ex) {
Logger(`HTTP:${method}${size} to:${localURL} -> failed`, LOG_LEVEL.VERBOSE);
// limit only in bulk_docs.
if (url.toString().indexOf("_bulk_docs") !== -1) {
last_post_successed = false;
last_successful_post = false;
}
Logger(ex);
throw ex;