diff --git a/.eslintrc b/.eslintrc index f9ca43f..64c653b 100644 --- a/.eslintrc +++ b/.eslintrc @@ -7,7 +7,7 @@ "sourceType": "module", "project": ["tsconfig.json"] }, - "ignorePatterns": ["src/lib/src/API/*.ts"], + "ignorePatterns": [], "rules": { "no-unused-vars": "off", "@typescript-eslint/no-unused-vars": [ diff --git a/manifest-beta.json b/manifest-beta.json new file mode 100644 index 0000000..8c4cc40 --- /dev/null +++ b/manifest-beta.json @@ -0,0 +1,10 @@ +{ + "id": "obsidian-livesync", + "name": "Self-hosted LiveSync", + "version": "0.24.0.dev-rc2", + "minAppVersion": "0.9.12", + "description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.", + "author": "vorotamoroz", + "authorUrl": "https://github.com/vrtmrz", + "isDesktopOnly": false +} diff --git a/package-lock.json b/package-lock.json index aa5ee51..6369a8d 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "obsidian-livesync", - "version": "0.24.0.rc1", + "version": "0.24.0.dev-rc2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "obsidian-livesync", - "version": "0.24.0.rc1", + "version": "0.24.0.dev-rc2", "license": "MIT", "dependencies": { "@aws-sdk/client-s3": "^3.645.0", diff --git a/package.json b/package.json index 4e3b261..9c8ef46 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "obsidian-livesync", - "version": "0.24.0.rc1", + "version": "0.24.0.dev-rc2", "description": "Reflect your vault changes to some other devices immediately. 
Please make sure to disable other synchronize solutions to avoid content corruption or duplication.", "main": "main.js", "type": "module", @@ -73,4 +73,4 @@ "xxhash-wasm": "0.4.2", "xxhash-wasm-102": "npm:xxhash-wasm@^1.0.2" } -} \ No newline at end of file +} diff --git a/src/lib b/src/lib index ed2b0c0..92d7b03 160000 --- a/src/lib +++ b/src/lib @@ -1 +1 @@ -Subproject commit ed2b0c00fcf98a0fad185e204fbc044e302f9ea1 +Subproject commit 92d7b03916cce550fba4fc2334d7eb4d57960e00 diff --git a/src/modules/AbstractModule.ts b/src/modules/AbstractModule.ts index 2a0a141..c757de1 100644 --- a/src/modules/AbstractModule.ts +++ b/src/modules/AbstractModule.ts @@ -131,7 +131,7 @@ export function injectModules(target: T, modules: ICoreMo export abstract class AbstractModule { _log = (msg: any, level: LOG_LEVEL = LOG_LEVEL_INFO, key?: string) => { if (typeof msg === "string" && level !== LOG_LEVEL_NOTICE) { - msg = `[${this.constructor.name}] ${msg}`; + msg = `[${this.constructor.name}]\u{200A} ${msg}`; } // console.log(msg); Logger(msg, level, key); diff --git a/src/modules/coreObsidian/ModuleFileAccessObsidian.ts b/src/modules/coreObsidian/ModuleFileAccessObsidian.ts index 7f42506..1c885de 100644 --- a/src/modules/coreObsidian/ModuleFileAccessObsidian.ts +++ b/src/modules/coreObsidian/ModuleFileAccessObsidian.ts @@ -18,10 +18,6 @@ export class ModuleFileAccessObsidian extends AbstractObsidianModule implements } $everyOnFirstInitialize(): Promise { this.vaultManager.beginWatch(); - this.plugin.totalQueued = this.vaultManager.totalQueued; - this.plugin.batched = this.vaultManager.batched; - this.plugin.processing = this.vaultManager.processing; - return Promise.resolve(true); } $allOnUnload(): Promise { diff --git a/src/modules/coreObsidian/storageLib/StorageEventManager.ts b/src/modules/coreObsidian/storageLib/StorageEventManager.ts index e6b0380..3a4b926 100644 --- a/src/modules/coreObsidian/storageLib/StorageEventManager.ts +++ 
b/src/modules/coreObsidian/storageLib/StorageEventManager.ts @@ -6,7 +6,6 @@ import { delay, fireAndForget } from "../../../lib/src/common/utils.ts"; import { type FileEventItem, type FileEventType } from "../../../common/types.ts"; import { serialized, skipIfDuplicated } from "../../../lib/src/concurrency/lock.ts"; import { finishAllWaitingForTimeout, finishWaitingForTimeout, isWaitingForTimeout, waitForTimeout } from "../../../lib/src/concurrency/task.ts"; -import { reactiveSource, type ReactiveSource } from "../../../lib/src/dataobject/reactive.ts"; import { Semaphore } from "../../../lib/src/concurrency/semaphore.ts"; import type { LiveSyncCore } from "../../../main.ts"; import { InternalFileToUXFileInfoStub, TFileToUXFileInfoStub } from "./utilObsidian.ts"; @@ -29,17 +28,12 @@ export abstract class StorageEventManager { abstract appendQueue(items: FileEvent[], ctx?: any): Promise; abstract cancelQueue(key: string): void; abstract isWaiting(filename: FilePath): boolean; - abstract totalQueued: ReactiveSource; - abstract batched: ReactiveSource; - abstract processing: ReactiveSource; } export class StorageEventManagerObsidian extends StorageEventManager { - totalQueued = reactiveSource(0); - batched = reactiveSource(0); - processing = reactiveSource(0); + plugin: ObsidianLiveSyncPlugin; core: LiveSyncCore; @@ -330,9 +324,10 @@ export class StorageEventManagerObsidian extends StorageEventManager { } updateStatus() { const allItems = this.bufferedQueuedItems.filter(e => !e.cancelled) - this.batched.value = allItems.filter(e => e.batched && !e.skipBatchWait).length; - this.processing.value = this.processingCount; - this.totalQueued.value = allItems.length - this.batched.value; + const batchedCount = allItems.filter(e => e.batched && !e.skipBatchWait).length; + this.core.batched.value = batchedCount + this.core.processing.value = this.processingCount; + this.core.totalQueued.value = allItems.length - batchedCount; } async handleFileEvent(queue: FileEventItem): 
Promise { @@ -340,7 +335,6 @@ export class StorageEventManagerObsidian extends StorageEventManager { const lockKey = `handleFile:${file.path}`; return await serialized(lockKey, async () => { // TODO CHECK - // console.warn(lockKey); const key = `file-last-proc-${queue.type}-${file.path}`; const last = Number(await this.core.kvDB.get(key) || 0); if (queue.type == "INTERNAL" || file.isInternal) { @@ -362,77 +356,11 @@ export class StorageEventManagerObsidian extends StorageEventManager { this.cancelRelativeEvent(queue); return; } - // if (queue.type == "CREATE" || queue.type == "CHANGED") { - // // eventHub.emitEvent("event-file-changed", { file: targetFile, automated: true }); - - // if (!await this.core.updateIntoDB(targetFile, undefined)) { - // Logger(`STORAGE -> DB: failed, cancel the relative operations: ${targetFile.path}`, LOG_LEVEL_INFO); - // // cancel running queues and remove one of atomic operation - // this.cancelRelativeEvent(queue); - // return; - // } - // } - // if (queue.type == "RENAME") { - // // Obsolete , can be called? 
- // await this.renameVaultItem(targetFile, queue.args.oldPath); - // } - // } - // await this.core.deleteFromDBbyPath(file.path); - // mtime = file.stat.mtime - 1; - // const keyD1 = `file-last-proc-CREATE-${file.path}`; - // const keyD2 = `file-last-proc-CHANGED-${file.path}`; - // await this.core.kvDB.set(keyD1, mtime); - // await this.core.kvDB.set(keyD2, mtime); - // } else { - // const targetFile = this.core.storageAccess.getFileStub(file.path); - // if (!(targetFile)) { - // Logger(`Target file was not found: ${file.path}`, LOG_LEVEL_INFO); - // return; - // } - // if (file.stat.mtime == last) { - // Logger(`File has been already scanned on ${queue.type}, skip: ${file.path}`, LOG_LEVEL_VERBOSE); - // return; - // } - - // // const cache = queue.args.cache; - // if (queue.type == "CREATE" || queue.type == "CHANGED") { - // eventHub.emitEvent("event-file-changed", { file: targetFile, automated: true }); - // // fireAndForget(() => this.addOnObsidianUI.checkAndApplySettingFromMarkdown(queue.args.file.path, true)); - // const keyD1 = `file-last-proc-DELETED-${file.path}`; - // await this.core.kvDB.set(keyD1, mtime); - // if (!await this.core.updateIntoDB(targetFile, undefined)) { - // Logger(`STORAGE -> DB: failed, cancel the relative operations: ${targetFile.path}`, LOG_LEVEL_INFO); - // // cancel running queues and remove one of atomic operation - // this.cancelRelativeEvent(queue); - // return; - // } - // } - // if (queue.type == "RENAME") { - // // Obsolete , can be called? - // await this.renameVaultItem(targetFile, queue.args.oldPath); - // } - // } - // await this.core.kvDB.set(key, mtime); } } }); } - // async renameVaultItem(file: UXFileInfoStub, oldFile: any, cache?: CacheData): Promise { - // Logger(`${oldFile} renamed to ${file.path}`, LOG_LEVEL_VERBOSE); - // if (!file.isFolder) { - // try { - // // Logger(`RENAMING.. 
${file.path} into db`); - // if (await this.core.updateIntoDB(file, cache)) { - // // Logger(`deleted ${oldFile} from db`); - // await this.core.deleteFromDBbyPath(oldFile); - // } else { - // Logger(`Could not save new file: ${file.path} `, LOG_LEVEL_NOTICE); - // } - // } catch (ex) { - // Logger(ex); - // } - // } - // } + cancelRelativeEvent(item: FileEventItem): void { this.cancelQueue(item.key); } diff --git a/src/modules/essentialObsidian/ModuleObsidianAPI.ts b/src/modules/essentialObsidian/ModuleObsidianAPI.ts index e3388aa..2d5910d 100644 --- a/src/modules/essentialObsidian/ModuleObsidianAPI.ts +++ b/src/modules/essentialObsidian/ModuleObsidianAPI.ts @@ -115,19 +115,8 @@ export class ModuleObsidianAPI extends AbstractObsidianModule implements IObsidi } } - // -old implementation - try { - // const orgHeaders = opts?.headers; - // const newHeaders = new Headers(); - // newHeaders.append("authorization", authHeader); - // if (orgHeaders && typeof orgHeaders.forEach !== "string") { - // const items = Object.entries(orgHeaders); - // items.forEach(([key, value]) => newHeaders.append(key, value)); - // } - // newHeaders.append("ngrok-skip-browser-warning", "123"); - // opts!.headers = newHeaders; - DEV: { + if (this.settings.enableDebugTools) { // Issue #407 (opts!.headers as Headers).append("ngrok-skip-browser-warning", "123"); } diff --git a/src/modules/features/ModuleLog.ts b/src/modules/features/ModuleLog.ts index c25beeb..17a601f 100644 --- a/src/modules/features/ModuleLog.ts +++ b/src/modules/features/ModuleLog.ts @@ -212,13 +212,14 @@ export class ModuleLog extends AbstractObsidianModule implements IObsidianModule const { message, status } = this.statusBarLabels.value; // const recent = logMessages.value; const newMsg = message; - const newLog = this.settings?.showOnlyIconsOnEditor ? "" : status; + let newLog = this.settings?.showOnlyIconsOnEditor ? 
"" : status; + const moduleTagEnd = newLog.indexOf(`]\u{200A}`); + if (moduleTagEnd != -1) { + newLog = newLog.substring(moduleTagEnd + 2); + } this.statusBar?.setText(newMsg.split("\n")[0]); if (this.settings?.showStatusOnEditor && this.statusDiv) { - // const root = activeDocument.documentElement; - // root.style.setProperty("--sls-log-text", "'" + (newMsg + "\\A " + newLog) + "'"); - // this.statusDiv.innerText = newMsg + "\\A " + newLog; if (this.settings.showLongerLogInsideEditor) { const now = new Date().getTime(); this.logLines = this.logLines.filter(e => e.ttl > now); diff --git a/tsconfig.json b/tsconfig.json index 0433089..e0da357 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -19,5 +19,5 @@ "strictFunctionTypes": true }, "include": ["**/*.ts"], - "exclude": ["pouchdb-browser-webpack", "utils", "src/modules/coreObsidian/devUtil/tests.ts", "src/lib/src/API/**"] + "exclude": ["pouchdb-browser-webpack", "utils"] } diff --git a/updates.md b/updates.md index b0bd4b5..9323acb 100644 --- a/updates.md +++ b/updates.md @@ -1,97 +1,42 @@ -### 0.23.0 -Incredibly new features! +## 0.24.0 RC Release Note -Now, we can use object storage (MinIO, S3, R2 or anything you like) for synchronising! Moreover, despite that, we can use all the features as if we were using CouchDB. -Note: As this is a pretty experimental feature, hence we have some limitations. -- This is built on the append-only architecture. It will not shrink used storage if we do not perform a rebuild. -- A bit fragile. However, our version x.yy.0 is always so. -- When the first synchronisation, the entire history to date is transferred. For this reason, it is preferable to do this under the WiFi network. -- Do not worry, from the second synchronisation, we always transfer only differences. +**Note:** This will be rewritten with the stable release. I confess, before you take the time, this is quite long. 
-I hope this feature empowers users to maintain independence and self-host their data, offering an alternative for those who prefer to manage their own storage solutions and avoid being stuck on the right side of a sudden change in business model. +Over the past three years since the inception of the plugin, various features have been implemented to address diverse user needs. This is so honourable and I am grateful for your years of support. +However, this process has resulted in a codebase that has become increasingly disorganised, with features becoming entangled. -Of course, I use Self-hosted MinIO for testing and recommend this. It is for the same reason as using CouchDB. -- open, controllable, auditable and indeed already audited by numerous eyes. +Consequently, this has led to a situation where bugs can go unnoticed or resolving one issue may inadvertently introduce another. -Let me write one more acknowledgement. +In 0.24.0, I reorganised the previously disjointed main codebase into clearly defined modules. Although I anticipated that the overall volume of code would not increase, I discovered that it has, in fact, expanded. While the complexity may still be considerable, the refactoring has enhanced clarity regarding the current structure of the code. (The next focus may involve a review of dependencies). -I have a lot of respect for that plugin, even though it is sometimes treated as if it is a competitor, remotely-save. I think it is a great architecture that embodies a different approach to my approach of recreating history. This time, with all due respect, I have used some of its code as a reference. -Hooray for open source, and generous licences, and the sharing of knowledge by experts. +Throughout this process, a significant number of bugs have been resolved. And it may be worth mentioning that these bugs may have given rise to other bugs. I kindly request that you verify whether your issues have been addressed. 
At least conflict resolution and related issues have improved significantly. -#### Version history -- 0.23.23: - - Refined: - - Setting dialogue very slightly refined. - - The hodgepodge inside the `Hatch` pane has been sorted into more explicit categorised panes. - - Now we have new panes for: - - `Selector` - - `Advanced` - - `Power users` - - `Patches (Edge case)` - - Applying the settings will now be more informative. - - The header bar will be shown for applying the settings which needs a database rebuild. - - Applying methods are now more clearly navigated. - - Definitely, drastic change. I hope this will be more user-friendly. However, if you notice any issues, please let me know. I hope that nothing missed. - - New features: - - Word-segmented chunk building on users language - - Chunks can now be built with word-segmented data, enhancing efficiency for markdown files which contains the multiple sentences in a single line. - - This feature is enabled by default through `Use Segmented-splitter`. - - (Default: Disabled, Please be relived, I have learnt). - - Fixed: - - Sending chunks on `Send chunk in bulk` are now buffered to avoid the out-of-memory error. - - `Send chunk in bulk` is back to default disabled. (Sorry, not applied to the migrated users; I did not think we should deepen the wound any further "automatically"). - - Merging conflicts of JSON files are now works fine even if it contains `null`. - - Development: - - Implemented the logic for automatically generating the stub of document for the setting dialogue. -- 0.23.22: - - Fixed: - - Case-insensitive file handling - - Full-lower-case files are no longer created during database checking. - - Bulk chunk transfer - - The default value will automatically adjust to an acceptable size when using IBM Cloudant. -- 0.23.21: - - New Features: - - Case-insensitive file handling - - Files can now be handled case-insensitively. 
- - This behaviour can be modified in the settings under `Handle files as Case-Sensitive` (Default: Prompt, Enabled for previous behaviour). - - Improved chunk revision fixing - - Revisions for chunks can now be fixed for faster chunk creation. - - This can be adjusted in the settings under `Compute revisions for chunks` (Default: Prompt, Enabled for previous behaviour). - - Bulk chunk transfer - - Chunks can now be transferred in bulk during uploads. - - This feature is enabled by default through `Send chunks in bulk`. - - Creation of missing chunks without - - Missing chunks can be created without storing notes, enhancing efficiency for first synchronisation or after prolonged periods without synchronisation. - - Improvements: - - File status scanning on the startup - - Quite significant performance improvements. - - No more missing scans of some files. - - Status in editor enhancements - - Significant performance improvements in the status display within the editor. - - Notifications for files that will not be synchronised will now be properly communicated. - - Encryption and Decryption - - These processes are now performed in background threads to ensure fast and stable transfers. - - Verify and repair all files - - Got faster through parallel checking. - - Migration on update - - Migration messages and wizards have become more helpful. - - Behavioural changes: - - Chunk size adjustments - - Large chunks will no longer be created for older, stable files, addressing storage consumption issues. - - Flag file automation - - Confirmation will be shown and we can cancel it. - - Fixed: - - Database File Scanning - - All files in the database will now be enumerated correctly. - - Miscellaneous - - Dependency updated. - - Now, tree shaking is left to terser, from esbuild. -- 0.23.20: - - Fixed: - - Customisation Sync now checks the difference while storing or applying the configuration. - - No longer storing the same configuration multiple times. 
- - Time difference in the dialogue has been fixed. - - Remote Storage Limit Notification dialogue has been fixed, now the chosen value is saved. - - Improved: - - The Enlarging button on the enlarging threshold dialogue now displays the new value. +It is also the first step towards a fully-fledged-fancy LiveSync, not just a plug-in from Obsidian. Of course, it will still be a plug-in as a first class and foremost, but this development marks a significant step towards the self-hosting concept. -Older notes is in [updates_old.md](https://github.com/vrtmrz/obsidian-livesync/blob/main/updates_old.md). \ No newline at end of file +This dev release is very close to the beta version that I had previously indicated would not be released. As a result, I have faced challenges in maintaining the main branch while working on this dev release. Regrettably, I have not been able to make any commits to the main branch in the last three weeks. Thus, the dev branch will remain reserved for major changes only. + +The Release Candidate will be available for a few days and will only be officially released once users, including myself, have confirmed that there are no issues. + +Finally, I would like to once again express my respect and gratitude to all of you. Thank you for your interest in the development version. Your contributions and dedication through testing are greatly appreciated. + +Thank you, and I hope your troubles will be resolved! + +--- + +## 0.24.0.dev-rc2 + +### Fixed + +- Some status icons are now shown correctly. + +## 0.24.0-rc1 + +### Fixed + +- A fair number of bugs have been fixed. + +### Tidying + +- The codebase has been reorganised into clearly defined modules. + +Older notes are in [updates_old.md](https://github.com/vrtmrz/obsidian-livesync/blob/main/updates_old.md). 
diff --git a/updates_old.md b/updates_old.md index 91e9ea0..8961b01 100644 --- a/updates_old.md +++ b/updates_old.md @@ -18,6 +18,81 @@ I have a lot of respect for that plugin, even though it is sometimes treated as Hooray for open source, and generous licences, and the sharing of knowledge by experts. #### Version history +- 0.23.23: + - Refined: + - Setting dialogue very slightly refined. + - The hodgepodge inside the `Hatch` pane has been sorted into more explicit categorised panes. + - Now we have new panes for: + - `Selector` + - `Advanced` + - `Power users` + - `Patches (Edge case)` + - Applying the settings will now be more informative. + - The header bar will be shown for applying the settings which needs a database rebuild. + - Applying methods are now more clearly navigated. + - Definitely, drastic change. I hope this will be more user-friendly. However, if you notice any issues, please let me know. I hope that nothing missed. + - New features: + - Word-segmented chunk building on users language + - Chunks can now be built with word-segmented data, enhancing efficiency for markdown files which contains the multiple sentences in a single line. + - This feature is enabled by default through `Use Segmented-splitter`. + - (Default: Disabled, Please be relived, I have learnt). + - Fixed: + - Sending chunks on `Send chunk in bulk` are now buffered to avoid the out-of-memory error. + - `Send chunk in bulk` is back to default disabled. (Sorry, not applied to the migrated users; I did not think we should deepen the wound any further "automatically"). + - Merging conflicts of JSON files are now works fine even if it contains `null`. + - Development: + - Implemented the logic for automatically generating the stub of document for the setting dialogue. +- 0.23.22: + - Fixed: + - Case-insensitive file handling + - Full-lower-case files are no longer created during database checking. 
+ - Bulk chunk transfer + - The default value will automatically adjust to an acceptable size when using IBM Cloudant. +- 0.23.21: + - New Features: + - Case-insensitive file handling + - Files can now be handled case-insensitively. + - This behaviour can be modified in the settings under `Handle files as Case-Sensitive` (Default: Prompt, Enabled for previous behaviour). + - Improved chunk revision fixing + - Revisions for chunks can now be fixed for faster chunk creation. + - This can be adjusted in the settings under `Compute revisions for chunks` (Default: Prompt, Enabled for previous behaviour). + - Bulk chunk transfer + - Chunks can now be transferred in bulk during uploads. + - This feature is enabled by default through `Send chunks in bulk`. + - Creation of missing chunks without + - Missing chunks can be created without storing notes, enhancing efficiency for first synchronisation or after prolonged periods without synchronisation. + - Improvements: + - File status scanning on the startup + - Quite significant performance improvements. + - No more missing scans of some files. + - Status in editor enhancements + - Significant performance improvements in the status display within the editor. + - Notifications for files that will not be synchronised will now be properly communicated. + - Encryption and Decryption + - These processes are now performed in background threads to ensure fast and stable transfers. + - Verify and repair all files + - Got faster through parallel checking. + - Migration on update + - Migration messages and wizards have become more helpful. + - Behavioural changes: + - Chunk size adjustments + - Large chunks will no longer be created for older, stable files, addressing storage consumption issues. + - Flag file automation + - Confirmation will be shown and we can cancel it. + - Fixed: + - Database File Scanning + - All files in the database will now be enumerated correctly. + - Miscellaneous + - Dependency updated. 
+ - Now, tree shaking is left to terser, from esbuild. +- 0.23.20: + - Fixed: + - Customisation Sync now checks the difference while storing or applying the configuration. + - No longer storing the same configuration multiple times. + - Time difference in the dialogue has been fixed. + - Remote Storage Limit Notification dialogue has been fixed, now the chosen value is saved. + - Improved: + - The Enlarging button on the enlarging threshold dialogue now displays the new value. - 0.23.19: - Not released. - 0.23.18: