Compare commits: android-v3...android-v3
joplin (mirror of https://github.com/laurent22/joplin.git)
12 commits:
2fdbb22481
c5bb88ddf4
5d7c78c361
808eb7d49a
2142373fff
20f7f37b49
04fc634092
d40c9d3ff9
224b4f619a
88d1d4b7d1
6a22ffbcb1
d735cf64e0
.github/ISSUE_TEMPLATE/config.yml (vendored, 4 changed lines)
@@ -1,8 +1,8 @@
-blank_issues_enabled: false
+blank_issues_enabled: true
 contact_links:
   - name: Feature Requests
     url: https://discourse.joplinapp.org/c/features/
     about: Discuss ideas for new features or changes
   - name: Support
     url: https://discourse.joplinapp.org/c/support/
     about: Please ask for help here
@@ -50,9 +50,10 @@ Please see the [donation page](https://github.com/laurent22/joplin/blob/dev/read
 Name | Description
 --- | ---
 [Support Forum](https://discourse.joplinapp.org/) | This is the main place for general discussion about Joplin, user support, software development questions, and to discuss new features. Also where the latest beta versions are released and discussed.
-[Patreon page](https://www.patreon.com/joplin) | The latest news is often posted there
 [Bluesky feed](https://bsky.app/profile/joplinapp.bsky.social) | Follow us on Bluesky
 [Mastodon feed](https://mastodon.social/@joplinapp) | Follow us on Mastodon
+[Patreon page](https://www.patreon.com/joplin) | The latest news is often posted there
+[YouTube](https://www.youtube.com/@joplinapp) | Discover information and tutorials on how to use the apps
 [Discord server](https://discord.gg/VSj7AFHvpq) | Our chat server
 [LinkedIn](https://www.linkedin.com/company/joplin) | Our LinkedIn page
 [Lemmy Community](https://sopuli.xyz/c/joplinapp) | Also a good place to get help
@@ -20,7 +20,7 @@ import handleCustomProtocols, { CustomProtocolHandler } from './utils/customProt
 import { clearTimeout, setTimeout } from 'timers';
 import { resolve } from 'path';
 import { defaultWindowId } from '@joplin/lib/reducer';
-import { msleep } from '@joplin/utils/time';
+import { msleep, Second } from '@joplin/utils/time';
 
 interface RendererProcessQuitReply {
 	canClose: boolean;
@@ -674,6 +674,7 @@ export default class ElectronAppWrapper {
 		await this.sendCrossAppIpcMessage(message);
 
 		this.quit();
+		if (this.env() === 'dev') console.warn(`Closing the application because another instance is already running, or the previous instance was force-quit within the last ${Math.round(this.profileLocker_.options.interval / Second)} seconds.`);
 		return true;
 	}
@@ -83,6 +83,7 @@ interface Props {
 	notesColumns: NoteListColumns;
 	showInvalidJoplinCloudCredential: boolean;
 	toast: Toast;
+	shouldSwitchToAppleSiliconVersion: boolean;
 }
 
 interface ShareFolderDialogOptions {
@@ -492,6 +493,11 @@ class MainScreenComponent extends React.Component<Props, State> {
 			});
 		};
 
+		const onDownloadAppleSiliconVersion = () => {
+			// The website should redirect to the correct version
+			shim.openUrl('https://joplinapp.org/download/');
+		};
+
 		const onRestartAndUpgrade = async () => {
 			Setting.setValue('sync.upgradeState', Setting.SYNC_UPGRADE_STATE_MUST_DO);
 			await Setting.saveAll();
@@ -574,6 +580,12 @@ class MainScreenComponent extends React.Component<Props, State> {
 			);
 		} else if (this.props.mustUpgradeAppMessage) {
 			msg = this.renderNotificationMessage(this.props.mustUpgradeAppMessage);
+		} else if (this.props.shouldSwitchToAppleSiliconVersion) {
+			msg = this.renderNotificationMessage(
+				_('You are running the Intel version of Joplin on an Apple Silicon processor. Download the Apple Silicon one for better performance.'),
+				_('Download it now'),
+				onDownloadAppleSiliconVersion,
+			);
 		} else if (this.props.showInvalidJoplinCloudCredential) {
 			msg = this.renderNotificationMessage(
 				_('Your Joplin Cloud credentials are invalid, please login.'),
@@ -611,7 +623,8 @@ class MainScreenComponent extends React.Component<Props, State> {
 			this.showShareInvitationNotification(props) ||
 			this.props.needApiAuth ||
 			!!this.props.mustUpgradeAppMessage ||
-			props.showInvalidJoplinCloudCredential;
+			props.showInvalidJoplinCloudCredential ||
+			props.shouldSwitchToAppleSiliconVersion;
 	}
 
 	// eslint-disable-next-line @typescript-eslint/no-explicit-any -- Old code before rule was applied
@@ -839,6 +852,7 @@ const mapStateToProps = (state: AppState) => {
 		notesColumns: validateColumns(state.settings['notes.columns']),
 		showInvalidJoplinCloudCredential: state.settings['sync.target'] === 10 && state.mustAuthenticate,
 		toast: state.toast,
+		shouldSwitchToAppleSiliconVersion: shim.isAppleSilicon() && process.arch !== 'arm64',
 	};
 };
@@ -86,8 +86,8 @@ android {
     applicationId "net.cozic.joplin"
     minSdkVersion rootProject.ext.minSdkVersion
     targetSdkVersion rootProject.ext.targetSdkVersion
-    versionCode 2097766
-    versionName "3.3.3"
+    versionCode 2097767
+    versionName "3.3.4"
     ndk {
         abiFilters "armeabi-v7a", "x86", "arm64-v8a", "x86_64"
     }
@@ -7,8 +7,8 @@
 #include "findLongestSilence.h"
 #include "androidUtil.h"
 
-WhisperSession::WhisperSession(const std::string& modelPath, std::string lang, std::string prompt)
-	: lang_ {std::move(lang)}, prompt_ {std::move(prompt)} {
+WhisperSession::WhisperSession(const std::string& modelPath, std::string lang, std::string prompt, bool shortAudioContext)
+	: lang_ {std::move(lang)}, prompt_ {std::move(prompt)}, shortAudioContext_ {shortAudioContext} {
 	whisper_context_params contextParams = whisper_context_default_params();
 
 	// Lifetime(pModelPath): Whisper.cpp creates a copy of pModelPath and stores it in a std::string.
@@ -34,9 +34,9 @@ WhisperSession::buildWhisperParams_() {
 	// WHISPER_SAMPLING_BEAM_SEARCH is an alternative to greedy:
 	// params.beam_search = { .beam_size = 2 };
 	params.print_realtime = false;
 	// Disable timestamps: They make creating custom Whisper models more difficult:
 	params.print_timestamps = false;
 	params.no_timestamps = true;
 
 	params.print_progress = false;
 	params.translate = false;
@@ -54,6 +54,7 @@ WhisperSession::buildWhisperParams_() {
 	params.initial_prompt = prompt_.c_str();
 	params.prompt_tokens = nullptr;
 	params.prompt_n_tokens = 0;
+	params.audio_ctx = 0;
 
 	// Lifetime: lifetime(params) < lifetime(lang_) = lifetime(this).
 	params.language = lang_.c_str();
@@ -68,7 +69,26 @@ WhisperSession::transcribe_(const std::vector<float>& audio, size_t transcribeCo
 		return "";
 	}
 
+	float seconds = static_cast<float>(audio.size()) / WHISPER_SAMPLE_RATE;
+	if (seconds > 30.0f) {
+		LOGW("Warning: Audio is longer than 30 seconds. Not all audio will be transcribed");
+	}
+
 	whisper_full_params params = buildWhisperParams_();
+
+	// If supported by the model, allow shortening the transcription. This can significantly
+	// improve performance, but requires a fine-tuned model.
+	// See https://github.com/futo-org/whisper-acft
+	if (this->shortAudioContext_) {
+		// audio_ctx: 1500 every 30 seconds (50 units in one second).
+		// See https://github.com/futo-org/whisper-acft/issues/6
+		float padding = 64.0f;
+		params.audio_ctx = static_cast<int>(seconds * (1500.0f / 30.0f) + padding);
+
+		if (params.audio_ctx > 1500) {
+			params.audio_ctx = 1500;
+		}
+	}
 	whisper_reset_timings(pContext_);
 
 	transcribeCount = std::min(audio.size(), transcribeCount);
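The shortAudioContext_ branch above is the performance-sensitive piece of this change: it scales Whisper's encoder context with the actual audio length instead of always paying for the full 30-second window. A rough standalone sketch of the same arithmetic (the 1500/30 ratio and the 64-frame padding come from the hunk above; the durations and the helper name are illustrative):

#include <algorithm>
#include <cstdio>

// Sketch of the audio_ctx computation used when short audio context is enabled:
// 1500 encoder frames cover 30 seconds of audio (50 frames per second), plus
// 64 frames of padding, clamped to the 1500-frame maximum.
static int shortAudioContextSize(float seconds) {
	const float padding = 64.0f;
	int audioCtx = static_cast<int>(seconds * (1500.0f / 30.0f) + padding);
	return std::min(audioCtx, 1500);
}

int main() {
	std::printf("5s  -> audio_ctx = %d\n", shortAudioContextSize(5.0f));  // 5 * 50 + 64 = 314
	std::printf("29s -> audio_ctx = %d\n", shortAudioContextSize(29.0f)); // 1514, clamped to 1500
	return 0;
}

At five seconds of audio this cuts the encoder context to roughly a fifth of its maximum, which is where the speed-up on fine-tuned (whisper-acft style) models comes from.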
@@ -104,51 +124,130 @@ WhisperSession::splitAndTranscribeBefore_(int transcribeUpTo, int trimTo) {
 	return result;
 }
 
-std::string
-WhisperSession::transcribeNextChunk(const float *pAudio, int sizeAudio) {
-	std::string finalizedContent;
+bool WhisperSession::isBufferSilent_() {
+	int toleranceSamples = WHISPER_SAMPLE_RATE / 8; // 0.125s
+	auto silence = findLongestSilence(
+		audioBuffer_,
+		LongestSilenceOptions {
+			.sampleRate = WHISPER_SAMPLE_RATE,
+			.minSilenceLengthSeconds = 0.0f,
+			.maximumSilenceStartSamples = toleranceSamples,
+			.returnFirstMatch = true
+		}
+	);
+	return silence.end >= audioBuffer_.size() - toleranceSamples;
+}
 
-	// Update the local audio buffer
-	for (int i = 0; i < sizeAudio; i++) {
-		audioBuffer_.push_back(pAudio[i]);
-	}
+std::string
+WhisperSession::transcribeNextChunkNoPreview_() {
+	std::stringstream result;
+
+	// Handles a silence detected between (splitStart, splitEnd).
+	auto splitAndProcess = [&] (int splitStart, int splitEnd) {
+		int tolerance = WHISPER_SAMPLE_RATE / 20; // 0.05s
+		bool isCompletelySilent = splitStart < tolerance && splitEnd > audioBuffer_.size() - tolerance;
+		LOGD("WhisperSession: Found silence range from %.2f -> %.2f", splitStart / (float) WHISPER_SAMPLE_RATE, splitEnd / (float) WHISPER_SAMPLE_RATE);
+
+		if (isCompletelySilent) {
+			audioBuffer_.clear();
+			return false;
+		} else if (splitEnd > tolerance) { // Anything to transcribe?
+			result << splitAndTranscribeBefore_(splitStart, splitEnd) << "\n\n";
+			return true;
+		}
+
+		return false;
+	};
+
+	int maximumSamples = WHISPER_SAMPLE_RATE * 25;
+
+	// Handle paragraph breaks indicated by long pauses
+	while (audioBuffer_.size() > WHISPER_SAMPLE_RATE * 3) {
+		LOGD("WhisperSession: Checking for a longer pause.");
+		// Allow brief pauses to create new paragraphs:
+		float minSilenceSeconds = 1.5f;
+		auto splitPoint = findLongestSilence(
+			audioBuffer_,
+			LongestSilenceOptions {
+				.sampleRate = WHISPER_SAMPLE_RATE,
+				.minSilenceLengthSeconds = minSilenceSeconds,
+				.maximumSilenceStartSamples = maximumSamples,
+				.returnFirstMatch = true
+			}
+		);
+		if (!splitPoint.isValid) {
+			break;
+		}
+		if (!splitAndProcess(splitPoint.start, splitPoint.end)) {
+			break;
+		}
+	}
 
-	// Does the audio buffer need to be split somewhere?
-	int maximumSamples = WHISPER_SAMPLE_RATE * 25;
+	// If there are no long pauses, force a paragraph break somewhere
 	if (audioBuffer_.size() >= maximumSamples) {
+		LOGD("WhisperSession: Allowing shorter pauses to break.");
 		float minSilenceSeconds = 0.3f;
 		auto silenceRange = findLongestSilence(
-			audioBuffer_, WHISPER_SAMPLE_RATE, minSilenceSeconds, maximumSamples
+			audioBuffer_,
+			LongestSilenceOptions {
+				.sampleRate = WHISPER_SAMPLE_RATE,
+				.minSilenceLengthSeconds = minSilenceSeconds,
+				.maximumSilenceStartSamples = maximumSamples,
+				.returnFirstMatch = false
+			}
 		);
 
 		// In this case, the audio is long enough that it needs to be split somewhere. If there's
 		// no suitable pause available, default to splitting in the middle.
 		int halfBufferSize = audioBuffer_.size() / 2;
-		int transcribeTo = silenceRange.isValid ? silenceRange.start : halfBufferSize;
-		int trimTo = silenceRange.isValid ? silenceRange.end : halfBufferSize;
-
-		finalizedContent = splitAndTranscribeBefore_(transcribeTo, trimTo);
-	} else if (audioBuffer_.size() > WHISPER_SAMPLE_RATE * 3) {
-		// Allow brief pauses to create new paragraphs:
-		float minSilenceSeconds = 2.0f;
-		auto splitPoint = findLongestSilence(
-			audioBuffer_, WHISPER_SAMPLE_RATE, minSilenceSeconds, maximumSamples
-		);
-		if (splitPoint.isValid) {
-			int tolerance = WHISPER_SAMPLE_RATE / 20; // 0.05s
-			bool isCompletelySilent = splitPoint.start < tolerance && splitPoint.end > audioBuffer_.size() - tolerance;
-			if (isCompletelySilent) {
-				audioBuffer_.clear();
-			} else {
-				finalizedContent = splitAndTranscribeBefore_(splitPoint.start, splitPoint.end);
-			}
-		}
+		int splitStart = silenceRange.isValid ? silenceRange.start : halfBufferSize;
+		int splitEnd = silenceRange.isValid ? silenceRange.end : halfBufferSize;
+		splitAndProcess(splitStart, splitEnd);
 	}
 
+	return result.str();
+}
+
+void WhisperSession::addAudio(const float *pAudio, int sizeAudio) {
+	// Update the local audio buffer
+	for (int i = 0; i < sizeAudio; i++) {
+		audioBuffer_.push_back(pAudio[i]);
+	}
+}
+
+std::string WhisperSession::transcribeNextChunk() {
+	std::string finalizedContent = transcribeNextChunkNoPreview_();
 	previewText_ = transcribe_(audioBuffer_, audioBuffer_.size());
 	return finalizedContent;
 }
+
+std::string WhisperSession::transcribeAll() {
+	if (isBufferSilent_()) {
+		return "";
+	}
+
+	std::stringstream result;
+
+	std::string transcribed;
+	auto update_transcribed = [&] {
+		transcribed = transcribeNextChunkNoPreview_();
+		return !transcribed.empty();
+	};
+	while (update_transcribed()) {
+		result << transcribed << "\n\n";
+	}
+
+	// Transcribe content considered by transcribeNextChunk as partial:
+	if (!isBufferSilent_()) {
+		result << transcribe_(audioBuffer_, audioBuffer_.size());
+	}
+	audioBuffer_.clear();
+
+	previewText_ = "";
+	return result.str();
+}
+
+std::string WhisperSession::getPreview() {
+	return previewText_;
+}
@@ -5,9 +5,15 @@
 
 class WhisperSession {
 public:
-	WhisperSession(const std::string& modelPath, std::string lang, std::string prompt);
+	WhisperSession(const std::string& modelPath, std::string lang, std::string prompt, bool shortAudioContext);
 	~WhisperSession();
-	std::string transcribeNextChunk(const float *pAudio, int sizeAudio);
+	// Adds to the buffer
+	void addAudio(const float *pAudio, int sizeAudio);
+	// Returns the next finalized slice of audio (if any) and updates the preview.
+	std::string transcribeNextChunk();
+	// Transcribes all buffered audio data that hasn't been finalized yet
+	std::string transcribeAll();
+	// Returns the transcription of any unfinalized audio
+	std::string getPreview();
 
 private:
@@ -17,10 +23,18 @@ private:
 	whisper_full_params buildWhisperParams_();
 	std::string transcribe_(const std::vector<float>& audio, size_t samplesToTranscribe);
 	std::string splitAndTranscribeBefore_(int transcribeUpTo, int trimTo);
+	// Like transcribeNextChunk, but does not update the preview state
+	// and does not add a new chunk to the buffer.
+	// Since updating the preview state can be slow, this may be preferred
+	// for internal operations where the preview does not need to be kept up-to-date.
+	std::string transcribeNextChunkNoPreview_();
+
+	bool isBufferSilent_();
 
 	whisper_context *pContext_;
 	const std::string lang_;
 	const std::string prompt_;
+	const bool shortAudioContext_;
 
 	std::vector<float> audioBuffer_;
 };
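Together with the WhisperSession.cpp changes above, this header replaces the single blocking transcribeNextChunk(pAudio, sizeAudio) call with a buffered API: feed audio with addAudio, pull finalized paragraphs with transcribeNextChunk, flush everything with transcribeAll, and poll getPreview for the unfinalized tail. A hedged sketch of a caller driving that life cycle (the include name, construction details and pullRecordedSamples are assumptions for illustration, not part of the diff):

#include <string>
#include <vector>
#include "WhisperSession.h" // assumed header name

// Hypothetical capture source; stands in for the Android recorder.
std::vector<float> pullRecordedSamples();

std::string runSession(WhisperSession& session) {
	std::string finalized;
	for (int i = 0; i < 10; i++) { // e.g. poll the recorder ten times
		std::vector<float> chunk = pullRecordedSamples();
		session.addAudio(chunk.data(), static_cast<int>(chunk.size()));
		// Finalized paragraphs come back incrementally...
		finalized += session.transcribeNextChunk();
		// ...while getPreview() exposes the not-yet-finalized tail for the UI.
		std::string preview = session.getPreview();
	}
	// On stop, flush whatever is still buffered.
	finalized += session.transcribeAll();
	return finalized;
}

Splitting the API this way lets the JNI layer add audio cheaply on every callback and run the expensive transcription step on its own schedule.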
@@ -19,14 +19,18 @@ static void highpass(std::vector<float>& data, int sampleRate) {
 
 SilenceRange findLongestSilence(
 	const std::vector<float>& audioData,
-	int sampleRate,
-	float minSilenceLengthSeconds,
-	int maxSilencePosition
+	LongestSilenceOptions options
 ) {
+	// Options variables
+	int sampleRate = options.sampleRate;
+	int maxSilencePosition = options.maximumSilenceStartSamples;
+	float minSilenceLengthSeconds = options.minSilenceLengthSeconds;
+	bool returnFirstMatch = options.returnFirstMatch;
+
+	// State
 	int bestCandidateLength = 0;
 	int bestCandidateStart = -1;
 	int bestCandidateEnd = -1;
 
 	int currentCandidateStart = -1;
 
 	std::vector<float> processedAudio { audioData };
@@ -35,7 +39,7 @@ SilenceRange findLongestSilence(
 	// Break into windows of size `windowSize`:
 	int windowSize = 256;
 	int windowsPerSecond = sampleRate / windowSize;
-	int quietWindows = 0;
+	int quietWindows = 0; // Number of relatively quiet windows encountered
 
 	// Finishes the current candidate for longest silence
 	auto finalizeCandidate = [&] (int currentOffset) {
@@ -86,12 +90,20 @@ SilenceRange findLongestSilence(
 		}
 
 		int minQuietWindows = static_cast<int>(windowsPerSecond * minSilenceLengthSeconds);
-		if (quietWindows >= minQuietWindows && currentCandidateStart == -1) {
-			// Found a candidate. Start it.
-			currentCandidateStart = windowOffset;
-		} else if (quietWindows == 0) {
+		if (quietWindows >= minQuietWindows && currentCandidateStart == -1) { // Found silence
+			// Ignore the first window, which probably contains some of the start of the audio
+			// and the most recent window, which came after windowOffset.
+			int windowsToIgnore = 2;
+			int estimatedQuietSamples = std::max(0, quietWindows - windowsToIgnore) * windowSize;
+			currentCandidateStart = windowOffset - estimatedQuietSamples;
+		} else if (quietWindows == 0) { // Silence ended
 			// Ended a candidate. Is it better than the best?
 			finalizeCandidate(windowOffset);
+
+			// Search for more candidates or return now?
+			if (returnFirstMatch && bestCandidateLength > 0) {
+				break;
+			}
 		}
 	}
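The new candidate-start estimate works backwards from the current offset in whole windows. As a worked example of that arithmetic, assuming the 16 kHz sample rate and 256-sample windows used above (the offsets are illustrative):

#include <algorithm>
#include <cstdio>

int main() {
	const int sampleRate = 16000;
	const int windowSize = 256;
	const int windowsPerSecond = sampleRate / windowSize; // 62 windows per second

	// A 1.5s minimum silence needs static_cast<int>(62 * 1.5f) = 93 quiet windows:
	const float minSilenceLengthSeconds = 1.5f;
	const int minQuietWindows = static_cast<int>(windowsPerSecond * minSilenceLengthSeconds);

	// Back-estimate where the silence started, discounting the first and last window:
	const int windowOffset = 200 * windowSize; // current position in samples (illustrative)
	const int quietWindows = 95;
	const int windowsToIgnore = 2;
	const int estimatedQuietSamples = std::max(0, quietWindows - windowsToIgnore) * windowSize;
	std::printf("minQuietWindows=%d, candidateStart=%d\n",
		minQuietWindows, windowOffset - estimatedQuietSamples);
	return 0;
}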
@@ -10,15 +10,24 @@ struct SilenceRange {
 	int end;
 };
 
+struct LongestSilenceOptions {
+	int sampleRate;
+
+	// Minimum length of a silence range (e.g. 3.0 seconds)
+	float minSilenceLengthSeconds;
+
+	// The maximum position for a silence range to start (ignore
+	// all silences after this position).
+	int maximumSilenceStartSamples;
+
+	// Return the first silence satisfying the conditions instead of
+	// the longest.
+	bool returnFirstMatch;
+};
+
 SilenceRange findLongestSilence(
 	const std::vector<float>& audioData,
-	int sampleRate,
-
-	// Minimum length of silence in seconds
-	float minSilenceLengthSeconds,
-
-	// Doesn't check for silence at a position greater than maximumSilenceStart
-	int maximumSilenceStart
+	LongestSilenceOptions options
 );
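Packing the four positional parameters into LongestSilenceOptions makes call sites self-describing and lets the new returnFirstMatch flag ride along without yet another positional argument. A small sketch of the new call shape (the wrapper function and its values are illustrative; findLongestSilence and the struct are the ones declared above):

#include <vector>
#include "findLongestSilence.h"

// Finds the first pause of at least 1.5s that starts within maxSamples.
// Before this change the call was positional and easy to transpose:
//   findLongestSilence(audioData, 16000, 1.5f, maxSamples);
SilenceRange firstLongPause(const std::vector<float>& audioData, int maxSamples) {
	// C++20 designated initializers name each option at the call site:
	return findLongestSilence(
		audioData,
		LongestSilenceOptions {
			.sampleRate = 16000,
			.minSilenceLengthSeconds = 1.5f,
			.maximumSilenceStartSamples = maxSamples,
			.returnFirstMatch = true
		}
	);
}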
@@ -122,9 +122,12 @@ static float samplesToSeconds(int samples, int sampleRate) {
 static void expectNoSilence(const GeneratedAudio& audio, const std::string& testLabel) {
 	auto silence = findLongestSilence(
 		audio.data,
-		audio.sampleRate,
-		0.02f,
-		audio.sampleCount
+		LongestSilenceOptions {
+			.sampleRate = audio.sampleRate,
+			.minSilenceLengthSeconds = 0.02f,
+			.maximumSilenceStartSamples = audio.sampleCount,
+			.returnFirstMatch = false,
+		}
 	);
 	if (silence.isValid) {
 		std::stringstream errorBuilder;
@@ -141,9 +144,12 @@
 static void expectSilenceBetween(const GeneratedAudio& audio, float startTimeSeconds, float stopTimeSeconds, const std::string& testLabel) {
 	auto silenceResult = findLongestSilence(
 		audio.data,
-		audio.sampleRate,
-		0.02f,
-		audio.sampleCount
+		LongestSilenceOptions {
+			.sampleRate = audio.sampleRate,
+			.minSilenceLengthSeconds = 0.02f,
+			.maximumSilenceStartSamples = audio.sampleCount,
+			.returnFirstMatch = false,
+		}
 	);
 
 	if (!silenceResult.isValid) {
@@ -54,13 +54,14 @@ Java_net_cozic_joplin_audio_NativeWhisperLib_00024Companion_init(
 		jobject thiz,
 		jstring modelPath,
 		jstring language,
-		jstring prompt
+		jstring prompt,
+		jboolean useShortAudioContext
 ) {
 	whisper_log_set(log_android, nullptr);
 
 	try {
 		auto *pSession = new WhisperSession(
-			stringToCXX(env, modelPath), stringToCXX(env, language), stringToCXX(env, prompt)
+			stringToCXX(env, modelPath), stringToCXX(env, language), stringToCXX(env, prompt), useShortAudioContext
 		);
 		return (jlong) pSession;
 	} catch (const std::exception& exception) {
@@ -78,8 +79,8 @@ Java_net_cozic_joplin_audio_NativeWhisperLib_00024Companion_free(JNIEnv *env, jo
 }
 
 extern "C"
-JNIEXPORT jstring JNICALL
-Java_net_cozic_joplin_audio_NativeWhisperLib_00024Companion_fullTranscribe(JNIEnv *env,
+JNIEXPORT void JNICALL
+Java_net_cozic_joplin_audio_NativeWhisperLib_00024Companion_addAudio(JNIEnv *env,
 		jobject thiz,
 		jlong pointer,
 		jfloatArray audio_data) {
@@ -89,21 +90,55 @@ Java_net_cozic_joplin_audio_NativeWhisperLib_00024Companion_fullTranscribe(JNIEn
-	std::string result;
-
 	try {
-		LOGD("Starting Whisper, transcribe %d", lenAudioData);
-		result = pSession->transcribeNextChunk(pAudioData, lenAudioData);
-		auto preview = pSession->getPreview();
-		LOGD("Ran Whisper. Got %s (preview %s)", result.c_str(), preview.c_str());
+		pSession->addAudio(pAudioData, lenAudioData);
 	} catch (const std::exception& exception) {
-		LOGW("Failed to run whisper: %s", exception.what());
+		LOGW("Failed to add to audio buffer: %s", exception.what());
 		throwException(env, exception.what());
 	}
 
 	// JNI_ABORT: "free the buffer without copying back the possible changes", pass 0 to copy
 	// changes (there should be no changes)
 	env->ReleaseFloatArrayElements(audio_data, pAudioData, JNI_ABORT);
 }
 
 extern "C"
 JNIEXPORT jstring JNICALL
+Java_net_cozic_joplin_audio_NativeWhisperLib_00024Companion_transcribeNextChunk(JNIEnv *env,
+		jobject thiz,
+		jlong pointer) {
+	auto *pSession = reinterpret_cast<WhisperSession *> (pointer);
+	std::string result;
+
+	try {
+		result = pSession->transcribeNextChunk();
+	} catch (const std::exception& exception) {
+		LOGW("Failed to run whisper: %s", exception.what());
+		throwException(env, exception.what());
+		return nullptr;
+	}
+
+	return stringToJava(env, result);
+}
+
+extern "C"
+JNIEXPORT jstring JNICALL
+Java_net_cozic_joplin_audio_NativeWhisperLib_00024Companion_transcribeRemaining(JNIEnv *env,
+		jobject thiz,
+		jlong pointer) {
+	auto *pSession = reinterpret_cast<WhisperSession *> (pointer);
+	std::string result;
+
+	try {
+		result = pSession->transcribeAll();
+	} catch (const std::exception& exception) {
+		LOGW("Failed to run whisper: %s", exception.what());
+		throwException(env, exception.what());
+		return nullptr;
+	}
+
+	return stringToJava(env, result);
+}
+
+extern "C"
+JNIEXPORT jstring JNICALL
 Java_net_cozic_joplin_audio_NativeWhisperLib_00024Companion_getPreview(
@@ -122,4 +157,4 @@ Java_net_cozic_joplin_audio_NativeWhisperLib_00024Companion_runTests(JNIEnv *env
 		LOGW("Failed to run tests: %s", exception.what());
 		throwException(env, exception.what());
 	}
 }
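The JNI_ABORT comment above captures the one subtle point in these bindings: a jfloatArray borrowed with GetFloatArrayElements must always be released, and JNI_ABORT skips the copy-back step that read-only callers do not need. A minimal sketch of that borrow/release pattern (function and parameter names are illustrative):

#include <jni.h>

// Borrow the elements of a jfloatArray, read them, and release them without
// copying anything back (JNI_ABORT), since the buffer is never modified.
void readAudioSamples(JNIEnv *env, jfloatArray audioData) {
	jsize length = env->GetArrayLength(audioData);
	jfloat *pSamples = env->GetFloatArrayElements(audioData, nullptr);
	if (pSamples == nullptr) return; // allocation failed; a Java exception is pending

	// ... consume pSamples[0..length) here ...

	// JNI_ABORT frees the buffer without copying back changes; passing 0 would
	// copy changes back to the Java array (unnecessary for read-only access).
	env->ReleaseFloatArrayElements(audioData, pSamples, JNI_ABORT);
}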
@@ -15,7 +15,9 @@ typealias AudioRecorderFactory = (context: Context)->AudioRecorder;
 
 class AudioRecorder(context: Context) : Closeable {
 	private val sampleRate = 16_000
-	private val maxLengthSeconds = 30 // Whisper supports a maximum of 30s
+	// Don't allow the unprocessed audio buffer to grow indefinitely -- discard
+	// data if longer than this:
+	private val maxLengthSeconds = 120
 	private val maxBufferSize = sampleRate * maxLengthSeconds
 	private val buffer = FloatArray(maxBufferSize)
 	private var bufferWriteOffset = 0
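Raising maxLengthSeconds from 30 to 120 works because chunking is now owned by the native session; the recorder only has to bound its unprocessed buffer so memory stays predictable. A small C++ sketch of such a bounded-append policy (the real logic lives in the Kotlin class above; names here are illustrative):

#include <vector>

// Keep at most maxSamples of the most recent audio, discarding the oldest.
void appendBounded(std::vector<float>& buffer, const float *pAudio, int sizeAudio,
		int maxSamples) {
	buffer.insert(buffer.end(), pAudio, pAudio + sizeAudio);
	if (static_cast<int>(buffer.size()) > maxSamples) {
		buffer.erase(buffer.begin(), buffer.end() - maxSamples); // drop the overflow
	}
}

// At 16 kHz, a 120s cap is 16000 * 120 = 1,920,000 float samples, roughly
// 7.3 MiB of float32 data, versus the previous 30s Whisper-window cap.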
@@ -6,6 +6,7 @@ class NativeWhisperLib(
 	modelPath: String,
 	languageCode: String,
 	prompt: String,
+	shortAudioContext: Boolean,
 ) : Closeable {
 	companion object {
 		init {
@@ -16,22 +17,40 @@ class NativeWhisperLib(
 
 		// TODO: The example whisper.cpp project transfers pointers as Longs to the Kotlin code.
 		// This seems unsafe. Try changing how this is managed.
-		private external fun init(modelPath: String, languageCode: String, prompt: String): Long;
+		private external fun init(modelPath: String, languageCode: String, prompt: String, shortAudioContext: Boolean): Long;
 		private external fun free(pointer: Long): Unit;
 
-		private external fun fullTranscribe(pointer: Long, audioData: FloatArray): String;
+		private external fun addAudio(pointer: Long, audioData: FloatArray): Unit;
+		private external fun transcribeNextChunk(pointer: Long): String;
+		private external fun transcribeRemaining(pointer: Long): String;
 		private external fun getPreview(pointer: Long): String;
 	}
 
 	private var closed = false
-	private val pointer: Long = init(modelPath, languageCode, prompt)
+	private val pointer: Long = init(modelPath, languageCode, prompt, shortAudioContext)
 
-	fun transcribe(audioData: FloatArray): String {
+	fun addAudio(audioData: FloatArray) {
+		if (closed) {
+			throw Exception("Cannot add audio data to a closed session")
+		}
+
+		Companion.addAudio(pointer, audioData)
+	}
+
+	fun transcribeNextChunk(): String {
 		if (closed) {
 			throw Exception("Cannot transcribe using a closed session")
 		}
 
-		return fullTranscribe(pointer, audioData)
+		return Companion.transcribeNextChunk(pointer)
+	}
+
+	fun transcribeRemaining(): String {
+		if (closed) {
+			throw Exception("Cannot transcribeAll using a closed session")
+		}
+
+		return Companion.transcribeRemaining(pointer)
 	}
 
 	fun getPreview(): String {
@@ -8,6 +8,7 @@ class SpeechToTextConverter(
 	modelPath: String,
 	locale: String,
 	prompt: String,
+	useShortAudioCtx: Boolean,
 	recorderFactory: AudioRecorderFactory,
 	context: Context,
 ) : Closeable {
@@ -17,6 +18,7 @@ class SpeechToTextConverter(
 		modelPath,
 		languageCode,
 		prompt,
+		useShortAudioCtx,
 	)
 
 	fun start() {
@@ -25,7 +27,8 @@ class SpeechToTextConverter(
 
 	private fun convert(data: FloatArray): String {
 		Log.d("Whisper", "Pre-transcribe data of size ${data.size}")
-		val result = whisper.transcribe(data)
+		whisper.addAudio(data)
+		val result = whisper.transcribeNextChunk()
 		Log.d("Whisper", "Post transcribe. Got $result")
 		return result;
 	}
@@ -47,7 +50,8 @@ class SpeechToTextConverter(
 	// Converts as many seconds of buffered data as possible, without waiting
 	fun convertRemaining(): String {
 		val buffer = recorder.pullAvailable()
-		return convert(buffer)
+		whisper.addAudio(buffer)
+		return whisper.transcribeRemaining()
 	}
 
 	fun getPreview(): String {
@@ -43,11 +43,11 @@ class SpeechToTextPackage : ReactPackage {
 	}
 
 	@ReactMethod
-	fun openSession(modelPath: String, locale: String, prompt: String, promise: Promise) {
+	fun openSession(modelPath: String, locale: String, prompt: String, useShortAudioCtx: Boolean, promise: Promise) {
 		val appContext = context.applicationContext
 
 		try {
-			val sessionId = sessionManager.openSession(modelPath, locale, prompt, appContext)
+			val sessionId = sessionManager.openSession(modelPath, locale, prompt, useShortAudioCtx, appContext)
 			promise.resolve(sessionId)
 		} catch (exception: Throwable) {
 			promise.reject(exception)
@@ -21,12 +21,13 @@ class SpeechToTextSessionManager(
 		modelPath: String,
 		locale: String,
 		prompt: String,
+		useShortAudioCtx: Boolean,
 		context: Context,
 	): Int {
 		val sessionId = nextSessionId++
 		sessions[sessionId] = SpeechToTextSession(
 			SpeechToTextConverter(
-				modelPath, locale, prompt, recorderFactory = AudioRecorder.factory, context,
+				modelPath, locale, prompt, useShortAudioCtx, recorderFactory = AudioRecorder.factory, context,
 			)
 		)
 		return sessionId
@@ -35,6 +35,7 @@ const useVoiceTyping = ({ locale, provider, onSetPreview, onText }: UseVoiceTypi
 	const [error, setError] = useState<Error|null>(null);
 	const [mustDownloadModel, setMustDownloadModel] = useState<boolean | null>(null);
 	const [modelIsOutdated, setModelIsOutdated] = useState(false);
+	const [stoppingSession, setIsStoppingSession] = useState(false);
 
 	const onTextRef = useRef(onText);
 	onTextRef.current = onText;
@@ -95,14 +96,16 @@ const useVoiceTyping = ({ locale, provider, onSetPreview, onText }: UseVoiceTypi
 	}, []);
 
 	const onRequestRedownload = useCallback(async () => {
+		setIsStoppingSession(true);
 		await voiceTypingRef.current?.cancel();
 		await builder.clearDownloads();
 		setMustDownloadModel(true);
+		setIsStoppingSession(false);
 		setRedownloadCounter(value => value + 1);
 	}, [builder]);
 
 	return {
-		error, mustDownloadModel, voiceTyping, onRequestRedownload, modelIsOutdated,
+		error, mustDownloadModel, stoppingSession, voiceTyping, onRequestRedownload, modelIsOutdated,
 	};
 };
@@ -113,6 +116,7 @@ const SpeechToTextComponent: React.FC<Props> = props => {
 		error: modelError,
 		mustDownloadModel,
 		voiceTyping,
+		stoppingSession,
 		onRequestRedownload,
 		modelIsOutdated,
 	} = useVoiceTyping({
@@ -136,6 +140,12 @@ const SpeechToTextComponent: React.FC<Props> = props => {
 		}
 	}, [mustDownloadModel]);
 
+	useEffect(() => {
+		if (stoppingSession) {
+			setRecorderState(RecorderState.Processing);
+		}
+	}, [stoppingSession]);
+
 	useEffect(() => {
 		if (recorderState === RecorderState.Recording) {
 			void voiceTyping.start();
@@ -156,7 +166,9 @@ const SpeechToTextComponent: React.FC<Props> = props => {
 		[RecorderState.Loading]: () => _('Loading...'),
 		[RecorderState.Idle]: () => 'Waiting...', // Not used for now
 		[RecorderState.Recording]: () => _('Please record your voice...'),
-		[RecorderState.Processing]: () => _('Converting speech to text...'),
+		[RecorderState.Processing]: () => (
+			stoppingSession ? _('Closing session...') : _('Converting speech to text...')
+		),
 		[RecorderState.Downloading]: () => _('Downloading %s language files...', languageName(props.locale)),
 		[RecorderState.Error]: () => _('Error: %s', modelError?.message),
 	};
@@ -165,10 +177,18 @@ const SpeechToTextComponent: React.FC<Props> = props => {
 	};
 
 	const renderPreview = () => {
+		if (recorderState !== RecorderState.Recording) {
+			return null;
+		}
 		return <Text variant='labelSmall'>{preview}</Text>;
 	};
 
-	const reDownloadButton = <Button onPress={onRequestRedownload}>
+	const reDownloadButton = <Button
+		// Usually, stoppingSession is true because the re-download button has
+		// just been pressed.
+		disabled={stoppingSession}
+		onPress={onRequestRedownload}
+	>
 		{modelIsOutdated ? _('Download updated model') : _('Re-download model')}
 	</Button>;
 	const allowReDownload = recorderState === RecorderState.Error || modelIsOutdated;
@@ -7,4 +7,7 @@
 # Customize the NODE_BINARY variable here.
 # For example, to use nvm with brew, add the following line
 # . "$(brew --prefix nvm)/nvm.sh" --no-use
-export NODE_BINARY=$(command -v node)
+
+# Note: `$(command -v node)` doesn't work so hardcode the path here - but it means it needs to be
+# manually updated when Node is upgraded.
+export NODE_BINARY=/opt/homebrew/opt/node@20/bin/node
@@ -535,13 +535,13 @@
 			ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
 			CLANG_ENABLE_MODULES = YES;
 			CODE_SIGN_ENTITLEMENTS = Joplin/Joplin.entitlements;
-			CURRENT_PROJECT_VERSION = 134;
+			CURRENT_PROJECT_VERSION = 135;
 			DEVELOPMENT_TEAM = A9BXAFS6CT;
 			ENABLE_BITCODE = NO;
 			INFOPLIST_FILE = Joplin/Info.plist;
 			IPHONEOS_DEPLOYMENT_TARGET = 13.4;
 			LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
-			MARKETING_VERSION = 13.3.1;
+			MARKETING_VERSION = 13.3.2;
 			OTHER_LDFLAGS = (
 				"$(inherited)",
 				"-ObjC",
@@ -567,12 +567,12 @@
 			ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
 			CLANG_ENABLE_MODULES = YES;
 			CODE_SIGN_ENTITLEMENTS = Joplin/Joplin.entitlements;
-			CURRENT_PROJECT_VERSION = 134;
+			CURRENT_PROJECT_VERSION = 135;
 			DEVELOPMENT_TEAM = A9BXAFS6CT;
 			INFOPLIST_FILE = Joplin/Info.plist;
 			IPHONEOS_DEPLOYMENT_TARGET = 13.4;
 			LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
-			MARKETING_VERSION = 13.3.1;
+			MARKETING_VERSION = 13.3.2;
 			OTHER_LDFLAGS = (
 				"$(inherited)",
 				"-ObjC",
@@ -758,14 +758,14 @@
 			CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
 			CODE_SIGN_ENTITLEMENTS = ShareExtension/ShareExtension.entitlements;
 			CODE_SIGN_STYLE = Automatic;
-			CURRENT_PROJECT_VERSION = 134;
+			CURRENT_PROJECT_VERSION = 135;
 			DEBUG_INFORMATION_FORMAT = dwarf;
 			DEVELOPMENT_TEAM = A9BXAFS6CT;
 			GCC_C_LANGUAGE_STANDARD = gnu11;
 			INFOPLIST_FILE = ShareExtension/Info.plist;
 			IPHONEOS_DEPLOYMENT_TARGET = 13.4;
 			LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @executable_path/../../Frameworks";
-			MARKETING_VERSION = 13.3.1;
+			MARKETING_VERSION = 13.3.2;
 			MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE;
 			MTL_FAST_MATH = YES;
 			OTHER_LDFLAGS = (
@@ -797,14 +797,14 @@
 			CODE_SIGN_ENTITLEMENTS = ShareExtension/ShareExtension.entitlements;
 			CODE_SIGN_STYLE = Automatic;
 			COPY_PHASE_STRIP = NO;
-			CURRENT_PROJECT_VERSION = 134;
+			CURRENT_PROJECT_VERSION = 135;
 			DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
 			DEVELOPMENT_TEAM = A9BXAFS6CT;
 			GCC_C_LANGUAGE_STANDARD = gnu11;
 			INFOPLIST_FILE = ShareExtension/Info.plist;
 			IPHONEOS_DEPLOYMENT_TARGET = 13.4;
 			LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @executable_path/../../Frameworks";
-			MARKETING_VERSION = 13.3.1;
+			MARKETING_VERSION = 13.3.2;
 			MTL_FAST_MATH = YES;
 			OTHER_LDFLAGS = (
 				"$(inherited)",
 				"-ObjC",
@@ -2,7 +2,6 @@ import shim from '@joplin/lib/shim';
 import Logger from '@joplin/utils/Logger';
 import { PermissionsAndroid, Platform } from 'react-native';
 import unzip from './utils/unzip';
-import { _ } from '@joplin/lib/locale';
 const md5 = require('md5');
 
 const logger = Logger.create('voiceTyping');
@@ -87,12 +86,7 @@ export default class VoiceTyping {
 	}
 
 	public async clearDownloads() {
-		const confirmed = await shim.showConfirmationDialog(
-			_('Delete model and re-download?\nThis cannot be undone.'),
-		);
-		if (confirmed) {
-			await this.provider.deleteCachedModels(this.locale);
-		}
+		await this.provider.deleteCachedModels(this.locale);
 	}
 
 	public async download() {
@@ -19,6 +19,7 @@ jest.mock('react-native', () => {
 		}),
 		closeSession: jest.fn(),
 		startRecording: jest.fn(),
+		convertAvailable: jest.fn(() => ''),
 	};
 
 	return reactNative;
@@ -13,6 +13,7 @@ const { SpeechToTextModule } = NativeModules;
 
 class WhisperConfig {
 	public prompts: Map<string, string> = new Map();
+	public supportsShortAudioCtx = false;
 	public stringReplacements: [string, string][] = [];
 	public regexReplacements: [RegExp, string][] = [];
 
@@ -69,6 +70,12 @@ class WhisperConfig {
 		}
 	};
 
+	// Models fine-tuned as per https://github.com/futo-org/whisper-acft should have
+	// "shortAudioContext": true in their config.json.
+	if ('shortAudioContext' in json) {
+		this.supportsShortAudioCtx = !!json.shortAudioContext;
+	}
+
 	processPrompts();
 	processOutputSettings();
 }
@@ -86,15 +93,25 @@ class Whisper implements VoiceTypingSession {
 	}
 
 	private postProcessSpeech(data: string) {
-		data = data.trim();
+		const paragraphs = data.split('\n\n');
 
-		for (const [key, value] of this.config.stringReplacements) {
-			data = data.split(key).join(value);
-		}
-		for (const [key, value] of this.config.regexReplacements) {
-			data = data.replace(key, value);
-		}
-		return data;
+		const result = [];
+		for (let paragraph of paragraphs) {
+			paragraph = paragraph.trim();
+
+			for (const [key, value] of this.config.stringReplacements) {
+				paragraph = paragraph.split(key).join(value);
+			}
+			for (const [key, value] of this.config.regexReplacements) {
+				paragraph = paragraph.replace(key, value);
+			}
+
+			if (paragraph) {
+				result.push(paragraph);
+			}
+		}
+
+		return result.join('\n\n');
 	}
 
 	private onDataFinalize(data: string) {
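Since the native layer now separates finalized paragraphs with "\n\n", postProcessSpeech applies its replacement tables per paragraph and drops paragraphs that become empty, so a replacement can no longer straddle a paragraph boundary. A C++ rendering of the same pipeline, kept in the native code's language for consistency (string replacements only; the regex pass is omitted and the table contents are illustrative):

#include <string>
#include <utility>
#include <vector>

static std::string trim(const std::string& s) {
	const char *ws = " \t\r\n";
	size_t start = s.find_first_not_of(ws);
	if (start == std::string::npos) return "";
	size_t end = s.find_last_not_of(ws);
	return s.substr(start, end - start + 1);
}

static void replaceAll(std::string& s, const std::string& from, const std::string& to) {
	if (from.empty()) return;
	for (size_t pos = 0; (pos = s.find(from, pos)) != std::string::npos; pos += to.size()) {
		s.replace(pos, from.size(), to);
	}
}

// Split on blank lines, post-process each paragraph, drop empty ones, re-join.
std::string postProcess(const std::string& data,
		const std::vector<std::pair<std::string, std::string>>& replacements) {
	std::string result;
	size_t pos = 0;
	while (pos <= data.size()) {
		size_t next = data.find("\n\n", pos);
		std::string paragraph = trim(
			data.substr(pos, next == std::string::npos ? std::string::npos : next - pos));
		for (const auto& [from, to] : replacements) replaceAll(paragraph, from, to);
		if (!paragraph.empty()) {
			if (!result.empty()) result += "\n\n";
			result += paragraph;
		}
		if (next == std::string::npos) break;
		pos = next + 2;
	}
	return result;
}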
@@ -235,8 +252,9 @@ const whisper: VoiceTypingProvider = {
 			throw new Error(`Model not found at path ${modelPath}`);
 		}
 
+		logger.debug('Starting whisper session', config.supportsShortAudioCtx ? '(short audio context)' : '');
 		const sessionId = await SpeechToTextModule.openSession(
-			modelPath, locale, getPrompt(locale, config.prompts),
+			modelPath, locale, getPrompt(locale, config.prompts), config.supportsShortAudioCtx,
 		);
 		return new Whisper(sessionId, callbacks, config);
 	},
@@ -168,6 +168,14 @@ export default function shimInit() {
 		return Platform.OS;
 	};
 
+	shim.isAppleSilicon = () => {
+		return false;
+	};
+
+	shim.platformArch = () => {
+		return ''; // Not supported
+	};
+
 	shim.appVersion = () => {
 		const p = require('react-native-version-info').default;
 		return p.appVersion;
@@ -252,6 +252,10 @@ const config = {
 				label: 'Patreon',
 				href: 'https://www.patreon.com/joplin',
 			},
+			{
+				label: 'YouTube',
+				href: 'https://www.youtube.com/@joplinapp',
+			},
 			{
 				label: 'LinkedIn',
 				href: 'https://www.linkedin.com/company/joplin',
@@ -19,6 +19,7 @@ import FileApiDriverLocal from './file-api-driver-local';
 import * as mimeUtils from './mime-utils';
 import BaseItem from './models/BaseItem';
 import { Size } from '@joplin/utils/types';
+import { arch } from 'os';
 const { _ } = require('./locale');
 const http = require('http');
 const https = require('https');
@@ -170,6 +171,14 @@ function shimInit(options: ShimInitOptions = null) {
 		return Array.from(buffer);
 	};
 
+	shim.isAppleSilicon = () => {
+		return shim.isMac() && arch() === 'arm64';
+	};
+
+	shim.platformArch = () => {
+		return arch();
+	};
+
 	// eslint-disable-next-line @typescript-eslint/no-explicit-any -- Old code before rule was applied
 	shim.detectAndSetLocale = function(Setting: any) {
 		let locale = shim.isElectron() ? shim.electronBridge().getLocale() : process.env.LANG;
@@ -156,6 +156,12 @@ const shim = {
 		return typeof process !== 'undefined' && process.platform === 'darwin';
 	},
 
+	// Tells whether the computer **CPU** is an Apple Silicon (not whether the running version was
+	// built for ARM64)
+	isAppleSilicon: (): boolean => {
+		throw new Error('Not implemented: isAppleSilicon');
+	},
+
 	platformName: () => {
 		if (shim.isReactNative()) return shim.mobilePlatform();
 		if (shim.isMac()) return 'darwin';
@@ -166,6 +172,23 @@ const shim = {
 		throw new Error('Cannot determine platform');
 	},
 
+	// Tells the computer CPU architecture, which may differ from the architecture the running
+	// version was built for. For example, the laptop CPU may be an ARM64, while the version was
+	// built for the x64 architecture. Here we want to know the laptop CPU.
+	platformArch: (): string => {
+		throw new Error('Not implemented: platformArch');
+	},
+
+	deviceString: () => {
+		const output: string[] = [];
+
+		output.push(shim.platformName());
+
+		if (shim.platformArch()) output.push(shim.platformArch());
+
+		return output.join(', ');
+	},
+
 	// "ios" or "android", or "" if not on mobile
 	mobilePlatform: () => {
 		return ''; // Default if we're not on mobile (React Native)
@@ -85,6 +85,7 @@ export default function versionInfo(packageInfo: PackageInfo, plugins: Plugins)
 	const body = [
 		_('%s %s (%s, %s)', p.name, p.version, Setting.value('env'), shim.platformName()),
 		'',
+		_('Device: %s', shim.deviceString()),
 		_('Client ID: %s', Setting.value('clientId')),
 		_('Sync Version: %s', Setting.value('syncVersion')),
 		_('Profile Version: %s', reg.db().version()),
@@ -22,6 +22,44 @@ function setupMessageHtml() {
 	}
 }
 
+// Note: Take the list from `packages/doc-builder/docusaurus.config.js`
+const socialFeeds = () => {
+	return [
+		{
+			label: 'Bluesky',
+			href: 'https://bsky.app/profile/joplinapp.bsky.social',
+		},
+		{
+			label: 'Patreon',
+			href: 'https://www.patreon.com/joplin',
+		},
+		{
+			label: 'YouTube',
+			href: 'https://www.youtube.com/@joplinapp',
+		},
+		{
+			label: 'LinkedIn',
+			href: 'https://www.linkedin.com/company/joplin',
+		},
+		{
+			label: 'Discord',
+			href: 'https://discord.gg/VSj7AFHvpq',
+		},
+		{
+			label: 'Mastodon',
+			href: 'https://mastodon.social/@joplinapp',
+		},
+		{
+			label: 'Lemmy',
+			href: 'https://sopuli.xyz/c/joplinapp',
+		},
+		{
+			label: 'GitHub',
+			href: 'https://github.com/laurent22/joplin/',
+		},
+	];
+};
+
 router.get('home', async (_path: SubPath, ctx: AppContext) => {
 	contextSessionId(ctx);
@@ -79,6 +117,7 @@ router.get('home', async (_path: SubPath, ctx: AppContext) => {
 		betaExpiredDays: betaUserTrialPeriodDays(user.created_time, 0, 0),
 		betaStartSubUrl: betaStartSubUrl(user.email, user.account_type),
 		setupMessageHtml: setupMessageHtml(),
+		socialFeeds: socialFeeds(),
 	};
 
 	view.cssFiles = ['index/home'];
@@ -38,3 +38,19 @@
 	<a href="{{{global.baseUrl}}}/upgrade" class="upgrade-button">Upgrade to a Pro account</a> to collaborate on notebooks with other people, to increase the max note size, or the max total size.
 	</p>
 {{/showUpgradeProButton}}
+
+<h2 class="title">Follow us on social media</h2>
+
+<p class="block">Get the latest updates about {{global.appName}} on our social feeds!</p>
+
+<table class="table is-hoverable user-props-table">
+	<tbody>
+		{{#socialFeeds}}
+			<tr>
+				<td>
+					<strong><a target="_blank" href="{{href}}">{{label}}</a></strong>
+				</td>
+			</tr>
+		{{/socialFeeds}}
+	</tbody>
+</table>
@@ -135,6 +135,9 @@
 	"android-v3.3.1": true,
 	"ios-v13.3.1": true,
 	"v3.2.13": true,
-	"android-v3.3.2": true
+	"android-v3.3.2": true,
+	"v3.3.3": true,
+	"android-v3.3.3": true,
+	"ios-v13.3.2": true
 	}
 }
@@ -80,6 +80,13 @@ async function main() {
 
 	await warningMessage();
 
+	// React Native caches a path to Node in there, which appears to point to a copy of the
+	// executable in a temp folder. If those temp folders are deleted it will still try to use that
+	// path and fail. Running "Clean build" won't remove `.xcode.env.local` so it's safer to always
+	// delete it, since if there's an issue the error makes no sense whatsoever, and several hours
+	// will be lost trying to fix the issue.
+	await fs.remove(`${mobileDir}/ios/Pods/../.xcode.env.local`);
+
 	const pbxprojFilePath = `${mobileDir}/ios/Joplin.xcodeproj/project.pbxproj`;
 	await checkDeploymentTargets(pbxprojFilePath);
@@ -40,6 +40,10 @@ export class FileLocker {
 		this.filePath_ = filePath;
 	}
 
+	public get options() {
+		return this.options_;
+	}
+
 	public async lock() {
 		if (!(await this.canLock())) return false;
@@ -1,5 +1,10 @@
 # Joplin Android Changelog
 
+## [android-v3.3.4](https://github.com/laurent22/joplin/releases/tag/android-v3.3.4) (Pre-release) - 2025-03-21T18:07:00Z
+
+- Improved: Voice typing: Improve processing with larger models (#11983 by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Improved: Voice typing: Improve re-download button UI (#11979) (#11955 by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+
 ## [android-v3.3.3](https://github.com/laurent22/joplin/releases/tag/android-v3.3.3) (Pre-release) - 2025-03-16T10:29:52Z
 
 - New: Add setting migration for ocr.enabled (ab86b95)
@@ -1,5 +1,31 @@
 # Joplin Desktop Changelog
 
+## [v3.3.3](https://github.com/laurent22/joplin/releases/tag/v3.3.3) (Pre-release) - 2025-03-16T11:52:33Z
+
+- New: Accessibility: Add a menu item that moves focus to the note viewer ([#11967](https://github.com/laurent22/joplin/issues/11967) by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- New: Accessibility: Add error indication on Note properties ([#11784](https://github.com/laurent22/joplin/issues/11784) by [@pedr](https://github.com/pedr))
+- New: Accessibility: Add more standard keyboard shortcuts for the notebook sidebar ([#11892](https://github.com/laurent22/joplin/issues/11892) by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- New: Add a button to collapse or expand all folders ([#11905](https://github.com/laurent22/joplin/issues/11905))
+- New: Add dialog to select a note and link to it ([#11891](https://github.com/laurent22/joplin/issues/11891))
+- New: Add setting migration for ocr.enabled ([ab86b95](https://github.com/laurent22/joplin/commit/ab86b95))
+- New: Add support for multiple instances ([#11963](https://github.com/laurent22/joplin/issues/11963))
+- New: Added keyboard shortcut and menu item for toggleEditorPlugin command ([7e8dee4](https://github.com/laurent22/joplin/commit/7e8dee4))
+- New: Plugins: Add support for `joplin.shouldUseDarkColors` API ([fe67a44](https://github.com/laurent22/joplin/commit/fe67a44))
+- Improved: Accessibility: Improve "toggle all notebooks" accessibility ([#11918](https://github.com/laurent22/joplin/issues/11918) by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Improved: Add "Disable synchronisation" to Joplin Cloud prompt message ([#11705](https://github.com/laurent22/joplin/issues/11705)) ([#11696](https://github.com/laurent22/joplin/issues/11696) by [@Vortrix5](https://github.com/Vortrix5))
+- Improved: Improve Rich Text Editor toolbar structure ([#11869](https://github.com/laurent22/joplin/issues/11869)) ([#11663](https://github.com/laurent22/joplin/issues/11663) by [@j-scheitler1](https://github.com/j-scheitler1))
+- Improved: Improve download in install script ([#11921](https://github.com/laurent22/joplin/issues/11921) by Helmut K. C. Tessarek)
+- Improved: Make "toggle all folders" button also expand the folder list ([#11917](https://github.com/laurent22/joplin/issues/11917) by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Improved: Plugins: Mark the LanguageTool Integration plugin as incompatible ([#11715](https://github.com/laurent22/joplin/issues/11715)) ([#11710](https://github.com/laurent22/joplin/issues/11710) by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Improved: Upgrade to Electron 35.0.1 ([#11968](https://github.com/laurent22/joplin/issues/11968) by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Fixed: Fix adding tags to a note through drag-and-drop ([#11911](https://github.com/laurent22/joplin/issues/11911) by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Fixed: Fix ctrl-p doesn't open the goto anything dialog in the Rich Text Editor ([#11926](https://github.com/laurent22/joplin/issues/11926)) ([#11894](https://github.com/laurent22/joplin/issues/11894) by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Fixed: Fix issue with GotoAnything that would prevent it from highlighting search results in note titles ([#11888](https://github.com/laurent22/joplin/issues/11888))
+- Fixed: Import audio from OneNote as file links ([#11942](https://github.com/laurent22/joplin/issues/11942)) ([#11939](https://github.com/laurent22/joplin/issues/11939) by [@pedr](https://github.com/pedr))
+- Fixed: Make tab size consistent between Markdown editor and viewer (and RTE) ([#11940](https://github.com/laurent22/joplin/issues/11940)) ([#11673](https://github.com/laurent22/joplin/issues/11673))
+- Fixed: Preserve attachment file extensions regardless of the mime type ([#11852](https://github.com/laurent22/joplin/issues/11852)) ([#11759](https://github.com/laurent22/joplin/issues/11759) by [@pedr](https://github.com/pedr))
+- Fixed: Sharing a notebook with nobody prints "No user with ID public_key" ([#11932](https://github.com/laurent22/joplin/issues/11932)) ([#11923](https://github.com/laurent22/joplin/issues/11923) by [@Paramesh-T-S](https://github.com/Paramesh-T-S))
+
 ## [v3.2.13](https://github.com/laurent22/joplin/releases/tag/v3.2.13) - 2025-02-28T14:38:21Z
 
 - Improved: Plugins: Mark the LanguageTool Integration plugin as incompatible ([#11715](https://github.com/laurent22/joplin/issues/11715)) ([#11710](https://github.com/laurent22/joplin/issues/11710) by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
@@ -1,5 +1,21 @@
 # Joplin iOS Changelog
 
+## [ios-v13.3.2](https://github.com/laurent22/joplin/releases/tag/ios-v13.3.2) - 2025-03-16T11:47:05Z
+
+- New: Add setting migration for ocr.enabled (ab86b95)
+- Improved: Accessibility: Improve focus handling in the note actions menu and modal dialogs (#11929 by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Improved: Accessibility: Make default modal close button accessible (#11957 by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Improved: Improve encryption config screen accessibility (#11874) (#11846 by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Improved: Re-Add iOS Dark Icon (#11943 by [@itzTheMeow](https://github.com/itzTheMeow))
+- Improved: Updated packages @bam.tech/react-native-image-resizer (v3.0.11)
+- Fixed: Accessibility: Fix "new note" and "new to-do" buttons are focusable even while invisible (#11899 by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Fixed: Accessibility: Fix focus gets stuck on "Attach" in the note actions menu (#11958 by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Fixed: Accessibility: Fix missing label on note actions menu dismiss button (#11954 by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Fixed: Accessibility: Fix plugins can't be installed using VoiceOver (#11931 by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Fixed: Fix disabled encryption keys list showing enabled keys (#11861) (#11858 by [@pedr](https://github.com/pedr))
+- Fixed: Fix voice recorder crash (#11876) (#11864 by [@personalizedrefrigerator](https://github.com/personalizedrefrigerator))
+- Fixed: Make tab size consistent between Markdown editor and viewer (and RTE) (#11940) (#11673)
+
 ## [ios-v13.3.1](https://github.com/laurent22/joplin/releases/tag/ios-v13.3.1) - 2025-02-19T16:04:34Z
 
 - New: Add support for plugin editor views (#11831)
@@ -48,15 +48,10 @@ Joplin can also import OneNote notebooks. To do this:
 
 ### Importing from other applications
 
-In general the way to import notes from any application into Joplin is to convert the notes to ENEX files (Evernote format) and to import these ENEX files into Joplin using the method above. Most note-taking applications support ENEX files so it should be relatively straightforward. For help about specific applications, see below:
-
-* Standard Notes: Please see [this tutorial](https://programadorwebvalencia.com/migrate-notes-from-standard-notes-to-joplin/)
-* Tomboy Notes: Export the notes to ENEX files [as described here](https://askubuntu.com/questions/243691/how-can-i-export-my-tomboy-notes-into-evernote/608551) for example, and import these ENEX files into Joplin.
-* OneNote: First [import the notes from OneNote into Evernote](https://discussion.evernote.com/topic/107736-is-there-a-way-to-import-from-onenote-into-evernote-on-the-mac/). Then export the ENEX file from Evernote and import it into Joplin.
-* NixNote: Synchronise with Evernote, then export the ENEX files and import them into Joplin. More info [in this thread](https://discourse.joplinapp.org/t/import-from-nixnote/183/3).
+In general the way to import notes from other applications into Joplin is to convert the notes to ENEX files (Evernote format), HTML or Markdown, and to import these files into Joplin. For help about specific applications, see this wiki document: [Importing notes from other notebook applications](https://discourse.joplinapp.org/t/importing-notes-from-other-notebook-applications/22425).
 
 ## Exporting
 
 Joplin can export to the JEX format (Joplin Export file), which is a tar file that can contain multiple notes, notebooks, etc. This is a lossless format in that all the notes, but also metadata such as geo-location, updated time, tags, etc. are preserved. This format is convenient for backup purposes and can be re-imported into Joplin. A "raw" format is also available. This is the same as the JEX format except that the data is saved to a directory and each item represented by a single file.
 
 Joplin is also capable of exporting to a number of other formats including HTML and PDF which can be done for single notes, notebooks or everything.