/* * Copyright (C) 2016 Christian Mollekopf * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) version 3, or any * later version accepted by the membership of KDE e.V. (or its * successor approved by the membership of KDE e.V.), which shall * act as a proxy defined in Section 6 of version 3 of the license. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library. If not, see . */ #include "genericresource.h" #include "entitybuffer.h" #include "pipeline.h" #include "queuedcommand_generated.h" #include "createentity_generated.h" #include "modifyentity_generated.h" #include "deleteentity_generated.h" #include "inspection_generated.h" #include "notification_generated.h" #include "domainadaptor.h" #include "commands.h" #include "index.h" #include "log.h" #include "definitions.h" #include "bufferutils.h" #include "adaptorfactoryregistry.h" #include #include #include //This is the resources entity type, and not the domain type #define ENTITY_TYPE_MAIL "mail" #define ENTITY_TYPE_FOLDER "folder" static int sBatchSize = 100; // This interval directly affects the roundtrip time of single commands static int sCommitInterval = 10; using namespace Sink; #undef DEBUG_AREA #define DEBUG_AREA "resource.commandprocessor" /** * Drives the pipeline using the output from all command queues */ class CommandProcessor : public QObject { Q_OBJECT typedef std::function(void const *, size_t)> InspectionFunction; public: CommandProcessor(Sink::Pipeline *pipeline, QList commandQueues) : QObject(), mPipeline(pipeline), mCommandQueues(commandQueues), mProcessingLock(false) { mLowerBoundRevision = Storage::maxRevision(mPipeline->storage().createTransaction(Storage::ReadOnly, [](const Sink::Storage::Error &error) { Warning() << error.message; })); for (auto queue : mCommandQueues) { const bool ret = connect(queue, &MessageQueue::messageReady, this, &CommandProcessor::process); Q_UNUSED(ret); } } void setOldestUsedRevision(qint64 revision) { mLowerBoundRevision = revision; } void setInspectionCommand(const InspectionFunction &f) { mInspect = f; } signals: void error(int errorCode, const QString &errorMessage); private: bool messagesToProcessAvailable() { for (auto queue : mCommandQueues) { if (!queue->isEmpty()) { return true; } } return false; } private slots: void process() { if (mProcessingLock) { return; } mProcessingLock = true; auto job = processPipeline() .then([this]() { mProcessingLock = false; if (messagesToProcessAvailable()) { process(); } }) .exec(); } KAsync::Job processQueuedCommand(const Sink::QueuedCommand *queuedCommand) { Log() << "Processing command: " << Sink::Commands::name(queuedCommand->commandId()); // Throw command into appropriate pipeline switch (queuedCommand->commandId()) { case Sink::Commands::DeleteEntityCommand: return mPipeline->deletedEntity(queuedCommand->command()->Data(), queuedCommand->command()->size()); case Sink::Commands::ModifyEntityCommand: return mPipeline->modifiedEntity(queuedCommand->command()->Data(), queuedCommand->command()->size()); case Sink::Commands::CreateEntityCommand: return mPipeline->newEntity(queuedCommand->command()->Data(), queuedCommand->command()->size()); case Sink::Commands::InspectionCommand: if (mInspect) { return mInspect(queuedCommand->command()->Data(), queuedCommand->command()->size()).then([]() { return -1; }); } else { return KAsync::error(-1, "Missing inspection command."); } default: return KAsync::error(-1, "Unhandled command"); } } KAsync::Job processQueuedCommand(const QByteArray &data) { flatbuffers::Verifier verifyer(reinterpret_cast(data.constData()), data.size()); if (!Sink::VerifyQueuedCommandBuffer(verifyer)) { Warning() << "invalid buffer"; // return KAsync::error(1, "Invalid Buffer"); } auto queuedCommand = Sink::GetQueuedCommand(data.constData()); const auto commandId = queuedCommand->commandId(); Trace() << "Dequeued Command: " << Sink::Commands::name(commandId); return processQueuedCommand(queuedCommand) .then( [commandId](qint64 createdRevision) -> qint64 { Trace() << "Command pipeline processed: " << Sink::Commands::name(commandId); return createdRevision; }, [](int errorCode, QString errorMessage) { // FIXME propagate error, we didn't handle it Warning() << "Error while processing queue command: " << errorMessage; }); } // Process all messages of this queue KAsync::Job processQueue(MessageQueue *queue) { auto time = QSharedPointer::create(); return KAsync::start([this]() { mPipeline->startTransaction(); }) .then(KAsync::dowhile([queue]() { return !queue->isEmpty(); }, [this, queue, time](KAsync::Future &future) { queue->dequeueBatch(sBatchSize, [this, time](const QByteArray &data) { time->start(); return KAsync::start([this, data, time](KAsync::Future &future) { processQueuedCommand(data) .then([&future, this, time](qint64 createdRevision) { Trace() << "Created revision " << createdRevision << ". Processing took: " << Log::TraceTime(time->elapsed()); future.setFinished(); }) .exec(); }); }) .then([&future, queue]() { future.setFinished(); }, [&future](int i, QString error) { if (i != MessageQueue::ErrorCodes::NoMessageFound) { Warning() << "Error while getting message from messagequeue: " << error; } future.setFinished(); }) .exec(); })) .then([this]() { mPipeline->commit(); }); } KAsync::Job processPipeline() { auto time = QSharedPointer::create(); time->start(); mPipeline->startTransaction(); Trace() << "Cleaning up from " << mPipeline->cleanedUpRevision() + 1 << " to " << mLowerBoundRevision; for (qint64 revision = mPipeline->cleanedUpRevision() + 1; revision <= mLowerBoundRevision; revision++) { mPipeline->cleanupRevision(revision); } mPipeline->commit(); Trace() << "Cleanup done." << Log::TraceTime(time->elapsed()); // Go through all message queues auto it = QSharedPointer>::create(mCommandQueues); return KAsync::dowhile([it]() { return it->hasNext(); }, [it, this](KAsync::Future &future) { auto time = QSharedPointer::create(); time->start(); auto queue = it->next(); processQueue(queue) .then([&future, time]() { Trace() << "Queue processed." << Log::TraceTime(time->elapsed()); future.setFinished(); }) .exec(); }); } private: Sink::Pipeline *mPipeline; // Ordered by priority QList mCommandQueues; bool mProcessingLock; // The lowest revision we no longer need qint64 mLowerBoundRevision; InspectionFunction mInspect; }; #undef DEBUG_AREA #define DEBUG_AREA "resource" GenericResource::GenericResource(const QByteArray &resourceType, const QByteArray &resourceInstanceIdentifier, const QSharedPointer &pipeline, const QSharedPointer &changeReplay, const QSharedPointer &synchronizer) : Sink::Resource(), mUserQueue(Sink::storageLocation(), resourceInstanceIdentifier + ".userqueue"), mSynchronizerQueue(Sink::storageLocation(), resourceInstanceIdentifier + ".synchronizerqueue"), mResourceType(resourceType), mResourceInstanceIdentifier(resourceInstanceIdentifier), mPipeline(pipeline ? pipeline : QSharedPointer::create(resourceInstanceIdentifier)), mChangeReplay(changeReplay), mSynchronizer(synchronizer), mError(0), mClientLowerBoundRevision(std::numeric_limits::max()) { mPipeline->setResourceType(mResourceType); mSynchronizer->setup([this](int commandId, const QByteArray &data) { enqueueCommand(mSynchronizerQueue, commandId, data); }); mProcessor = new CommandProcessor(mPipeline.data(), QList() << &mUserQueue << &mSynchronizerQueue); mProcessor->setInspectionCommand([this](void const *command, size_t size) { flatbuffers::Verifier verifier((const uint8_t *)command, size); if (Sink::Commands::VerifyInspectionBuffer(verifier)) { auto buffer = Sink::Commands::GetInspection(command); int inspectionType = buffer->type(); QByteArray inspectionId = BufferUtils::extractBuffer(buffer->id()); QByteArray entityId = BufferUtils::extractBuffer(buffer->entityId()); QByteArray domainType = BufferUtils::extractBuffer(buffer->domainType()); QByteArray property = BufferUtils::extractBuffer(buffer->property()); QByteArray expectedValueString = BufferUtils::extractBuffer(buffer->expectedValue()); QDataStream s(expectedValueString); QVariant expectedValue; s >> expectedValue; inspect(inspectionType, inspectionId, domainType, entityId, property, expectedValue) .then( [=]() { Log_area("resource.inspection") << "Inspection was successful: " << inspectionType << inspectionId << entityId; Sink::Notification n; n.type = Sink::Commands::NotificationType_Inspection; n.id = inspectionId; n.code = Sink::Commands::NotificationCode_Success; emit notify(n); }, [=](int code, const QString &message) { Warning_area("resource.inspection") << "Inspection failed: " << inspectionType << inspectionId << entityId << message; Sink::Notification n; n.type = Sink::Commands::NotificationType_Inspection; n.message = message; n.id = inspectionId; n.code = Sink::Commands::NotificationCode_Failure; emit notify(n); }) .exec(); return KAsync::null(); } return KAsync::error(-1, "Invalid inspection command."); }); QObject::connect(mProcessor, &CommandProcessor::error, [this](int errorCode, const QString &msg) { onProcessorError(errorCode, msg); }); QObject::connect(mPipeline.data(), &Pipeline::revisionUpdated, this, &Resource::revisionUpdated); enableChangeReplay(true); mClientLowerBoundRevision = mPipeline->cleanedUpRevision(); mProcessor->setOldestUsedRevision(mChangeReplay->getLastReplayedRevision()); mCommitQueueTimer.setInterval(sCommitInterval); mCommitQueueTimer.setSingleShot(true); QObject::connect(&mCommitQueueTimer, &QTimer::timeout, &mUserQueue, &MessageQueue::commit); } GenericResource::~GenericResource() { delete mProcessor; } KAsync::Job GenericResource::inspect( int inspectionType, const QByteArray &inspectionId, const QByteArray &domainType, const QByteArray &entityId, const QByteArray &property, const QVariant &expectedValue) { Warning() << "Inspection not implemented"; return KAsync::null(); } void GenericResource::enableChangeReplay(bool enable) { if (enable) { QObject::connect(mPipeline.data(), &Pipeline::revisionUpdated, mChangeReplay.data(), &ChangeReplay::revisionChanged, Qt::QueuedConnection); QObject::connect(mChangeReplay.data(), &ChangeReplay::changesReplayed, this, &GenericResource::updateLowerBoundRevision); mChangeReplay->revisionChanged(); } else { QObject::disconnect(mPipeline.data(), &Pipeline::revisionUpdated, mChangeReplay.data(), &ChangeReplay::revisionChanged); QObject::disconnect(mChangeReplay.data(), &ChangeReplay::changesReplayed, this, &GenericResource::updateLowerBoundRevision); } } void GenericResource::addType(const QByteArray &type, DomainTypeAdaptorFactoryInterface::Ptr factory, const QVector &preprocessors) { mPipeline->setPreprocessors(type, preprocessors); } void GenericResource::removeDataFromDisk() { removeFromDisk(mResourceInstanceIdentifier); } void GenericResource::removeFromDisk(const QByteArray &instanceIdentifier) { Sink::Storage(Sink::storageLocation(), instanceIdentifier, Sink::Storage::ReadWrite).removeFromDisk(); Sink::Storage(Sink::storageLocation(), instanceIdentifier + ".userqueue", Sink::Storage::ReadWrite).removeFromDisk(); Sink::Storage(Sink::storageLocation(), instanceIdentifier + ".synchronizerqueue", Sink::Storage::ReadWrite).removeFromDisk(); Sink::Storage(Sink::storageLocation(), instanceIdentifier + ".changereplay", Sink::Storage::ReadWrite).removeFromDisk(); Sink::Storage(Sink::storageLocation(), instanceIdentifier + ".synchronization", Sink::Storage::ReadWrite).removeFromDisk(); } qint64 GenericResource::diskUsage(const QByteArray &instanceIdentifier) { auto size = Sink::Storage(Sink::storageLocation(), instanceIdentifier, Sink::Storage::ReadOnly).diskUsage(); size += Sink::Storage(Sink::storageLocation(), instanceIdentifier + ".userqueue", Sink::Storage::ReadOnly).diskUsage(); size += Sink::Storage(Sink::storageLocation(), instanceIdentifier + ".synchronizerqueue", Sink::Storage::ReadOnly).diskUsage(); size += Sink::Storage(Sink::storageLocation(), instanceIdentifier + ".changereplay", Sink::Storage::ReadOnly).diskUsage(); return size; } void GenericResource::onProcessorError(int errorCode, const QString &errorMessage) { Warning() << "Received error from Processor: " << errorCode << errorMessage; mError = errorCode; } int GenericResource::error() const { return mError; } void GenericResource::enqueueCommand(MessageQueue &mq, int commandId, const QByteArray &data) { flatbuffers::FlatBufferBuilder fbb; auto commandData = Sink::EntityBuffer::appendAsVector(fbb, data.constData(), data.size()); auto buffer = Sink::CreateQueuedCommand(fbb, commandId, commandData); Sink::FinishQueuedCommandBuffer(fbb, buffer); mq.enqueue(fbb.GetBufferPointer(), fbb.GetSize()); } void GenericResource::processCommand(int commandId, const QByteArray &data) { static int modifications = 0; mUserQueue.startTransaction(); enqueueCommand(mUserQueue, commandId, data); modifications++; if (modifications >= sBatchSize) { mUserQueue.commit(); modifications = 0; mCommitQueueTimer.stop(); } else { mCommitQueueTimer.start(); } } KAsync::Job GenericResource::synchronizeWithSource() { return KAsync::start([this](KAsync::Future &future) { Log() << " Synchronizing"; // Changereplay would deadlock otherwise when trying to open the synchronization store enableChangeReplay(false); mSynchronizer->synchronize() .then([this, &future]() { Log() << "Done Synchronizing"; enableChangeReplay(true); future.setFinished(); }, [this, &future](int errorCode, const QString &error) { enableChangeReplay(true); future.setError(errorCode, error); }) .exec(); }); } KAsync::Job GenericResource::synchronizeWithSource(Sink::Storage &mainStore, Sink::Storage &synchronizationStore) { return KAsync::null(); } static void waitForDrained(KAsync::Future &f, MessageQueue &queue) { if (queue.isEmpty()) { f.setFinished(); } else { QObject::connect(&queue, &MessageQueue::drained, [&f]() { f.setFinished(); }); } }; KAsync::Job GenericResource::processAllMessages() { // We have to wait for all items to be processed to ensure the synced items are available when a query gets executed. // TODO: report errors while processing sync? // TODO JOBAPI: A helper that waits for n events and then continues? return KAsync::start([this](KAsync::Future &f) { if (mCommitQueueTimer.isActive()) { auto context = new QObject; QObject::connect(&mCommitQueueTimer, &QTimer::timeout, context, [&f, context]() { delete context; f.setFinished(); }); } else { f.setFinished(); } }) .then([this](KAsync::Future &f) { waitForDrained(f, mSynchronizerQueue); }) .then([this](KAsync::Future &f) { waitForDrained(f, mUserQueue); }) .then([this](KAsync::Future &f) { if (mChangeReplay->allChangesReplayed()) { f.setFinished(); } else { auto context = new QObject; QObject::connect(mChangeReplay.data(), &ChangeReplay::changesReplayed, context, [&f, context]() { delete context; f.setFinished(); }); } }); } void GenericResource::updateLowerBoundRevision() { mProcessor->setOldestUsedRevision(qMin(mClientLowerBoundRevision, mChangeReplay->getLastReplayedRevision())); } void GenericResource::setLowerBoundRevision(qint64 revision) { mClientLowerBoundRevision = revision; updateLowerBoundRevision(); } EntityStore::EntityStore(const QByteArray &resourceType, const QByteArray &resourceInstanceIdentifier, Sink::Storage::Transaction &transaction) : mResourceType(resourceType), mResourceInstanceIdentifier(resourceInstanceIdentifier), mTransaction(transaction) { } template T EntityStore::read(const QByteArray &identifier) const { auto typeName = ApplicationDomain::getTypeName(); auto mainDatabase = Storage::mainDatabase(mTransaction, typeName); auto bufferAdaptor = getLatest(mainDatabase, identifier, *Sink::AdaptorFactoryRegistry::instance().getFactory(mResourceType)); Q_ASSERT(bufferAdaptor); return T(mResourceInstanceIdentifier, identifier, 0, bufferAdaptor); } QSharedPointer EntityStore::getLatest(const Sink::Storage::NamedDatabase &db, const QByteArray &uid, DomainTypeAdaptorFactoryInterface &adaptorFactory) { QSharedPointer current; db.findLatest(uid, [¤t, &adaptorFactory](const QByteArray &key, const QByteArray &data) -> bool { Sink::EntityBuffer buffer(const_cast(data.data()), data.size()); if (!buffer.isValid()) { Warning() << "Read invalid buffer from disk"; } else { Trace() << "Found value " << key; current = adaptorFactory.createAdaptor(buffer.entity()); } return false; }, [](const Sink::Storage::Error &error) { Warning() << "Failed to read current value from storage: " << error.message; }); return current; } SyncStore::SyncStore(Sink::Storage::Transaction &transaction) : mTransaction(transaction) { } void SyncStore::recordRemoteId(const QByteArray &bufferType, const QByteArray &localId, const QByteArray &remoteId) { Index("rid.mapping." + bufferType, mTransaction).add(remoteId, localId); Index("localid.mapping." + bufferType, mTransaction).add(localId, remoteId); } void SyncStore::removeRemoteId(const QByteArray &bufferType, const QByteArray &localId, const QByteArray &remoteId) { Index("rid.mapping." + bufferType, mTransaction).remove(remoteId, localId); Index("localid.mapping." + bufferType, mTransaction).remove(localId, remoteId); } void SyncStore::updateRemoteId(const QByteArray &bufferType, const QByteArray &localId, const QByteArray &remoteId) { const auto oldRemoteId = Index("localid.mapping." + bufferType, mTransaction).lookup(localId); removeRemoteId(bufferType, localId, oldRemoteId); recordRemoteId(bufferType, localId, remoteId); } QByteArray SyncStore::resolveRemoteId(const QByteArray &bufferType, const QByteArray &remoteId) { // Lookup local id for remote id, or insert a new pair otherwise Index index("rid.mapping." + bufferType, mTransaction); QByteArray sinkId = index.lookup(remoteId); if (sinkId.isEmpty()) { sinkId = QUuid::createUuid().toString().toUtf8(); index.add(remoteId, sinkId); Index("localid.mapping." + bufferType, mTransaction).add(sinkId, remoteId); } return sinkId; } QByteArray SyncStore::resolveLocalId(const QByteArray &bufferType, const QByteArray &localId) { QByteArray remoteId = Index("localid.mapping." + bufferType, mTransaction).lookup(localId); if (remoteId.isEmpty()) { Warning() << "Couldn't find the remote id for " << localId; return QByteArray(); } return remoteId; } Synchronizer::Synchronizer(const QByteArray &resourceType, const QByteArray &resourceInstanceIdentifier) : mStorage(Sink::storageLocation(), resourceInstanceIdentifier, Sink::Storage::ReadOnly), mSyncStorage(Sink::storageLocation(), resourceInstanceIdentifier + ".synchronization", Sink::Storage::ReadWrite), mResourceType(resourceType), mResourceInstanceIdentifier(resourceInstanceIdentifier) { Trace() << "Starting synchronizer: " << resourceType << resourceInstanceIdentifier; } void Synchronizer::setup(const std::function &enqueueCommandCallback) { mEnqueue = enqueueCommandCallback; } void Synchronizer::enqueueCommand(int commandId, const QByteArray &data) { Q_ASSERT(mEnqueue); mEnqueue(commandId, data); } EntityStore &Synchronizer::store() { if (!mEntityStore) { mEntityStore = QSharedPointer::create(mResourceType, mResourceInstanceIdentifier, mTransaction); } return *mEntityStore; } SyncStore &Synchronizer::syncStore() { if (!mSyncStore) { mSyncStore = QSharedPointer::create(mSyncTransaction); } return *mSyncStore; } void Synchronizer::createEntity(const QByteArray &sinkId, const QByteArray &bufferType, const Sink::ApplicationDomain::ApplicationDomainType &domainObject, DomainTypeAdaptorFactoryInterface &adaptorFactory, std::function callback) { // These changes are coming from the source const auto replayToSource = false; flatbuffers::FlatBufferBuilder entityFbb; adaptorFactory.createBuffer(domainObject, entityFbb); flatbuffers::FlatBufferBuilder fbb; // This is the resource type and not the domain type auto entityId = fbb.CreateString(sinkId.toStdString()); auto type = fbb.CreateString(bufferType.toStdString()); auto delta = Sink::EntityBuffer::appendAsVector(fbb, entityFbb.GetBufferPointer(), entityFbb.GetSize()); auto location = Sink::Commands::CreateCreateEntity(fbb, entityId, type, delta, replayToSource); Sink::Commands::FinishCreateEntityBuffer(fbb, location); callback(BufferUtils::extractBuffer(fbb)); } void Synchronizer::modifyEntity(const QByteArray &sinkId, qint64 revision, const QByteArray &bufferType, const Sink::ApplicationDomain::ApplicationDomainType &domainObject, DomainTypeAdaptorFactoryInterface &adaptorFactory, std::function callback) { // These changes are coming from the source const auto replayToSource = false; flatbuffers::FlatBufferBuilder entityFbb; adaptorFactory.createBuffer(domainObject, entityFbb); flatbuffers::FlatBufferBuilder fbb; auto entityId = fbb.CreateString(sinkId.toStdString()); // This is the resource type and not the domain type auto type = fbb.CreateString(bufferType.toStdString()); auto delta = Sink::EntityBuffer::appendAsVector(fbb, entityFbb.GetBufferPointer(), entityFbb.GetSize()); // TODO removals auto location = Sink::Commands::CreateModifyEntity(fbb, revision, entityId, 0, type, delta, replayToSource); Sink::Commands::FinishModifyEntityBuffer(fbb, location); callback(BufferUtils::extractBuffer(fbb)); } void Synchronizer::deleteEntity(const QByteArray &sinkId, qint64 revision, const QByteArray &bufferType, std::function callback) { // These changes are coming from the source const auto replayToSource = false; flatbuffers::FlatBufferBuilder fbb; auto entityId = fbb.CreateString(sinkId.toStdString()); // This is the resource type and not the domain type auto type = fbb.CreateString(bufferType.toStdString()); auto location = Sink::Commands::CreateDeleteEntity(fbb, revision, entityId, type, replayToSource); Sink::Commands::FinishDeleteEntityBuffer(fbb, location); callback(BufferUtils::extractBuffer(fbb)); } void Synchronizer::scanForRemovals(const QByteArray &bufferType, const std::function &callback)> &entryGenerator, std::function exists) { entryGenerator([this, bufferType, &exists](const QByteArray &key) { auto sinkId = Sink::Storage::uidFromKey(key); Trace() << "Checking for removal " << key; const auto remoteId = syncStore().resolveLocalId(bufferType, sinkId); // If we have no remoteId, the entity hasn't been replayed to the source yet if (!remoteId.isEmpty()) { if (!exists(remoteId)) { Trace() << "Found a removed entity: " << sinkId; deleteEntity(sinkId, Sink::Storage::maxRevision(mTransaction), bufferType, [this](const QByteArray &buffer) { enqueueCommand(Sink::Commands::DeleteEntityCommand, buffer); }); } } }); } void Synchronizer::createOrModify(const QByteArray &bufferType, const QByteArray &remoteId, const Sink::ApplicationDomain::ApplicationDomainType &entity) { Trace() << "Create or modify" << bufferType << remoteId; auto mainDatabase = Storage::mainDatabase(mTransaction, bufferType); const auto sinkId = syncStore().resolveRemoteId(bufferType, remoteId); const auto found = mainDatabase.contains(sinkId); auto adaptorFactory = Sink::AdaptorFactoryRegistry::instance().getFactory(mResourceType, bufferType); if (!found) { Trace() << "Found a new entity: " << remoteId; createEntity( sinkId, bufferType, entity, *adaptorFactory, [this](const QByteArray &buffer) { enqueueCommand(Sink::Commands::CreateEntityCommand, buffer); }); } else { // modification if (auto current = store().getLatest(mainDatabase, sinkId, *adaptorFactory)) { bool changed = false; for (const auto &property : entity.changedProperties()) { if (entity.getProperty(property) != current->getProperty(property)) { Trace() << "Property changed " << sinkId << property; changed = true; } } if (changed) { Trace() << "Found a modified entity: " << remoteId; modifyEntity(sinkId, Sink::Storage::maxRevision(mTransaction), bufferType, entity, *adaptorFactory, [this](const QByteArray &buffer) { enqueueCommand(Sink::Commands::ModifyEntityCommand, buffer); }); } } else { Warning() << "Failed to get current entity"; } } } KAsync::Job Synchronizer::synchronize() { mTransaction = mStorage.createTransaction(Sink::Storage::ReadOnly); mSyncTransaction = mSyncStorage.createTransaction(Sink::Storage::ReadWrite); return synchronizeWithSource().then([this]() { mTransaction.abort(); mSyncTransaction.commit(); mSyncStore.clear(); mEntityStore.clear(); }); } SourceWriteBack::SourceWriteBack(const QByteArray &resourceType, const QByteArray &resourceInstanceIdentifier) : ChangeReplay(resourceInstanceIdentifier), mSyncStorage(Sink::storageLocation(), resourceInstanceIdentifier + ".synchronization", Sink::Storage::ReadWrite), mResourceType(resourceType), mResourceInstanceIdentifier(resourceInstanceIdentifier) { } EntityStore &SourceWriteBack::store() { if (!mEntityStore) { mEntityStore = QSharedPointer::create(mResourceType, mResourceInstanceIdentifier, mTransaction); } return *mEntityStore; } SyncStore &SourceWriteBack::syncStore() { if (!mSyncStore) { mSyncStore = QSharedPointer::create(mSyncTransaction); } return *mSyncStore; } KAsync::Job SourceWriteBack::replay(const QByteArray &type, const QByteArray &key, const QByteArray &value) { mTransaction = mStorage.createTransaction(Sink::Storage::ReadOnly); mSyncTransaction = mSyncStorage.createTransaction(Sink::Storage::ReadWrite); Sink::EntityBuffer buffer(value); const Sink::Entity &entity = buffer.entity(); const auto metadataBuffer = Sink::EntityBuffer::readBuffer(entity.metadata()); Q_ASSERT(metadataBuffer); if (!metadataBuffer->replayToSource()) { Trace() << "Change is coming from the source"; return KAsync::null(); } const qint64 revision = metadataBuffer ? metadataBuffer->revision() : -1; const auto operation = metadataBuffer ? metadataBuffer->operation() : Sink::Operation_Creation; const auto uid = Sink::Storage::uidFromKey(key); QByteArray oldRemoteId; if (operation != Sink::Operation_Creation) { oldRemoteId = syncStore().resolveLocalId(type, uid); } Trace() << "Replaying " << key << type; KAsync::Job job = KAsync::null(); if (type == ENTITY_TYPE_FOLDER) { auto folder = store().read(uid); // const Sink::ApplicationDomain::Folder folder(mResourceInstanceIdentifier, uid, revision, mAdaptorFactories.value(type)->createAdaptor(entity)); job = replay(folder, operation, oldRemoteId); } else if (type == ENTITY_TYPE_MAIL) { auto mail = store().read(uid); // const Sink::ApplicationDomain::Mail mail(mResourceInstanceIdentifier, uid, revision, mAdaptorFactories.value(type)->createAdaptor(entity)); job = replay(mail, operation, oldRemoteId); } return job.then([this, operation, type, uid](const QByteArray &remoteId) { Trace() << "Replayed change with remote id: " << remoteId; if (operation == Sink::Operation_Creation) { if (remoteId.isEmpty()) { Warning() << "Returned an empty remoteId from the creation"; } else { syncStore().recordRemoteId(type, uid, remoteId); } } else if (operation == Sink::Operation_Modification) { if (remoteId.isEmpty()) { Warning() << "Returned an empty remoteId from the creation"; } else { syncStore().updateRemoteId(type, uid, remoteId); } } else if (operation == Sink::Operation_Removal) { syncStore().removeRemoteId(type, uid, remoteId); } else { Warning() << "Unkown operation" << operation; } mTransaction.abort(); mSyncTransaction.commit(); mSyncStore.clear(); mEntityStore.clear(); }, [](int errorCode, const QString &errorMessage) { Warning() << "Failed to replay change: " << errorMessage; }); } KAsync::Job SourceWriteBack::replay(const ApplicationDomain::Mail &, Sink::Operation, const QByteArray &) { return KAsync::null(); } KAsync::Job SourceWriteBack::replay(const ApplicationDomain::Folder &, Sink::Operation, const QByteArray &) { return KAsync::null(); } #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wundefined-reinterpret-cast" #include "genericresource.moc" #pragma clang diagnostic pop