author    Christian Mollekopf <chrigi_1@fastmail.fm>  2015-04-08 10:04:48 +0200
committer Christian Mollekopf <chrigi_1@fastmail.fm>  2015-04-08 10:04:48 +0200
commit    f10d223559b16c576093eea080dc0d5638ab3323 (patch)
tree      01fd379ca82d11746cc3ff9749d091a8a8bd896b
parent    3be442eb322324804486064dddf2458dc2d17bc5 (diff)
cleanup
-rw-r--r--  common/clientapi.h                  3  +++
-rw-r--r--  dummyresource/domainadaptor.h       1  -
-rw-r--r--  dummyresource/facade.cpp            1  +
-rw-r--r--  dummyresource/resourcefactory.cpp  19  ++++---------------
4 files changed, 8 insertions(+), 16 deletions(-)
diff --git a/common/clientapi.h b/common/clientapi.h
index 63305ab..0ee934c 100644
--- a/common/clientapi.h
+++ b/common/clientapi.h
@@ -298,6 +298,9 @@ using namespace async;
  * * what resources to search
  * * filters on various properties (parent collection, startDate range, ....)
  * * properties we need (for on-demand querying)
+ *
+ * syncOnDemand: Execute a source sync before executing the query
+ * processAll: Ensure all local messages are processed before querying to guarantee an up-to-date dataset.
  */
 class Query
 {
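
A minimal usage sketch for the two options documented in this hunk. The member names (syncOnDemand, processAll) and the Akonadi2::Query spelling are inferred from the comment and the rest of the patch, not verified against the full header:

    // Hypothetical client-side usage of the Query options documented above.
    Akonadi2::Query query;
    query.syncOnDemand = true;  // run a source sync before the query executes (assumed member name)
    query.processAll = false;   // don't wait for all queued local commands to be processed (assumed member name)
    // resource selection and property filters (parent collection, startDate range, ...) would be set here as well
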
diff --git a/dummyresource/domainadaptor.h b/dummyresource/domainadaptor.h
index 9474176..a2fb8a9 100644
--- a/dummyresource/domainadaptor.h
+++ b/dummyresource/domainadaptor.h
@@ -1,4 +1,3 @@
-
 #pragma once
 
 #include "common/domainadaptor.h"
diff --git a/dummyresource/facade.cpp b/dummyresource/facade.cpp
index 4b7cd53..d196e54 100644
--- a/dummyresource/facade.cpp
+++ b/dummyresource/facade.cpp
@@ -120,6 +120,7 @@ Async::Job<void> DummyResourceFacade::synchronizeResource(bool sync, bool proces
     //TODO check if a sync is necessary
     //TODO Only sync what was requested
     //TODO timeout
+    //TODO the synchronization should normally not be necessary: We just return what is already available.
 
     if (sync || processAll) {
         return Async::start<void>([=](Async::Future<void> &future) {
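
For context, a rough sketch of the whole branch this hunk sits in, reconstructed from the visible context lines; the path taken when neither flag is set is an assumption, since it is not part of this hunk:

    // Sketch of DummyResourceFacade::synchronizeResource() as implied by the hunk above.
    Async::Job<void> DummyResourceFacade::synchronizeResource(bool sync, bool processAll)
    {
        if (sync || processAll) {
            return Async::start<void>([=](Async::Future<void> &future) {
                // trigger the source sync and/or queue processing, then complete the future
                // (body elided; only the head of this branch is visible in the diff)
            });
        }
        // Assumption: with neither flag set, return a job that completes immediately,
        // i.e. "just return what is already available" as the new TODO puts it.
        return Async::start<void>([](Async::Future<void> &future) {
            future.setFinished();
        });
    }
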
diff --git a/dummyresource/resourcefactory.cpp b/dummyresource/resourcefactory.cpp
index 869ddbf..ea37466 100644
--- a/dummyresource/resourcefactory.cpp
+++ b/dummyresource/resourcefactory.cpp
@@ -190,7 +190,7 @@ private slots:
             return;
         }
         auto queuedCommand = Akonadi2::GetQueuedCommand(ptr);
-        qDebug() << "Dequeued: " << queuedCommand->commandId();
+        Trace() << "Dequeued: " << queuedCommand->commandId();
         //TODO JOBAPI: job lifetime management
         //Right now we're just leaking jobs. In this case we'd like jobs that are heap allocated and delete
         //themselves once done. In other cases we'd like jobs that only live as long as their handle though.
@@ -255,29 +255,18 @@ void DummyResource::configurePipeline(Akonadi2::Pipeline *pipeline)
     //Eventually the order should be self configuring, for now it's hardcoded.
     auto eventIndexer = new SimpleProcessor("summaryprocessor", [eventFactory](const Akonadi2::PipelineState &state, const Akonadi2::Entity &entity) {
         auto adaptor = eventFactory->createAdaptor(entity);
-        // qDebug() << "Summary preprocessor: " << adaptor->getProperty("summary").toString();
+        // Log() << "Summary preprocessor: " << adaptor->getProperty("summary").toString();
     });
 
     auto uidIndexer = new SimpleProcessor("uidIndexer", [eventFactory](const Akonadi2::PipelineState &state, const Akonadi2::Entity &entity) {
         static Index uidIndex(QStandardPaths::writableLocation(QStandardPaths::GenericDataLocation) + "/akonadi2/storage", "org.kde.dummy.index.uid", Akonadi2::Storage::ReadWrite);
 
+        //TODO: Benchmark if this is performance wise acceptable, or if we have to access the buffer directly
         auto adaptor = eventFactory->createAdaptor(entity);
         const auto uid = adaptor->getProperty("uid");
         if (uid.isValid()) {
             uidIndex.add(uid.toByteArray(), state.key());
         }
-
-        //TODO would this be worthwhile for performance reasons?
-        // flatbuffers::Verifier verifyer(entity.local()->Data(), entity.local()->size());
-        // if (!Akonadi2::Domain::Buffer::VerifyEventBuffer(verifyer)) {
-        //     qWarning() << "invalid local buffer";
-        //     return;
-        // }
-        // auto localEvent = Akonadi2::Domain::Buffer::GetEvent(entity.local()->Data());
-        // if (localEvent && localEvent->uid()) {
-        //     qDebug() << "got uid: " << QByteArray::fromRawData(reinterpret_cast<const char *>(localEvent->uid()->Data()), localEvent->uid()->size());
-        //     uidIndex.add(QByteArray::fromRawData(reinterpret_cast<const char *>(localEvent->uid()->Data()), localEvent->uid()->size()), state.key());
-        // }
     });
 
     //event is the entitytype and not the domain type
@@ -288,7 +277,7 @@ void DummyResource::configurePipeline(Akonadi2::Pipeline *pipeline)
 
 void DummyResource::onProcessorError(int errorCode, const QString &errorMessage)
 {
-    qWarning() << "Received error from Processor: " << errorCode << errorMessage;
+    Warning() << "Received error from Processor: " << errorCode << errorMessage;
     mError = errorCode;
 }
 
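
The TODO added to uidIndexer above ("or if we have to access the buffer directly") refers to the alternative that the removed commented-out block sketched. Restated in one piece for readability, with the buffer types and generated accessors (VerifyEventBuffer, GetEvent) taken verbatim from that removed comment rather than from headers visible in this patch:

    // Inside the uidIndexer lambda: read the uid straight from the local flatbuffer
    // instead of constructing a domain adaptor first.
    flatbuffers::Verifier verifier(entity.local()->Data(), entity.local()->size());
    if (!Akonadi2::Domain::Buffer::VerifyEventBuffer(verifier)) {
        Warning() << "invalid local buffer";
        return;
    }
    auto localEvent = Akonadi2::Domain::Buffer::GetEvent(entity.local()->Data());
    if (localEvent && localEvent->uid()) {
        const auto uid = QByteArray::fromRawData(reinterpret_cast<const char *>(localEvent->uid()->Data()), localEvent->uid()->size());
        uidIndex.add(uid, state.key());
    }
    // Whether this beats the adaptor path is exactly what the new benchmark TODO asks to verify.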