author      Christian Mollekopf <chrigi_1@fastmail.fm>    2016-07-20 09:46:37 +0200
committer   Christian Mollekopf <chrigi_1@fastmail.fm>    2016-09-15 16:14:19 +0200
commit      9a22126970a0e560e05ece5e8cdf7bb0ec5bc7eb (patch)
tree        c0cd06ae71233d42a3aa989de000f63013ee20ff
parent      993a9eadb30c55bbb764a1fc123c5dfbb502b1f1 (diff)
download    sink-9a22126970a0e560e05ece5e8cdf7bb0ec5bc7eb.tar.gz
            sink-9a22126970a0e560e05ece5e8cdf7bb0ec5bc7eb.zip
Debug output
-rw-r--r--   common/pipeline.cpp                       |  6
-rw-r--r--   docs/logging.md                           | 41
-rw-r--r--   examples/imapresource/imapresource.cpp    |  2
3 files changed, 25 insertions, 24 deletions
diff --git a/common/pipeline.cpp b/common/pipeline.cpp
index 000d2b2..1d45340 100644
--- a/common/pipeline.cpp
+++ b/common/pipeline.cpp
@@ -189,7 +189,7 @@ KAsync::Job<qint64> Pipeline::newEntity(void const *command, size_t size)
     if (key.isEmpty()) {
         key = Sink::Storage::generateUid();
     }
-    SinkLog() << "New Entity. Type: " << bufferType << "uid: "<< key << " replayToSource: " << replayToSource;
+    SinkTrace() << "New Entity. Type: " << bufferType << "uid: "<< key << " replayToSource: " << replayToSource;
     Q_ASSERT(!key.isEmpty());

     {
@@ -259,7 +259,7 @@ KAsync::Job<qint64> Pipeline::modifiedEntity(void const *command, size_t size)
     const bool replayToSource = modifyEntity->replayToSource();
     const QByteArray bufferType = QByteArray(reinterpret_cast<char const *>(modifyEntity->domainType()->Data()), modifyEntity->domainType()->size());
     const QByteArray key = QByteArray(reinterpret_cast<char const *>(modifyEntity->entityId()->Data()), modifyEntity->entityId()->size());
-    SinkLog() << "Modified Entity. Type: " << bufferType << "uid: "<< key << " replayToSource: " << replayToSource;
+    SinkTrace() << "Modified Entity. Type: " << bufferType << "uid: "<< key << " replayToSource: " << replayToSource;
     if (bufferType.isEmpty() || key.isEmpty()) {
         SinkWarning() << "entity type or key " << bufferType << key;
         return KAsync::error<qint64>(0);
@@ -365,7 +365,7 @@ KAsync::Job<qint64> Pipeline::deletedEntity(void const *command, size_t size)
     const bool replayToSource = deleteEntity->replayToSource();
     const QByteArray bufferType = QByteArray(reinterpret_cast<char const *>(deleteEntity->domainType()->Data()), deleteEntity->domainType()->size());
     const QByteArray key = QByteArray(reinterpret_cast<char const *>(deleteEntity->entityId()->Data()), deleteEntity->entityId()->size());
-    SinkLog() << "Deleted Entity. Type: " << bufferType << "uid: "<< key << " replayToSource: " << replayToSource;
+    SinkTrace() << "Deleted Entity. Type: " << bufferType << "uid: "<< key << " replayToSource: " << replayToSource;

     bool found = false;
     bool alreadyRemoved = false;
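The pipeline hunks above demote per-entity messages from SinkLog to SinkTrace, which matches the guideline added to docs/logging.md below: Log-level output should not grow with the number of processed items. A minimal sketch of that pattern, assuming only the SinkLog()/SinkTrace() stream loggers visible in this diff; the function name, header path and message text are hypothetical:

    // Sketch only (not part of the commit): the Log/Trace split applied to a
    // bulk operation. SinkLog()/SinkTrace() are the stream loggers seen in
    // this diff; everything else below is an assumption for illustration.
    #include <QByteArray>
    #include <QVector>
    #include "log.h" // Sink logging macros; include path assumed

    static void processEntities(const QVector<QByteArray> &keys)
    {
        for (const auto &key : keys) {
            // Per-entity detail grows with the workload -> Trace level.
            SinkTrace() << "Processing entity uid: " << key;
        }
        // A single summary per run stays readable at any scale -> Log level.
        SinkLog() << "Processed " << keys.size() << " entities";
    }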
diff --git a/docs/logging.md b/docs/logging.md
index 3d5ea61..ac2ff25 100644
--- a/docs/logging.md
+++ b/docs/logging.md
@@ -18,25 +18,28 @@ This way we get complete logs also if some resource was not started from the con
 * warning: Only warnings, should always be logged.
 * error: Critical messages that should never appear. Should always be logged.

-## Debug areas
-Debug areas split the code into sections that can be enabled/disabled as one.
-This is supposed to give finer grained control over what is logged or displayed.
-
-Debug areas may align with classes, but don't have to, the should be made so that they are useful.
-
-Areas could be:
-
-* resource.sync.performance
-* resource.sync
-* resource.listener
-* resource.pipeline
-* resource.store
-* resource.communication
-* client.communication
-* client.communication.org.sink.resource.maildir.identifier1
-* client.queryrunner
-* client.queryrunner.performance
-* common.typeindex
+## Debug areas and components
+Debug areas and components split the code into sections that can be enabled/disabled as one. This gives finer grained control over what is logged or displayed.
+
+Debug areas are the static part, that typically represent a class or file, but give no information about which runtime-component is executing the given code.
+
+Components are the runtime information part that can represent i.e. the resource plugin in the client process or the resource process itself.
+
+The full debugging area is then assembled as: "Component.Area"
+
+This can result in identifiers like:
+
+* $RESOURCE_IDENTIFIER.sync.performance
+* $RESOURCE_IDENTIFIER.sync
+* $RESOURCE_IDENTIFIER.communication
+* $RESOURCE_IDENTIFIER.pipeline
+* kube.$RESOURCE_IDENTIFIER.communication
+* kube.$RESOURCE_IDENTIFIER.queryrunner
+* kube.actions
+
+## Logging guidelines
+* The trace log level should be used for any information that is not continuously required.
+* Messages on the Log level should scale. During a sync with 10k messages we don't want 10k messages on the log level, these should go to trace.

 ## Collected information
 Additionally to the regular message we want:
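The "Component.Area" scheme introduced in the logging.md hunk above amounts to prefixing the static debug area with the runtime component. The following sketch is purely illustrative and does not reproduce Sink's actual logging internals; the function name and example values are assumptions (the resource identifier is borrowed from the removed list):

    // Illustrative sketch only: assembling a full debug identifier from a
    // runtime component and a static debug area, as described in docs/logging.md.
    // fullDebugArea() and the example values are hypothetical, not Sink API.
    #include <QByteArray>

    static QByteArray fullDebugArea(const QByteArray &component, const QByteArray &area)
    {
        return component + '.' + area;
    }

    // Example:
    //   fullDebugArea("org.sink.resource.maildir.identifier1", "sync")
    //     -> "org.sink.resource.maildir.identifier1.sync"            ($RESOURCE_IDENTIFIER.sync)
    //   fullDebugArea("kube.org.sink.resource.maildir.identifier1", "queryrunner")
    //     -> "kube.org.sink.resource.maildir.identifier1.queryrunner" (kube.$RESOURCE_IDENTIFIER.queryrunner)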
diff --git a/examples/imapresource/imapresource.cpp b/examples/imapresource/imapresource.cpp
index 1dc503d..dde218a 100644
--- a/examples/imapresource/imapresource.cpp
+++ b/examples/imapresource/imapresource.cpp
@@ -267,8 +267,6 @@ public:
                 commit();
                 SinkTrace() << "Folder synchronized: " << folder.normalizedPath();
             }
-
-            SinkLog() << "Done Synchronizing";
             future.setFinished();
         });
     }