[ltt-dev] [PATCH] LTTng vmcore fix (for crash dumps)
Mathieu Desnoyers
mathieu.desnoyers at polymtl.ca
Mon Mar 23 19:33:49 EDT 2009
Crash dump "LTT_VMCORE" support was broken in many ways. This patch
fixes it. It will be integrated in LTTng 0.114.
Basically, instead of modifying lost_size in the sub-buffer header, we now
update a new ltt_buf field, "commit_seq" (for "sequential commits"), which
counts the number of consecutively committed, readable bytes in the buffer.
Note that commit_seq should be read modulo subbuf_size, like commit_count.
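
For illustration only (not part of the patch): a minimal sketch, in plain C,
of how a post-crash extraction tool could turn a commit_seq value recovered
from the vmcore into a count of readable bytes for one sub-buffer. The helper
name is made up here, and it assumes subbuf_size is a power of two, as in
LTTng.

/*
 * Hypothetical helper, not from the patch: given the raw commit_seq value
 * of a sub-buffer recovered from a crash dump, return how many bytes of
 * that sub-buffer are known to be fully written.
 */
static unsigned long readable_bytes(unsigned long commit_seq,
				    unsigned long subbuf_size)
{
	/* commit_seq is read modulo subbuf_size, like commit_count. */
	unsigned long offset = commit_seq & (subbuf_size - 1);

	/*
	 * An offset of zero with a non-zero commit_seq means the sub-buffer
	 * was completely committed; otherwise the offset is never zero,
	 * because sub-buffer and event headers have non-zero length.
	 */
	if (offset == 0 && commit_seq != 0)
		return subbuf_size;
	return offset;
}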
This patch only includes the fix for the lockless buffer-management
algorithm; the fixes for the irqoff and locked algorithms will be integrated
in the 0.114 release.
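
As a side note, the heart of the lockless fix is that commit_seq only ever
moves forward, using a cmpxchg retry loop (a per-sub-buffer "monotonic max").
Below is a standalone sketch of that pattern; it uses C11 atomics purely so
it compiles outside the kernel (the patch itself uses local_cmpxchg() on a
local_t), and the function name is made up.

#include <stdatomic.h>

/*
 * Standalone sketch of the commit_seq update pattern: advance the counter
 * to commit_count only if that moves it forward, retrying if a concurrent
 * update raced with us.  C11 atomics stand in for the kernel's
 * local_t/local_cmpxchg() here.
 */
static void commit_seq_update(atomic_long *seq, long commit_count)
{
	long old = atomic_load(seq);

	/* Only move forward; a racing, larger update simply wins. */
	while (old < commit_count &&
	       !atomic_compare_exchange_weak(seq, &old, commit_count))
		; /* 'old' was refreshed by the failed exchange; retry. */
}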
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers at polymtl.ca>
---
ltt/ltt-relay-lockless.c | 22 +++++++++++--
ltt/ltt-relay-lockless.h | 77 +++++++++++++++++++++++-----------------------
ltt/ltt-serialize.c | 2 -
ltt/ltt-type-serializer.c | 2 -
4 files changed, 61 insertions(+), 42 deletions(-)
Index: linux-2.6-lttng/ltt/ltt-relay-lockless.h
===================================================================
--- linux-2.6-lttng.orig/ltt/ltt-relay-lockless.h 2009-03-23 17:10:31.000000000 -0400
+++ linux-2.6-lttng/ltt/ltt-relay-lockless.h 2009-03-23 19:02:01.000000000 -0400
@@ -77,6 +77,9 @@ struct ltt_channel_buf_struct {
* Last timestamp written in the buffer.
*/
/* End of first 32 bytes cacheline */
+#ifdef CONFIG_LTT_VMCORE
+ local_t *commit_seq; /* Consecutive commits */
+#endif
atomic_long_t active_readers; /*
* Active readers count
* standard atomic access (shared)
@@ -154,11 +157,15 @@ static __inline__ int last_tsc_overflow(
}
#endif
-static __inline__ void ltt_deliver(struct rchan_buf *buf, unsigned int subbuf_idx,
- void *subbuf)
+static __inline__ void ltt_deliver(struct rchan_buf *buf,
+ unsigned int subbuf_idx,
+ long commit_count)
{
struct ltt_channel_buf_struct *ltt_buf = buf->chan_private;
+#ifdef CONFIG_LTT_VMCORE
+ local_set(&ltt_buf->commit_seq[subbuf_idx], commit_count);
+#endif
atomic_set(&ltt_buf->wakeup_readers, 1);
}
@@ -180,7 +187,9 @@ static __inline__ int ltt_relay_try_rese
*tsc = trace_clock_read64();
prefetch(&ltt_buf->commit_count[SUBBUF_INDEX(*o_begin, rchan)]);
-
+#ifdef CONFIG_LTT_VMCORE
+ prefetch(&ltt_buf->commit_seq[SUBBUF_INDEX(*o_begin, rchan)]);
+#endif
if (last_tsc_overflow(ltt_buf, *tsc))
*rflags = LTT_RFLAG_ID_SIZE_TSC;
@@ -275,44 +284,35 @@ static __inline__ void ltt_force_switch(
* This function decrements the subbuffer's lost_size each time the commit count
* reaches back the reserve offset (modulo subbuffer size). It is useful for
* crash dump.
- * We use slot_size - 1 to make sure we deal correctly with the case where we
- * fill the subbuffer completely (so the subbuf index stays in the previous
- * subbuffer).
*/
#ifdef CONFIG_LTT_VMCORE
static __inline__ void ltt_write_commit_counter(struct rchan_buf *buf,
- long buf_offset, size_t slot_size)
+ struct ltt_channel_buf_struct *ltt_buf,
+ long idx, long buf_offset, long commit_count, size_t data_size)
{
- struct ltt_channel_buf_struct *ltt_buf = buf->chan_private;
- struct ltt_subbuffer_header *header;
- long offset, subbuf_idx, commit_count;
- uint32_t lost_old, lost_new;
-
- subbuf_idx = SUBBUF_INDEX(buf_offset - 1, buf->chan);
- offset = buf_offset + slot_size;
- header = (struct ltt_subbuffer_header *)
- ltt_relay_offset_address(buf,
- subbuf_idx * buf->chan->subbuf_size);
- for (;;) {
- lost_old = header->lost_size;
- commit_count =
- local_read(&ltt_buf->commit_count[subbuf_idx]);
- /* SUBBUF_OFFSET includes commit_count_mask */
- if (likely(!SUBBUF_OFFSET(offset - commit_count, buf->chan))) {
- lost_new = (uint32_t)buf->chan->subbuf_size
- - SUBBUF_OFFSET(commit_count, buf->chan);
- lost_old = cmpxchg_local(&header->lost_size, lost_old,
- lost_new);
- if (likely(lost_old <= lost_new))
- break;
- } else {
- break;
- }
- }
+ long offset;
+ long commit_seq_old;
+
+ offset = buf_offset + data_size;
+
+ /*
+ * SUBBUF_OFFSET includes commit_count_mask. We can simply
+ * compare the offsets within the subbuffer without caring about
+ * buffer full/empty mismatch because offset is never zero here
+ * (subbuffer header and event headers have non-zero length).
+ */
+ if (unlikely(SUBBUF_OFFSET(offset - commit_count, buf->chan)))
+ return;
+
+ commit_seq_old = local_read(&ltt_buf->commit_seq[idx]);
+ while (commit_seq_old < commit_count)
+ commit_seq_old = local_cmpxchg(&ltt_buf->commit_seq[idx],
+ commit_seq_old, commit_count);
}
#else
static __inline__ void ltt_write_commit_counter(struct rchan_buf *buf,
- long buf_offset, size_t slot_size)
+ struct ltt_channel_buf_struct *ltt_buf,
+ long idx, long buf_offset, long commit_count, size_t data_size)
{
}
#endif
@@ -326,11 +326,13 @@ static __inline__ void ltt_write_commit_
* @ltt_channel : channel structure
* @transport_data: transport-specific data
* @buf_offset : offset following the event header.
+ * @data_size : size of the event data.
* @slot_size : size of the reserved slot.
*/
static __inline__ void ltt_commit_slot(
struct ltt_channel_struct *ltt_channel,
- void **transport_data, long buf_offset, size_t slot_size)
+ void **transport_data, long buf_offset,
+ size_t data_size, size_t slot_size)
{
struct rchan_buf *buf = *transport_data;
struct ltt_channel_buf_struct *ltt_buf = buf->chan_private;
@@ -348,12 +350,13 @@ static __inline__ void ltt_commit_slot(
>> ltt_channel->n_subbufs_order)
- ((commit_count - rchan->subbuf_size)
& ltt_channel->commit_count_mask) == 0))
- ltt_deliver(buf, endidx, NULL);
+ ltt_deliver(buf, endidx, commit_count);
/*
* Update lost_size for each commit. It's needed only for extracting
* ltt buffers from vmcore, after crash.
*/
- ltt_write_commit_counter(buf, buf_offset, slot_size);
+ ltt_write_commit_counter(buf, ltt_buf, endidx,
+ buf_offset, commit_count, data_size);
}
#endif //_LTT_LTT_RELAY_LOCKLESS_H
Index: linux-2.6-lttng/ltt/ltt-serialize.c
===================================================================
--- linux-2.6-lttng.orig/ltt/ltt-serialize.c 2009-03-23 18:11:26.000000000 -0400
+++ linux-2.6-lttng/ltt/ltt-serialize.c 2009-03-23 18:11:56.000000000 -0400
@@ -889,7 +889,7 @@ notrace void ltt_vtrace(const struct mar
va_end(args_copy);
/* Out-of-order commit */
ltt_commit_slot(channel, &transport_data, buf_offset,
- slot_size);
+ data_size, slot_size);
}
__get_cpu_var(ltt_nesting)--;
rcu_read_unlock_sched_notrace();
Index: linux-2.6-lttng/ltt/ltt-type-serializer.c
===================================================================
--- linux-2.6-lttng.orig/ltt/ltt-type-serializer.c 2009-03-23 18:11:23.000000000 -0400
+++ linux-2.6-lttng/ltt/ltt-type-serializer.c 2009-03-23 18:11:45.000000000 -0400
@@ -89,7 +89,7 @@ notrace void _ltt_specialized_trace(cons
}
/* Out-of-order commit */
ltt_commit_slot(channel, &transport_data, buf_offset,
- slot_size);
+ data_size, slot_size);
}
__get_cpu_var(ltt_nesting)--;
rcu_read_unlock_sched_notrace();
Index: linux-2.6-lttng/ltt/ltt-relay-lockless.c
===================================================================
--- linux-2.6-lttng.orig/ltt/ltt-relay-lockless.c 2009-03-23 18:45:55.000000000 -0400
+++ linux-2.6-lttng/ltt/ltt-relay-lockless.c 2009-03-23 19:02:50.000000000 -0400
@@ -790,6 +790,19 @@ static int ltt_relay_create_buffer(struc
kfree(ltt_buf);
return -ENOMEM;
}
+
+#ifdef CONFIG_LTT_VMCORE
+ ltt_buf->commit_seq =
+ kzalloc_node(ALIGN(sizeof(ltt_buf->commit_seq) * n_subbufs,
+ 1 << INTERNODE_CACHE_SHIFT),
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (!ltt_buf->commit_seq) {
+ kfree(ltt_buf->commit_count);
+ kfree(ltt_buf);
+ return -ENOMEM;
+ }
+#endif
+
buf->chan_private = ltt_buf;
kref_get(&trace->kref);
@@ -827,6 +840,9 @@ static void ltt_relay_destroy_buffer(str
kref_put(&ltt_chan->trace->ltt_transport_kref,
ltt_release_transport);
ltt_relay_print_buffer_errors(ltt_chan, cpu);
+#ifdef CONFIG_LTT_VMCORE
+ kfree(ltt_buf->commit_seq);
+#endif
kfree(ltt_buf->commit_count);
kfree(ltt_buf);
kref_put(&trace->kref, ltt_release_trace);
@@ -1181,7 +1197,7 @@ static void ltt_reserve_switch_old_subbu
>> ltt_channel->n_subbufs_order)
- ((offsets->commit_count - rchan->subbuf_size)
& ltt_channel->commit_count_mask) == 0))
- ltt_deliver(buf, oldidx, NULL);
+ ltt_deliver(buf, oldidx, offsets->commit_count);
}
/*
@@ -1209,7 +1225,7 @@ static void ltt_reserve_switch_new_subbu
>> ltt_channel->n_subbufs_order)
- ((offsets->commit_count - rchan->subbuf_size)
& ltt_channel->commit_count_mask) == 0))
- ltt_deliver(buf, beginidx, NULL);
+ ltt_deliver(buf, beginidx, offsets->commit_count);
}
@@ -1251,7 +1267,7 @@ static void ltt_reserve_end_switch_curre
>> ltt_channel->n_subbufs_order)
- ((offsets->commit_count - rchan->subbuf_size)
& ltt_channel->commit_count_mask) == 0))
- ltt_deliver(buf, endidx, NULL);
+ ltt_deliver(buf, endidx, offsets->commit_count);
}
/*
--
Mathieu Desnoyers
OpenPGP key fingerprint: 8CD5 52C3 8E3C 4140 715F BA06 3F25 A8FE 3BAE 9A68