vmcore: clean up read_vmcore()
Rewrite the part of read_vmcore() that reads objects in vmcore_list in the same way as the part that reads the ELF headers, which removes some duplicated and redundant code.

Signed-off-by: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Lisa Mitchell <lisa.mitchell@hp.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0fa73b86ef
commit b27eb18660
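The sketch below is a minimal userspace model of the control flow this patch converges on, for readers who want to run the idea outside the kernel: a header region is copied first, and then the segment list is walked once, with each segment that overlaps the current file position handled by a single contiguous copy. Everything here is invented for illustration (struct segment, dump_read(), the fake old-memory array are stand-ins for struct vmcore, read_vmcore() and read_from_oldmem()); only the loop shape mirrors the code in the diff below.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <sys/types.h>

/* Stand-in for struct vmcore: one dump segment, described by its offset
 * in the virtual dump file, its size, and the start of the corresponding
 * "old memory" region. */
struct segment {
        uint64_t offset;        /* position of the segment in the dump file */
        uint64_t size;          /* number of bytes the segment contributes */
        uint64_t paddr;         /* where those bytes live in fake old memory */
};

/* Fake "old memory" and a fake ELF header blob, so the sketch runs
 * entirely in userspace. */
static unsigned char old_mem[0x100];
static unsigned char header[0x10] = "ELFHDRELFHDRELF";

/* Mirrors the cleaned-up read_vmcore() control flow: copy from the header
 * first, then walk the segment list once.  Each segment that still overlaps
 * *fpos is handled with one contiguous copy, just as the new loop issues a
 * single read_from_oldmem() call per segment. */
static ssize_t dump_read(const struct segment *segs, size_t nsegs,
                         unsigned char *buffer, size_t buflen, uint64_t *fpos)
{
        size_t header_sz = sizeof(header);
        ssize_t acc = 0;
        size_t tsz;

        /* Header part (plays the role of elfcorebuf). */
        if (*fpos < header_sz) {
                tsz = header_sz - *fpos;
                if (buflen < tsz)
                        tsz = buflen;
                memcpy(buffer, header + *fpos, tsz);
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;
                if (buflen == 0)
                        return acc;
        }

        /* Segment part, written in the same style as the header part. */
        for (size_t i = 0; i < nsegs; i++) {
                const struct segment *m = &segs[i];

                if (*fpos < m->offset + m->size) {
                        uint64_t start = m->paddr + *fpos - m->offset;

                        tsz = m->offset + m->size - *fpos;
                        if (buflen < tsz)
                                tsz = buflen;
                        memcpy(buffer, old_mem + start, tsz);
                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;
                        if (buflen == 0)
                                return acc;
                }
        }
        return acc;
}

int main(void)
{
        /* Two segments placed back to back after the 16-byte header. */
        struct segment segs[] = {
                { .offset = 0x10, .size = 0x20, .paddr = 0x00 },
                { .offset = 0x30, .size = 0x40, .paddr = 0x40 },
        };
        unsigned char buf[0x30];
        uint64_t fpos = 0x08;   /* start in the middle of the header */

        memset(old_mem, 0xab, sizeof(old_mem));

        /* One call that crosses header -> segment 0 -> segment 1. */
        ssize_t n = dump_read(segs, 2, buf, sizeof(buf), &fpos);
        printf("read %zd bytes, fpos is now 0x%llx\n",
               n, (unsigned long long)fpos);
        return 0;
}

Built with a plain cc, the example reports 48 bytes read and fpos advanced to 0x38: a single call crosses the header and both segments without the page-by-page bookkeeping of the old while loop.
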
@@ -118,27 +118,6 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
         return read;
 }

-/* Maps vmcore file offset to respective physical address in memroy. */
-static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
-                                        struct vmcore **m_ptr)
-{
-        struct vmcore *m;
-        u64 paddr;
-
-        list_for_each_entry(m, vc_list, list) {
-                u64 start, end;
-                start = m->offset;
-                end = m->offset + m->size - 1;
-                if (offset >= start && offset <= end) {
-                        paddr = m->paddr + offset - start;
-                        *m_ptr = m;
-                        return paddr;
-                }
-        }
-        *m_ptr = NULL;
-        return 0;
-}
-
 /* Read from the ELF header and then the crash dump. On error, negative value is
  * returned otherwise number of bytes read are returned.
  */
@@ -147,8 +126,8 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
 {
         ssize_t acc = 0, tmp;
         size_t tsz;
-        u64 start, nr_bytes;
-        struct vmcore *curr_m = NULL;
+        u64 start;
+        struct vmcore *m = NULL;

         if (buflen == 0 || *fpos >= vmcore_size)
                 return 0;
@@ -174,33 +153,26 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
                 return acc;
         }

-        start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
-        if (!curr_m)
-                return -EINVAL;
-
-        while (buflen) {
-                tsz = min_t(size_t, buflen, PAGE_SIZE - (start & ~PAGE_MASK));
-
-                /* Calculate left bytes in current memory segment. */
-                nr_bytes = (curr_m->size - (start - curr_m->paddr));
-                if (tsz > nr_bytes)
-                        tsz = nr_bytes;
-
-                tmp = read_from_oldmem(buffer, tsz, &start, 1);
-                if (tmp < 0)
-                        return tmp;
-                buflen -= tsz;
-                *fpos += tsz;
-                buffer += tsz;
-                acc += tsz;
-                if (start >= (curr_m->paddr + curr_m->size)) {
-                        if (curr_m->list.next == &vmcore_list)
-                                return acc; /*EOF*/
-                        curr_m = list_entry(curr_m->list.next,
-                                                struct vmcore, list);
-                        start = curr_m->paddr;
+        list_for_each_entry(m, &vmcore_list, list) {
+                if (*fpos < m->offset + m->size) {
+                        tsz = m->offset + m->size - *fpos;
+                        if (buflen < tsz)
+                                tsz = buflen;
+                        start = m->paddr + *fpos - m->offset;
+                        tmp = read_from_oldmem(buffer, tsz, &start, 1);
+                        if (tmp < 0)
+                                return tmp;
+                        buflen -= tsz;
+                        *fpos += tsz;
+                        buffer += tsz;
+                        acc += tsz;
+
+                        /* leave now if filled buffer already */
+                        if (buflen == 0)
+                                return acc;
                 }
         }

         return acc;
 }
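As a quick, self-contained check of the index arithmetic in the new loop (the segment numbers below are made up for illustration; only the two formulas come from the patch): a segment occupying file offsets 0x2000 through 0x4fff and backed by physical address 0x10000000, read from *fpos = 0x2800 with a large buffer, leaves tsz = 0x2800 bytes in the segment and starts reading old memory at 0x10000800. The old code reached the same addresses through map_offset_to_paddr() plus page-sized steps; the new loop hands the whole in-segment run to read_from_oldmem() in one call.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical segment: file offsets 0x2000-0x4fff, backed by
         * old memory at physical address 0x10000000. */
        uint64_t m_offset = 0x2000, m_size = 0x3000, m_paddr = 0x10000000;
        uint64_t fpos   = 0x2800;       /* current position in the dump file */
        uint64_t buflen = 0x10000;      /* plenty of room in the user buffer */

        uint64_t tsz = m_offset + m_size - fpos;        /* bytes left in segment */
        if (buflen < tsz)
                tsz = buflen;
        uint64_t start = m_paddr + fpos - m_offset;     /* physical read start */

        printf("tsz   = 0x%llx\n", (unsigned long long)tsz);    /* 0x2800 */
        printf("start = 0x%llx\n", (unsigned long long)start);  /* 0x10000800 */
        return 0;
}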