Sign Up
Log In
Log In
or
Sign Up
Places
All Projects
Status Monitor
Collapse sidebar
home:olh:xen-unstable
xen
xen.sr-save-guest_data.patch
Overview
Repositories
Revisions
Requests
Users
Attributes
Meta
File xen.sr-save-guest_data.patch of Package xen
From: Olaf Hering <olaf@aepfle.de> Date: Fri, 23 Oct 2020 11:40:45 +0200 Subject: sr save guest_data tools: save: preallocate guest_data array Remove repeated allocation from migration loop. There will never be more than MAX_BATCH_SIZE pages to process in a batch. Allocate the space once. Because this was allocated with calloc: Adjust the loop to clear unused entries as needed. Signed-off-by: Olaf Hering <olaf@aepfle.de> --- tools/libs/guest/xg_sr_common.h | 1 + tools/libs/guest/xg_sr_save.c | 20 +++++---- 2 files changed, 12 insertions(+), 9 deletions(-) --- a/tools/libs/guest/xg_sr_common.h +++ b/tools/libs/guest/xg_sr_common.h @@ -240,24 +240,25 @@ struct xc_sr_context unsigned long p2m_size; size_t pages_sent; size_t overhead_sent; struct precopy_stats stats; xen_pfn_t *batch_pfns; xen_pfn_t *mfns; xen_pfn_t *types; int *errors; struct iovec *iov; uint64_t *rec_pfns; + void **guest_data; unsigned int nr_batch_pfns; unsigned long *deferred_pages; unsigned long nr_deferred_pages; xc_hypercall_buffer_t dirty_bitmap_hbuf; } save; struct /* Restore data. */ { struct xc_sr_restore_ops ops; struct restore_callbacks *callbacks; int send_back_fd; --- a/tools/libs/guest/xg_sr_save.c +++ b/tools/libs/guest/xg_sr_save.c @@ -80,44 +80,41 @@ static int write_checkpoint_record(struct xc_sr_context *ctx) * is constructed in ctx->save.batch_pfns. * * This function: * - gets the types for each pfn in the batch. * - for each pfn with real data: * - maps and attempts to localise the pages. * - construct and writes a PAGE_DATA record into the stream. 
*/ static int write_batch(struct xc_sr_context *ctx) { xc_interface *xch = ctx->xch; void *guest_mapping = NULL; - void **guest_data = NULL; void **local_pages = NULL; int rc = -1; unsigned int i, p, nr_pages = 0, nr_pages_mapped = 0; unsigned int nr_pfns = ctx->save.nr_batch_pfns; void *page, *orig_page; int iovcnt = 0; struct xc_sr_rec_page_data_header hdr = { 0 }; struct xc_sr_record rec = { .type = REC_TYPE_PAGE_DATA, }; assert(nr_pfns != 0); - /* Pointers to page data to send. Mapped gfns or local allocations. */ - guest_data = calloc(nr_pfns, sizeof(*guest_data)); /* Pointers to locally allocated pages. Need freeing. */ local_pages = calloc(nr_pfns, sizeof(*local_pages)); - if ( !guest_data || !local_pages ) + if ( !local_pages ) { ERROR("Unable to allocate arrays for a batch of %u pages", nr_pfns); goto err; } for ( i = 0; i < nr_pfns; ++i ) { ctx->save.types[i] = ctx->save.mfns[i] = ctx->save.ops.pfn_to_gfn(ctx, ctx->save.batch_pfns[i]); /* Likely a ballooned page. */ @@ -156,54 +153,58 @@ static int write_batch(struct xc_sr_context *ctx) guest_mapping = xenforeignmemory_map(xch->fmem, ctx->domid, PROT_READ, nr_pages, ctx->save.mfns, ctx->save.errors); if ( !guest_mapping ) { PERROR("Failed to map guest pages"); goto err; } nr_pages_mapped = nr_pages; for ( i = 0, p = 0; i < nr_pfns; ++i ) { if ( !page_type_has_stream_data(ctx->save.types[i]) ) + { + ctx->save.guest_data[i] = NULL; continue; + } if ( ctx->save.errors[p] ) { ERROR("Mapping of pfn %#"PRIpfn" (mfn %#"PRIpfn") failed %d", ctx->save.batch_pfns[i], ctx->save.mfns[p], ctx->save.errors[p]); goto err; } orig_page = page = guest_mapping + (p * PAGE_SIZE); rc = ctx->save.ops.normalise_page(ctx, ctx->save.types[i], &page); if ( orig_page != page ) local_pages[i] = page; if ( rc ) { + ctx->save.guest_data[i] = NULL; if ( rc == -1 && errno == EAGAIN ) { set_bit(ctx->save.batch_pfns[i], ctx->save.deferred_pages); ++ctx->save.nr_deferred_pages; ctx->save.types[i] = XEN_DOMCTL_PFINFO_XTAB; --nr_pages; } 
else goto err; } else - guest_data[i] = page; + ctx->save.guest_data[i] = page; rc = -1; ++p; } } hdr.count = nr_pfns; rec.length = sizeof(hdr); rec.length += nr_pfns * sizeof(*ctx->save.rec_pfns); rec.length += nr_pages * PAGE_SIZE; @@ -223,51 +224,50 @@ static int write_batch(struct xc_sr_context *ctx) ctx->save.iov[3].iov_base = ctx->save.rec_pfns; ctx->save.iov[3].iov_len = nr_pfns * sizeof(*ctx->save.rec_pfns); iovcnt = 4; ctx->save.pages_sent += nr_pages; ctx->save.overhead_sent += sizeof(rec) + sizeof(hdr) + nr_pfns * sizeof(*ctx->save.rec_pfns); if ( nr_pages ) { for ( i = 0; i < nr_pfns; ++i ) { - if ( guest_data[i] ) + if ( ctx->save.guest_data[i] ) { - ctx->save.iov[iovcnt].iov_base = guest_data[i]; + ctx->save.iov[iovcnt].iov_base = ctx->save.guest_data[i]; ctx->save.iov[iovcnt].iov_len = PAGE_SIZE; iovcnt++; --nr_pages; } } } if ( writev_exact(ctx->fd, ctx->save.iov, iovcnt) ) { PERROR("Failed to write page data to stream"); goto err; } /* Sanity check we have sent all the pages we expected to. */ assert(nr_pages == 0); rc = ctx->save.nr_batch_pfns = 0; err: if ( guest_mapping ) xenforeignmemory_unmap(xch->fmem, guest_mapping, nr_pages_mapped); for ( i = 0; local_pages && i < nr_pfns; ++i ) free(local_pages[i]); free(local_pages); - free(guest_data); return rc; } /* * Flush a batch of pfns into the stream. 
*/ static int flush_batch(struct xc_sr_context *ctx) { int rc = 0; if ( ctx->save.nr_batch_pfns == 0 ) @@ -827,29 +827,30 @@ static int setup(struct xc_sr_context *ctx) if ( rc ) goto err; dirty_bitmap = xc_hypercall_buffer_alloc_pages( xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size))); ctx->save.batch_pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->save.batch_pfns)); ctx->save.mfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->save.mfns)); ctx->save.types = malloc(MAX_BATCH_SIZE * sizeof(*ctx->save.types)); ctx->save.errors = malloc(MAX_BATCH_SIZE * sizeof(*ctx->save.errors)); ctx->save.iov = malloc((4 + MAX_BATCH_SIZE) * sizeof(*ctx->save.iov)); ctx->save.rec_pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->save.rec_pfns)); + ctx->save.guest_data = malloc(MAX_BATCH_SIZE * sizeof(*ctx->save.guest_data)); ctx->save.deferred_pages = bitmap_alloc(ctx->save.p2m_size); if ( !ctx->save.batch_pfns || !ctx->save.mfns || !ctx->save.types || !ctx->save.errors || !ctx->save.iov || !ctx->save.rec_pfns || - !dirty_bitmap || !ctx->save.deferred_pages ) + !ctx->save.guest_data || !dirty_bitmap || !ctx->save.deferred_pages ) { ERROR("Unable to allocate memory for dirty bitmaps, batch pfns and" " deferred pages"); rc = -1; errno = ENOMEM; goto err; } rc = 0; err: return rc; @@ -862,24 +863,25 @@ static void cleanup(struct xc_sr_context *ctx) &ctx->save.dirty_bitmap_hbuf); xc_shadow_control(xch, ctx->domid, XEN_DOMCTL_SHADOW_OP_OFF, NULL, 0); if ( ctx->save.ops.cleanup(ctx) ) PERROR("Failed to clean up"); xc_hypercall_buffer_free_pages(xch, dirty_bitmap, NRPAGES(bitmap_size(ctx->save.p2m_size))); free(ctx->save.deferred_pages); + free(ctx->save.guest_data); free(ctx->save.rec_pfns); free(ctx->save.iov); free(ctx->save.errors); free(ctx->save.types); free(ctx->save.mfns); free(ctx->save.batch_pfns); } /* * Save a domain. */ static int save(struct xc_sr_context *ctx, uint16_t guest_type)
Locations
Projects
Search
Status Monitor
Help
OpenBuildService.org
Documentation
API Documentation
Code of Conduct
Contact
Support
@OBShq
Terms
openSUSE Build Service is sponsored by
The Open Build Service is an
openSUSE project
.
Sign Up
Log In
Places
Places
All Projects
Status Monitor