/*
* Copyright (c) 2006, 2008 Reinoud Zandijk
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/* get partition backing up this vpart_num */
pdesc = ump->partitions[ump->vtop[vpart_num]];
switch (ump->vtop_tp[vpart_num]) {
case UDF_VTOP_TYPE_PHYS :
case UDF_VTOP_TYPE_SPARABLE :
/* check the blocks against the unallocated space bitmap */
ptov = udf_rw32(pdesc->start_loc);
phys_part = ump->vtop[vpart_num];
/* use unallocated bitmap */
bitmap = &ump->part_unalloc_bits[phys_part];
/* if no bitmaps are defined, bail out */
if (bitmap->bits == NULL)
break;
/* check bits */
KASSERT(bitmap->bits);
ok = 1;
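/* walk the bitmap; each byte covers 8 logical blocks, LSB first,
 * and a set bit means the block is not allocated */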
bpos = bitmap->bits + lb_num/8;
bit = lb_num % 8;
while (num_lb > 0) {
bitval = (1 << bit);
DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
lb_num, bpos, bit));
KASSERT(bitmap->bits + lb_num/8 == bpos);
if (*bpos & bitval) {
printf("\tlb_num %d is NOT marked busy\n",
lb_num);
ok = 0;
}
lb_num++; num_lb--;
bit = (bit + 1) % 8;
if (bit == 0)
bpos++;
}
if (!ok) {
/* KASSERT(0); */
}
break;
case UDF_VTOP_TYPE_VIRT :
/* TODO check space */
KASSERT(num_lb == 1);
break;
case UDF_VTOP_TYPE_META :
/* TODO check space in the metadata bitmap */
default:
/* not implemented */
break;
}
}
/* check if tail is zero */
DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
for (i = l_ad; i < max_l_ad; i++) {
if (data_pos[i] != 0)
printf( "sanity_check: violation: node byte %d "
"has value %d\n", i, data_pos[i]);
}
/*
 * Sequential media reports free space directly (CD/DVD/BD-R); for
 * other media we need the logical volume integrity descriptor.
 *
 * We sum up all free space here regardless of type.
 */
if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
/* use the track info directly, summing both tracks if two are open */
/* XXX assumption at most two tracks open */
*freeblks = ump->data_track.free_blocks;
if (ump->data_track.tracknr != ump->metadata_track.tracknr)
*freeblks += ump->metadata_track.free_blocks;
*sizeblks = ump->discinfo.last_possible_lba;
} else {
/* free and used space for mountpoint based on logvol integrity */
for (vpart = 0; vpart < num_vpart; vpart++) {
pos1 = &lvid->tables[0] + vpart;
pos2 = &lvid->tables[0] + num_vpart + vpart;
if (udf_rw32(*pos1) != (uint32_t) -1) {
*freeblks += udf_rw32(*pos1);
*sizeblks += udf_rw32(*pos2);
}
}
}
/* adjust for accounted uncommitted blocks */
for (vpart = 0; vpart < num_vpart; vpart++)
*freeblks -= ump->uncommitted_lbs[vpart];
/*
 * Sequential media reports free space directly (CD/DVD/BD-R); for
 * other media we need the logical volume integrity descriptor.
 *
 * Here we only calculate the free space on the given virtual
 * partition.
 */
KASSERT(lvid);
if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
/* XXX assumption at most two tracks open */
if (vpart_num == ump->data_part) {
*freeblks = ump->data_track.free_blocks;
} else {
*freeblks = ump->metadata_track.free_blocks;
}
} else {
/* free space for this partition, based on the logvol integrity */
pos1 = &lvid->tables[0] + vpart_num;
if (udf_rw32(*pos1) != (uint32_t) -1)
*freeblks += udf_rw32(*pos1);
}
translate_again:
part = ump->vtop[vpart];
pdesc = ump->partitions[part];
switch (ump->vtop_tp[vpart]) {
case UDF_VTOP_TYPE_RAW :
/* 1:1 to the end of the device */
*lb_numres = lb_num;
*extres = INT_MAX;
return 0;
case UDF_VTOP_TYPE_PHYS :
/* transform into its disc logical block */
if (lb_num > udf_rw32(pdesc->part_len))
return EINVAL;
*lb_numres = lb_num + udf_rw32(pdesc->start_loc);
/* extent from here to the end of the partition */
*extres = udf_rw32(pdesc->part_len) - lb_num;
return 0;
case UDF_VTOP_TYPE_VIRT :
/* only maps one logical block, lookup in VAT */
if (lb_num >= ump->vat_entries) /* XXX > or >= ? */
return EINVAL;
/* transform into its disc logical block */
if (lb_num > udf_rw32(pdesc->part_len))
return EINVAL;
*lb_numres = lb_num + udf_rw32(pdesc->start_loc);
/* just one logical block */
*extres = 1;
return 0;
case UDF_VTOP_TYPE_SPARABLE :
/* check if the packet containing the lb_num is remapped */
lb_packet = lb_num / ump->sparable_packet_size;
lb_rel = lb_num % ump->sparable_packet_size;
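/* e.g. with a sparable packet size of 32 blocks, lb_num 100
 * lies in packet 3 at relative offset 4 */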
for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
sme = &ump->sparing_table->entries[rel];
if (lb_packet == udf_rw32(sme->org)) {
/* NOTE maps to absolute disc logical block! */
*lb_numres = udf_rw32(sme->map) + lb_rel;
*extres = ump->sparable_packet_size - lb_rel;
return 0;
}
}
/* transform into its disc logical block */
if (lb_num > udf_rw32(pdesc->part_len))
return EINVAL;
*lb_numres = lb_num + udf_rw32(pdesc->start_loc);
/* rest of block */
*extres = ump->sparable_packet_size - lb_rel;
return 0;
case UDF_VTOP_TYPE_META :
/* we have to look into the file's allocation descriptors */
/* use metadatafile allocation mutex */
lb_size = udf_rw32(ump->logical_vol->lb_size);
UDF_LOCK_NODE(ump->metadata_node, 0);
/* get first overlapping extent */
foffset = 0;
slot = 0;
for (;;) {
udf_get_adslot(ump->metadata_node,
slot, &s_icb_loc, &eof);
DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
"len = %d, lb_num = %d, part = %d\n",
slot, eof,
UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
udf_rw32(s_icb_loc.loc.lb_num),
udf_rw16(s_icb_loc.loc.part_num)));
if (eof) {
DPRINTF(TRANSLATE,
("Meta partition translation "
"failed: can't seek location\n"));
UDF_UNLOCK_NODE(ump->metadata_node, 0);
return EINVAL;
}
len = udf_rw32(s_icb_loc.len);
flags = UDF_EXT_FLAGS(len);
len = UDF_EXT_LEN(len);
if (flags == UDF_EXT_REDIRECT) {
slot++;
continue;
}
end_foffset = foffset + len;
if (end_foffset > (uint64_t) lb_num * lb_size)
break; /* found */
foffset = end_foffset;
slot++;
}
/* found overlapping slot */
ext_offset = lb_num * lb_size - foffset;
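/* ext_offset is the byte offset of the requested block within the found extent */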
UDF_UNLOCK_NODE(ump->metadata_node, 0);
if (flags != UDF_EXT_ALLOCATED) {
DPRINTF(TRANSLATE, ("Metadata partition translation "
"failed: not allocated\n"));
return EINVAL;
}
/* derive the backing location from the overlapping extent */
lb_num = udf_rw32(s_icb_loc.loc.lb_num) + ext_offset / lb_size;
vpart = udf_rw16(s_icb_loc.loc.part_num);
/*
 * vpart and lb_num are updated, translate again since we
 * might be mapped on sparable media
 */
goto translate_again;
default:
printf("UDF vtop translation scheme %d unimplemented yet\n",
ump->vtop_tp[vpart]);
}
return EINVAL;
}
/* XXX provisional primitive braindead version */
/* TODO use ext_res */
void
udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
{
struct long_ad loc;
uint32_t lb_numres, ext_res;
int sector;
/* mark the entry with a non-free-space initialiser, just in case */
lb_map = udf_rw32(0xfffffffe);
udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
ump->vat_offset + lb_num *4);
ump->vat_last_free_lb = lb_num;
/*
 * We check overall disc space with a margin to prevent critical
 * conditions. If disc space is low we try to force a sync() to improve
 * our estimates. When confronted with a metadata partition size
 * shortage we check whether it can be extended and extend it when
 * needed.
 *
 * A second strategy, usable when disc space is getting low on a disc
 * formatted with a metadata partition, would be to look for sparse
 * areas in the metadata partition and free blocks there for extra
 * data.
 */
int
udf_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
int udf_c_type, uint16_t vpart_num, uint32_t num_lb, int can_fail)
{
uint64_t freeblks;
uint64_t slack;
int i, error;
slack = 0;
if (can_fail)
slack = UDF_DISC_SLACK;
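/* when the caller can handle failure we demand a slack margin,
 * keeping blocks in reserve for allocations that must succeed */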
error = 0;
mutex_enter(&ump->allocate_mutex);
/* check if there is enough space available */
for (i = 0; i < 3; i++) { /* XXX arbitrary number */
udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
if (num_lb + slack < freeblks)
break;
/* issue SYNC */
DPRINTF(RESERVE, ("udf_reserve_space: issuing sync\n"));
mutex_exit(&ump->allocate_mutex);
udf_do_sync(ump, FSCRED, 0);
/* 1/8 second wait */
kpause("udfsync2", false, hz/8, NULL);
mutex_enter(&ump->allocate_mutex);
}
/* check if there is enough space available now */
udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
if (num_lb + slack >= freeblks) {
DPRINTF(RESERVE, ("udf_reserve_space: try to redistribute "
"partition space\n"));
DPRINTF(RESERVE, ("\tvpart %d, type %d is full\n",
vpart_num, ump->vtop_alloc[vpart_num]));
/* Try to redistribute space if possible */
udf_collect_free_space_for_vpart(ump, vpart_num, num_lb + slack);
}
/* check if there is enough space available now */
udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
if (num_lb + slack <= freeblks) {
udf_do_reserve_space(ump, udf_node, vpart_num, num_lb);
} else {
DPRINTF(RESERVE, ("udf_reserve_space: out of disc space\n"));
error = ENOSPC;
}
/* reserve on the backing sequential partition since
* that partition is credited back later */
udf_do_reserve_space(ump, udf_node,
ump->vtop[vpart_num], num_lb);
}
break;
case UDF_ALLOC_SEQUENTIAL :
/* sequential allocation on recordable media */
/* get partition backing up this vpart_num */
pdesc = ump->partitions[ump->vtop[vpart_num]];
/* calculate offset from physical base partition */
ptov = udf_rw32(pdesc->start_loc);
/* no use freeing zero length */
if (num_lb == 0)
return;
mutex_enter(&ump->allocate_mutex);
switch (ump->vtop_tp[vpart_num]) {
case UDF_VTOP_TYPE_PHYS :
case UDF_VTOP_TYPE_SPARABLE :
/* free space to freed or unallocated space bitmap */
phys_part = ump->vtop[vpart_num];
/* first try freed space bitmap */
bitmap = &ump->part_freed_bits[phys_part];
/* if not defined, use unallocated bitmap */
if (bitmap->bits == NULL)
bitmap = &ump->part_unalloc_bits[phys_part];
/* if no bitmaps are defined, bail out; XXX OK? */
if (bitmap->bits == NULL)
break;
/* free the bits if the bitmap is defined */
KASSERT(bitmap->bits);
ump->lvclose |= UDF_WRITE_PART_BITMAPS;
udf_bitmap_free(bitmap, lb_num, num_lb);
/*
 * Special function to synchronise the metadata mirror file when the
 * metadata file changes on resizing. When the metadata file is
 * actually duplicated, this action is a no-op since the two files
 * describe different extents on the disc.
 */
/* 2) copy all node descriptors from the meta_node */
slot = 0;
cpy_slot = 0;
for (;;) {
udf_get_adslot(meta_node, slot, &s_ad, &eof);
if (eof)
break;
len = udf_rw32(s_ad.len);
flags = UDF_EXT_FLAGS(len);
len = UDF_EXT_LEN(len);
if (flags == UDF_EXT_REDIRECT) {
slot++;
continue;
}
error = udf_append_adslot(metamirror_node, &cpy_slot, &s_ad);
if (error) {
/* WTF, this shouldn't happen, what to do now? */
panic("udf_synchronise_metadatamirror_node failed!");
}
cpy_slot++;
slot++;
}
/*
 * When faced with an out-of-space condition while space is still
 * available on other partitions, try to redistribute the space. This
 * is only defined for media using metadata partitions.
 *
 * There are two formats to deal with. Either it's a `normal' metadata
 * partition, in which case we can move blocks between the metadata
 * bitmap and its companion data spacemap, OR it's a UDF 2.60 formatted
 * BluRay-R disc with POW and a metadata partition.
 */
unit = ump->metadata_alloc_unit_size;
lb_size = udf_rw32(ump->logical_vol->lb_size);
lvid = ump->logvol_integrity;
/* XXX
 *
 * The following checks will fail for BD-R UDF 2.60! But those discs
 * are read-only for now anyway. It's even doubtful whether this is to
 * be allowed for these discs at all.
 */
DPRINTF(RESERVE, ("\tfree space on data partition %"PRIu64" blks\n", data_free_lbs));
DPRINTF(RESERVE, ("\tfree space on metadata partition %"PRIu64" blks\n", meta_free_lbs));
/* give away some of the free meta space, in unit block sizes */
to_trunc = meta_free_lbs/4; /* give out a quarter */
to_trunc = MAX(to_trunc, num_lb);
to_trunc = unit * ((to_trunc + unit-1) / unit); /* round up */
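/* e.g. with unit 32 and to_trunc 40, this rounds up to 64 lbs */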
/* scale down if needed and bail out when out of space */
if (to_trunc >= meta_free_lbs)
return num_lb;
/* check extent of bits marked free at the end of the map */
bitmap = &ump->metadata_unalloc_bits;
to_trunc = udf_bitmap_check_trunc_free(bitmap, to_trunc);
to_trunc = unit * (to_trunc / unit); /* round down again */
if (to_trunc == 0)
return num_lb;
DPRINTF(RESERVE, ("\ttruncating %"PRIu64" lbs from the metadata bitmap\n",
to_trunc));
/* get length of the metadata bitmap node file */
bitmap_node = ump->metadatabitmap_node;
if (bitmap_node->fe) {
inf_len = udf_rw64(bitmap_node->fe->inf_len);
} else {
KASSERT(bitmap_node->efe);
inf_len = udf_rw64(bitmap_node->efe->inf_len);
}
inf_len -= to_trunc/8;
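/* one bit describes one logical block, so the bitmap file shrinks
 * by to_trunc/8 bytes */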
/* as per [UDF 2.60/2.2.13.6] : */
/* 1) update the SBD in the metadata bitmap file */
sbd = (struct space_bitmap_desc *) bitmap->blob;
sbd->num_bits = udf_rw32(udf_rw32(sbd->num_bits) - to_trunc);
sbd->num_bytes = udf_rw32(udf_rw32(sbd->num_bytes) - to_trunc/8);
bitmap->max_offset = udf_rw32(sbd->num_bits);
/*
* The truncated space is secured now and can't be allocated anymore.
* Release the allocated mutex so we can shrink the nodes the normal
* way.
*/
mutex_exit(&ump->allocate_mutex);
/* 2) trunc the metadata bitmap information file, freeing blocks */
err = udf_shrink_node(bitmap_node, inf_len);
KASSERT(err == 0);
/* 3) trunc the metadata file and mirror file, freeing blocks */
inf_len = (uint64_t) udf_rw32(sbd->num_bits) * lb_size; /* [4/14.12.4] */
err = udf_shrink_node(ump->metadata_node, inf_len);
KASSERT(err == 0);
if (ump->metadatamirror_node) {
if (ump->metadata_flags & METADATA_DUPLICATED) {
err = udf_shrink_node(ump->metadatamirror_node, inf_len);
} else {
/* extents will be copied on writeout */
}
KASSERT(err == 0);
}
ump->lvclose |= UDF_WRITE_METAPART_NODES;
/* relock before exit */
mutex_enter(&ump->allocate_mutex);
static void
udf_collect_free_space_for_vpart(struct udf_mount *ump,
uint16_t vpart_num, uint32_t num_lb)
{
/* the allocate_mutex is held */
/* only defined for metadata partitions */
if (ump->vtop_tp[ump->node_part] != UDF_VTOP_TYPE_META) {
DPRINTF(RESERVE, ("\tcan't grow/shrink; no metadata partitioning\n"));
return;
}
if (ump->vtop_tp[vpart_num] == UDF_VTOP_TYPE_META) {
/* try to grow the meta partition */
DPRINTF(RESERVE, ("\ttrying to grow the meta partition\n"));
/* as per [UDF 2.60/2.2.13.5] : extend bitmap and metadata file(s) */
DPRINTF(NOTIMPL, ("\tgrowing meta partition not implemented yet\n"));
} else {
/* try to shrink the metadata partition */
DPRINTF(RESERVE, ("\ttrying to shrink the meta partition\n"));
/* as per [UDF 2.60/2.2.13.6] : either trunc or make sparse */
num_lb = udf_trunc_metadatapart(ump, num_lb);
if (num_lb)
udf_sparsify_metadatapart(ump, num_lb);
}
/*
* Allocate a buf on disc for direct write out. The space doesn't have to be
* contiguous as the caller takes care of this.
*/
void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
{
struct udf_node *udf_node = VTOI(buf->b_vp);
int lb_size, udf_c_type;
int vpart_num, num_lb;
int error, s;
/*
 * for each sector in the buf, allocate a sector on disc and record
 * its position in the provided mapping array.
 *
 * If it's userdata or FIDs, record its location in its node.
 */
/* select partition to record the buffer on */
vpart_num = *vpart_nump = udf_get_record_vpart(ump, udf_c_type);
if (udf_c_type == UDF_C_NODE) {
/* if not VAT, it's already allocated */
if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
return;
/* allocate on its backing sequential partition */
vpart_num = ump->data_part;
}
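/* on VAT media a node's final location is virtualised through the VAT,
 * so the physical write goes to the backing sequential data partition */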
/* XXX can this still happen? */
/* do allocation on the selected partition */
error = udf_allocate_space(ump, udf_node, udf_c_type,
vpart_num, num_lb, lmapping);
if (error) {
/*
* ARGH! we haven't done our accounting right! it should
* always succeed.
*/
panic("UDF disc allocation accounting gone wrong");
}
/* If it's userdata or FIDs, record its allocation in its node. */
if ((udf_c_type == UDF_C_USERDATA) ||
(udf_c_type == UDF_C_FIDS) ||
(udf_c_type == UDF_C_METADATA_SBM))
{
udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
node_ad_cpy);
/* decrement our outstanding bufs counter */
s = splbio();
udf_node->outstanding_bufs--;
splx(s);
}
}
/* defines same space */
if (a1_flags != a2_flags)
return 1;
if (a1_flags != UDF_EXT_FREE) {
/* the same partition */
if (a1_part != a2_part)
return 1;
/* a2 is successor of a1 */
if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
return 1;
}
/* merge as most from a2 if possible */
merge_len = MIN(a2_len, max_len - a1_len);
a1_len += merge_len;
a2_len -= merge_len;
a2_lbnum += merge_len/lb_size;
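/* e.g. lb_size 2048, max_len 4096, a1_len 2048, a2_len 6144:
 * 2048 bytes merge into a1 and a2 advances one block */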
/* just in case we're called on an intern, it's EOF */
if (addr_type == UDF_ICB_INTERN_ALLOC) {
memset(icb, 0, sizeof(struct long_ad));
*eof = 1;
return;
}
/* clean up given long_ad since it can be a synthesized one */
flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
if (flags == UDF_EXT_FREE) {
icb->loc.part_num = udf_rw16(0);
icb->loc.lb_num = udf_rw32(0);
}
/* if the offset is too big, we go to the allocation extensions */
l_ad = udf_rw32(*l_ad_p);
offset = (*slot) * adlen;
extnr = -1;
while (offset >= l_ad) {
/* check if our last entry is a redirect */
if (addr_type == UDF_ICB_SHORT_ALLOC) {
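/* short_ads carry no partition number; inherit the node's own */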
short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
l_icb.len = short_ad->len;
l_icb.loc.part_num = udf_node->loc.loc.part_num;
l_icb.loc.lb_num = short_ad->lb_num;
} else {
KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
l_icb = *long_ad;
}
flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
if (flags != UDF_EXT_REDIRECT) {
/* only one past the last one is addressable */
break;
}
/* advance to next extent */
extnr++;
KASSERT(extnr < udf_node->num_extensions);
offset = offset - l_ad;
/* offset is offset within the current (E)FE/AED */
l_ad = udf_rw32(*l_ad_p);
crclen = udf_rw16(dscr->tag.desc_crc_len);
logblks_rec = udf_rw64(*logblks_rec_p);
/* overwriting old piece? */
if (offset < l_ad) {
/* overwrite entry; compensate for the old element */
if (addr_type == UDF_ICB_SHORT_ALLOC) {
short_ad = (struct short_ad *) (data_pos + offset);
o_icb.len = short_ad->len;
o_icb.loc.part_num = udf_rw16(0); /* ignore */
o_icb.loc.lb_num = short_ad->lb_num;
} else if (addr_type == UDF_ICB_LONG_ALLOC) {
long_ad = (struct long_ad *) (data_pos + offset);
o_icb = *long_ad;
} else {
panic("Invalid address type in udf_append_adslot\n");
}
len = udf_rw32(o_icb.len);
if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
/* adjust counts */
len = UDF_EXT_LEN(len);
logblks_rec -= (len + lb_size -1) / lb_size;
}
}
/* check if we're not appending a redirection */
flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
KASSERT(flags != UDF_EXT_REDIRECT);
/* round down available space */
rest = adlen * ((max_l_ad - offset) / adlen);
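/* rest is the descriptor area rounded down to whole entries; with at
 * most one entry left we must chain a new AED, since the final slot
 * is needed for the redirect */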
if (rest <= adlen) {
/* have to append aed, see if we already have a spare one */
extnr++;
ext = udf_node->ext[extnr];
l_icb = udf_node->ext_loc[extnr];
if (ext == NULL) {
DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));
error = udf_reserve_space(ump, NULL, UDF_C_NODE,
vpart_num, 1, /* can fail */ false);
if (error) {
printf("UDF: couldn't reserve space for AED!\n");
return error;
}
error = udf_allocate_space(ump, NULL, UDF_C_NODE,
vpart_num, 1, &lmapping);
lb_num = lmapping;
if (error)
panic("UDF: couldn't allocate AED!\n");
/* count number of allocation extents in use */
num_extents = 0;
slot = 0;
for (;;) {
udf_get_adslot(udf_node, slot, &s_ad, &eof);
if (eof)
break;
len = udf_rw32(s_ad.len);
flags = UDF_EXT_FLAGS(len);
if (flags == UDF_EXT_REDIRECT)
num_extents++;
slot++;
}
DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
num_extents));
/* XXX choice: we could delay freeing them on node writeout */
/* free excess entries */
extnr = num_extents;
for (;extnr < udf_node->num_extensions; extnr++) {
DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
/* free descriptor */
s_ad = udf_node->ext_loc[extnr];
udf_free_logvol_dscr(udf_node->ump, &s_ad,
udf_node->ext[extnr]);
udf_node->ext[extnr] = NULL;
/*
 * Adjust the node's allocation descriptors to reflect the new mapping;
 * note that we might glue to existing allocation descriptors.
 *
 * XXX Note there can only be one allocation being recorded per mount;
 * maybe do explicit allocation in the scheduler thread?
 */
replace_len = till - foffset; /* total amount of bytes to pop */
slot_offset = from - foffset; /* offset in first encountered slot */
KASSERT((slot_offset % lb_size) == 0);
for (;;) {
udf_get_adslot(udf_node, slot, &s_ad, &eof);
if (eof)
break;
len = udf_rw32(s_ad.len);
flags = UDF_EXT_FLAGS(len);
len = UDF_EXT_LEN(len);
lb_num = udf_rw32(s_ad.loc.lb_num);
if (flags == UDF_EXT_REDIRECT) {
slot++;
continue;
}
/* adjust for slot offset */
if (slot_offset) {
DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
lb_num += slot_offset / lb_size;
len -= slot_offset;
foffset += slot_offset;
replace_len -= slot_offset;
/* mark adjusted */
slot_offset = 0;
}
/* advance for (the rest of) this slot */
replace = MIN(len, replace_len);
DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));
/* advance for this slot */
if (replace) {
/* note: don't round num_lb DOWN since we would
 * then forget the last partial block */
num_lb = (replace + lb_size - 1) / lb_size;
if (flags != UDF_EXT_FREE) {
udf_free_allocated_space(ump, lb_num,
udf_rw16(s_ad.loc.part_num), num_lb);
}
lb_num += num_lb;
len -= replace;
foffset += replace;
replace_len -= replace;
}
/* do we have a slot tail ? */
if (len) {
KASSERT(foffset % lb_size == 0);
/* we arrived at our point, push remainder */
s_ad.len = udf_rw32(len | flags);
s_ad.loc.lb_num = udf_rw32(lb_num);
if (flags == UDF_EXT_FREE)
s_ad.loc.lb_num = udf_rw32(0);
node_ad_cpy[cpy_slot++] = s_ad;
foffset += len;
slot++;
if (old_size > 0) {
/* allocate some space and copy in the stuff to keep */
evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
memset(evacuated_data, 0, lb_size);
/* node is locked, so safe to exit mutex */
UDF_UNLOCK_NODE(udf_node, 0);
/* read in using the `normal' vn_rdwr() */
error = vn_rdwr(UIO_READ, udf_node->vnode,
evacuated_data, old_size, 0,
UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
FSCRED, NULL, NULL);
/* enter again */
UDF_LOCK_NODE(udf_node, 0);
}
/* convert to a normal alloc and select type */
my_part = udf_rw16(udf_node->loc.loc.part_num);
dst_part = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
addr_type = UDF_ICB_SHORT_ALLOC;
if (dst_part != my_part)
addr_type = UDF_ICB_LONG_ALLOC;
/* special case: if the old size was zero, there is no last slot */
if (old_size == 0) {
c_ad.len = udf_rw32(0 | UDF_EXT_FREE);
c_ad.loc.part_num = udf_rw16(0); /* not relevant */
c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
} else {
/* refetch last slot */
slot--;
udf_get_adslot(udf_node, slot, &c_ad, &eof);
}
}
/*
 * If the length of the last slot is not a multiple of lb_size, adjust
 * the length so that it is; don't forget to adjust `append_len'!
 * Relevant when extending existing files.
 */
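/* e.g. with lb_size 2048 a last extent of 3000 bytes is grown to
 * 4096, absorbing 1096 bytes of the requested growth */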
len = udf_rw32(c_ad.len);
flags = UDF_EXT_FLAGS(len);
len = UDF_EXT_LEN(len);
/* TODO zero appended space in buffer! */
/* using ubc_zerorange(&vp->v_uobj, old_size, */
/* new_size - old_size, UBC_VNODE_FLAGS(vp)); ? */
}
memset(&s_ad, 0, sizeof(struct long_ad));
/* size_diff can be bigger than allowed, so grow in chunks */
append_len = size_diff - lastblock_grow;
while (append_len > 0) {
chunk = MIN(append_len, max_len);
s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
s_ad.loc.part_num = udf_rw16(0);
s_ad.loc.lb_num = udf_rw32(0);
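/* free (unallocated) extents carry no disc location, hence the
 * zeroed part_num and lb_num */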
/* if there is a rest piece in the accumulator, append it */
if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
error = udf_append_adslot(udf_node, &slot, &c_ad);
if (error)
goto errorout;
slot++;
}
/* if there is a rest piece that didn't fit, append it */
if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
error = udf_append_adslot(udf_node, &slot, &s_ad);
if (error)
goto errorout;
slot++;
}
/*
* Shrink the node by releasing the allocations and truncate the last
* allocation to the new size. If the new size fits into the
* allocation descriptor itself, transform it into an
* UDF_ICB_INTERN_ALLOC.
*/
slot = 0;
cpy_slot = 0;
foffset = 0;
/* 1) copy till first overlap piece to the rewrite buffer */
for (;;) {
udf_get_adslot(udf_node, slot, &s_ad, &eof);
if (eof) {
DPRINTF(WRITE,
("Shrink node failed: "
"encountered EOF\n"));
error = EINVAL;
goto errorout; /* panic? */
}
len = udf_rw32(s_ad.len);
flags = UDF_EXT_FLAGS(len);
len = UDF_EXT_LEN(len);
if (flags == UDF_EXT_REDIRECT) {
slot++;
continue;
}
end_foffset = foffset + len;
if (end_foffset > new_size)
break; /* found */
/* 4) if it will fit into the descriptor then convert */
if (new_size < max_l_ad) {
/*
* rescue/evacuate old piece by reading it in, and convert it
* to internal alloc.
*/
if (new_size == 0) {
/* XXX/TODO only for zero sizing now */
udf_wipe_adslots(udf_node);