/*
    libparted
    Copyright (C) 1998-2001, 2007, 2009-2010 Free Software Foundation,
    Inc.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include <config.h>
#include <string.h>

#include "fat.h"

#ifndef DISCOVER_ONLY

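/* Returns 1 if the given fragment of the old file system must be copied to a
 * newly allocated cluster: directory fragments are always duplicated, and
 * file fragments are duplicated only if they have no static mapping into the
 * new file system (fat_op_context_map_static_fragment() returns -1).  Free
 * and bad fragments never need duplicating.
 */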
static int
needs_duplicating (const FatOpContext* ctx, FatFragment frag)
{
        FatSpecific*    old_fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatCluster      cluster = fat_frag_to_cluster (ctx->old_fs, frag);
        FatClusterFlag  flag;

        PED_ASSERT (cluster >= 2 && cluster < old_fs_info->cluster_count + 2,
                    return 0);

        flag = fat_get_fragment_flag (ctx->old_fs, frag);
        switch (flag) {
        case FAT_FLAG_FREE:
                return 0;

        case FAT_FLAG_DIRECTORY:
                return 1;

        case FAT_FLAG_FILE:
                return fat_op_context_map_static_fragment (ctx, frag) == -1;

        case FAT_FLAG_BAD:
                return 0;
        }

        return 0;
}

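/* Advances ctx->buffer_offset to the next fragment that needs duplicating.
 * Returns 1 if one was found, or 0 once the whole file system has been
 * scanned.
 */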
static int
search_next_fragment (FatOpContext* ctx)
{
        FatSpecific*    fs_info = FAT_SPECIFIC (ctx->old_fs);

        for (; ctx->buffer_offset < fs_info->frag_count; ctx->buffer_offset++) {
                if (needs_duplicating (ctx, ctx->buffer_offset))
                        return 1;
        }
        return 0;       /* all done! */
}

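/* Reads "length" fragments, starting at ctx->buffer_offset, from the old
 * file system into fs_info->buffer.  If the bulk read fails, falls back to
 * reading the marked fragments one at a time (see below).
 */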
static int
read_marked_fragments (FatOpContext* ctx, FatFragment length)
{
        FatSpecific*    fs_info = FAT_SPECIFIC (ctx->old_fs);
        int             status;
        FatFragment     i;

        ped_exception_fetch_all ();
        status = fat_read_fragments (ctx->old_fs, fs_info->buffer,
                                     ctx->buffer_offset, length);
        ped_exception_leave_all ();
        if (status)
                return 1;

        ped_exception_catch ();

/* something bad happened, so read the fragments one at a time.  (The error
   may have occurred on an unused fragment, in which case it doesn't matter) */
        for (i = 0; i < length; i++) {
                if (ctx->buffer_map [i] == 1) {
                        if (!fat_read_fragment (ctx->old_fs,
                                fs_info->buffer + i * fs_info->frag_size,
                                ctx->buffer_offset + i))
                                return 0;
                }
        }

        return 1;
}

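/* Marks, in ctx->buffer_map, every fragment in the current window that needs
 * duplicating (1; -1 otherwise), then reads the marked fragments into the
 * buffer.  fetch_length is the index just past the last marked fragment, so
 * unneeded fragments at the end of the window are never read.
 */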
static int
fetch_fragments (FatOpContext* ctx)
{
        FatSpecific*    old_fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatFragment     fetch_length = 0;
        FatFragment     frag;

        for (frag = 0; frag < ctx->buffer_frags; frag++)
                ctx->buffer_map [frag] = -1;

        for (frag = 0;
             frag < ctx->buffer_frags
                && ctx->buffer_offset + frag < old_fs_info->frag_count;
             frag++) {
                if (needs_duplicating (ctx, ctx->buffer_offset + frag)) {
                        ctx->buffer_map [frag] = 1;
                        fetch_length = frag + 1;
                }
        }

        if (!read_marked_fragments (ctx, fetch_length))
                return 0;

        return 1;
}

/*****************************************************************************
 * Here starts the write code.  Everything below assumes that
 * ctx->buffer_map [first] and ctx->buffer_map [last] are occupied by
 * fragments that need to be duplicated.
 *****************************************************************************/

/* finds the first fragment that is not going to get overwritten (that needs
   to get read in) */
static FatFragment
get_first_underlay (const FatOpContext* ctx, int first, int last)
{
        int             old;
        FatFragment     new;

        PED_ASSERT (first <= last, return 0);

        new = ctx->buffer_map [first];
        for (old = first + 1; old <= last; old++) {
                if (ctx->buffer_map [old] == -1)
                        continue;
                new++;
                if (ctx->buffer_map [old] != new)
                        return new;
        }
        return -1;
}

/* finds the last fragment that is not going to get overwritten (that needs
   to get read in) */
static FatFragment
get_last_underlay (const FatOpContext* ctx, int first, int last)
{
        int             old;
        FatFragment     new;

        PED_ASSERT (first <= last, return 0);

        new = ctx->buffer_map [last];
        for (old = last - 1; old >= first; old--) {
                if (ctx->buffer_map [old] == -1)
                        continue;
                new--;
                if (ctx->buffer_map [old] != new)
                        return new;
        }
        return -1;
}

/* "underlay" refers to the "static" fragments, that remain unchanged.
 * When writing large chunks at a time, we don't want to clobber these,
 * so we read them in, and write them back again.  MUCH quicker that way.
 */
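/* For example, if buffer_map is {10, -1, 12, 13} for first = 0, last = 3,
 * the bulk write covers new fragments 10 through 13, but nothing in the
 * buffer maps to fragment 11: it is an underlay fragment, and must be read
 * in first so the bulk write doesn't clobber it.
 */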
static int
quick_group_write_read_underlay (FatOpContext* ctx, int first, int last)
{
        FatSpecific*    new_fs_info = FAT_SPECIFIC (ctx->new_fs);
        FatFragment     first_underlay;
        FatFragment     last_underlay;
        FatFragment     underlay_length;

        PED_ASSERT (first <= last, return 0);

        first_underlay = get_first_underlay (ctx, first, last);
        if (first_underlay == -1)
                return 1;
        last_underlay = get_last_underlay (ctx, first, last);

        PED_ASSERT (first_underlay <= last_underlay, return 0);

        underlay_length = last_underlay - first_underlay + 1;
        if (!fat_read_fragments (ctx->new_fs,
                                 new_fs_info->buffer
                                    + (first_underlay - ctx->buffer_map [first])
                                        * new_fs_info->frag_size,
                                 first_underlay,
                                 underlay_length))
                return 0;
        return 1;
}

/* quick_group_write() makes no attempt to recover from errors; it just
 * does things fast.  If there is an error, slow_group_write() is called.
 *    Note: we do syncing writes, to make sure there isn't any error in
 * writing out.  It's rather difficult to recover from errors further on.
 */
static int
quick_group_write (FatOpContext* ctx, int first, int last)
{
        FatSpecific*    old_fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatSpecific*    new_fs_info = FAT_SPECIFIC (ctx->new_fs);
        int             active_length;
        int             i;
        int             offset;

        PED_ASSERT (first <= last, return 0);

        ped_exception_fetch_all ();
        if (!quick_group_write_read_underlay (ctx, first, last))
                goto error;

        for (i = first; i <= last; i++) {
                if (ctx->buffer_map [i] == -1)
                        continue;

                offset = ctx->buffer_map [i] - ctx->buffer_map [first];
                memcpy (new_fs_info->buffer + offset * new_fs_info->frag_size,
                        old_fs_info->buffer + i * new_fs_info->frag_size,
                        new_fs_info->frag_size);
        }

        active_length = ctx->buffer_map [last] - ctx->buffer_map [first] + 1;
        if (!fat_write_sync_fragments (ctx->new_fs, new_fs_info->buffer,
                                       ctx->buffer_map [first], active_length))
                goto error;

        ped_exception_leave_all ();
        return 1;

error:
        ped_exception_catch ();
        ped_exception_leave_all ();
        return 0;
}

/* Writes fragments out, one at a time, avoiding errors on redundant writes
 * on damaged parts of the disk we already know about.  If there's an error
 * on one of the required fragments, it gets marked as bad, and a replacement
 * is found.
 */
static int
slow_group_write (FatOpContext* ctx, int first, int last)
{
        FatSpecific*    old_fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatSpecific*    new_fs_info = FAT_SPECIFIC (ctx->new_fs);
        int             i;

        PED_ASSERT (first <= last, return 0);

        for (i = first; i <= last; i++) {
                if (ctx->buffer_map [i] == -1)
                        continue;

                while (!fat_write_sync_fragment (ctx->new_fs,
                                old_fs_info->buffer + i * old_fs_info->frag_size,
                                ctx->buffer_map [i])) {
                        fat_table_set_bad (new_fs_info->fat,
                                           ctx->buffer_map [i]);
                        ctx->buffer_map [i] = fat_table_alloc_cluster
                                                (new_fs_info->fat);
                        if (ctx->buffer_map [i] == 0)
                                return 0;
                }
        }
        return 1;
}

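/* Records the new location of each duplicated fragment in ctx->remap,
 * indexed by the fragment's position in the old file system.
 */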
static int
update_remap (FatOpContext* ctx, int first, int last)
{
        int             i;

        PED_ASSERT (first <= last, return 0);

        for (i = first; i <= last; i++) {
                if (ctx->buffer_map [i] == -1)
                        continue;
                ctx->remap [ctx->buffer_offset + i] = ctx->buffer_map [i];
        }

        return 1;
}

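/* Writes out one group of duplicated fragments: the fast bulk path first,
 * the one-fragment-at-a-time path if that fails, then records the new
 * locations in ctx->remap.
 */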
static int
group_write (FatOpContext* ctx, int first, int last)
{
        PED_ASSERT (first <= last, return 0);

        if (!quick_group_write (ctx, first, last)) {
                if (!slow_group_write (ctx, first, last))
                        return 0;
        }
        if (!update_remap (ctx, first, last))
                return 0;
        return 1;
}

/* assumes fragment size and new_fs's cluster size are equal */
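/* Allocates a new cluster for each fragment that was fetched, and
 * accumulates fragments into groups: a group grows while its mapped span
 * still fits inside the buffer, and is flushed with group_write() when it
 * no longer does.
 */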
static int
write_fragments (FatOpContext* ctx)
{
        FatSpecific*    old_fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatSpecific*    new_fs_info = FAT_SPECIFIC (ctx->new_fs);
        int             group_start;
        int             group_end = -1;         /* shut gcc up! */
        FatFragment     mapped_length;
        FatFragment     i;
        FatCluster      new_cluster;

        PED_ASSERT (ctx->buffer_offset < old_fs_info->frag_count, return 0);

        group_start = -1;
        for (i = 0; i < ctx->buffer_frags; i++) {
                if (ctx->buffer_map [i] == -1)
                        continue;

                ctx->frags_duped++;

                new_cluster = fat_table_alloc_cluster (new_fs_info->fat);
                if (!new_cluster)
                        return 0;
                fat_table_set_eof (new_fs_info->fat, new_cluster);
                ctx->buffer_map [i] = fat_cluster_to_frag (ctx->new_fs,
                                                           new_cluster);

                if (group_start == -1)
                        group_start = group_end = i;

                PED_ASSERT (ctx->buffer_map [i]
                                >= ctx->buffer_map [group_start],
                            return 0);

                mapped_length = ctx->buffer_map [i]
                                - ctx->buffer_map [group_start] + 1;
                if (mapped_length <= ctx->buffer_frags) {
                        group_end = i;
                } else {
                        /* ran out of room in the buffer, so write this group,
                         * and start a new one...
                         */
                        if (!group_write (ctx, group_start, group_end))
                                return 0;
                        group_start = group_end = i;
                }
        }

        PED_ASSERT (group_start != -1, return 0);

        if (!group_write (ctx, group_start, group_end))
                return 0;
        return 1;
}

/* Defaults each fragment's remap entry to its static (unmoved) mapping;
 * fragments that must move get -1 here, and are filled in later by
 * update_remap().
 */
static void
init_remap (FatOpContext* ctx)
{
        FatSpecific*    old_fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatFragment     i;

        for (i = 0; i < old_fs_info->frag_count; i++)
                ctx->remap[i] = fat_op_context_map_static_fragment (ctx, i);
}

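/* Counts the fragments that will need duplicating, so the progress timer
 * in fat_duplicate_clusters() can report a meaningful fraction.
 */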
static FatFragment
count_frags_to_dup (FatOpContext* ctx)
{
        FatSpecific*    fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatFragment     i;
        FatFragment     total;

        total = 0;

        for (i = 0; i < fs_info->frag_count; i++) {
                if (needs_duplicating (ctx, i))
                        total++;
        }

        return total;
}

/* duplicates all directory clusters, and any file clusters that have no
 * static mapping into the new file system
 */
int
fat_duplicate_clusters (FatOpContext* ctx, PedTimer* timer)
{
        FatFragment     total_frags_to_dup;

        init_remap (ctx);
        total_frags_to_dup = count_frags_to_dup (ctx);

        ped_timer_reset (timer);
        ped_timer_set_state_name (timer, "moving data");

        ctx->buffer_offset = 0;
        ctx->frags_duped = 0;
        while (search_next_fragment (ctx)) {
                ped_timer_update (
                        timer, 1.0 * ctx->frags_duped / total_frags_to_dup);

                if (!fetch_fragments (ctx))
                        return 0;
                if (!write_fragments (ctx))
                        return 0;
                ctx->buffer_offset += ctx->buffer_frags;
        }

        ped_timer_update (timer, 1.0);
        return 1;
}

#endif /* !DISCOVER_ONLY */