We have a function that reads a file; it takes the filename and a buffer as parameters.
Now we have a problem: the buffer size. The file can be larger or smaller than the buffer. We can't just create a buffer the size of the file, because the file might be a big one (2 GB, for example) — it would use almost all the RAM. Obviously, that is a very, very terrible design.
So what do I have to do? How do I implement a proper buffer for file reading, independent of the file size?
This function should read a file (it is not working yet); it takes a buffer as a parameter.
Code: Select all
/*
 * Read the whole file described by fp into the caller-supplied buffer and
 * NUL-terminate it.
 *
 * buffer: destination; must hold at least fp.file_size + 1 bytes.
 *         NOTE(review): there is no length parameter, so the callee cannot
 *         bound its writes — consider adding a size/offset API so callers
 *         can read large files in chunks.
 * fp:     handle previously returned by fat32_open_file().
 *
 * Returns 0 on success, 1 on error (no disk or name mismatch).
 */
int fat32_read_file(uint8_t* buffer, struct file fp)
{
    if (!hd_exists())
        return 1;
    hd_read(start_of_root, FAT32_FILES_PER_DIRECTORY * sizeof(struct DirectoryEntry), (uint8_t*)&drce[0]);

    /* Scratch sector buffers. The originals were NULL pointers, so every
     * hd_read() below wrote through a null pointer. */
    uint8_t buff[512];
    uint8_t fatbuff[512];

    /* fat32_open_file() stores the human-readable trimmed name, while the
     * directory entry holds the raw space-padded 8.3 name; convert before
     * comparing (the original compared raw vs. human and never matched). */
    uint8_t fil[12];
    fat2human(drce[fp.file_no].file_name, fil);
    trimName(fil, 11);
    if (strcmp((char*)fil, (char*)fp.file_name) != 0)
        return 1;

    /* First cluster = hi:lo halves of the directory entry.
     * Was truncated through a uint8_t, losing the upper 24 bits. */
    uint32_t ncluster = ((uint32_t)drce[fp.file_no].cluster_number_hi << 16)
                      | (uint32_t)drce[fp.file_no].cluster_number_lo;
    int32_t file_size = (int32_t)fp.file_size;
    uint32_t written = 0; /* bytes copied into `buffer` so far */

    /* Walk the cluster chain; 0x0FFFFFF8..0x0FFFFFFF marks end-of-chain. */
    while (file_size > 0 && ncluster >= 2 && ncluster < 0x0FFFFFF8) {
        uint32_t fsect = start_of_data + bpb.sectors_per_cluster * (ncluster - 2);
        for (uint32_t sector_offset = 0;
             sector_offset < bpb.sectors_per_cluster && file_size > 0;
             ++sector_offset) {
            hd_read(fsect + sector_offset, 512, buff);
            /* Copy only the bytes that belong to the file from the last sector. */
            uint32_t chunk = file_size > 512 ? 512 : (uint32_t)file_size;
            /* Advance the destination; the original always copied to buffer[0],
             * overwriting the previous sector. */
            memcpy(buffer + written, buff, chunk);
            written += chunk;
            file_size -= 512;
        }
        /* Follow the FAT to the next cluster: one FAT sector holds
         * 512 / sizeof(uint32_t) = 128 entries. */
        uint32_t fsectcurrentcl = ncluster / (512 / sizeof(uint32_t));
        uint32_t foffsectcurrentcl = ncluster % (512 / sizeof(uint32_t));
        hd_read(fat_start + fsectcurrentcl, 512, fatbuff);
        /* Was ((uint32_t*)&fatbuff)[...] — indexed off the pointer variable
         * itself instead of the buffer it points to. Top 4 bits are reserved. */
        ncluster = ((uint32_t*)fatbuff)[foffsectcurrentcl] & 0x0FFFFFFF;
    }
    /* Terminate so callers can print the buffer as a string. */
    buffer[written] = '\0';
    return 0;
}
Code: Select all
struct file fat32_open_file(uint8_t* filename)
{
if (!hd_exists() && !filename)
return;
struct file fp;
hd_read(start_of_root, FAT32_FILES_PER_DIRECTORY * sizeof(struct DirectoryEntry), (uint8_t*)&drce[0]);
for (int i = 0; i < FAT32_FILES_PER_DIRECTORY; ++i) {
if (drce[i].file_name[0] == FAT32_NO_FILES)
break;
/* LFN */
if (drce[i].attributes & FAT32_LFN)
continue;
/* folder */
if (drce[i].attributes & FAT32_DIRECTORY)
continue;
/* If the first byte of file_name is 0xE5, means that the file is deleted */
if (drce[i].file_name[0] == FAT32_DELETED_FILE)
continue;
uint8_t fil[12];
fat2human(drce[i].file_name, fil);
trimName(fil, 11);
if (strcmp((char*)fil, (char*)filename) == 0) {
strcpy((char*)fp.file_name, (char*)filename);
fp.file_size = drce[i].file_size;
fp.file_no = i;
return fp;
}
}
kputs("\nFile %s not found\n", filename);
strcpy((char*)fp.file_name, (char*)filename);
fp.file_size = 0;
fp.file_no = 0;
return fp;
}
Code: Select all
/* Handle for an open FAT32 file (short 8.3 names only for now). */
struct file
{
    uint8_t file_name[13]; /* human-readable 8.3 name ("XXXXXXXX.XXX") + NUL.
                              Was [11]: too small to hold the terminator even
                              though the code strcpy/strcmp's it as a C string. */
    uint32_t file_size;    /* size in bytes, from the directory entry */
    uint32_t file_no;      /* index of the entry in the directory (from 0) */
    uint32_t file_pos;     /* current read position — not yet used here */
};
Code: Select all
uint8_t a[2048]; /* Main problem here, the buffer is limited to 2048 bytes! */
/* NOTE(review): fat32_read_file takes no length parameter, so it writes
 * fp.file_size (+1 for the NUL) bytes regardless of sizeof a — a buffer
 * overflow whenever the file exceeds 2047 bytes. The fix is an interface
 * change: pass the buffer size and a file offset (use struct file's
 * file_pos) and read the file in fixed-size chunks, like read(2). */
fat32_read_file(a, fp);
kputs("%s", (char*)a);