Merge pull request #207 from jalessio/fix_a_few_spelling_issues

Fixed a few small spelling issues.
This commit is contained in:
Takeshi Nakatani 2015-07-12 01:02:53 +09:00
commit 07a5a36b6a
6 changed files with 12 additions and 12 deletions

View File

@ -138,7 +138,7 @@ It is necessary to set this value depending on a CPU and a network band.
This option is related to fd_page_size option and affects it.
.TP
\fB\-o\fR fd_page_size(default="52428800"(50MB))
number of internal management page size for each file discriptor.
number of internal management page size for each file descriptor.
For delayed reading and writing by s3fs, s3fs manages pages which are separated from the object. Each page has a status indicating whether its data is already loaded (or not loaded yet).
This option should not be changed unless you have trouble with performance.
This value is changed automatically by parallel_count and multipart_size values(fd_page_size value = parallel_count * multipart_size).

View File

@ -1053,14 +1053,14 @@ int S3fsCurl::ParallelMultipartUploadRequest(const char* tpath, headers_t& meta,
// duplicate fd
if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){
DPRN("Cloud not duplicate file discriptor(errno=%d)", errno);
DPRN("Could not duplicate file descriptor(errno=%d)", errno);
if(-1 != fd2){
close(fd2);
}
return -errno;
}
if(-1 == fstat(fd2, &st)){
DPRN("Invalid file discriptor(errno=%d)", errno);
DPRN("Invalid file descriptor(errno=%d)", errno);
close(fd2);
return -errno;
}
@ -3192,14 +3192,14 @@ int S3fsCurl::MultipartUploadRequest(const char* tpath, headers_t& meta, int fd,
// duplicate fd
if(-1 == (fd2 = dup(fd)) || 0 != lseek(fd2, 0, SEEK_SET)){
DPRN("Cloud not duplicate file discriptor(errno=%d)", errno);
DPRN("Could not duplicate file descriptor(errno=%d)", errno);
if(-1 != fd2){
close(fd2);
}
return -errno;
}
if(-1 == fstat(fd2, &st)){
DPRN("Invalid file discriptor(errno=%d)", errno);
DPRN("Invalid file descriptor(errno=%d)", errno);
close(fd2);
return -errno;
}

View File

@ -65,7 +65,7 @@ struct filepart
{
bool uploaded; // does finish uploading
std::string etag; // expected etag value
int fd; // base file(temporary full file) discriptor
int fd; // base file(temporary full file) descriptor
off_t startpos; // seek fd point for uploading
ssize_t size; // uploading size
etaglist_t* etaglist; // use only parallel upload

View File

@ -1001,10 +1001,10 @@ ssize_t FdEntity::Write(const char* bytes, off_t start, size_t size)
// NOCACHE_PATH_PREFIX symbol needs for not using cache mode.
// Now s3fs I/F functions in s3fs.cpp has left the processing
// to FdManager and FdEntity class. FdManager class manages
// the list of local file stat and file discriptor in conjunction
// the list of local file stat and file descriptor in conjunction
// with the FdEntity class.
// When s3fs is not using local cache, it means FdManager must
// return new temporary file discriptor at each opening it.
// return new temporary file descriptor at each opening it.
// Then FdManager caches fd by key which is dummy file path
// instead of real file path.
// This process may not be complete, but it is easy way can
@ -1176,7 +1176,7 @@ FdEntity* FdManager::GetFdEntity(const char* path, int existfd)
if(0 == strcmp((*iter).second->GetPath(), path)){
return (*iter).second;
}
// found fd, but it is used another file(file discriptor is recycled)
// found fd, but it is used another file(file descriptor is recycled)
// so returns NULL.
break;
}
@ -1259,7 +1259,7 @@ FdEntity* FdManager::ExistOpen(const char* path, int existfd)
return NULL;
}
}else{
// found fd, but it is used another file(file discriptor is recycled)
// found fd, but it is used another file(file descriptor is recycled)
// so returns NULL.
}
break;

View File

@ -102,7 +102,7 @@ class FdEntity
int refcnt; // reference count
std::string path; // object path
std::string cachepath; // local cache file path
int fd; // file discriptor(tmp file or cache file)
int fd; // file descriptor(tmp file or cache file)
FILE* file; // file pointer(tmp file or cache file)
bool is_modify; // if file is changed, this flag is true

View File

@ -1808,7 +1808,7 @@ static int s3fs_utimens_nocopy(const char* path, const struct timespec ts[2])
FPRNN("[path=%s][mtime=%s]", path, str(ts[1].tv_sec).c_str());
if(0 == strcmp(path, "/")){
DPRNNN("Could not change mtime for maount point.");
DPRNNN("Could not change mtime for mount point.");
return -EIO;
}
if(0 != (result = check_parent_object_access(path, X_OK))){