// kernel_proxy.cc revision 2a99a7e74a7f215066514fe81d2bfa6639d9eddd
1/* Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 * Use of this source code is governed by a BSD-style license that can be
3 * found in the LICENSE file.
4 */
5#include "nacl_io/kernel_proxy.h"
6
7#include <assert.h>
8#include <errno.h>
9#include <fcntl.h>
10#include <pthread.h>
11#include <string.h>
12#include <iterator>
13#include <string>
14
15#include "nacl_io/kernel_handle.h"
16#include "nacl_io/kernel_wrap_real.h"
17#include "nacl_io/mount.h"
18#include "nacl_io/mount_dev.h"
19#include "nacl_io/mount_html5fs.h"
20#include "nacl_io/mount_http.h"
21#include "nacl_io/mount_mem.h"
22#include "nacl_io/mount_node.h"
23#include "nacl_io/mount_passthrough.h"
24#include "nacl_io/osmman.h"
25#include "nacl_io/osstat.h"
26#include "nacl_io/path.h"
27#include "nacl_io/pepper_interface.h"
28#include "utils/auto_lock.h"
29#include "utils/ref_object.h"
30
31#ifndef MAXPATHLEN
32#define MAXPATHLEN 256
33#endif
34
35// TODO(noelallen) : Grab/Redefine these in the kernel object once available.
36#define USR_ID 1002
37#define GRP_ID 1003
38
39
40
// Constructs an uninitialized proxy; Init() must be called before use.
// dev_ starts at 0 ("unset") and is bumped to 1 by Init().
KernelProxy::KernelProxy()
   : dev_(0),
     ppapi_(NULL) {
}
45
KernelProxy::~KernelProxy() {
  // The proxy owns the PepperInterface passed to Init().
  delete ppapi_;
}
49
// One-time setup: stores |ppapi| (deleted in the destructor), registers the
// built-in mount factories, mounts "/" and "/dev", and opens the standard
// descriptors.
void KernelProxy::Init(PepperInterface* ppapi) {
  ppapi_ = ppapi;
  cwd_ = "/";
  dev_ = 1;  // Device ids handed to mounts start at 1 and grow from here.

  factories_["memfs"] = MountMem::Create<MountMem>;
  factories_["dev"] = MountDev::Create<MountDev>;
  factories_["html5fs"] = MountHtml5Fs::Create<MountHtml5Fs>;
  factories_["httpfs"] = MountHttp::Create<MountHttp>;
  factories_["passthroughfs"] = MountPassthrough::Create<MountPassthrough>;

  // Create passthrough mount at root (empty option map).
  StringMap_t smap;
  mounts_["/"] = MountPassthrough::Create<MountPassthrough>(
      dev_++, smap, ppapi_);
  mounts_["/dev"] = MountDev::Create<MountDev>(dev_++, smap, ppapi_);

  // Open the first three in order to get STDIN, STDOUT, STDERR
  // (fds 0, 1, 2 by lowest-free allocation).
  open("/dev/stdin", O_RDONLY);
  open("/dev/stdout", O_WRONLY);
  open("/dev/stderr", O_WRONLY);
}
72
// Opens |path| with |oflags| and returns a new descriptor, or -1 on failure.
int KernelProxy::open(const char *path, int oflags) {
  Path rel;

  // Resolve |path| to its owning mount plus the mount-relative path.
  Mount* mnt = AcquireMountAndPath(path, &rel);
  if (mnt == NULL) return -1;

  MountNode* node = mnt->Open(rel, oflags);
  if (node == NULL) {
    ReleaseMount(mnt);
    return -1;
  }

  // Wrap the node in a handle and map it to a descriptor.
  KernelHandle* handle = new KernelHandle(mnt, node, oflags);
  int fd = AllocateFD(handle);
  mnt->AcquireNode(node);

  // NOTE(review): AllocateFD/AcquireNode appear to hold their own
  // references, so the local handle/mount references are dropped here --
  // confirm against the KernelObject implementation.
  ReleaseHandle(handle);
  ReleaseMount(mnt);

  return fd;
}
94
// Closes descriptor |fd|, dropping the references it held. Returns 0, or -1
// if |fd| is not an open descriptor.
int KernelProxy::close(int fd) {
  KernelHandle* handle = AcquireHandle(fd);

  if (NULL == handle) return -1;

  Mount* mount = handle->mount_;
  // Acquire the mount to ensure FreeFD doesn't prematurely destroy it.
  mount->Acquire();

  // FreeFD will release the handle/mount held by this fd.
  FreeFD(fd);

  // If this handle is the last reference to its node, releasing it will close
  // the node.
  ReleaseHandle(handle);

  // Finally, release the mount.
  mount->Release();

  return 0;
}
116
117int KernelProxy::dup(int oldfd) {
118  KernelHandle* handle = AcquireHandle(oldfd);
119  if (NULL == handle) return -1;
120
121  int newfd = AllocateFD(handle);
122  ReleaseHandle(handle);
123
124  return newfd;
125}
126
127int KernelProxy::dup2(int oldfd, int newfd) {
128  // If it's the same file handle, just return
129  if (oldfd == newfd) return newfd;
130
131  KernelHandle* old_handle = AcquireHandle(oldfd);
132  if (NULL == old_handle) return -1;
133
134  FreeAndReassignFD(newfd, old_handle);
135  ReleaseHandle(old_handle);
136  return newfd;
137}
138
139
140char* KernelProxy::getcwd(char* buf, size_t size) {
141  AutoLock lock(&process_lock_);
142  if (size <= 0) {
143    errno = EINVAL;
144    return NULL;
145  }
146  // If size is 0, allocate as much as we need.
147  if (size == 0) {
148    size = cwd_.size() + 1;
149  }
150
151  // Verify the buffer is large enough
152  if (size <= cwd_.size()) {
153    errno = ERANGE;
154    return NULL;
155  }
156
157  // Allocate the buffer if needed
158  if (buf == NULL) {
159    buf = static_cast<char*>(malloc(size));
160  }
161
162  strcpy(buf, cwd_.c_str());
163  return buf;
164}
165
166char* KernelProxy::getwd(char* buf) {
167  if (NULL == buf) {
168    errno = EFAULT;
169    return NULL;
170  }
171  return getcwd(buf, MAXPATHLEN);
172}
173
174int KernelProxy::chmod(const char *path, mode_t mode) {
175  int fd = KernelProxy::open(path, O_RDWR);
176  if (-1 == fd) return -1;
177
178  int ret = fchmod(fd, mode);
179  close(fd);
180  return ret;
181}
182
183int KernelProxy::mkdir(const char *path, mode_t mode) {
184  Path rel;
185  Mount* mnt = AcquireMountAndPath(path, &rel);
186  if (mnt == NULL) return -1;
187
188  int val = mnt->Mkdir(rel, mode);
189  ReleaseMount(mnt);
190  return val;
191}
192
193int KernelProxy::rmdir(const char *path) {
194  Path rel;
195  Mount* mnt = AcquireMountAndPath(path, &rel);
196  if (mnt == NULL) return -1;
197
198  int val = mnt->Rmdir(rel);
199  ReleaseMount(mnt);
200  return val;
201}
202
203int KernelProxy::stat(const char *path, struct stat *buf) {
204  int fd = open(path, O_RDONLY);
205  if (-1 == fd) return -1;
206
207  int ret = fstat(fd, buf);
208  close(fd);
209  return ret;
210}
211
212int KernelProxy::chdir(const char* path) {
213  struct stat statbuf;
214  if (stat(path, &statbuf) == -1)
215    return -1;
216
217  bool is_dir = (statbuf.st_mode & S_IFDIR) != 0;
218  if (is_dir) {
219    AutoLock lock(&process_lock_);
220    cwd_ = GetAbsPathLocked(path).Join();
221    return 0;
222  }
223
224  errno = ENOTDIR;
225  return -1;
226}
227
228int KernelProxy::mount(const char *source, const char *target,
229                       const char *filesystemtype, unsigned long mountflags,
230                       const void *data) {
231  // See if it's already mounted
232  std::string abs_targ;
233
234  // Scope this lock to prevent holding both process and kernel locks
235  {
236    AutoLock lock(&process_lock_);
237    abs_targ = GetAbsPathLocked(target).Join();
238  }
239
240  AutoLock lock(&kernel_lock_);
241  if (mounts_.find(abs_targ) != mounts_.end()) {
242    errno = EBUSY;
243    return -1;
244  }
245
246  // Find a factory of that type
247  MountFactoryMap_t::iterator factory = factories_.find(filesystemtype);
248  if (factory == factories_.end()) {
249    errno = ENODEV;
250    return -1;
251  }
252
253  StringMap_t smap;
254  smap["SOURCE"] = source;
255  smap["TARGET"] = abs_targ;
256
257  if (data) {
258    char* str = strdup(static_cast<const char *>(data));
259    char* ptr = strtok(str,",");
260    char* val;
261    while (ptr != NULL) {
262      val = strchr(ptr, '=');
263      if (val) {
264        *val = 0;
265        smap[ptr] = val + 1;
266      } else {
267        smap[ptr] = "TRUE";
268      }
269      ptr = strtok(NULL, ",");
270    }
271    free(str);
272  }
273
274  Mount* mnt = factory->second(dev_++, smap, ppapi_);
275  if (mnt) {
276    mounts_[abs_targ] = mnt;
277    return 0;
278  }
279  errno = EINVAL;
280  return -1;
281}
282
283int KernelProxy::umount(const char *path) {
284  Path abs_path;
285
286  // Scope this lock to prevent holding both process and kernel locks
287  {
288    AutoLock lock(&process_lock_);
289    abs_path = GetAbsPathLocked(path);
290  }
291
292  AutoLock lock(&kernel_lock_);
293  MountMap_t::iterator it = mounts_.find(abs_path.Join());
294
295  if (mounts_.end() == it) {
296    errno = EINVAL;
297    return -1;
298  }
299
300  if (it->second->RefCount() != 1) {
301    errno = EBUSY;
302    return -1;
303  }
304
305  it->second->Release();
306  mounts_.erase(it);
307  return 0;
308}
309
310ssize_t KernelProxy::read(int fd, void *buf, size_t nbytes) {
311  KernelHandle* handle = AcquireHandle(fd);
312
313  // check if fd is valid and handle exists
314  if (NULL == handle) return -1;
315
316  AutoLock lock(&handle->lock_);
317  ssize_t cnt = handle->node_->Read(handle->offs_, buf, nbytes);
318  if (cnt > 0) handle->offs_ += cnt;
319
320  ReleaseHandle(handle);
321  return cnt;
322}
323
324ssize_t KernelProxy::write(int fd, const void *buf, size_t nbytes) {
325  KernelHandle* handle = AcquireHandle(fd);
326
327  // check if fd is valid and handle exists
328  if (NULL == handle) return -1;
329
330  AutoLock lock(&handle->lock_);
331  ssize_t cnt = handle->node_->Write(handle->offs_, buf, nbytes);
332  if (cnt > 0) handle->offs_ += cnt;
333
334  ReleaseHandle(handle);
335  return cnt;
336}
337
338int KernelProxy::fstat(int fd, struct stat* buf) {
339  KernelHandle* handle = AcquireHandle(fd);
340
341  // check if fd is valid and handle exists
342  if (NULL == handle) return -1;
343
344  int ret = handle->node_->GetStat(buf);
345  ReleaseHandle(handle);
346  return ret;
347}
348
349int KernelProxy::getdents(int fd, void* buf, unsigned int count) {
350  KernelHandle* handle = AcquireHandle(fd);
351
352  // check if fd is valid and handle exists
353  if (NULL == handle) return -1;
354
355  AutoLock lock(&handle->lock_);
356  int cnt = handle->node_->GetDents(handle->offs_,
357      static_cast<dirent *>(buf), count);
358
359  if (cnt > 0) handle->offs_ += cnt;
360
361  ReleaseHandle(handle);
362  return cnt;
363}
364
365int KernelProxy::fsync(int fd) {
366  KernelHandle* handle = AcquireHandle(fd);
367
368  // check if fd is valid and handle exists
369  if (NULL == handle) return -1;
370  int ret = handle->node_->FSync();
371
372  ReleaseHandle(handle);
373  return ret;
374}
375
376int KernelProxy::isatty(int fd) {
377  KernelHandle* handle = AcquireHandle(fd);
378
379  // check if fd is valid and handle exists
380  if (NULL == handle) return -1;
381  int ret = handle->node_->IsaTTY();
382
383  ReleaseHandle(handle);
384  return ret;
385}
386
387off_t KernelProxy::lseek(int fd, off_t offset, int whence) {
388  KernelHandle* handle = AcquireHandle(fd);
389
390  // check if fd is valid and handle exists
391  if (NULL == handle) return -1;
392  int ret = handle->Seek(offset, whence);
393
394  ReleaseHandle(handle);
395  return ret;
396}
397
398int KernelProxy::unlink(const char* path) {
399  Path rel;
400  Mount* mnt = AcquireMountAndPath(path, &rel);
401  if (mnt == NULL) return -1;
402
403  int val = mnt->Unlink(rel);
404  ReleaseMount(mnt);
405  return val;
406}
407
408int KernelProxy::remove(const char* path) {
409  Path rel;
410  Mount* mnt = AcquireMountAndPath(path, &rel);
411  if (mnt == NULL) return -1;
412
413  int val = mnt->Remove(rel);
414  ReleaseMount(mnt);
415  return val;
416}
417
// TODO(noelallen): Needs implementation.
// Stub: not yet implemented; always fails with EINVAL.
int KernelProxy::fchmod(int fd, int mode) {
  errno = EINVAL;
  return -1;
}
423
// Stub: not yet implemented; always fails with EINVAL.
int KernelProxy::access(const char* path, int amode) {
  errno = EINVAL;
  return -1;
}
428
// Stub: not yet implemented; always fails with EINVAL.
int KernelProxy::link(const char* oldpath, const char* newpath) {
  errno = EINVAL;
  return -1;
}
433
// Stub: not yet implemented; always fails with EINVAL.
int KernelProxy::symlink(const char* oldpath, const char* newpath) {
  errno = EINVAL;
  return -1;
}
438
// Maps |length| bytes of the file behind |fd| at |offset|. On success the
// region is tracked in mmap_info_list_ so munmap() can tear it down.
void* KernelProxy::mmap(void* addr, size_t length, int prot, int flags, int fd,
                        size_t offset) {
  // We shouldn't be getting anonymous mmaps here.
  assert((flags & MAP_ANONYMOUS) == 0);
  assert(fd != -1);

  KernelHandle* handle = AcquireHandle(fd);

  if (NULL == handle)
    return MAP_FAILED;

  void* new_addr;
  {
    AutoLock lock(&handle->lock_);
    new_addr = handle->node_->MMap(addr, length, prot, flags, offset);
    if (new_addr == MAP_FAILED) {
      ReleaseHandle(handle);
      return MAP_FAILED;
    }
  }

  // Don't release the KernelHandle, it is now owned by the MMapInfo;
  // munmap() releases it when the region is removed from the list.
  AutoLock lock(&process_lock_);
  mmap_info_list_.push_back(MMapInfo(new_addr, length, handle));

  return new_addr;
}
466
467int KernelProxy::munmap(void* addr, size_t length) {
468  if (addr == NULL || length == 0) {
469    errno = EINVAL;
470    return -1;
471  }
472
473  MMapInfoList_t unmap_list;
474  {
475    AutoLock lock(&process_lock_);
476    int mmap_list_end = mmap_info_list_.size();
477    void* addr_end = static_cast<char*>(addr) + length;
478
479    for (int i = 0; i < mmap_list_end;) {
480      const MMapInfo& mmap_info = mmap_info_list_[i];
481      if (addr < static_cast<char*>(mmap_info.addr) + mmap_info.length &&
482          mmap_info.addr < addr_end)
483        // This memory area should be unmapped; swap it with the last entry in
484        // our list.
485        std::swap(mmap_info_list_[i], mmap_info_list_[--mmap_list_end]);
486      else
487        ++i;
488    }
489
490    int num_to_unmap =- mmap_info_list_.size() - mmap_list_end;
491    if (!num_to_unmap) {
492      // From the Linux mmap man page: "It is not an error if the indicated
493      // range does not contain any mapped pages."
494      return 0;
495    }
496
497    std::copy(mmap_info_list_.begin() + mmap_list_end, mmap_info_list_.end(),
498              std::back_inserter(unmap_list));
499
500    mmap_info_list_.resize(mmap_list_end);
501  }
502
503  // Unmap everything past the new end of the list.
504  for (int i = 0; i < unmap_list.size(); ++i) {
505    const MMapInfo& mmap_info = unmap_list[i];
506    KernelHandle* handle = mmap_info.handle;
507    assert(handle != NULL);
508
509    // Ignore the results from individual munmaps.
510    handle->node_->Munmap(mmap_info.addr, mmap_info.length);
511    ReleaseHandle(handle);
512  }
513
514  return 0;
515}
516
// Opens |path| as a packaged resource, falling back to a normal read-only
// open when the mount has no resource for it. Returns a descriptor or -1.
int KernelProxy::open_resource(const char* path) {
  Path rel;

  Mount* mnt = AcquireMountAndPath(path, &rel);
  if (mnt == NULL) return -1;

  MountNode* node = mnt->OpenResource(rel);
  if (node == NULL) {
    // OpenResource failed, try a regular read-only Open().
    node = mnt->Open(rel, O_RDONLY);
    if (node == NULL) {
      ReleaseMount(mnt);
      return -1;
    }
  }

  // Resources are always exposed read-only.
  KernelHandle* handle = new KernelHandle(mnt, node, O_RDONLY);
  int fd = AllocateFD(handle);
  mnt->AcquireNode(node);

  // NOTE(review): AllocateFD/AcquireNode appear to hold their own
  // references (same pattern as open()); the local references are dropped
  // here -- confirm against the KernelObject implementation.
  ReleaseHandle(handle);
  ReleaseMount(mnt);

  return fd;
}
543