/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/refcount.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>
#include <sys/stack.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/wait_bit.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "LinuxKPI parameters");

int linuxkpi_debug;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
    &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");

MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static void linux_cdev_deref(struct linux_cdev *ldev);
static struct vm_area_struct *linux_cdev_handle_find(void *handle);

struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;

struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;

unsigned long linux_timer_hz_mask;

wait_queue_head_t linux_bit_waitq;
wait_queue_head_t linux_var_waitq;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
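
/*
 * The comparison callback should never run: LinuxKPI rbtree consumers
 * open-code their lookups and insert by linking nodes directly
 * (rb_link_node()) before rebalancing with rb_insert_color(), so only
 * the generated rebalancing code is used.  panic_cmp() turns any
 * accidental comparison-based use of the tree into a loud failure.
 */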

int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
	va_list tmp_va;
	int len;
	char *old;
	char *name;
	char dummy;

	old = kobj->name;

	if (old && fmt == NULL)
		return (0);

	/* compute length of string */
	va_copy(tmp_va, args);
	len = vsnprintf(&dummy, 0, fmt, tmp_va);
	va_end(tmp_va);

	/* account for zero termination */
	len++;

	/* check for error */
	if (len < 1)
		return (-EINVAL);

	/* allocate memory for string */
	name = kzalloc(len, GFP_KERNEL);
	if (name == NULL)
		return (-ENOMEM);
	vsnprintf(name, len, fmt, args);
	kobj->name = name;

	/* free old string */
	kfree(old);

	/* filter new string */
	for (; *name != '\0'; name++)
		if (*name == '/')
			*name = '!';
	return (0);
}
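
/*
 * Usage sketch (illustrative, not compiled in): the helper above sizes
 * the buffer with a first vsnprintf() pass, formats into freshly
 * allocated storage, and replaces '/' with '!' so the result is a
 * single sysfs path component:
 *
 *	kobject_set_name(kobj, "queue%d/%s", 0, "tx");
 *	// kobj->name is now "queue0!tx"
 */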

int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);
	}
	return (error);
}

int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show  = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show  = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}

int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}

static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_assert_lock(void *arg, int what)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	if (what == LA_LOCKED)
		mtx_assert(&s->m, MA_OWNED);
	else
		mtx_assert(&s->m, MA_NOTOWNED);
#endif
}

static void
linux_file_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree(filp);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}

static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
			 */
			VM_OBJECT_WUNLOCK(vm_obj);
			page = vm_page_getfake(paddr, vm_obj->memattr);
			VM_OBJECT_WLOCK(vm_obj);

			vm_page_replace(page, vm_obj, (*mres)->pindex, *mres);
			*mres = page;
		}
		vm_page_valid(page);
		return (VM_PAGER_OK);
	}
	return (VM_PAGER_FAIL);
}
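
/*
 * Device memory is not backed by struct vm_page, so the OBJT_DEVICE
 * fault path above hands out "fake" (fictitious) pages carrying the
 * physical address of the mapping: a placeholder page is swapped for a
 * fake page via vm_page_replace() on the first fault, while later
 * faults through an already-fake page only refresh its backing
 * physical address with vm_page_updatefake().
 */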

static int
linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	struct vm_area_struct *vmap;
	int err;

	/* get VM area structure */
	vmap = linux_cdev_handle_find(vm_obj->handle);
	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	VM_OBJECT_WUNLOCK(vm_obj);

	linux_set_current(curthread);

	down_write(&vmap->vm_mm->mmap_sem);
	if (unlikely(vmap->vm_ops == NULL)) {
		err = VM_FAULT_SIGBUS;
	} else {
		struct vm_fault vmf;

		/* fill out VM fault structure */
		vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
		vmf.pgoff = 0;
		vmf.page = NULL;
		vmf.vma = vmap;

		vmap->vm_pfn_count = 0;
		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
		vmap->vm_obj = vm_obj;

		err = vmap->vm_ops->fault(vmap, &vmf);

		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
			kern_yield(PRI_USER);
			err = vmap->vm_ops->fault(vmap, &vmf);
		}
	}

	/* translate return code */
	switch (err) {
	case VM_FAULT_OOM:
		err = VM_PAGER_AGAIN;
		break;
	case VM_FAULT_SIGBUS:
		err = VM_PAGER_BAD;
		break;
	case VM_FAULT_NOPAGE:
		/*
		 * By contract the fault handler will return having
		 * busied all the pages itself. If pidx is already
		 * found in the object, it will simply xbusy the first
		 * page and return with vm_pfn_count set to 1.
		 */
		*first = vmap->vm_pfn_first;
		*last = *first + vmap->vm_pfn_count - 1;
		err = VM_PAGER_OK;
		break;
	default:
		err = VM_PAGER_ERROR;
		break;
	}
	up_write(&vmap->vm_mm->mmap_sem);
	VM_OBJECT_WLOCK(vm_obj);
	return (err);
}

static struct rwlock linux_vma_lock;
static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
    TAILQ_HEAD_INITIALIZER(linux_vma_head);

static void
linux_cdev_handle_free(struct vm_area_struct *vmap)
{
	/* Drop reference on vm_file */
	if (vmap->vm_file != NULL)
		fput(vmap->vm_file);

	/* Drop reference on mm_struct */
	mmput(vmap->vm_mm);

	kfree(vmap);
}

static void
linux_cdev_handle_remove(struct vm_area_struct *vmap)
{
	rw_wlock(&linux_vma_lock);
	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
	rw_wunlock(&linux_vma_lock);
}

static struct vm_area_struct *
linux_cdev_handle_find(void *handle)
{
	struct vm_area_struct *vmap;

	rw_rlock(&linux_vma_lock);
	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
		if (vmap->vm_private_data == handle)
			break;
	}
	rw_runlock(&linux_vma_lock);
	return (vmap);
}

static int
linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
		      vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	MPASS(linux_cdev_handle_find(handle) != NULL);
	*color = 0;
	return (0);
}

static void
linux_cdev_pager_dtor(void *handle)
{
	const struct vm_operations_struct *vm_ops;
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(handle);
	MPASS(vmap != NULL);

	/*
	 * Remove handle before calling close operation to prevent
	 * other threads from reusing the handle pointer.
	 */
	linux_cdev_handle_remove(vmap);

	down_write(&vmap->vm_mm->mmap_sem);
	vm_ops = vmap->vm_ops;
	if (likely(vm_ops != NULL))
		vm_ops->close(vmap);
	up_write(&vmap->vm_mm->mmap_sem);

	linux_cdev_handle_free(vmap);
}
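
/*
 * Two pager flavours share the ctor/dtor below: OBJT_MGTDEVICE objects
 * use linux_cdev_pager_populate(), which relies on the Linux fault
 * handler installing the PFNs itself (vm_pfn_count pages starting at
 * vm_pfn_first), while plain OBJT_DEVICE objects fault one fake page
 * at a time through linux_cdev_pager_fault().
 */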

static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
  {
	/* OBJT_MGTDEVICE */
	.cdev_pg_populate	= linux_cdev_pager_populate,
	.cdev_pg_ctor	= linux_cdev_pager_ctor,
	.cdev_pg_dtor	= linux_cdev_pager_dtor
  },
  {
	/* OBJT_DEVICE */
	.cdev_pg_fault	= linux_cdev_pager_fault,
	.cdev_pg_ctor	= linux_cdev_pager_ctor,
	.cdev_pg_dtor	= linux_cdev_pager_dtor
  },
};

int
zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    unsigned long size)
{
	vm_object_t obj;
	vm_page_t m;

	obj = vma->vm_obj;
	if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
		return (-ENOTSUP);
	VM_OBJECT_RLOCK(obj);
	for (m = vm_page_find_least(obj, OFF_TO_IDX(address));
	    m != NULL && m->pindex < OFF_TO_IDX(address + size);
	    m = TAILQ_NEXT(m, listq))
		pmap_remove_all(m);
	VM_OBJECT_RUNLOCK(obj);
	return (0);
}

static struct file_operations dummy_ldev_ops = {
	/* XXXKIB */
};

static struct linux_cdev dummy_ldev = {
	.ops = &dummy_ldev_ops,
};

#define	LDEV_SI_DTR	0x0001
#define	LDEV_SI_REF	0x0002

static void
linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
    struct linux_cdev **dev)
{
	struct linux_cdev *ldev;
	u_int siref;

	ldev = filp->f_cdev;
	*fop = filp->f_op;
	if (ldev != NULL) {
		for (siref = ldev->siref;;) {
			if ((siref & LDEV_SI_DTR) != 0) {
				ldev = &dummy_ldev;
				siref = ldev->siref;
				*fop = ldev->ops;
				MPASS((ldev->siref & LDEV_SI_DTR) == 0);
			} else if (atomic_fcmpset_int(&ldev->siref, &siref,
			    siref + LDEV_SI_REF)) {
				break;
			}
		}
	}
	*dev = ldev;
}

static void
linux_drop_fop(struct linux_cdev *ldev)
{

	if (ldev == NULL)
		return;
	MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
	atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
}
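
/*
 * The ->siref word encodes a destroy flag in bit 0 (LDEV_SI_DTR) and a
 * reference count in the remaining bits, advanced in units of
 * LDEV_SI_REF.  A sketch of the bracket around every fop dispatch
 * (mirroring the callers elsewhere in this file):
 *
 *	linux_get_fop(filp, &fop, &ldev);
 *	error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, arg));
 *	linux_drop_fop(ldev);
 *
 * Once a cdev starts being destroyed, linux_get_fop() observes
 * LDEV_SI_DTR and transparently redirects callers to the empty
 * dummy_ldev operations instead.
 */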

#define	OPW(fp,td,code) ({			\
	struct file *__fpop;			\
	__typeof(code) __retval;		\
						\
	__fpop = (td)->td_fpop;			\
	(td)->td_fpop = (fp);			\
	__retval = (code);			\
	(td)->td_fpop = __fpop;			\
	__retval;				\
})
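
/*
 * OPW ("operation wrapper") temporarily points td_fpop at the file the
 * operation acts on, so code reached from inside the Linux fop, for
 * example devfs_get_cdevpriv() which consults curthread->td_fpop,
 * resolves against the correct struct file; the previous value is
 * restored afterwards.
 */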

static int
linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
    struct file *file)
{
	struct linux_cdev *ldev;
	struct linux_file *filp;
	const struct file_operations *fop;
	int error;

	ldev = dev->si_drv1;

	filp = linux_file_alloc();
	filp->f_dentry = &filp->f_dentry_store;
	filp->f_op = ldev->ops;
	filp->f_mode = file->f_flag;
	filp->f_flags = file->f_flag;
	filp->f_vnode = file->f_vnode;
	filp->_file = file;
	refcount_acquire(&ldev->refs);
	filp->f_cdev = ldev;

	linux_set_current(td);
	linux_get_fop(filp, &fop, &ldev);

	if (fop->open != NULL) {
		error = -fop->open(file->f_vnode, filp);
		if (error != 0) {
			linux_drop_fop(ldev);
			linux_cdev_deref(filp->f_cdev);
			kfree(filp);
			return (error);
		}
	}

	/* hold on to the vnode - used for fstat() */
	vhold(filp->f_vnode);

	/* release the file from devfs */
	finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
	linux_drop_fop(ldev);
	/*
	 * Returning ENXIO after finit() has replaced the fileops is
	 * deliberate: the open path treats ENXIO with a substituted
	 * f_ops as a successful driver-handled open.
	 */
	return (ENXIO);
}

#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)

static inline int
linux_remap_address(void **uaddr, size_t len)
{
	uintptr_t uaddr_val = (uintptr_t)(*uaddr);

	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
		struct task_struct *pts = current;
		if (pts == NULL) {
			*uaddr = NULL;
			return (1);
		}

		/* compute data offset */
		uaddr_val -= LINUX_IOCTL_MIN_PTR;

		/* check that length is within bounds */
		if ((len > IOCPARM_MAX) ||
		    (uaddr_val + len) > pts->bsd_ioctl_len) {
			*uaddr = NULL;
			return (1);
		}

		/* re-add kernel buffer address */
		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

		/* update address location */
		*uaddr = (void *)uaddr_val;
		return (1);
	}
	return (0);
}

int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(kaddr, uaddr, len);
		return (0);
	}
	return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
	if (linux_remap_address(&uaddr, len)) {
		if (uaddr == NULL)
			return (-EFAULT);
		memcpy(uaddr, kaddr, len);
		return (0);
	}
	return (-copyout(kaddr, uaddr, len));
}
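
/*
 * Worked example of the remap window (values illustrative): during an
 * ioctl, linux_file_ioctl_sub() below stores the kernel buffer in
 * task->bsd_ioctl_data and hands the driver the pseudo pointer
 * LINUX_IOCTL_MIN_PTR.  A driver call such as
 *
 *	copy_from_user(dst, (void *)(LINUX_IOCTL_MIN_PTR + 8), 16);
 *
 * is rewritten by linux_remap_address() to bsd_ioctl_data + 8 and
 * becomes a plain memcpy(); pointers outside the window fall through
 * to the real copyin()/copyout().
 */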

size_t
linux_clear_user(void *_uaddr, size_t _len)
{
	uint8_t *uaddr = _uaddr;
	size_t len = _len;

	/* make sure uaddr is aligned before going into the fast loop */
	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}

	/* zero 8 bytes at a time */
	while (len > 7) {
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}
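
/*
 * Worked example: clearing 13 bytes starting at an address with
 * (uaddr & 7) == 5 issues three subyte() stores to reach 8-byte
 * alignment, one suword64() store on LP64 (two suword32() stores
 * otherwise), and two trailing subyte() stores: 3 + 8 + 2 = 13.
 */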

int
linux_access_ok(const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}
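
/*
 * The "eaddr > saddr" test also rejects ranges whose end wraps the
 * address space: for uaddr == (void *)UINTPTR_MAX and len == 2 the
 * end address wraps to 1, the test fails and the range is refused,
 * while a zero-length range is always accepted.
 */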

/*
 * This function should return either EINTR or ERESTART depending on
 * the signal type sent to this thread:
 */
static int
linux_get_error(struct task_struct *task, int error)
{
	/* check for signal type interrupt code */
	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
		error = -linux_schedule_get_interrupt_value(task);
		if (error == 0)
			error = EINTR;
	}
	return (error);
}
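
/*
 * linux_schedule_get_interrupt_value() reports the value recorded by
 * the scheduling code when this task's sleep was interrupted (a
 * Linux-style negative errno, typically -ERESTARTSYS or -EINTR; the
 * exact choice lives in the scheduling code, not here), so callers
 * observe the same restart-vs-interrupt semantics the FreeBSD sleep
 * reported.
 */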

static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    const struct file_operations *fop, u_long cmd, caddr_t data,
    struct thread *td)
{
	struct task_struct *task = current;
	unsigned size;
	int error;

	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
		task->bsd_ioctl_data = data;
		task->bsd_ioctl_len = size;
		data = (void *)LINUX_IOCTL_MIN_PTR;
	} else {
		/* fetch user-space pointer */
		data = *(void **)data;
	}
#if defined(__amd64__)
	if (td->td_proc->p_elf_machine == EM_386) {
		/* try the compat IOCTL handler first */
		if (fop->compat_ioctl != NULL) {
			error = -OPW(fp, td, fop->compat_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}

		/* fallback to the regular IOCTL handler, if any */
		if (error == ENOTTY && fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		}
	} else
#endif
	{
		if (fop->unlocked_ioctl != NULL) {
			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
			    cmd, (u_long)data));
		} else {
			error = ENOTTY;
		}
	}
	if (size > 0) {
		task->bsd_ioctl_data = NULL;
		task->bsd_ioctl_len = 0;
	}

	if (error == EWOULDBLOCK) {
		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	} else {
		error = linux_get_error(task, error);
	}
	return (error);
}

#define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)

/*
 * This function atomically updates the poll wakeup state and returns
 * the previous state at the time of update.
 */
static uint8_t
linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
{
	int c, old;

	c = v->counter;

	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
		c = old;

	return (c);
}