/* device.c */
  1. /*
  2. * Server-side device support
  3. *
  4. * Copyright (C) 2007 Alexandre Julliard
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
  19. */
  20. #include "config.h"
  21. #include <assert.h>
  22. #include <fcntl.h>
  23. #include <stdio.h>
  24. #include <stdlib.h>
  25. #include <stdarg.h>
  26. #include "ntstatus.h"
  27. #define WIN32_NO_STATUS
  28. #include "windef.h"
  29. #include "winternl.h"
  30. #include "ddk/wdm.h"
  31. #include "wine/rbtree.h"
  32. #include "object.h"
  33. #include "file.h"
  34. #include "handle.h"
  35. #include "request.h"
  36. #include "process.h"
/* IRP object */

/* Server-side representation of one I/O request packet queued to a
 * client-side device manager (the process running the driver code). */
struct irp_call
{
    struct object       obj;           /* object header */
    struct list         dev_entry;     /* entry in device queue */
    struct list         mgr_entry;     /* entry in manager queue */
    struct device_file *file;          /* file containing this irp */
    struct thread      *thread;        /* thread that queued the irp */
    struct async       *async;         /* pending async op */
    irp_params_t        params;        /* irp parameters */
    struct iosb        *iosb;          /* I/O status block */
    int                 canceled;      /* the call was canceled */
    client_ptr_t        user_ptr;      /* client side pointer */
};

static void irp_call_dump( struct object *obj, int verbose );
static void irp_call_destroy( struct object *obj );

static const struct object_ops irp_call_ops =
{
    sizeof(struct irp_call),       /* size */
    &no_type,                      /* type */
    irp_call_dump,                 /* dump */
    no_add_queue,                  /* add_queue */
    NULL,                          /* remove_queue */
    NULL,                          /* signaled */
    NULL,                          /* satisfied */
    no_signal,                     /* signal */
    no_get_fd,                     /* get_fd */
    default_map_access,            /* map_access */
    default_get_sd,                /* get_sd */
    default_set_sd,                /* set_sd */
    no_get_full_name,              /* get_full_name */
    no_lookup_name,                /* lookup_name */
    no_link_name,                  /* link_name */
    NULL,                          /* unlink_name */
    no_open_file,                  /* open_file */
    no_kernel_obj_list,            /* get_kernel_obj_list */
    no_close_handle,               /* close_handle */
    irp_call_destroy               /* destroy */
};
/* device manager (a list of devices managed by the same client process) */
struct device_manager
{
    struct object          obj;            /* object header */
    struct list            devices;        /* list of devices */
    struct list            requests;       /* list of pending irps across all devices */
    struct irp_call       *current_call;   /* call currently executed on client side */
    struct wine_rb_tree    kernel_objects; /* map of objects that have client side pointer associated */
};

static void device_manager_dump( struct object *obj, int verbose );
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
static void device_manager_destroy( struct object *obj );

static const struct object_ops device_manager_ops =
{
    sizeof(struct device_manager), /* size */
    &no_type,                      /* type */
    device_manager_dump,           /* dump */
    add_queue,                     /* add_queue */
    remove_queue,                  /* remove_queue */
    device_manager_signaled,       /* signaled */
    no_satisfied,                  /* satisfied */
    no_signal,                     /* signal */
    no_get_fd,                     /* get_fd */
    default_map_access,            /* map_access */
    default_get_sd,                /* get_sd */
    default_set_sd,                /* set_sd */
    no_get_full_name,              /* get_full_name */
    no_lookup_name,                /* lookup_name */
    no_link_name,                  /* link_name */
    NULL,                          /* unlink_name */
    no_open_file,                  /* open_file */
    no_kernel_obj_list,            /* get_kernel_obj_list */
    no_close_handle,               /* close_handle */
    device_manager_destroy         /* destroy */
};
/* device (a single device object) */

static const WCHAR device_name[] = {'D','e','v','i','c','e'};

struct type_descr device_type =
{
    { device_name, sizeof(device_name) },   /* name */
    FILE_ALL_ACCESS,                        /* valid_access */
    {                                       /* mapping */
        FILE_GENERIC_READ,
        FILE_GENERIC_WRITE,
        FILE_GENERIC_EXECUTE,
        FILE_ALL_ACCESS
    },
};

struct device
{
    struct object          obj;             /* object header */
    struct device_manager *manager;         /* manager for this device (or NULL if deleted) */
    char                  *unix_path;       /* path to unix device if any */
    struct list            kernel_object;   /* list of kernel object pointers */
    struct list            entry;           /* entry in device manager list */
    struct list            files;           /* list of open files */
};

static void device_dump( struct object *obj, int verbose );
static void device_destroy( struct object *obj );
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options );
static struct list *device_get_kernel_obj_list( struct object *obj );

static const struct object_ops device_ops =
{
    sizeof(struct device),         /* size */
    &device_type,                  /* type */
    device_dump,                   /* dump */
    no_add_queue,                  /* add_queue */
    NULL,                          /* remove_queue */
    NULL,                          /* signaled */
    no_satisfied,                  /* satisfied */
    no_signal,                     /* signal */
    no_get_fd,                     /* get_fd */
    default_map_access,            /* map_access */
    default_get_sd,                /* get_sd */
    default_set_sd,                /* set_sd */
    default_get_full_name,         /* get_full_name */
    no_lookup_name,                /* lookup_name */
    directory_link_name,           /* link_name */
    default_unlink_name,           /* unlink_name */
    device_open_file,              /* open_file */
    device_get_kernel_obj_list,    /* get_kernel_obj_list */
    no_close_handle,               /* close_handle */
    device_destroy                 /* destroy */
};
/* device file (an open file handle to a device) */
struct device_file
{
    struct object          obj;           /* object header */
    struct device         *device;        /* device for this file */
    struct fd             *fd;            /* file descriptor for irp */
    struct list            kernel_object; /* list of kernel object pointers */
    int                    closed;        /* closed file flag */
    struct list            entry;         /* entry in device list */
    struct list            requests;      /* list of pending irp requests */
};

static void device_file_dump( struct object *obj, int verbose );
static struct fd *device_file_get_fd( struct object *obj );
static WCHAR *device_file_get_full_name( struct object *obj, data_size_t *len );
static struct list *device_file_get_kernel_obj_list( struct object *obj );
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void device_file_destroy( struct object *obj );
static enum server_fd_type device_file_get_fd_type( struct fd *fd );
static void device_file_read( struct fd *fd, struct async *async, file_pos_t pos );
static void device_file_write( struct fd *fd, struct async *async, file_pos_t pos );
static void device_file_flush( struct fd *fd, struct async *async );
static void device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );
static void device_file_cancel_async( struct fd *fd, struct async *async );
static void device_file_get_volume_info( struct fd *fd, struct async *async, unsigned int info_class );

static const struct object_ops device_file_ops =
{
    sizeof(struct device_file),       /* size */
    &file_type,                       /* type */
    device_file_dump,                 /* dump */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    default_fd_signaled,              /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    device_file_get_fd,               /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    device_file_get_full_name,        /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    device_file_get_kernel_obj_list,  /* get_kernel_obj_list */
    device_file_close_handle,         /* close_handle */
    device_file_destroy               /* destroy */
};

static const struct fd_ops device_file_fd_ops =
{
    default_fd_get_poll_events,       /* get_poll_events */
    default_poll_event,               /* poll_event */
    device_file_get_fd_type,          /* get_fd_type */
    device_file_read,                 /* read */
    device_file_write,                /* write */
    device_file_flush,                /* flush */
    default_fd_get_file_info,         /* get_file_info */
    device_file_get_volume_info,      /* get_volume_info */
    device_file_ioctl,                /* ioctl */
    device_file_cancel_async,         /* cancel_async */
    default_fd_queue_async,           /* queue_async */
    default_fd_reselect_async,        /* reselect_async */
};
  223. struct list *no_kernel_obj_list( struct object *obj )
  224. {
  225. return NULL;
  226. }
/* association between a server object and its client-side kernel pointer,
 * per device manager; kept both in the object's list and in the manager's
 * rb-tree keyed by user_ptr */
struct kernel_object
{
    struct device_manager *manager;    /* manager this pointer belongs to */
    client_ptr_t           user_ptr;   /* client-side kernel object pointer */
    struct object         *object;     /* server object */
    int                    owned;      /* do we hold a reference on object? */
    struct list            list_entry; /* entry in object's kernel_obj list */
    struct wine_rb_entry   rb_entry;   /* entry in manager->kernel_objects */
};
/* rb-tree comparison callback: key is a client_ptr_t compared bytewise.
 * The ordering is byte-order dependent but only needs to be consistent. */
static int compare_kernel_object( const void *k, const struct wine_rb_entry *entry )
{
    struct kernel_object *ptr = WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry );
    return memcmp( k, &ptr->user_ptr, sizeof(client_ptr_t) );
}
  241. static struct kernel_object *kernel_object_from_obj( struct device_manager *manager, struct object *obj )
  242. {
  243. struct kernel_object *kernel_object;
  244. struct list *list;
  245. if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;
  246. LIST_FOR_EACH_ENTRY( kernel_object, list, struct kernel_object, list_entry )
  247. {
  248. if (kernel_object->manager != manager) continue;
  249. return kernel_object;
  250. }
  251. return NULL;
  252. }
  253. static client_ptr_t get_kernel_object_ptr( struct device_manager *manager, struct object *obj )
  254. {
  255. struct kernel_object *kernel_object = kernel_object_from_obj( manager, obj );
  256. return kernel_object ? kernel_object->user_ptr : 0;
  257. }
/* associate a client-side pointer with a server object for a manager.
 * Returns the new association, or NULL if the object has no kernel object
 * list, allocation fails, or user_ptr is already mapped in this manager.
 * The association starts un-owned (no reference held on obj). */
static struct kernel_object *set_kernel_object( struct device_manager *manager, struct object *obj, client_ptr_t user_ptr )
{
    struct kernel_object *kernel_object;
    struct list *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;

    if (!(kernel_object = malloc( sizeof(*kernel_object) ))) return NULL;
    kernel_object->manager  = manager;
    kernel_object->user_ptr = user_ptr;
    kernel_object->object   = obj;
    kernel_object->owned    = 0;

    if (wine_rb_put( &manager->kernel_objects, &user_ptr, &kernel_object->rb_entry ))
    {
        /* kernel_object pointer already set */
        free( kernel_object );
        return NULL;
    }

    list_add_head( list, &kernel_object->list_entry );
    return kernel_object;
}
  277. static struct kernel_object *kernel_object_from_ptr( struct device_manager *manager, client_ptr_t client_ptr )
  278. {
  279. struct wine_rb_entry *entry = wine_rb_get( &manager->kernel_objects, &client_ptr );
  280. return entry ? WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry ) : NULL;
  281. }
  282. static void grab_kernel_object( struct kernel_object *ptr )
  283. {
  284. if (!ptr->owned)
  285. {
  286. grab_object( ptr->object );
  287. ptr->owned = 1;
  288. }
  289. }
  290. static void irp_call_dump( struct object *obj, int verbose )
  291. {
  292. struct irp_call *irp = (struct irp_call *)obj;
  293. fprintf( stderr, "IRP call file=%p\n", irp->file );
  294. }
/* irp_call destructor: fail any still-pending async and release every
 * reference the irp holds */
static void irp_call_destroy( struct object *obj )
{
    struct irp_call *irp = (struct irp_call *)obj;

    if (irp->async)
    {
        /* the client never completed this irp; cancel the waiting async */
        async_terminate( irp->async, STATUS_CANCELLED );
        release_object( irp->async );
    }
    if (irp->iosb) release_object( irp->iosb );
    if (irp->file) release_object( irp->file );
    if (irp->thread) release_object( irp->thread );
}
/* allocate a new irp for the given file (may be NULL for manager-level
 * calls such as cancel/free).  Returns NULL with STATUS_FILE_DELETED if
 * the device has lost its manager, or NULL on allocation failure. */
static struct irp_call *create_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp;

    if (file && !file->device->manager)  /* it has been deleted */
    {
        set_error( STATUS_FILE_DELETED );
        return NULL;
    }

    if ((irp = alloc_object( &irp_call_ops )))
    {
        irp->file     = file ? (struct device_file *)grab_object( file ) : NULL;
        irp->thread   = NULL;
        irp->async    = NULL;
        irp->params   = *params;
        irp->iosb     = NULL;
        irp->canceled = 0;
        irp->user_ptr = 0;

        /* take a reference on the async's iosb so results can be stored */
        if (async) irp->iosb = async_get_iosb( async );
    }
    return irp;
}
/* complete an irp: store the result into the pending async (if any),
 * remove the irp from its device queue and drop the queue's references.
 * Safe to call on an already-finished irp (file == NULL). */
static void set_irp_result( struct irp_call *irp, unsigned int status,
                            const void *out_data, data_size_t out_size, data_size_t result )
{
    struct device_file *file = irp->file;

    if (!file) return;  /* already finished */

    /* remove it from the device queue */
    list_remove( &irp->dev_entry );
    irp->file = NULL;
    if (irp->async)
    {
        /* never report more data than the client asked for */
        out_size = min( irp->iosb->out_size, out_size );
        async_request_complete_alloc( irp->async, status, result, out_size, out_data );
        release_object( irp->async );
        irp->async = NULL;
    }

    release_object( irp );  /* no longer on the device queue */
    release_object( file );
}
  346. static void device_dump( struct object *obj, int verbose )
  347. {
  348. fputs( "Device\n", stderr );
  349. }
/* device destructor: all open files must be gone by now; unlink from the
 * manager's device list unless the device was already deleted */
static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;

    assert( list_empty( &device->files ));

    free( device->unix_path );
    if (device->manager) list_remove( &device->entry );
}
/* queue an irp on its file (if any) and on the manager; wakes up the
 * manager object only when this irp is the first pending request */
static void add_irp_to_queue( struct device_manager *manager, struct irp_call *irp, struct thread *thread )
{
    grab_object( irp );  /* grab reference for queued irp */
    irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL;
    if (irp->file) list_add_tail( &irp->file->requests, &irp->dev_entry );
    list_add_tail( &manager->requests, &irp->mgr_entry );
    if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 );  /* first one */
}
/* open_file callback for a device: create a device_file object, back it
 * with either the real unix device fd or a pseudo fd, and queue an
 * IRP_CALL_CREATE to the manager if the device has one */
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options )
{
    struct device *device = (struct device *)obj;
    struct device_file *file;
    struct unicode_str nt_name;

    if (!(file = alloc_object( &device_file_ops ))) return NULL;

    file->device = (struct device *)grab_object( device );
    file->closed = 0;
    list_init( &file->kernel_object );
    list_init( &file->requests );
    list_add_tail( &device->files, &file->entry );
    if (device->unix_path)
    {
        /* device backed by a real unix device node: open it directly */
        mode_t mode = 0666;
        access = file->obj.ops->map_access( &file->obj, access );
        nt_name.str = device->obj.ops->get_full_name( &device->obj, &nt_name.len );
        file->fd = open_fd( NULL, device->unix_path, nt_name, O_NONBLOCK, &mode, access, sharing, options );
        if (file->fd) set_fd_user( file->fd, &device_file_fd_ops, &file->obj );
    }
    else file->fd = alloc_pseudo_fd( &device_file_fd_ops, &file->obj, options );

    if (!file->fd)
    {
        release_object( file );
        return NULL;
    }

    allow_fd_caching( file->fd );

    if (device->manager)
    {
        /* notify the client-side driver of the open */
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.create.type    = IRP_CALL_CREATE;
        params.create.access  = access;
        params.create.sharing = sharing;
        params.create.options = options;
        params.create.device  = get_kernel_object_ptr( device->manager, &device->obj );

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( device->manager, irp, current );
            release_object( irp );
        }
    }
    return &file->obj;
}
  410. static struct list *device_get_kernel_obj_list( struct object *obj )
  411. {
  412. struct device *device = (struct device *)obj;
  413. return &device->kernel_object;
  414. }
  415. static void device_file_dump( struct object *obj, int verbose )
  416. {
  417. struct device_file *file = (struct device_file *)obj;
  418. fprintf( stderr, "File on device %p\n", file->device );
  419. }
  420. static struct fd *device_file_get_fd( struct object *obj )
  421. {
  422. struct device_file *file = (struct device_file *)obj;
  423. return (struct fd *)grab_object( file->fd );
  424. }
  425. static WCHAR *device_file_get_full_name( struct object *obj, data_size_t *len )
  426. {
  427. struct device_file *file = (struct device_file *)obj;
  428. return file->device->obj.ops->get_full_name( &file->device->obj, len );
  429. }
  430. static struct list *device_file_get_kernel_obj_list( struct object *obj )
  431. {
  432. struct device_file *file = (struct device_file *)obj;
  433. return &file->kernel_object;
  434. }
/* close_handle callback: when the last handle to the file goes away and
 * the device still has a manager, queue an IRP_CALL_CLOSE so the driver
 * can clean up.  Always allows the close (returns 1). */
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct device_file *file = (struct device_file *)obj;

    if (!file->closed && file->device->manager && obj->handle_count == 1)  /* last handle */
    {
        struct irp_call *irp;
        irp_params_t params;

        file->closed = 1;  /* only notify the driver once */
        memset( &params, 0, sizeof(params) );
        params.close.type = IRP_CALL_CLOSE;

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( file->device->manager, irp, current );
            release_object( irp );
        }
    }
    return 1;
}
/* device_file destructor: drop any irps still queued on the file, then
 * release the fd and the reference on the device */
static void device_file_destroy( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    struct irp_call *irp, *next;

    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    if (file->fd) release_object( file->fd );
    list_remove( &file->entry );
    release_object( file->device );
}
/* fill in the client-visible parameters of an irp before handing it to
 * the device manager client; returns 0 only when a file handle could not
 * be allocated for an IRP_CALL_CREATE */
static int fill_irp_params( struct device_manager *manager, struct irp_call *irp, irp_params_t *params )
{
    switch (irp->params.type)
    {
    case IRP_CALL_NONE:
    case IRP_CALL_FREE:
    case IRP_CALL_CANCEL:
        break;  /* nothing to fill in */
    case IRP_CALL_CREATE:
        /* the client receives a handle to the file being opened */
        irp->params.create.file = alloc_handle( current->process, irp->file,
                                                irp->params.create.access, 0 );
        if (!irp->params.create.file) return 0;
        break;
    case IRP_CALL_CLOSE:
        irp->params.close.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_READ:
        irp->params.read.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.read.out_size = irp->iosb->out_size;
        break;
    case IRP_CALL_WRITE:
        irp->params.write.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_FLUSH:
        irp->params.flush.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_IOCTL:
        irp->params.ioctl.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.ioctl.out_size = irp->iosb->out_size;
        break;
    case IRP_CALL_VOLUME:
        irp->params.volume.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.volume.out_size = irp->iosb->out_size;
        break;
    }

    *params = irp->params;
    return 1;
}
  504. static void free_irp_params( struct irp_call *irp )
  505. {
  506. switch (irp->params.type)
  507. {
  508. case IRP_CALL_CREATE:
  509. close_handle( current->process, irp->params.create.file );
  510. break;
  511. default:
  512. break;
  513. }
  514. }
/* queue an irp to the device */
static void queue_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp = create_irp( file, params, async );
    if (!irp) return;  /* error already set by create_irp */

    fd_queue_async( file->fd, async, ASYNC_TYPE_WAIT );
    irp->async = (struct async *)grab_object( async );
    add_irp_to_queue( file->device->manager, irp, current );
    release_object( irp );
    /* the final status will come from the client-side driver later */
    async_set_unknown_status( async );
}
  526. static enum server_fd_type device_file_get_fd_type( struct fd *fd )
  527. {
  528. return FD_TYPE_DEVICE;
  529. }
  530. static void device_file_get_volume_info( struct fd *fd, struct async *async, unsigned int info_class )
  531. {
  532. struct device_file *file = get_fd_user( fd );
  533. irp_params_t params;
  534. memset( &params, 0, sizeof(params) );
  535. params.volume.type = IRP_CALL_VOLUME;
  536. params.volume.info_class = info_class;
  537. queue_irp( file, &params, async );
  538. }
  539. static void device_file_read( struct fd *fd, struct async *async, file_pos_t pos )
  540. {
  541. struct device_file *file = get_fd_user( fd );
  542. irp_params_t params;
  543. memset( &params, 0, sizeof(params) );
  544. params.read.type = IRP_CALL_READ;
  545. params.read.key = 0;
  546. params.read.pos = pos;
  547. queue_irp( file, &params, async );
  548. }
  549. static void device_file_write( struct fd *fd, struct async *async, file_pos_t pos )
  550. {
  551. struct device_file *file = get_fd_user( fd );
  552. irp_params_t params;
  553. memset( &params, 0, sizeof(params) );
  554. params.write.type = IRP_CALL_WRITE;
  555. params.write.key = 0;
  556. params.write.pos = pos;
  557. queue_irp( file, &params, async );
  558. }
  559. static void device_file_flush( struct fd *fd, struct async *async )
  560. {
  561. struct device_file *file = get_fd_user( fd );
  562. irp_params_t params;
  563. memset( &params, 0, sizeof(params) );
  564. params.flush.type = IRP_CALL_FLUSH;
  565. queue_irp( file, &params, async );
  566. }
  567. static void device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async )
  568. {
  569. struct device_file *file = get_fd_user( fd );
  570. irp_params_t params;
  571. memset( &params, 0, sizeof(params) );
  572. params.ioctl.type = IRP_CALL_IOCTL;
  573. params.ioctl.code = code;
  574. queue_irp( file, &params, async );
  575. }
/* mark an irp as canceled and, if the client already knows about it
 * (user_ptr set) and the device still has a manager, queue a matching
 * IRP_CALL_CANCEL so the driver can abort it */
static void cancel_irp_call( struct irp_call *irp )
{
    struct irp_call *cancel_irp;
    irp_params_t params;

    irp->canceled = 1;
    if (!irp->user_ptr || !irp->file || !irp->file->device->manager) return;

    memset( &params, 0, sizeof(params) );
    params.cancel.type = IRP_CALL_CANCEL;
    params.cancel.irp  = irp->user_ptr;  /* client-side pointer of the irp to cancel */

    if ((cancel_irp = create_irp( NULL, &params, NULL )))
    {
        add_irp_to_queue( irp->file->device->manager, cancel_irp, NULL );
        release_object( cancel_irp );
    }
}
  591. static void device_file_cancel_async( struct fd *fd, struct async *async )
  592. {
  593. struct device_file *file = get_fd_user( fd );
  594. struct irp_call *irp;
  595. LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
  596. {
  597. if (irp->async == async)
  598. {
  599. cancel_irp_call( irp );
  600. return;
  601. }
  602. }
  603. }
/* create a manager-driven device object.  The extra grab_object keeps the
 * device alive for the manager's device list; delete_device releases it. */
static struct device *create_device( struct object *root, const struct unicode_str *name,
                                     struct device_manager *manager )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
    {
        device->unix_path = NULL;
        device->manager = manager;
        grab_object( device );  /* reference held by the manager's list */
        list_add_tail( &manager->devices, &device->entry );
        list_init( &device->kernel_object );
        list_init( &device->files );
    }
    return device;
}
  619. struct object *create_unix_device( struct object *root, const struct unicode_str *name,
  620. unsigned int attr, const struct security_descriptor *sd,
  621. const char *unix_path )
  622. {
  623. struct device *device;
  624. if ((device = create_named_object( root, &device_ops, name, attr, sd )))
  625. {
  626. device->unix_path = strdup( unix_path );
  627. device->manager = NULL; /* no manager, requests go straight to the Unix device */
  628. list_init( &device->kernel_object );
  629. list_init( &device->files );
  630. }
  631. return &device->obj;
  632. }
/* terminate requests when the underlying device is deleted */
static void delete_file( struct device_file *file )
{
    struct irp_call *irp, *next;

    /* the pending requests may be the only thing holding a reference to the file */
    grab_object( file );

    /* terminate all pending requests */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        /* take it off the manager queue too; set_irp_result removes it
         * from the device queue */
        list_remove( &irp->mgr_entry );
        set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
    }

    release_object( file );
}
/* detach a device from its manager: terminate all open files' requests,
 * unlink the device from the namespace, and drop the manager's reference */
static void delete_device( struct device *device )
{
    struct device_file *file, *next;

    if (!device->manager) return;  /* already deleted */

    LIST_FOR_EACH_ENTRY_SAFE( file, next, &device->files, struct device_file, entry )
        delete_file( file );

    unlink_named_object( &device->obj );
    list_remove( &device->entry );
    device->manager = NULL;
    release_object( device );  /* reference taken by create_device */
}
  658. static void device_manager_dump( struct object *obj, int verbose )
  659. {
  660. fprintf( stderr, "Device manager\n" );
  661. }
  662. static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
  663. {
  664. struct device_manager *manager = (struct device_manager *)obj;
  665. return !list_empty( &manager->requests );
  666. }
/* device_manager destructor: release the in-flight call, tear down all
 * kernel-object associations, delete every device, and free the irps
 * left on the request queue */
static void device_manager_destroy( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;
    struct kernel_object *kernel_object;
    struct list *ptr;

    if (manager->current_call)
    {
        release_object( manager->current_call );
        manager->current_call = NULL;
    }

    while (manager->kernel_objects.root)
    {
        kernel_object = WINE_RB_ENTRY_VALUE( manager->kernel_objects.root, struct kernel_object, rb_entry );
        wine_rb_remove( &manager->kernel_objects, &kernel_object->rb_entry );
        list_remove( &kernel_object->list_entry );
        if (kernel_object->owned) release_object( kernel_object->object );
        free( kernel_object );
    }

    while ((ptr = list_head( &manager->devices )))
    {
        struct device *device = LIST_ENTRY( ptr, struct device, entry );
        delete_device( device );
    }

    while ((ptr = list_head( &manager->requests )))
    {
        struct irp_call *irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        list_remove( &irp->mgr_entry );
        /* deleting the devices above already detached files and asyncs */
        assert( !irp->file && !irp->async );
        release_object( irp );
    }
}
/* allocate and initialize a new device manager; returns NULL on
 * allocation failure */
static struct device_manager *create_device_manager(void)
{
    struct device_manager *manager;

    if ((manager = alloc_object( &device_manager_ops )))
    {
        manager->current_call = NULL;
        list_init( &manager->devices );
        list_init( &manager->requests );
        wine_rb_init( &manager->kernel_objects, compare_kernel_object );
    }
    return manager;
}
/* remove all kernel-object associations of a dying server object, queuing
 * an IRP_CALL_FREE to each affected manager so the client can release its
 * side.  Associations must be un-owned at this point. */
void free_kernel_objects( struct object *obj )
{
    struct list *ptr, *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return;

    while ((ptr = list_head( list )))
    {
        struct kernel_object *kernel_object = LIST_ENTRY( ptr, struct kernel_object, list_entry );
        struct irp_call *irp;
        irp_params_t params;

        /* an owned association would mean the object is still referenced */
        assert( !kernel_object->owned );

        memset( &params, 0, sizeof(params) );
        params.free.type = IRP_CALL_FREE;
        params.free.obj  = kernel_object->user_ptr;

        if ((irp = create_irp( NULL, &params, NULL )))
        {
            add_irp_to_queue( kernel_object->manager, irp, NULL );
            release_object( irp );
        }

        list_remove( &kernel_object->list_entry );
        wine_rb_remove( &kernel_object->manager->kernel_objects, &kernel_object->rb_entry );
        free( kernel_object );
    }
}
  733. /* create a device manager */
  734. DECL_HANDLER(create_device_manager)
  735. {
  736. struct device_manager *manager = create_device_manager();
  737. if (manager)
  738. {
  739. reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
  740. release_object( manager );
  741. }
  742. }
  743. /* create a device */
  744. DECL_HANDLER(create_device)
  745. {
  746. struct device *device;
  747. struct unicode_str name = get_req_unicode_str();
  748. struct device_manager *manager;
  749. struct object *root = NULL;
  750. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  751. 0, &device_manager_ops )))
  752. return;
  753. if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir )))
  754. {
  755. release_object( manager );
  756. return;
  757. }
  758. if ((device = create_device( root, &name, manager )))
  759. {
  760. struct kernel_object *ptr = set_kernel_object( manager, &device->obj, req->user_ptr );
  761. if (ptr)
  762. grab_kernel_object( ptr );
  763. else
  764. set_error( STATUS_NO_MEMORY );
  765. release_object( device );
  766. }
  767. if (root) release_object( root );
  768. release_object( manager );
  769. }
  770. /* delete a device */
  771. DECL_HANDLER(delete_device)
  772. {
  773. struct device_manager *manager;
  774. struct kernel_object *ref;
  775. struct device *device;
  776. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  777. 0, &device_manager_ops )))
  778. return;
  779. if ((ref = kernel_object_from_ptr( manager, req->device )) && ref->object->ops == &device_ops)
  780. {
  781. device = (struct device *)grab_object( ref->object );
  782. delete_device( device );
  783. release_object( device );
  784. }
  785. else set_error( STATUS_INVALID_HANDLE );
  786. release_object( manager );
  787. }
/* retrieve the next pending device irp request; also delivers the result of
 * the previous call (manager->current_call) reported back by the client */
DECL_HANDLER(get_next_device_request)
{
struct irp_call *irp;
struct device_manager *manager;
struct list *ptr;
struct iosb *iosb;
if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
0, &device_manager_ops )))
return;
/* process result of previous call */
if (manager->current_call)
{
irp = manager->current_call;
/* remember the client-side pointer of the dispatched IRP */
irp->user_ptr = req->user_ptr;
if (irp->async)
{
if (req->pending)
set_async_pending( irp->async );
async_set_initial_status( irp->async, req->status );
/* req->prev set means the previous IRP already completed synchronously */
if (req->prev)
{
set_irp_result( irp, req->iosb_status, get_req_data(), get_req_data_size(), req->result );
close_handle( current->process, req->prev ); /* avoid an extra round-trip for close */
}
else
{
async_wake_obj( irp->async );
if (irp->canceled)
{
/* if it was canceled during dispatch, we couldn't queue cancel
* call without client pointer, so we need to do it now */
cancel_irp_call( irp );
}
}
}
else
{
/* no async attached: the reported status is the final result */
set_irp_result( irp, req->status, NULL, 0, 0 );
}
free_irp_params( irp );
release_object( irp );
manager->current_call = NULL;
}
/* errors from the completion path above must not leak into the reply */
clear_error();
if ((ptr = list_head( &manager->requests )))
{
struct thread *thread;
irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
/* report the client thread the IRP originated from (or the caller itself) */
thread = irp->thread ? irp->thread : current;
reply->client_thread = get_kernel_object_ptr( manager, &thread->obj );
reply->client_tid = get_thread_id( thread );
iosb = irp->iosb;
if (iosb)
reply->in_size = iosb->in_size;
/* the input buffer must fit in the reply; otherwise the client retries
* with a larger buffer using the size reported above */
if (iosb && iosb->in_size > get_reply_max_size())
set_error( STATUS_BUFFER_OVERFLOW );
else if (!irp->file || (reply->next = alloc_handle( current->process, irp, 0, 0 )))
{
if (fill_irp_params( manager, irp, &reply->params ))
{
/* hand the input data over to the client; ownership transfers */
if (iosb)
{
set_reply_data_ptr( iosb->in_data, iosb->in_size );
iosb->in_data = NULL;
iosb->in_size = 0;
}
/* take the IRP off the manager queue; it becomes the current call */
list_remove( &irp->mgr_entry );
list_init( &irp->mgr_entry );
/* we already own the object if it's only on manager queue */
if (irp->file) grab_object( irp );
manager->current_call = irp;
}
else close_handle( current->process, reply->next );
}
}
else set_error( STATUS_PENDING );
release_object( manager );
}
  867. /* store results of an async irp */
  868. DECL_HANDLER(set_irp_result)
  869. {
  870. struct irp_call *irp;
  871. if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
  872. {
  873. set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
  874. close_handle( current->process, req->handle ); /* avoid an extra round-trip for close */
  875. release_object( irp );
  876. }
  877. }
  878. /* get kernel pointer from server object */
  879. DECL_HANDLER(get_kernel_object_ptr)
  880. {
  881. struct device_manager *manager;
  882. struct object *object = NULL;
  883. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  884. 0, &device_manager_ops )))
  885. return;
  886. if ((object = get_handle_obj( current->process, req->handle, 0, NULL )))
  887. {
  888. reply->user_ptr = get_kernel_object_ptr( manager, object );
  889. release_object( object );
  890. }
  891. release_object( manager );
  892. }
  893. /* associate kernel pointer with server object */
  894. DECL_HANDLER(set_kernel_object_ptr)
  895. {
  896. struct device_manager *manager;
  897. struct object *object = NULL;
  898. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  899. 0, &device_manager_ops )))
  900. return;
  901. if (!(object = get_handle_obj( current->process, req->handle, 0, NULL )))
  902. {
  903. release_object( manager );
  904. return;
  905. }
  906. if (!set_kernel_object( manager, object, req->user_ptr ))
  907. set_error( STATUS_INVALID_HANDLE );
  908. release_object( object );
  909. release_object( manager );
  910. }
  911. /* grab server object reference from kernel object pointer */
  912. DECL_HANDLER(grab_kernel_object)
  913. {
  914. struct device_manager *manager;
  915. struct kernel_object *ref;
  916. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  917. 0, &device_manager_ops )))
  918. return;
  919. if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && !ref->owned)
  920. grab_kernel_object( ref );
  921. else
  922. set_error( STATUS_INVALID_HANDLE );
  923. release_object( manager );
  924. }
  925. /* release server object reference from kernel object pointer */
  926. DECL_HANDLER(release_kernel_object)
  927. {
  928. struct device_manager *manager;
  929. struct kernel_object *ref;
  930. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  931. 0, &device_manager_ops )))
  932. return;
  933. if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && ref->owned)
  934. {
  935. ref->owned = 0;
  936. release_object( ref->object );
  937. }
  938. else set_error( STATUS_INVALID_HANDLE );
  939. release_object( manager );
  940. }
  941. /* get handle from kernel object pointer */
  942. DECL_HANDLER(get_kernel_object_handle)
  943. {
  944. struct device_manager *manager;
  945. struct kernel_object *ref;
  946. if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
  947. 0, &device_manager_ops )))
  948. return;
  949. if ((ref = kernel_object_from_ptr( manager, req->user_ptr )))
  950. reply->handle = alloc_handle( current->process, ref->object, req->access, 0 );
  951. else
  952. set_error( STATUS_INVALID_HANDLE );
  953. release_object( manager );
  954. }