sock.c 107 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
7327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436343734383439344034413442344334443445344634473448344934503451345234533454345534563457345834593460346134623463346434653466346734683469347034713472347334743475347634773478347934803481348234833484348534863487348834893490349134923493349434953496349734983499350035013502350335043505350635073508350935103511351235133514351535163517351835193520352135223523352435253526352735283529353035313532
  1. /*
  2. * Server-side socket management
  3. *
  4. * Copyright (C) 1999 Marcus Meissner, Ove Kåven
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
  19. *
  20. * FIXME: we use read|write access in all cases. Shouldn't we depend that
  21. * on the access of the current handle?
  22. */
  23. #include "config.h"
  24. #include <assert.h>
  25. #include <fcntl.h>
  26. #include <stdarg.h>
  27. #include <stdio.h>
  28. #include <string.h>
  29. #include <stdlib.h>
  30. #include <errno.h>
  31. #ifdef HAVE_IFADDRS_H
  32. # include <ifaddrs.h>
  33. #endif
  34. #ifdef HAVE_NET_IF_H
  35. # include <net/if.h>
  36. #endif
  37. #ifdef HAVE_NETINET_IN_H
  38. # include <netinet/in.h>
  39. #endif
  40. #include <poll.h>
  41. #include <sys/time.h>
  42. #include <sys/types.h>
  43. #include <sys/socket.h>
  44. #include <sys/ioctl.h>
  45. #ifdef HAVE_SYS_FILIO_H
  46. # include <sys/filio.h>
  47. #endif
  48. #include <time.h>
  49. #include <unistd.h>
  50. #include <limits.h>
  51. #ifdef HAVE_LINUX_FILTER_H
  52. # include <linux/filter.h>
  53. #endif
  54. #ifdef HAVE_LINUX_RTNETLINK_H
  55. # include <linux/rtnetlink.h>
  56. #endif
  57. #ifdef HAVE_NETIPX_IPX_H
  58. # include <netipx/ipx.h>
  59. #elif defined(HAVE_LINUX_IPX_H)
  60. # ifdef HAVE_ASM_TYPES_H
  61. # include <asm/types.h>
  62. # endif
  63. # ifdef HAVE_LINUX_TYPES_H
  64. # include <linux/types.h>
  65. # endif
  66. # include <linux/ipx.h>
  67. #endif
  68. #if defined(SOL_IPX) || defined(SO_DEFAULT_HEADERS)
  69. # define HAS_IPX
  70. #endif
  71. #ifdef HAVE_LINUX_IRDA_H
  72. # ifdef HAVE_LINUX_TYPES_H
  73. # include <linux/types.h>
  74. # endif
  75. # include <linux/irda.h>
  76. # define HAS_IRDA
  77. #endif
  78. #include "ntstatus.h"
  79. #define WIN32_NO_STATUS
  80. #include "windef.h"
  81. #include "winternl.h"
  82. #include "winerror.h"
  83. #define USE_WS_PREFIX
  84. #include "winsock2.h"
  85. #include "ws2tcpip.h"
  86. #include "wsipx.h"
  87. #include "af_irda.h"
  88. #include "wine/afd.h"
  89. #include "process.h"
  90. #include "file.h"
  91. #include "handle.h"
  92. #include "thread.h"
  93. #include "request.h"
  94. #include "user.h"
  95. #if defined(linux) && !defined(IP_UNICAST_IF)
  96. #define IP_UNICAST_IF 50
  97. #endif
/* a distinguished loopback address (127.12.34.56) used as a magic marker;
 * NOTE(review): its consumer is outside this chunk — confirm usage there */
static const char magic_loopback_addr[] = {127, 12, 34, 56};
/* Windows-side socket address, large enough for any family supported here */
union win_sockaddr
{
    struct WS_sockaddr addr;
    struct WS_sockaddr_in in;
    struct WS_sockaddr_in6 in6;
    struct WS_sockaddr_ipx ipx;
    SOCKADDR_IRDA irda;
};
/* global list of outstanding poll requests */
static struct list poll_list = LIST_INIT( poll_list );

/* state of one asynchronous AFD poll request, covering one or more sockets */
struct poll_req
{
    struct list entry;            /* entry in poll_list */
    struct async *async;          /* async object for this request */
    struct iosb *iosb;            /* I/O status block of the async */
    struct timeout_user *timeout; /* optional timeout callback */
    timeout_t orig_timeout;       /* timeout originally requested */
    int exclusive;                /* is this an exclusive poll? */
    unsigned int count;           /* number of entries in sockets[] */
    struct
    {
        struct sock *sock;        /* socket being polled */
        int mask;                 /* AFD_POLL_* events of interest */
        obj_handle_t handle;      /* client handle for this socket */
        int flags;                /* events seen so far — TODO confirm against poll_socket() */
        unsigned int status;      /* per-socket status to report */
    } sockets[1];                 /* variable length; presumably allocated with count entries */
};
/* state of one asynchronous accept request */
struct accept_req
{
    struct list entry;                /* entry in the listening socket's accept_list */
    struct async *async;              /* async object for this request */
    struct iosb *iosb;                /* I/O status block of the async */
    struct sock *sock, *acceptsock;   /* listening socket, and socket accepted into */
    int accepted;                     /* has the connection been accepted yet? */
    /* recv_len: bytes to receive into the output buffer after accepting;
     * local_len: length reserved for the local address — TODO confirm with AcceptEx semantics */
    unsigned int recv_len, local_len;
};
/* state of one asynchronous connect request */
struct connect_req
{
    struct async *async;          /* async object for this request */
    struct iosb *iosb;            /* I/O status block of the async */
    struct sock *sock;            /* socket being connected */
    /* addr_len: length of the destination address in the input buffer;
     * send_len/send_cursor: size and progress of data to send after
     * connecting — TODO confirm against the connect ioctl handler */
    unsigned int addr_len, send_len, send_cursor;
};
/* connection lifecycle of a socket */
enum connection_state
{
    SOCK_LISTENING,      /* listening for incoming connections */
    SOCK_UNCONNECTED,    /* not yet connected */
    SOCK_CONNECTING,     /* connection in progress */
    SOCK_CONNECTED,      /* connection established */
    SOCK_CONNECTIONLESS, /* connectionless (datagram-style) socket */
};
/* server-side representation of a Windows socket */
struct sock
{
    struct object obj;           /* object header */
    struct fd *fd;               /* socket file descriptor */
    enum connection_state state; /* connection state */
    unsigned int mask;           /* event mask */
    /* pending AFD_POLL_* events which have not yet been reported to the application */
    unsigned int pending_events;
    /* AFD_POLL_* events which have already been reported and should not be
     * selected for again until reset by a relevant call.
     *
     * For example, if AFD_POLL_READ is set here and not in pending_events, it
     * has already been reported and consumed, and we should not report it
     * again, even if POLLIN is signaled, until it is reset by e.g recv().
     *
     * If an event has been signaled and not consumed yet, it will be set in
     * both pending_events and reported_events (as we should only ever report
     * any event once until it is reset.) */
    unsigned int reported_events;
    unsigned int flags;          /* socket flags */
    unsigned short proto;        /* socket protocol */
    unsigned short type;         /* socket type */
    unsigned short family;       /* socket family */
    struct event *event;         /* event object */
    user_handle_t window;        /* window to send the message to */
    unsigned int message;        /* message to send */
    obj_handle_t wparam;         /* message wparam (socket handle) */
    int errors[AFD_POLL_BIT_COUNT]; /* event errors */
    timeout_t connect_time;      /* time the socket was connected */
    struct sock *deferred;       /* socket that waits for a deferred accept */
    struct async_queue read_q;   /* queue for asynchronous reads */
    struct async_queue write_q;  /* queue for asynchronous writes */
    struct async_queue ifchange_q; /* queue for interface change notifications */
    struct async_queue accept_q; /* queue for asynchronous accepts */
    struct async_queue connect_q; /* queue for asynchronous connects */
    struct async_queue poll_q;   /* queue for asynchronous polls */
    struct object *ifchange_obj; /* the interface change notification object */
    struct list ifchange_entry;  /* entry in ifchange notification list */
    struct list accept_list;     /* list of pending accept requests */
    struct accept_req *accept_recv_req; /* pending accept-into request which will recv on this socket */
    struct connect_req *connect_req; /* pending connection request */
    struct poll_req *main_poll;  /* main poll */
    union win_sockaddr addr;     /* socket name */
    int addr_len;                /* socket name length */
    unsigned int rcvbuf;         /* advisory recv buffer size */
    unsigned int sndbuf;         /* advisory send buffer size */
    unsigned int rcvtimeo;       /* receive timeout in ms */
    unsigned int sndtimeo;       /* send timeout in ms */
    unsigned int rd_shutdown : 1; /* is the read end shut down? */
    unsigned int wr_shutdown : 1; /* is the write end shut down? */
    unsigned int wr_shutdown_pending : 1; /* is a write shutdown pending? */
    unsigned int hangup : 1;     /* has the read end received a hangup? */
    unsigned int aborted : 1;    /* did we get a POLLERR or irregular POLLHUP? */
    unsigned int nonblocking : 1; /* is the socket nonblocking? */
    unsigned int bound : 1;      /* is the socket bound? */
};
/* forward declarations of the object / fd callbacks and helpers defined below */
static void sock_dump( struct object *obj, int verbose );
static struct fd *sock_get_fd( struct object *obj );
static int sock_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void sock_destroy( struct object *obj );
static struct object *sock_get_ifchange( struct sock *sock );
static void sock_release_ifchange( struct sock *sock );
static int sock_get_poll_events( struct fd *fd );
static void sock_poll_event( struct fd *fd, int event );
static enum server_fd_type sock_get_fd_type( struct fd *fd );
static void sock_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );
static void sock_cancel_async( struct fd *fd, struct async *async );
static void sock_queue_async( struct fd *fd, struct async *async, int type, int count );
static void sock_reselect_async( struct fd *fd, struct async_queue *queue );
static int accept_into_socket( struct sock *sock, struct sock *acceptsock );
static struct sock *accept_socket( struct sock *sock );
static int sock_get_ntstatus( int err );
static unsigned int sock_get_error( int err );
static void poll_socket( struct sock *poll_sock, struct async *async, int exclusive, timeout_t timeout,
                         unsigned int count, const struct afd_poll_socket_64 *sockets );
/* object callback table for socket objects */
static const struct object_ops sock_ops =
{
    sizeof(struct sock),      /* size */
    &file_type,               /* type */
    sock_dump,                /* dump */
    add_queue,                /* add_queue */
    remove_queue,             /* remove_queue */
    default_fd_signaled,      /* signaled */
    no_satisfied,             /* satisfied */
    no_signal,                /* signal */
    sock_get_fd,              /* get_fd */
    default_map_access,       /* map_access */
    default_get_sd,           /* get_sd */
    default_set_sd,           /* set_sd */
    no_get_full_name,         /* get_full_name */
    no_lookup_name,           /* lookup_name */
    no_link_name,             /* link_name */
    NULL,                     /* unlink_name */
    no_open_file,             /* open_file */
    no_kernel_obj_list,       /* get_kernel_obj_list */
    sock_close_handle,        /* close_handle */
    sock_destroy              /* destroy */
};
/* fd callback table for socket file descriptors */
static const struct fd_ops sock_fd_ops =
{
    sock_get_poll_events,     /* get_poll_events */
    sock_poll_event,          /* poll_event */
    sock_get_fd_type,         /* get_fd_type */
    no_fd_read,               /* read */
    no_fd_write,              /* write */
    no_fd_flush,              /* flush */
    default_fd_get_file_info, /* get_file_info */
    no_fd_get_volume_info,    /* get_volume_info */
    sock_ioctl,               /* ioctl */
    sock_cancel_async,        /* cancel_async */
    sock_queue_async,         /* queue_async */
    sock_reselect_async       /* reselect_async */
};
/* Unix-side socket address, large enough for any family supported here */
union unix_sockaddr
{
    struct sockaddr addr;
    struct sockaddr_in in;
    struct sockaddr_in6 in6;
#ifdef HAS_IPX
    struct sockaddr_ipx ipx;
#endif
#ifdef HAS_IRDA
    struct sockaddr_irda irda;
#endif
};
/* Convert a Unix sockaddr into a Windows WS_sockaddr in the wsaddr buffer.
 * The whole wsaddrlen-byte buffer is zeroed first.
 * Returns the number of address bytes written, 0 for AF_UNSPEC, or -1 if
 * the family is unsupported or the buffer is too small. */
static int sockaddr_from_unix( const union unix_sockaddr *uaddr, struct WS_sockaddr *wsaddr, socklen_t wsaddrlen )
{
    memset( wsaddr, 0, wsaddrlen );

    switch (uaddr->addr.sa_family)
    {
    case AF_INET:
    {
        struct WS_sockaddr_in win = {0};

        if (wsaddrlen < sizeof(win)) return -1;
        win.sin_family = WS_AF_INET;
        win.sin_port = uaddr->in.sin_port;
        memcpy( &win.sin_addr, &uaddr->in.sin_addr, sizeof(win.sin_addr) );
        memcpy( wsaddr, &win, sizeof(win) );
        return sizeof(win);
    }

    case AF_INET6:
    {
        struct WS_sockaddr_in6 win = {0};

        if (wsaddrlen < sizeof(win)) return -1;
        win.sin6_family = WS_AF_INET6;
        win.sin6_port = uaddr->in6.sin6_port;
        win.sin6_flowinfo = uaddr->in6.sin6_flowinfo;
        memcpy( &win.sin6_addr, &uaddr->in6.sin6_addr, sizeof(win.sin6_addr) );
#ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_SCOPE_ID
        win.sin6_scope_id = uaddr->in6.sin6_scope_id;
#endif
        memcpy( wsaddr, &win, sizeof(win) );
        return sizeof(win);
    }

#ifdef HAS_IPX
    case AF_IPX:
    {
        struct WS_sockaddr_ipx win = {0};

        if (wsaddrlen < sizeof(win)) return -1;
        win.sa_family = WS_AF_IPX;
        memcpy( win.sa_netnum, &uaddr->ipx.sipx_network, sizeof(win.sa_netnum) );
        memcpy( win.sa_nodenum, &uaddr->ipx.sipx_node, sizeof(win.sa_nodenum) );
        win.sa_socket = uaddr->ipx.sipx_port;
        memcpy( wsaddr, &win, sizeof(win) );
        return sizeof(win);
    }
#endif

#ifdef HAS_IRDA
    case AF_IRDA:
    {
        SOCKADDR_IRDA win;

        if (wsaddrlen < sizeof(win)) return -1;
        win.irdaAddressFamily = WS_AF_IRDA;
        memcpy( win.irdaDeviceID, &uaddr->irda.sir_addr, sizeof(win.irdaDeviceID) );
        /* a fixed LSAP selector is represented on the Windows side as the
         * service name "LSAP-SELn"; otherwise copy the service name through */
        if (uaddr->irda.sir_lsap_sel != LSAP_ANY)
            snprintf( win.irdaServiceName, sizeof(win.irdaServiceName), "LSAP-SEL%u", uaddr->irda.sir_lsap_sel );
        else
            memcpy( win.irdaServiceName, uaddr->irda.sir_name, sizeof(win.irdaServiceName) );
        memcpy( wsaddr, &win, sizeof(win) );
        return sizeof(win);
    }
#endif

    case AF_UNSPEC:
        return 0;

    default:
        return -1;
    }
}
/* Convert a Windows WS_sockaddr into a Unix sockaddr.
 * Returns the Unix address length, or 0 if the family is unsupported or
 * wsaddrlen is too small for the claimed family. */
static socklen_t sockaddr_to_unix( const struct WS_sockaddr *wsaddr, int wsaddrlen, union unix_sockaddr *uaddr )
{
    memset( uaddr, 0, sizeof(*uaddr) );

    switch (wsaddr->sa_family)
    {
    case WS_AF_INET:
    {
        struct WS_sockaddr_in win = {0};

        if (wsaddrlen < sizeof(win)) return 0;
        memcpy( &win, wsaddr, sizeof(win) );
        uaddr->in.sin_family = AF_INET;
        uaddr->in.sin_port = win.sin_port;
        memcpy( &uaddr->in.sin_addr, &win.sin_addr, sizeof(win.sin_addr) );
        return sizeof(uaddr->in);
    }

    case WS_AF_INET6:
    {
        struct WS_sockaddr_in6 win = {0};

        if (wsaddrlen < sizeof(win)) return 0;
        memcpy( &win, wsaddr, sizeof(win) );
        uaddr->in6.sin6_family = AF_INET6;
        uaddr->in6.sin6_port = win.sin6_port;
        uaddr->in6.sin6_flowinfo = win.sin6_flowinfo;
        memcpy( &uaddr->in6.sin6_addr, &win.sin6_addr, sizeof(win.sin6_addr) );
#ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_SCOPE_ID
        uaddr->in6.sin6_scope_id = win.sin6_scope_id;
#endif
        return sizeof(uaddr->in6);
    }

#ifdef HAS_IPX
    case WS_AF_IPX:
    {
        struct WS_sockaddr_ipx win = {0};

        if (wsaddrlen < sizeof(win)) return 0;
        memcpy( &win, wsaddr, sizeof(win) );
        uaddr->ipx.sipx_family = AF_IPX;
        memcpy( &uaddr->ipx.sipx_network, win.sa_netnum, sizeof(win.sa_netnum) );
        memcpy( &uaddr->ipx.sipx_node, win.sa_nodenum, sizeof(win.sa_nodenum) );
        uaddr->ipx.sipx_port = win.sa_socket;
        return sizeof(uaddr->ipx);
    }
#endif

#ifdef HAS_IRDA
    case WS_AF_IRDA:
    {
        SOCKADDR_IRDA win = {0};
        unsigned int lsap_sel;

        if (wsaddrlen < sizeof(win)) return 0;
        memcpy( &win, wsaddr, sizeof(win) );
        uaddr->irda.sir_family = AF_IRDA;
        /* "LSAP-SELn" service names encode a fixed LSAP selector (see the
         * inverse conversion in sockaddr_from_unix()) */
        if (sscanf( win.irdaServiceName, "LSAP-SEL%u", &lsap_sel ) == 1)
            uaddr->irda.sir_lsap_sel = lsap_sel;
        else
        {
            uaddr->irda.sir_lsap_sel = LSAP_ANY;
            memcpy( uaddr->irda.sir_name, win.irdaServiceName, sizeof(win.irdaServiceName) );
        }
        memcpy( &uaddr->irda.sir_addr, win.irdaDeviceID, sizeof(win.irdaDeviceID) );
        return sizeof(uaddr->irda);
    }
#endif

    case WS_AF_UNSPEC:
        /* family not specified: guess it from the address length alone */
        switch (wsaddrlen)
        {
        default: /* likely an ipv4 address */
        case sizeof(struct WS_sockaddr_in):
            return sizeof(uaddr->in);
#ifdef HAS_IPX
        case sizeof(struct WS_sockaddr_ipx):
            return sizeof(uaddr->ipx);
#endif
#ifdef HAS_IRDA
        case sizeof(SOCKADDR_IRDA):
            return sizeof(uaddr->irda);
#endif
        case sizeof(struct WS_sockaddr_in6):
            return sizeof(uaddr->in6);
        }

    default:
        return 0;
    }
}
/* some events are generated at the same time but must be sent in a particular
 * order (e.g. CONNECT must be sent before READ) */
static const enum afd_poll_bit event_bitorder[] =
{
    AFD_POLL_BIT_CONNECT,
    AFD_POLL_BIT_CONNECT_ERR,
    AFD_POLL_BIT_ACCEPT,
    AFD_POLL_BIT_OOB,
    AFD_POLL_BIT_WRITE,
    AFD_POLL_BIT_READ,
    AFD_POLL_BIT_RESET,
    AFD_POLL_BIT_HUP,
    AFD_POLL_BIT_CLOSE,
};
/* how the host OS reports a peer's write shutdown, as detected by
 * sock_check_pollhup() at startup */
typedef enum {
    SOCK_SHUTDOWN_ERROR = -1,   /* detection failed */
    SOCK_SHUTDOWN_EOF = 0,      /* poll() reports POLLIN and read() returns EOF */
    SOCK_SHUTDOWN_POLLHUP = 1   /* poll() reports POLLHUP */
} sock_shutdown_t;

static sock_shutdown_t sock_shutdown_type = SOCK_SHUTDOWN_ERROR;
  440. static sock_shutdown_t sock_check_pollhup(void)
  441. {
  442. sock_shutdown_t ret = SOCK_SHUTDOWN_ERROR;
  443. int fd[2], n;
  444. struct pollfd pfd;
  445. char dummy;
  446. if ( socketpair( AF_UNIX, SOCK_STREAM, 0, fd ) ) return ret;
  447. if ( shutdown( fd[0], 1 ) ) goto out;
  448. pfd.fd = fd[1];
  449. pfd.events = POLLIN;
  450. pfd.revents = 0;
  451. /* Solaris' poll() sometimes returns nothing if given a 0ms timeout here */
  452. n = poll( &pfd, 1, 1 );
  453. if ( n != 1 ) goto out; /* error or timeout */
  454. if ( pfd.revents & POLLHUP )
  455. ret = SOCK_SHUTDOWN_POLLHUP;
  456. else if ( pfd.revents & POLLIN &&
  457. read( fd[1], &dummy, 1 ) == 0 )
  458. ret = SOCK_SHUTDOWN_EOF;
  459. out:
  460. close( fd[0] );
  461. close( fd[1] );
  462. return ret;
  463. }
  464. void sock_init(void)
  465. {
  466. sock_shutdown_type = sock_check_pollhup();
  467. switch ( sock_shutdown_type )
  468. {
  469. case SOCK_SHUTDOWN_EOF:
  470. if (debug_level) fprintf( stderr, "sock_init: shutdown() causes EOF\n" );
  471. break;
  472. case SOCK_SHUTDOWN_POLLHUP:
  473. if (debug_level) fprintf( stderr, "sock_init: shutdown() causes POLLHUP\n" );
  474. break;
  475. default:
  476. fprintf( stderr, "sock_init: ERROR in sock_check_pollhup()\n" );
  477. sock_shutdown_type = SOCK_SHUTDOWN_EOF;
  478. }
  479. }
  480. static int sock_reselect( struct sock *sock )
  481. {
  482. int ev = sock_get_poll_events( sock->fd );
  483. if (debug_level)
  484. fprintf(stderr,"sock_reselect(%p): new mask %x\n", sock, ev);
  485. set_fd_events( sock->fd, ev );
  486. return ev;
  487. }
  488. static unsigned int afd_poll_flag_to_win32( unsigned int flags )
  489. {
  490. static const unsigned int map[] =
  491. {
  492. FD_READ, /* READ */
  493. FD_OOB, /* OOB */
  494. FD_WRITE, /* WRITE */
  495. FD_CLOSE, /* HUP */
  496. FD_CLOSE, /* RESET */
  497. 0, /* CLOSE */
  498. FD_CONNECT, /* CONNECT */
  499. FD_ACCEPT, /* ACCEPT */
  500. FD_CONNECT, /* CONNECT_ERR */
  501. };
  502. unsigned int i, ret = 0;
  503. for (i = 0; i < ARRAY_SIZE(map); ++i)
  504. {
  505. if (flags & (1 << i)) ret |= map[i];
  506. }
  507. return ret;
  508. }
/* wake anybody waiting on the socket event or send the associated message */
static void sock_wake_up( struct sock *sock )
{
    /* only report events the application asked for */
    unsigned int events = sock->pending_events & sock->mask;
    int i;

    if (sock->event)
    {
        if (debug_level) fprintf(stderr, "signalling events %x ptr %p\n", events, sock->event );
        if (events)
            set_event( sock->event );
    }
    if (sock->window)
    {
        if (debug_level) fprintf(stderr, "signalling events %x win %08x\n", events, sock->window );
        /* post one message per event, in the canonical ordering (e.g.
         * CONNECT before READ) defined by event_bitorder */
        for (i = 0; i < ARRAY_SIZE(event_bitorder); i++)
        {
            enum afd_poll_bit event = event_bitorder[i];
            if (events & (1 << event))
            {
                /* low word of lparam is the FD_* event, high word its error */
                lparam_t lparam = afd_poll_flag_to_win32(1 << event) | (sock_get_error( sock->errors[event] ) << 16);
                post_message( sock->window, sock->message, sock->wparam, lparam );
            }
        }
        /* events delivered via window message are consumed immediately */
        sock->pending_events = 0;
        sock_reselect( sock );
    }
}
  536. static inline int sock_error( struct fd *fd )
  537. {
  538. unsigned int optval = 0;
  539. socklen_t optlen = sizeof(optval);
  540. getsockopt( get_unix_fd(fd), SOL_SOCKET, SO_ERROR, (void *) &optval, &optlen);
  541. return optval;
  542. }
/* Async cleanup callback: unlink an accept request and drop every
 * reference it holds. */
static void free_accept_req( void *private )
{
    struct accept_req *req = private;

    /* remove from the owning socket's accept_list */
    list_remove( &req->entry );

    if (req->acceptsock)
    {
        /* clear the back-pointer before releasing our reference */
        req->acceptsock->accept_recv_req = NULL;
        release_object( req->acceptsock );
    }
    release_object( req->async );
    release_object( req->iosb );
    release_object( req->sock );
    free( req );
}
  557. static void fill_accept_output( struct accept_req *req )
  558. {
  559. const data_size_t out_size = req->iosb->out_size;
  560. struct async *async = req->async;
  561. union unix_sockaddr unix_addr;
  562. struct WS_sockaddr *win_addr;
  563. unsigned int remote_len;
  564. socklen_t unix_len;
  565. int fd, size = 0;
  566. char *out_data;
  567. int win_len;
  568. if (!(out_data = mem_alloc( out_size )))
  569. {
  570. async_terminate( async, get_error() );
  571. return;
  572. }
  573. fd = get_unix_fd( req->acceptsock->fd );
  574. if (req->recv_len && (size = recv( fd, out_data, req->recv_len, 0 )) < 0)
  575. {
  576. if (!req->accepted && errno == EWOULDBLOCK)
  577. {
  578. req->accepted = 1;
  579. sock_reselect( req->acceptsock );
  580. return;
  581. }
  582. async_terminate( async, sock_get_ntstatus( errno ) );
  583. free( out_data );
  584. return;
  585. }
  586. if (req->local_len)
  587. {
  588. if (req->local_len < sizeof(int))
  589. {
  590. async_terminate( async, STATUS_BUFFER_TOO_SMALL );
  591. free( out_data );
  592. return;
  593. }
  594. unix_len = sizeof(unix_addr);
  595. win_addr = (struct WS_sockaddr *)(out_data + req->recv_len + sizeof(int));
  596. if (getsockname( fd, &unix_addr.addr, &unix_len ) < 0 ||
  597. (win_len = sockaddr_from_unix( &unix_addr, win_addr, req->local_len - sizeof(int) )) < 0)
  598. {
  599. async_terminate( async, sock_get_ntstatus( errno ) );
  600. free( out_data );
  601. return;
  602. }
  603. memcpy( out_data + req->recv_len, &win_len, sizeof(int) );
  604. }
  605. unix_len = sizeof(unix_addr);
  606. win_addr = (struct WS_sockaddr *)(out_data + req->recv_len + req->local_len + sizeof(int));
  607. remote_len = out_size - req->recv_len - req->local_len;
  608. if (getpeername( fd, &unix_addr.addr, &unix_len ) < 0 ||
  609. (win_len = sockaddr_from_unix( &unix_addr, win_addr, remote_len - sizeof(int) )) < 0)
  610. {
  611. async_terminate( async, sock_get_ntstatus( errno ) );
  612. free( out_data );
  613. return;
  614. }
  615. memcpy( out_data + req->recv_len + req->local_len, &win_len, sizeof(int) );
  616. async_request_complete( req->async, STATUS_SUCCESS, size, out_size, out_data );
  617. }
/* Complete a pending async accept on a listening socket, either accepting
 * into a caller-supplied socket (AcceptEx-style) or creating a fresh one. */
static void complete_async_accept( struct sock *sock, struct accept_req *req )
{
    struct sock *acceptsock = req->acceptsock;
    struct async *async = req->async;

    if (debug_level) fprintf( stderr, "completing accept request for socket %p\n", sock );

    if (acceptsock)
    {
        /* accept into the pre-created socket, then fill the AcceptEx output
         * buffer (initial data + addresses) */
        if (!accept_into_socket( sock, acceptsock ))
        {
            async_terminate( async, get_error() );
            return;
        }
        fill_accept_output( req );
    }
    else
    {
        obj_handle_t handle;

        /* no target socket supplied: create one and return its handle */
        if (!(acceptsock = accept_socket( sock )))
        {
            async_terminate( async, get_error() );
            return;
        }
        handle = alloc_handle_no_access_check( async_get_thread( async )->process, &acceptsock->obj,
                                               GENERIC_READ | GENERIC_WRITE | SYNCHRONIZE, OBJ_INHERIT );
        acceptsock->wparam = handle;
        sock_reselect( acceptsock );
        /* the handle (if any) now keeps the socket alive */
        release_object( acceptsock );
        if (!handle)
        {
            async_terminate( async, get_error() );
            return;
        }
        async_request_complete_alloc( req->async, STATUS_SUCCESS, 0, sizeof(handle), &handle );
    }
}
/* Complete a pending accept request that is still waiting for its initial
 * data, now that the accepted socket has become readable. */
static void complete_async_accept_recv( struct accept_req *req )
{
    if (debug_level) fprintf( stderr, "completing accept recv request for socket %p\n", req->acceptsock );

    /* only requests that asked for initial data can end up here */
    assert( req->recv_len );

    fill_accept_output( req );
}
/* Async cleanup callback: detach a connect request from its socket and
 * drop every reference it holds. */
static void free_connect_req( void *private )
{
    struct connect_req *req = private;

    req->sock->connect_req = NULL;
    release_object( req->async );
    release_object( req->iosb );
    release_object( req->sock );
    free( req );
}
  668. static void complete_async_connect( struct sock *sock )
  669. {
  670. struct connect_req *req = sock->connect_req;
  671. const char *in_buffer;
  672. size_t len;
  673. int ret;
  674. if (debug_level) fprintf( stderr, "completing connect request for socket %p\n", sock );
  675. sock->state = SOCK_CONNECTED;
  676. if (!req->send_len)
  677. {
  678. async_terminate( req->async, STATUS_SUCCESS );
  679. return;
  680. }
  681. in_buffer = (const char *)req->iosb->in_data + sizeof(struct afd_connect_params) + req->addr_len;
  682. len = req->send_len - req->send_cursor;
  683. ret = send( get_unix_fd( sock->fd ), in_buffer + req->send_cursor, len, 0 );
  684. if (ret < 0 && errno != EWOULDBLOCK)
  685. async_terminate( req->async, sock_get_ntstatus( errno ) );
  686. else if (ret == len)
  687. async_request_complete( req->async, STATUS_SUCCESS, req->send_len, 0, NULL );
  688. else
  689. req->send_cursor += ret;
  690. }
/* Async cleanup callback: cancel the timeout, unlink the poll request from
 * the global poll_list, and drop every reference it holds. */
static void free_poll_req( void *private )
{
    struct poll_req *req = private;
    unsigned int i;

    if (req->timeout) remove_timeout_user( req->timeout );

    /* one reference per polled socket */
    for (i = 0; i < req->count; ++i)
        release_object( req->sockets[i].sock );
    release_object( req->async );
    release_object( req->iosb );
    list_remove( &req->entry );
    free( req );
}
  703. static int is_oobinline( struct sock *sock )
  704. {
  705. int oobinline;
  706. socklen_t len = sizeof(oobinline);
  707. return !getsockopt( get_unix_fd( sock->fd ), SOL_SOCKET, SO_OOBINLINE, (char *)&oobinline, &len ) && oobinline;
  708. }
/* Translate Unix poll() events on a socket into AFD_POLL_* flags,
 * taking the socket's connection state into account. */
static int get_poll_flags( struct sock *sock, int event )
{
    int flags = 0;

    /* A connection-mode socket which has never been connected does not return
     * write or hangup events, but Linux reports POLLOUT | POLLHUP. */
    if (sock->state == SOCK_UNCONNECTED)
        event &= ~(POLLOUT | POLLHUP);

    if (event & POLLIN)
    {
        /* readable on a listener means an incoming connection */
        if (sock->state == SOCK_LISTENING)
            flags |= AFD_POLL_ACCEPT;
        else
            flags |= AFD_POLL_READ;
    }
    if (event & POLLPRI)
        /* with SO_OOBINLINE, urgent data arrives in the normal stream */
        flags |= is_oobinline( sock ) ? AFD_POLL_READ : AFD_POLL_OOB;
    if (event & POLLOUT)
        flags |= AFD_POLL_WRITE;
    /* a connected socket always reports CONNECT, regardless of events */
    if (sock->state == SOCK_CONNECTED)
        flags |= AFD_POLL_CONNECT;
    if (event & POLLHUP)
        flags |= AFD_POLL_HUP;
    if (event & POLLERR)
        flags |= AFD_POLL_CONNECT_ERR;
    return flags;
}
/* Complete an async poll request with the given status, reporting every
 * socket whose .flags were filled in.  The output buffer is laid out as a
 * 32-bit or 64-bit afd_poll_params depending on the client process. */
static void complete_async_poll( struct poll_req *req, unsigned int status )
{
    unsigned int i, signaled_count = 0;

    /* detach this request from any socket using it as its main poll */
    for (i = 0; i < req->count; ++i)
    {
        struct sock *sock = req->sockets[i].sock;

        if (sock->main_poll == req)
            sock->main_poll = NULL;
    }

    if (!status)
    {
        /* on success, count how many sockets have something to report */
        for (i = 0; i < req->count; ++i)
        {
            if (req->sockets[i].flags)
                ++signaled_count;
        }
    }

    if (is_machine_64bit( async_get_thread( req->async )->process->machine ))
    {
        size_t output_size = offsetof( struct afd_poll_params_64, sockets[signaled_count] );
        struct afd_poll_params_64 *output;

        if (!(output = mem_alloc( output_size )))
        {
            async_terminate( req->async, get_error() );
            return;
        }
        memset( output, 0, output_size );
        output->timeout = req->orig_timeout;
        output->exclusive = req->exclusive;
        /* copy only the signalled sockets, preserving request order */
        for (i = 0; i < req->count; ++i)
        {
            if (!req->sockets[i].flags) continue;
            output->sockets[output->count].socket = req->sockets[i].handle;
            output->sockets[output->count].flags = req->sockets[i].flags;
            output->sockets[output->count].status = req->sockets[i].status;
            ++output->count;
        }
        assert( output->count == signaled_count );
        /* ownership of the output buffer passes to the async */
        async_request_complete( req->async, status, output_size, output_size, output );
    }
    else
    {
        /* same logic, 32-bit layout */
        size_t output_size = offsetof( struct afd_poll_params_32, sockets[signaled_count] );
        struct afd_poll_params_32 *output;

        if (!(output = mem_alloc( output_size )))
        {
            async_terminate( req->async, get_error() );
            return;
        }
        memset( output, 0, output_size );
        output->timeout = req->orig_timeout;
        output->exclusive = req->exclusive;
        for (i = 0; i < req->count; ++i)
        {
            if (!req->sockets[i].flags) continue;
            output->sockets[output->count].socket = req->sockets[i].handle;
            output->sockets[output->count].flags = req->sockets[i].flags;
            output->sockets[output->count].status = req->sockets[i].status;
            ++output->count;
        }
        assert( output->count == signaled_count );
        async_request_complete( req->async, status, output_size, output_size, output );
    }
}
/* Complete every pending poll request that is satisfied by the given
 * poll() event on this socket. */
static void complete_async_polls( struct sock *sock, int event, int error )
{
    int flags = get_poll_flags( sock, event );
    struct poll_req *req, *next;

    /* _SAFE because complete_async_poll() may cause the request to be freed */
    LIST_FOR_EACH_ENTRY_SAFE( req, next, &poll_list, struct poll_req, entry )
    {
        unsigned int i;

        if (req->iosb->status != STATUS_PENDING) continue;

        for (i = 0; i < req->count; ++i)
        {
            if (req->sockets[i].sock != sock) continue;
            if (!(req->sockets[i].mask & flags)) continue;

            if (debug_level)
                fprintf( stderr, "completing poll for socket %p, wanted %#x got %#x\n",
                         sock, req->sockets[i].mask, flags );

            /* report only the subset of events the request asked for */
            req->sockets[i].flags = req->sockets[i].mask & flags;
            req->sockets[i].status = sock_get_ntstatus( error );

            complete_async_poll( req, STATUS_SUCCESS );
            break;
        }
    }
}
  821. static void async_poll_timeout( void *private )
  822. {
  823. struct poll_req *req = private;
  824. req->timeout = NULL;
  825. if (req->iosb->status != STATUS_PENDING) return;
  826. complete_async_poll( req, STATUS_TIMEOUT );
  827. }
/* Dispatch a poll() event to the socket's pending asyncs (accepts,
 * connects, reads, writes).  Returns the event mask with the bits that
 * were consumed by read/write queues cleared. */
static int sock_dispatch_asyncs( struct sock *sock, int event, int error )
{
    if (event & (POLLIN | POLLPRI))
    {
        struct accept_req *req;

        /* complete at most one not-yet-accepted request per event */
        LIST_FOR_EACH_ENTRY( req, &sock->accept_list, struct accept_req, entry )
        {
            if (req->iosb->status == STATUS_PENDING && !req->accepted)
            {
                complete_async_accept( sock, req );
                break;
            }
        }

        /* an accept waiting for initial data can now be retried */
        if (sock->accept_recv_req && sock->accept_recv_req->iosb->status == STATUS_PENDING)
            complete_async_accept_recv( sock->accept_recv_req );
    }

    if ((event & POLLOUT) && sock->connect_req && sock->connect_req->iosb->status == STATUS_PENDING)
        complete_async_connect( sock );

    if (event & (POLLIN | POLLPRI) && async_waiting( &sock->read_q ))
    {
        if (debug_level) fprintf( stderr, "activating read queue for socket %p\n", sock );
        async_wake_up( &sock->read_q, STATUS_ALERTED );
        /* the readiness was consumed by the read queue */
        event &= ~(POLLIN | POLLPRI);
    }

    if (event & POLLOUT && async_waiting( &sock->write_q ))
    {
        if (debug_level) fprintf( stderr, "activating write queue for socket %p\n", sock );
        async_wake_up( &sock->write_q, STATUS_ALERTED );
        event &= ~POLLOUT;
    }

    if (event & (POLLERR | POLLHUP))
    {
        int status = sock_get_ntstatus( error );
        struct accept_req *req, *next;

        /* fail queued I/O on the shut-down directions */
        if (sock->rd_shutdown || sock->hangup)
            async_wake_up( &sock->read_q, status );
        if (sock->wr_shutdown)
            async_wake_up( &sock->write_q, status );

        /* _SAFE: terminating a request can remove it from the list */
        LIST_FOR_EACH_ENTRY_SAFE( req, next, &sock->accept_list, struct accept_req, entry )
        {
            if (req->iosb->status == STATUS_PENDING)
                async_terminate( req->async, status );
        }

        if (sock->accept_recv_req && sock->accept_recv_req->iosb->status == STATUS_PENDING)
            async_terminate( sock->accept_recv_req->async, status );

        if (sock->connect_req)
            async_terminate( sock->connect_req->async, status );
    }

    return event;
}
  878. static void post_socket_event( struct sock *sock, enum afd_poll_bit event_bit, int error )
  879. {
  880. unsigned int event = (1 << event_bit);
  881. if (!(sock->reported_events & event))
  882. {
  883. sock->pending_events |= event;
  884. sock->reported_events |= event;
  885. sock->errors[event_bit] = error;
  886. }
  887. }
/* Translate poll() events into queued AFD events according to the state
 * the socket was in *before* this event was handled, then wake waiters. */
static void sock_dispatch_events( struct sock *sock, enum connection_state prevstate, int event, int error )
{
    switch (prevstate)
    {
    case SOCK_UNCONNECTED:
        break;

    case SOCK_CONNECTING:
        if (event & POLLOUT)
        {
            post_socket_event( sock, AFD_POLL_BIT_CONNECT, 0 );
            /* a successful connect clears any stale connect error */
            sock->errors[AFD_POLL_BIT_CONNECT_ERR] = 0;
        }
        if (event & (POLLERR | POLLHUP))
            post_socket_event( sock, AFD_POLL_BIT_CONNECT_ERR, error );
        break;

    case SOCK_LISTENING:
        if (event & (POLLIN | POLLERR | POLLHUP))
            post_socket_event( sock, AFD_POLL_BIT_ACCEPT, error );
        break;

    case SOCK_CONNECTED:
    case SOCK_CONNECTIONLESS:  /* both states report the same events */
        if (event & POLLIN)
            post_socket_event( sock, AFD_POLL_BIT_READ, 0 );

        if (event & POLLOUT)
            post_socket_event( sock, AFD_POLL_BIT_WRITE, 0 );

        if (event & POLLPRI)
            post_socket_event( sock, AFD_POLL_BIT_OOB, 0 );

        if (event & (POLLERR | POLLHUP))
            post_socket_event( sock, AFD_POLL_BIT_HUP, error );
        break;
    }

    sock_wake_up( sock );
}
/* Main poll() event handler for a socket fd: update the connection state
 * machine, then dispatch the event to polls, asyncs and queued events. */
static void sock_poll_event( struct fd *fd, int event )
{
    struct sock *sock = get_fd_user( fd );
    int hangup_seen = 0;
    enum connection_state prevstate = sock->state;
    int error = 0;

    assert( sock->obj.ops == &sock_ops );
    if (debug_level)
        fprintf(stderr, "socket %p select event: %x\n", sock, event);

    /* we may change event later, remove from loop here */
    if (event & (POLLERR|POLLHUP)) set_fd_events( sock->fd, -1 );

    switch (sock->state)
    {
    case SOCK_UNCONNECTED:
        break;

    case SOCK_CONNECTING:
        if (event & (POLLERR|POLLHUP))
        {
            /* connect failed; drop back to unconnected and grab the error */
            sock->state = SOCK_UNCONNECTED;
            event &= ~POLLOUT;
            error = sock_error( fd );
        }
        else if (event & POLLOUT)
        {
            /* writable while connecting means the connect succeeded */
            sock->state = SOCK_CONNECTED;
            sock->connect_time = current_time;
        }
        break;

    case SOCK_LISTENING:
        if (event & (POLLERR|POLLHUP))
            error = sock_error( fd );
        break;

    case SOCK_CONNECTED:
    case SOCK_CONNECTIONLESS:
        if (sock->type == WS_SOCK_STREAM && (event & POLLIN))
        {
            char dummy;
            int nr;

            /* Linux 2.4 doesn't report POLLHUP if only one side of the socket
             * has been closed, so we need to check for it explicitly here */
            nr  = recv( get_unix_fd( fd ), &dummy, 1, MSG_PEEK );
            if ( nr == 0 )
            {
                /* EOF on a stream socket: peer shut down its write side */
                hangup_seen = 1;
                event &= ~POLLIN;
            }
            else if ( nr < 0 )
            {
                event &= ~POLLIN;
                /* EAGAIN can happen if an async recv() falls between the server's poll()
                   call and the invocation of this routine */
                if ( errno != EAGAIN )
                {
                    error = errno;
                    event |= POLLERR;
                    if ( debug_level )
                        fprintf( stderr, "recv error on socket %p: %d\n", sock, errno );
                }
            }
        }

        if (hangup_seen || (sock_shutdown_type == SOCK_SHUTDOWN_POLLHUP && (event & POLLHUP)))
        {
            /* orderly shutdown by the peer */
            sock->hangup = 1;
        }
        else if (event & (POLLHUP | POLLERR))
        {
            /* abortive close or error */
            sock->aborted = 1;

            if (debug_level)
                fprintf( stderr, "socket %p aborted by error %d, event %#x\n", sock, error, event );
        }

        if (hangup_seen)
            event |= POLLHUP;
        break;
    }

    complete_async_polls( sock, event, error );

    event = sock_dispatch_asyncs( sock, event, error );
    sock_dispatch_events( sock, prevstate, event, error );

    sock_reselect( sock );
}
/* Dump a socket object's state for server debugging output. */
static void sock_dump( struct object *obj, int verbose )
{
    struct sock *sock = (struct sock *)obj;

    assert( obj->ops == &sock_ops );
    fprintf( stderr, "Socket fd=%p, state=%x, mask=%x, pending=%x, reported=%x\n",
             sock->fd, sock->state,
             sock->mask, sock->pending_events, sock->reported_events );
}
  1008. static int poll_flags_from_afd( struct sock *sock, int flags )
  1009. {
  1010. int ev = 0;
  1011. /* A connection-mode socket which has never been connected does
  1012. * not return write or hangup events, but Linux returns
  1013. * POLLOUT | POLLHUP. */
  1014. if (sock->state == SOCK_UNCONNECTED)
  1015. return -1;
  1016. if (flags & (AFD_POLL_READ | AFD_POLL_ACCEPT))
  1017. ev |= POLLIN;
  1018. if ((flags & AFD_POLL_HUP) && sock->type == WS_SOCK_STREAM)
  1019. ev |= POLLIN;
  1020. if (flags & AFD_POLL_OOB)
  1021. ev |= is_oobinline( sock ) ? POLLIN : POLLPRI;
  1022. if (flags & AFD_POLL_WRITE)
  1023. ev |= POLLOUT;
  1024. return ev;
  1025. }
/* Compute the Unix poll() event mask the server should watch on this
 * socket, based on its state, pending asyncs, the application's event
 * mask, and any outstanding poll requests.  Returns -1 to mean "do not
 * poll this fd at all". */
static int sock_get_poll_events( struct fd *fd )
{
    struct sock *sock = get_fd_user( fd );
    /* events already reported are edge-triggered: don't watch them again */
    unsigned int mask = sock->mask & ~sock->reported_events;
    struct poll_req *req;
    int ev = 0;

    assert( sock->obj.ops == &sock_ops );

    if (!sock->type) /* not initialized yet */
        return -1;

    switch (sock->state)
    {
    case SOCK_UNCONNECTED:
        /* A connection-mode Windows socket which has never been connected does
         * not return any events, but Linux returns POLLOUT | POLLHUP. Hence we
         * need to return -1 here, to prevent the socket from being polled on at
         * all. */
        return -1;

    case SOCK_CONNECTING:
        return POLLOUT;

    case SOCK_LISTENING:
        if (!list_empty( &sock->accept_list ) || (mask & AFD_POLL_ACCEPT))
            ev |= POLLIN;
        break;

    case SOCK_CONNECTED:
    case SOCK_CONNECTIONLESS:
        if (sock->hangup && sock->wr_shutdown && !sock->wr_shutdown_pending)
        {
            /* Linux returns POLLHUP if a socket is both SHUT_RD and SHUT_WR, or
             * if both the socket and its peer are SHUT_WR.
             *
             * We don't use SHUT_RD, so we can only encounter this in the latter
             * case. In that case there can't be any pending read requests (they
             * would have already been completed with a length of zero), the
             * above condition ensures that we don't have any pending write
             * requests, and nothing that can change about the socket state that
             * would complete a pending poll request. */
            return -1;
        }

        if (sock->aborted)
            return -1;

        if (sock->accept_recv_req)
        {
            /* an accept is still waiting for its initial data */
            ev |= POLLIN;
        }
        else if (async_queued( &sock->read_q ))
        {
            /* only poll if the async is not already being satisfied */
            if (async_waiting( &sock->read_q )) ev |= POLLIN | POLLPRI;
        }
        else
        {
            /* Don't ask for POLLIN if we got a hangup. We won't receive more
             * data anyway, but we will get POLLIN if SOCK_SHUTDOWN_EOF. */
            if (!sock->hangup)
            {
                if (mask & AFD_POLL_READ)
                    ev |= POLLIN;
                if (mask & AFD_POLL_OOB)
                    ev |= POLLPRI;
            }

            /* We use POLLIN with 0 bytes recv() as hangup indication for stream sockets. */
            if (sock->state == SOCK_CONNECTED && (mask & AFD_POLL_HUP) && !(sock->reported_events & AFD_POLL_READ))
                ev |= POLLIN;
        }

        if (async_queued( &sock->write_q ))
        {
            if (async_waiting( &sock->write_q )) ev |= POLLOUT;
        }
        else if (!sock->wr_shutdown && (mask & AFD_POLL_WRITE))
        {
            ev |= POLLOUT;
        }

        break;
    }

    /* also watch whatever the outstanding poll requests are interested in;
     * poll_flags_from_afd() can only return -1 for SOCK_UNCONNECTED, which
     * was already handled above */
    LIST_FOR_EACH_ENTRY( req, &poll_list, struct poll_req, entry )
    {
        unsigned int i;

        for (i = 0; i < req->count; ++i)
        {
            if (req->sockets[i].sock != sock) continue;

            ev |= poll_flags_from_afd( sock, req->sockets[i].mask );
        }
    }

    return ev;
}
/* fd operation: all socket fds are of type FD_TYPE_SOCKET */
static enum server_fd_type sock_get_fd_type( struct fd *fd )
{
    return FD_TYPE_SOCKET;
}
  1114. static void sock_cancel_async( struct fd *fd, struct async *async )
  1115. {
  1116. struct poll_req *req;
  1117. LIST_FOR_EACH_ENTRY( req, &poll_list, struct poll_req, entry )
  1118. {
  1119. unsigned int i;
  1120. if (req->async != async)
  1121. continue;
  1122. for (i = 0; i < req->count; i++)
  1123. {
  1124. struct sock *sock = req->sockets[i].sock;
  1125. if (sock->main_poll == req)
  1126. sock->main_poll = NULL;
  1127. }
  1128. }
  1129. async_terminate( async, STATUS_CANCELLED );
  1130. }
  1131. static void sock_queue_async( struct fd *fd, struct async *async, int type, int count )
  1132. {
  1133. struct sock *sock = get_fd_user( fd );
  1134. struct async_queue *queue;
  1135. assert( sock->obj.ops == &sock_ops );
  1136. switch (type)
  1137. {
  1138. case ASYNC_TYPE_READ:
  1139. if (sock->rd_shutdown)
  1140. {
  1141. set_error( STATUS_PIPE_DISCONNECTED );
  1142. return;
  1143. }
  1144. queue = &sock->read_q;
  1145. break;
  1146. case ASYNC_TYPE_WRITE:
  1147. if (sock->wr_shutdown)
  1148. {
  1149. set_error( STATUS_PIPE_DISCONNECTED );
  1150. return;
  1151. }
  1152. queue = &sock->write_q;
  1153. break;
  1154. default:
  1155. set_error( STATUS_INVALID_PARAMETER );
  1156. return;
  1157. }
  1158. if (sock->state != SOCK_CONNECTED)
  1159. {
  1160. set_error( STATUS_PIPE_DISCONNECTED );
  1161. return;
  1162. }
  1163. queue_async( queue, async );
  1164. sock_reselect( sock );
  1165. set_error( STATUS_PENDING );
  1166. }
/* fd operation: re-evaluate the socket's poll mask after an async queue
 * changed; also perform any write shutdown that was deferred until the
 * write queue drained. */
static void sock_reselect_async( struct fd *fd, struct async_queue *queue )
{
    struct sock *sock = get_fd_user( fd );

    if (sock->wr_shutdown_pending && list_empty( &sock->write_q.queue ))
    {
        shutdown( get_unix_fd( sock->fd ), SHUT_WR );
        sock->wr_shutdown_pending = 0;
    }

    /* Don't reselect the ifchange queue; we always ask for POLLIN.
     * Don't reselect an uninitialized socket; we can't call set_fd_events() on
     * a pseudo-fd. */
    if (queue != &sock->ifchange_q && sock->type)
        sock_reselect( sock );
}
/* object operation: return a new reference to the socket's fd object */
static struct fd *sock_get_fd( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    return (struct fd *)grab_object( sock->fd );
}
/* object operation: called when a handle to the socket is closed.  On the
 * last handle, cancel all pending asyncs and report AFD_POLL_CLOSE to any
 * poll request watching this socket. */
static int sock_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct sock *sock = (struct sock *)obj;

    if (sock->obj.handle_count == 1) /* last handle */
    {
        struct accept_req *accept_req, *accept_next;
        struct poll_req *poll_req, *poll_next;

        if (sock->accept_recv_req)
            async_terminate( sock->accept_recv_req->async, STATUS_CANCELLED );

        /* _SAFE: terminating a request removes it from the list */
        LIST_FOR_EACH_ENTRY_SAFE( accept_req, accept_next, &sock->accept_list, struct accept_req, entry )
            async_terminate( accept_req->async, STATUS_CANCELLED );

        if (sock->connect_req)
            async_terminate( sock->connect_req->async, STATUS_CANCELLED );

        LIST_FOR_EACH_ENTRY_SAFE( poll_req, poll_next, &poll_list, struct poll_req, entry )
        {
            struct iosb *iosb = poll_req->iosb;
            BOOL signaled = FALSE;
            unsigned int i;

            if (iosb->status != STATUS_PENDING) continue;

            /* mark every entry that refers to this socket as closed */
            for (i = 0; i < poll_req->count; ++i)
            {
                if (poll_req->sockets[i].sock == sock)
                {
                    signaled = TRUE;
                    poll_req->sockets[i].flags = AFD_POLL_CLOSE;
                    poll_req->sockets[i].status = 0;
                }
            }

            if (signaled) complete_async_poll( poll_req, STATUS_SUCCESS );
        }
    }
    return 1;
}
/* object operation: free all resources owned by the socket object */
static void sock_destroy( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;

    assert( obj->ops == &sock_ops );

    /* FIXME: special socket shutdown stuff? */

    if ( sock->deferred )
        release_object( sock->deferred );

    async_wake_up( &sock->ifchange_q, STATUS_CANCELLED );
    sock_release_ifchange( sock );
    free_async_queue( &sock->read_q );
    free_async_queue( &sock->write_q );
    free_async_queue( &sock->ifchange_q );
    free_async_queue( &sock->accept_q );
    free_async_queue( &sock->connect_q );
    free_async_queue( &sock->poll_q );
    if (sock->event) release_object( sock->event );
    if (sock->fd)
    {
        /* shut the socket down to force pending poll() calls in the client to return */
        shutdown( get_unix_fd(sock->fd), SHUT_RDWR );
        release_object( sock->fd );
    }
}
/* Allocate a new socket object with all fields in their default state.
 * The underlying Unix fd is created later by init_socket(). */
static struct sock *create_socket(void)
{
    struct sock *sock;

    if (!(sock = alloc_object( &sock_ops ))) return NULL;
    sock->fd      = NULL;
    sock->state   = SOCK_UNCONNECTED;
    sock->mask    = 0;
    sock->pending_events = 0;
    sock->reported_events = 0;
    sock->flags   = 0;
    sock->proto   = 0;
    sock->type    = 0;   /* 0 marks the socket as not yet initialized */
    sock->family  = 0;
    sock->event   = NULL;
    sock->window  = 0;
    sock->message = 0;
    sock->wparam  = 0;
    sock->connect_time = 0;
    sock->deferred = NULL;
    sock->ifchange_obj = NULL;
    sock->accept_recv_req = NULL;
    sock->connect_req = NULL;
    sock->main_poll = NULL;
    memset( &sock->addr, 0, sizeof(sock->addr) );
    sock->addr_len = 0;
    sock->rd_shutdown = 0;
    sock->wr_shutdown = 0;
    sock->wr_shutdown_pending = 0;
    sock->hangup = 0;
    sock->aborted = 0;
    sock->nonblocking = 0;
    sock->bound = 0;
    sock->rcvbuf = 0;
    sock->sndbuf = 0;
    sock->rcvtimeo = 0;
    sock->sndtimeo = 0;
    init_async_queue( &sock->read_q );
    init_async_queue( &sock->write_q );
    init_async_queue( &sock->ifchange_q );
    init_async_queue( &sock->accept_q );
    init_async_queue( &sock->connect_q );
    init_async_queue( &sock->poll_q );
    memset( sock->errors, 0, sizeof(sock->errors) );
    list_init( &sock->accept_list );
    return sock;
}
  1288. static int get_unix_family( int family )
  1289. {
  1290. switch (family)
  1291. {
  1292. case WS_AF_INET: return AF_INET;
  1293. case WS_AF_INET6: return AF_INET6;
  1294. #ifdef HAS_IPX
  1295. case WS_AF_IPX: return AF_IPX;
  1296. #endif
  1297. #ifdef AF_IRDA
  1298. case WS_AF_IRDA: return AF_IRDA;
  1299. #endif
  1300. case WS_AF_UNSPEC: return AF_UNSPEC;
  1301. default: return -1;
  1302. }
  1303. }
  1304. static int get_unix_type( int type )
  1305. {
  1306. switch (type)
  1307. {
  1308. case WS_SOCK_DGRAM: return SOCK_DGRAM;
  1309. case WS_SOCK_RAW: return SOCK_RAW;
  1310. case WS_SOCK_STREAM: return SOCK_STREAM;
  1311. default: return -1;
  1312. }
  1313. }
  1314. static int get_unix_protocol( int protocol )
  1315. {
  1316. if (protocol >= WS_NSPROTO_IPX && protocol <= WS_NSPROTO_IPX + 255)
  1317. return protocol;
  1318. switch (protocol)
  1319. {
  1320. case WS_IPPROTO_ICMP: return IPPROTO_ICMP;
  1321. case WS_IPPROTO_IGMP: return IPPROTO_IGMP;
  1322. case WS_IPPROTO_IP: return IPPROTO_IP;
  1323. case WS_IPPROTO_IPV4: return IPPROTO_IPIP;
  1324. case WS_IPPROTO_IPV6: return IPPROTO_IPV6;
  1325. case WS_IPPROTO_RAW: return IPPROTO_RAW;
  1326. case WS_IPPROTO_TCP: return IPPROTO_TCP;
  1327. case WS_IPPROTO_UDP: return IPPROTO_UDP;
  1328. default: return -1;
  1329. }
  1330. }
/* Enable or disable the "don't fragment" behaviour on an IPv4 or IPv6
 * socket, using whichever option this platform provides (IP_DONTFRAG on
 * BSDs, IP_MTU_DISCOVER on Linux); a silent no-op when neither exists. */
static void set_dont_fragment( int fd, int level, int value )
{
    int optname;

    if (level == IPPROTO_IP)
    {
#ifdef IP_DONTFRAG
        optname = IP_DONTFRAG;
#elif defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DO) && defined(IP_PMTUDISC_DONT)
        optname = IP_MTU_DISCOVER;
        /* Linux has no direct DF flag; emulate via path-MTU discovery mode */
        value = value ? IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
#else
        return;
#endif
    }
    else
    {
#ifdef IPV6_DONTFRAG
        optname = IPV6_DONTFRAG;
#elif defined(IPV6_MTU_DISCOVER) && defined(IPV6_PMTUDISC_DO) && defined(IPV6_PMTUDISC_DONT)
        optname = IPV6_MTU_DISCOVER;
        value = value ? IPV6_PMTUDISC_DO : IPV6_PMTUDISC_DONT;
#else
        return;
#endif
    }

    /* best-effort: failure to set the option is deliberately ignored */
    setsockopt( fd, level, optname, &value, sizeof(value) );
}
/* Create the underlying Unix socket for a socket object and initialize
 * the object's fields from the requested Windows family/type/protocol.
 * Returns 0 on success, -1 on failure (with the Win32 error set). */
static int init_socket( struct sock *sock, int family, int type, int protocol, unsigned int flags )
{
    unsigned int options = 0;
    int sockfd, unix_type, unix_family, unix_protocol, value;
    socklen_t len;

    unix_family = get_unix_family( family );
    unix_type = get_unix_type( type );
    unix_protocol = get_unix_protocol( protocol );

    if (unix_protocol < 0)
    {
        if (type && unix_type < 0)
            set_win32_error( WSAESOCKTNOSUPPORT );
        else
            set_win32_error( WSAEPROTONOSUPPORT );
        return -1;
    }
    if (unix_family < 0)
    {
        if (family >= 0 && unix_type < 0)
            set_win32_error( WSAESOCKTNOSUPPORT );
        else
            set_win32_error( WSAEAFNOSUPPORT );
        return -1;
    }

    sockfd = socket( unix_family, unix_type, unix_protocol );
    if (sockfd == -1)
    {
        if (errno == EINVAL) set_win32_error( WSAESOCKTNOSUPPORT );
        else set_win32_error( sock_get_error( errno ));
        return -1;
    }
    fcntl(sockfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */

    if (family == WS_AF_IPX && protocol >= WS_NSPROTO_IPX && protocol <= WS_NSPROTO_IPX + 255)
    {
#ifdef HAS_IPX
        int ipx_type = protocol - WS_NSPROTO_IPX;

#ifdef SOL_IPX
        setsockopt( sockfd, SOL_IPX, IPX_TYPE, &ipx_type, sizeof(ipx_type) );
#else
        struct ipx val;
        /* Should we retrieve val using a getsockopt call and then
         * set the modified one? */
        val.ipx_pt = ipx_type;
        setsockopt( sockfd, 0, SO_DEFAULT_HEADERS, &val, sizeof(val) );
#endif
#endif
    }

    if (unix_family == AF_INET || unix_family == AF_INET6)
    {
        /* ensure IP_DONTFRAGMENT is disabled for SOCK_DGRAM and SOCK_RAW, enabled for SOCK_STREAM */
        if (unix_type == SOCK_DGRAM || unix_type == SOCK_RAW) /* in Linux the global default can be enabled */
            set_dont_fragment( sockfd, unix_family == AF_INET6 ? IPPROTO_IPV6 : IPPROTO_IP, FALSE );
        else if (unix_type == SOCK_STREAM)
            set_dont_fragment( sockfd, unix_family == AF_INET6 ? IPPROTO_IPV6 : IPPROTO_IP, TRUE );
    }

#ifdef IPV6_V6ONLY
    if (unix_family == AF_INET6)
    {
        /* Windows sockets are v6-only by default */
        static const int enable = 1;
        setsockopt( sockfd, IPPROTO_IPV6, IPV6_V6ONLY, &enable, sizeof(enable) );
    }
#endif

    /* cache the kernel's buffer sizes; failures leave the fields at 0 */
    len = sizeof(value);
    if (!getsockopt( sockfd, SOL_SOCKET, SO_RCVBUF, &value, &len ))
        sock->rcvbuf = value;

    len = sizeof(value);
    if (!getsockopt( sockfd, SOL_SOCKET, SO_SNDBUF, &value, &len ))
        sock->sndbuf = value;

    sock->state  = (type == WS_SOCK_STREAM ? SOCK_UNCONNECTED : SOCK_CONNECTIONLESS);
    sock->flags  = flags;
    sock->proto  = protocol;
    sock->type   = type;
    sock->family = family;

    /* re-initializing an existing socket: keep the fd options */
    if (sock->fd)
    {
        options = get_fd_options( sock->fd );
        release_object( sock->fd );
    }

    if (!(sock->fd = create_anonymous_fd( &sock_fd_ops, sockfd, &sock->obj, options )))
    {
        return -1;
    }

    /* We can't immediately allow caching for a connection-mode socket, since it
     * might be accepted into (changing the underlying fd object.) */
    if (sock->type != WS_SOCK_STREAM) allow_fd_caching( sock->fd );

    return 0;
}
  1445. /* accepts a socket and inits it */
  1446. static int accept_new_fd( struct sock *sock )
  1447. {
  1448. /* Try to accept(2). We can't be safe that this an already connected socket
  1449. * or that accept() is allowed on it. In those cases we will get -1/errno
  1450. * return.
  1451. */
  1452. struct sockaddr saddr;
  1453. socklen_t slen = sizeof(saddr);
  1454. int acceptfd = accept( get_unix_fd(sock->fd), &saddr, &slen );
  1455. if (acceptfd != -1)
  1456. fcntl( acceptfd, F_SETFL, O_NONBLOCK );
  1457. else
  1458. set_error( sock_get_ntstatus( errno ));
  1459. return acceptfd;
  1460. }
/* accept a socket (creates a new fd) */
static struct sock *accept_socket( struct sock *sock )
{
    struct sock *acceptsock;
    int acceptfd;

    if (get_unix_fd( sock->fd ) == -1) return NULL;

    if ( sock->deferred )
    {
        /* a connection previously deferred back to this listening socket
         * (IOCTL_AFD_WINE_DEFER) is handed out before accepting a new one */
        acceptsock = sock->deferred;
        sock->deferred = NULL;
    }
    else
    {
        union unix_sockaddr unix_addr;
        socklen_t unix_len;

        if ((acceptfd = accept_new_fd( sock )) == -1) return NULL;
        if (!(acceptsock = create_socket()))
        {
            close( acceptfd );
            return NULL;
        }

        /* newly created socket gets the same properties of the listening socket */
        acceptsock->state = SOCK_CONNECTED;
        acceptsock->bound = 1;
        acceptsock->nonblocking = sock->nonblocking;
        acceptsock->mask = sock->mask;
        acceptsock->proto = sock->proto;
        acceptsock->type = sock->type;
        acceptsock->family = sock->family;
        acceptsock->window = sock->window;
        acceptsock->message = sock->message;
        acceptsock->connect_time = current_time;
        if (sock->event) acceptsock->event = (struct event *)grab_object( sock->event );
        acceptsock->flags = sock->flags;
        if (!(acceptsock->fd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj,
                                                    get_fd_options( sock->fd ) )))
        {
            release_object( acceptsock );
            return NULL;
        }

        /* record the local address of the accepted connection in Windows format */
        unix_len = sizeof(unix_addr);
        if (!getsockname( acceptfd, &unix_addr.addr, &unix_len ))
            acceptsock->addr_len = sockaddr_from_unix( &unix_addr, &acceptsock->addr.addr, sizeof(acceptsock->addr) );
    }

    clear_error();
    /* the pending accept has been consumed; re-arm polling for further ones */
    sock->pending_events &= ~AFD_POLL_ACCEPT;
    sock->reported_events &= ~AFD_POLL_ACCEPT;
    sock_reselect( sock );
    return acceptsock;
}
/* accept a connection from "sock" directly into the existing socket object
 * "acceptsock" (AcceptEx-style), replacing its underlying fd object.
 * Returns TRUE on success, FALSE on failure with the error set. */
static int accept_into_socket( struct sock *sock, struct sock *acceptsock )
{
    union unix_sockaddr unix_addr;
    socklen_t unix_len;
    int acceptfd;
    struct fd *newfd;

    if (get_unix_fd( sock->fd ) == -1) return FALSE;

    if ( sock->deferred )
    {
        /* duplicate the fd object of the previously deferred connection into
         * the target socket object */
        newfd = dup_fd_object( sock->deferred->fd, 0, 0,
                               get_fd_options( acceptsock->fd ) );
        if ( !newfd )
            return FALSE;

        set_fd_user( newfd, &sock_fd_ops, &acceptsock->obj );

        release_object( sock->deferred );
        sock->deferred = NULL;
    }
    else
    {
        if ((acceptfd = accept_new_fd( sock )) == -1)
            return FALSE;

        if (!(newfd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj,
                                           get_fd_options( acceptsock->fd ) )))
            return FALSE;
    }

    /* reset the target socket's state to that of a freshly accepted connection */
    acceptsock->state = SOCK_CONNECTED;
    acceptsock->pending_events = 0;
    acceptsock->reported_events = 0;
    acceptsock->proto = sock->proto;
    acceptsock->type = sock->type;
    acceptsock->family = sock->family;
    acceptsock->wparam = 0;
    acceptsock->deferred = NULL;
    acceptsock->connect_time = current_time;
    /* carry the completion port association over to the new fd before
     * swapping it in */
    fd_copy_completion( acceptsock->fd, newfd );
    release_object( acceptsock->fd );
    acceptsock->fd = newfd;

    unix_len = sizeof(unix_addr);
    if (!getsockname( get_unix_fd( newfd ), &unix_addr.addr, &unix_len ))
        acceptsock->addr_len = sockaddr_from_unix( &unix_addr, &acceptsock->addr.addr, sizeof(acceptsock->addr) );

    clear_error();
    sock->pending_events &= ~AFD_POLL_ACCEPT;
    sock->reported_events &= ~AFD_POLL_ACCEPT;
    sock_reselect( sock );
    return TRUE;
}
  1557. #ifdef IP_BOUND_IF
  1558. static int bind_to_iface_name( int fd, in_addr_t bind_addr, const char *name )
  1559. {
  1560. static const int enable = 1;
  1561. unsigned int index;
  1562. if (!(index = if_nametoindex( name )))
  1563. return -1;
  1564. if (setsockopt( fd, IPPROTO_IP, IP_BOUND_IF, &index, sizeof(index) ))
  1565. return -1;
  1566. return setsockopt( fd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable) );
  1567. }
  1568. #elif defined(IP_UNICAST_IF) && defined(SO_ATTACH_FILTER) && defined(SO_BINDTODEVICE)
/* BPF classifier used to emulate binding a UDP socket to a single interface.
 * The struct mirrors the instruction layout of the filter program; the jump
 * offset macros below depend on this exact field order. */
struct interface_filter
{
    struct sock_filter iface_memaddr;
    struct sock_filter iface_rule;
    struct sock_filter ip_memaddr;
    struct sock_filter ip_rule;
    struct sock_filter return_keep;
    struct sock_filter return_dump;
};
/* relative jump distance (in instructions) from "here" to the keep/dump stubs */
# define FILTER_JUMP_DUMP(here)  (u_char)(offsetof(struct interface_filter, return_dump) \
                                 -offsetof(struct interface_filter, here)-sizeof(struct sock_filter)) \
                                 /sizeof(struct sock_filter)
# define FILTER_JUMP_KEEP(here)  (u_char)(offsetof(struct interface_filter, return_keep) \
                                 -offsetof(struct interface_filter, here)-sizeof(struct sock_filter)) \
                                 /sizeof(struct sock_filter)
# define FILTER_JUMP_NEXT()      (u_char)(0)
# define SKF_NET_DESTIP 16 /* offset in the network header to the destination IP */
/* template program; the 0xdeadbeef placeholders are patched with the real
 * interface index and IP address in bind_to_iface_name() before attaching */
static struct interface_filter generic_interface_filter =
{
    /* This filter rule allows incoming packets on the specified interface, which works for all
     * remotely generated packets and for locally generated broadcast packets. */
    BPF_STMT(BPF_LD+BPF_W+BPF_ABS, SKF_AD_OFF+SKF_AD_IFINDEX),
    BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0xdeadbeef, FILTER_JUMP_KEEP(iface_rule), FILTER_JUMP_NEXT()),
    /* This rule allows locally generated packets targeted at the specific IP address of the chosen
     * adapter (local packets not destined for the broadcast address do not have IFINDEX set) */
    BPF_STMT(BPF_LD+BPF_W+BPF_ABS, SKF_NET_OFF+SKF_NET_DESTIP),
    BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0xdeadbeef, FILTER_JUMP_KEEP(ip_rule), FILTER_JUMP_DUMP(ip_rule)),
    BPF_STMT(BPF_RET+BPF_K, (u_int)-1), /* keep packet */
    BPF_STMT(BPF_RET+BPF_K, 0) /* dump packet */
};
/* restrict "fd" to the interface "name": first try SO_BINDTODEVICE, and if
 * that fails fall back to IP_UNICAST_IF (egress) plus an attached BPF filter
 * that drops incoming packets not meant for the interface.
 * Returns 0 on success, -1 on failure. */
static int bind_to_iface_name( int fd, in_addr_t bind_addr, const char *name )
{
    struct interface_filter specific_interface_filter;
    struct sock_fprog filter_prog;
    static const int enable = 1;
    unsigned int index;
    in_addr_t ifindex;

    if (!setsockopt( fd, SOL_SOCKET, SO_BINDTODEVICE, name, strlen( name ) + 1 ))
        return 0;

    /* SO_BINDTODEVICE requires NET_CAP_RAW until Linux 5.7. */
    if (debug_level)
        fprintf( stderr, "setsockopt SO_BINDTODEVICE fd %d, name %s failed: %s, falling back to SO_REUSE_ADDR\n",
                 fd, name, strerror( errno ));

    if (!(index = if_nametoindex( name )))
        return -1;
    /* suggest the egress interface for outgoing unicast packets */
    ifindex = htonl( index );
    if (setsockopt( fd, IPPROTO_IP, IP_UNICAST_IF, &ifindex, sizeof(ifindex) ) < 0)
        return -1;

    /* patch the filter template with the actual interface index and address,
     * then attach it to drop unrelated incoming packets */
    specific_interface_filter = generic_interface_filter;
    specific_interface_filter.iface_rule.k = index;
    specific_interface_filter.ip_rule.k = htonl( bind_addr );
    filter_prog.len = sizeof(generic_interface_filter) / sizeof(struct sock_filter);
    filter_prog.filter = (struct sock_filter *)&specific_interface_filter;
    if (setsockopt( fd, SOL_SOCKET, SO_ATTACH_FILTER, &filter_prog, sizeof(filter_prog) ))
        return -1;

    return setsockopt( fd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable) );
}
  1626. #else
  1627. static int bind_to_iface_name( int fd, in_addr_t bind_addr, const char *name )
  1628. {
  1629. errno = EOPNOTSUPP;
  1630. return -1;
  1631. }
  1632. #endif /* LINUX_BOUND_IF */
/* Take bind() calls on any name corresponding to a local network adapter and
 * restrict the given socket to operating only on the specified interface. This
 * restriction consists of two components:
 *  1) An outgoing packet restriction suggesting the egress interface for all
 *     packets.
 *  2) An incoming packet restriction dropping packets not meant for the
 *     interface.
 * If the function succeeds in placing these restrictions, then the name for the
 * bind() may safely be changed to INADDR_ANY, permitting the transmission and
 * receipt of broadcast packets on the socket. This behavior is only relevant to
 * UDP sockets and is needed for applications that expect to be able to receive
 * broadcast packets on a socket that is bound to a specific network interface.
 */
static int bind_to_interface( struct sock *sock, const struct sockaddr_in *addr )
{
    in_addr_t bind_addr = addr->sin_addr.s_addr;
    struct ifaddrs *ifaddrs, *ifaddr;
    int fd = get_unix_fd( sock->fd );
    int err = 0;

    /* the wildcard and loopback addresses need no interface restriction */
    if (bind_addr == htonl( INADDR_ANY ) || bind_addr == htonl( INADDR_LOOPBACK ))
        return 0;
    /* the broadcast workaround described above only applies to UDP sockets */
    if (sock->type != WS_SOCK_DGRAM)
        return 0;

    /* treat enumeration failure as "no restriction placed" rather than error */
    if (getifaddrs( &ifaddrs ) < 0) return 0;

    for (ifaddr = ifaddrs; ifaddr != NULL; ifaddr = ifaddr->ifa_next)
    {
        if (ifaddr->ifa_addr && ifaddr->ifa_addr->sa_family == AF_INET
                && ((struct sockaddr_in *)ifaddr->ifa_addr)->sin_addr.s_addr == bind_addr)
        {
            if ((err = bind_to_iface_name( fd, bind_addr, ifaddr->ifa_name )) < 0)
            {
                if (debug_level)
                    fprintf( stderr, "failed to bind to interface: %s\n", strerror( errno ) );
            }
            break;
        }
    }
    freeifaddrs( ifaddrs );
    /* NB: returns nonzero only when the restriction was successfully placed,
     * telling the caller it may rewrite the bind address to INADDR_ANY */
    return !err;
}
  1673. #ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_SCOPE_ID
  1674. static unsigned int get_ipv6_interface_index( const struct in6_addr *addr )
  1675. {
  1676. struct ifaddrs *ifaddrs, *ifaddr;
  1677. if (getifaddrs( &ifaddrs ) < 0) return 0;
  1678. for (ifaddr = ifaddrs; ifaddr != NULL; ifaddr = ifaddr->ifa_next)
  1679. {
  1680. if (ifaddr->ifa_addr && ifaddr->ifa_addr->sa_family == AF_INET6
  1681. && !memcmp( &((struct sockaddr_in6 *)ifaddr->ifa_addr)->sin6_addr, addr, sizeof(*addr) ))
  1682. {
  1683. unsigned int index = if_nametoindex( ifaddr->ifa_name );
  1684. if (!index)
  1685. {
  1686. if (debug_level)
  1687. fprintf( stderr, "Unable to look up interface index for %s: %s\n",
  1688. ifaddr->ifa_name, strerror( errno ) );
  1689. continue;
  1690. }
  1691. freeifaddrs( ifaddrs );
  1692. return index;
  1693. }
  1694. }
  1695. freeifaddrs( ifaddrs );
  1696. return 0;
  1697. }
  1698. #endif
  1699. /* return an errno value mapped to a WSA error */
/* return an errno value mapped to a WSA error */
static unsigned int sock_get_error( int err )
{
    switch (err)
    {
        case EINTR: return WSAEINTR;
        case EBADF: return WSAEBADF;
        case EPERM:
        case EACCES: return WSAEACCES;
        case EFAULT: return WSAEFAULT;
        case EINVAL: return WSAEINVAL;
        case EMFILE: return WSAEMFILE;
        /* both map to would-block: Winsock has no separate EINPROGRESS for
         * nonblocking operations */
        case EINPROGRESS:
        case EWOULDBLOCK: return WSAEWOULDBLOCK;
        case EALREADY: return WSAEALREADY;
        case ENOTSOCK: return WSAENOTSOCK;
        case EDESTADDRREQ: return WSAEDESTADDRREQ;
        case EMSGSIZE: return WSAEMSGSIZE;
        case EPROTOTYPE: return WSAEPROTOTYPE;
        case ENOPROTOOPT: return WSAENOPROTOOPT;
        case EPROTONOSUPPORT: return WSAEPROTONOSUPPORT;
        case ESOCKTNOSUPPORT: return WSAESOCKTNOSUPPORT;
        case EOPNOTSUPP: return WSAEOPNOTSUPP;
        case EPFNOSUPPORT: return WSAEPFNOSUPPORT;
        case EAFNOSUPPORT: return WSAEAFNOSUPPORT;
        case EADDRINUSE: return WSAEADDRINUSE;
        case EADDRNOTAVAIL: return WSAEADDRNOTAVAIL;
        case ENETDOWN: return WSAENETDOWN;
        case ENETUNREACH: return WSAENETUNREACH;
        case ENETRESET: return WSAENETRESET;
        case ECONNABORTED: return WSAECONNABORTED;
        /* writing to a closed socket raises EPIPE on Unix; Windows reports it
         * as a connection reset */
        case EPIPE:
        case ECONNRESET: return WSAECONNRESET;
        case ENOBUFS: return WSAENOBUFS;
        case EISCONN: return WSAEISCONN;
        case ENOTCONN: return WSAENOTCONN;
        case ESHUTDOWN: return WSAESHUTDOWN;
        case ETOOMANYREFS: return WSAETOOMANYREFS;
        case ETIMEDOUT: return WSAETIMEDOUT;
        case ECONNREFUSED: return WSAECONNREFUSED;
        case ELOOP: return WSAELOOP;
        case ENAMETOOLONG: return WSAENAMETOOLONG;
        case EHOSTDOWN: return WSAEHOSTDOWN;
        case EHOSTUNREACH: return WSAEHOSTUNREACH;
        case ENOTEMPTY: return WSAENOTEMPTY;
/* the following errno values are not defined on every platform */
#ifdef EPROCLIM
        case EPROCLIM: return WSAEPROCLIM;
#endif
#ifdef EUSERS
        case EUSERS: return WSAEUSERS;
#endif
#ifdef EDQUOT
        case EDQUOT: return WSAEDQUOT;
#endif
#ifdef ESTALE
        case ESTALE: return WSAESTALE;
#endif
#ifdef EREMOTE
        case EREMOTE: return WSAEREMOTE;
#endif

        case 0: return 0;
        default:
            /* unexpected errno: log it so the missing mapping can be added */
            errno = err;
            perror("wineserver: sock_get_error() can't map error");
            return WSAEFAULT;
    }
}
/* return an errno value mapped to an NTSTATUS code */
static int sock_get_ntstatus( int err )
{
    switch ( err )
    {
        case EBADF: return STATUS_INVALID_HANDLE;
        case EBUSY: return STATUS_DEVICE_BUSY;
        case EPERM:
        case EACCES: return STATUS_ACCESS_DENIED;
        case EFAULT: return STATUS_ACCESS_VIOLATION;
        case EINVAL: return STATUS_INVALID_PARAMETER;
        case ENFILE:
        case EMFILE: return STATUS_TOO_MANY_OPENED_FILES;
        /* nonblocking operation still in progress */
        case EINPROGRESS:
        case EWOULDBLOCK: return STATUS_DEVICE_NOT_READY;
        case EALREADY: return STATUS_NETWORK_BUSY;
        case ENOTSOCK: return STATUS_OBJECT_TYPE_MISMATCH;
        case EDESTADDRREQ: return STATUS_INVALID_PARAMETER;
        case EMSGSIZE: return STATUS_BUFFER_OVERFLOW;
        case EPROTONOSUPPORT:
        case ESOCKTNOSUPPORT:
        case EPFNOSUPPORT:
        case EAFNOSUPPORT:
        case EPROTOTYPE: return STATUS_NOT_SUPPORTED;
        case ENOPROTOOPT: return STATUS_INVALID_PARAMETER;
        case EOPNOTSUPP: return STATUS_NOT_SUPPORTED;
        case EADDRINUSE: return STATUS_SHARING_VIOLATION;
        /* Linux returns ENODEV when specifying an invalid sin6_scope_id;
         * Windows returns STATUS_INVALID_ADDRESS_COMPONENT */
        case ENODEV:
        case EADDRNOTAVAIL: return STATUS_INVALID_ADDRESS_COMPONENT;
        case ECONNREFUSED: return STATUS_CONNECTION_REFUSED;
        case ESHUTDOWN: return STATUS_PIPE_DISCONNECTED;
        case ENOTCONN: return STATUS_INVALID_CONNECTION;
        case ETIMEDOUT: return STATUS_IO_TIMEOUT;
        case ENETUNREACH: return STATUS_NETWORK_UNREACHABLE;
        case EHOSTUNREACH: return STATUS_HOST_UNREACHABLE;
        case ENETDOWN: return STATUS_NETWORK_BUSY;
        /* writing to a closed socket raises EPIPE on Unix; report as reset */
        case EPIPE:
        case ECONNRESET: return STATUS_CONNECTION_RESET;
        case ECONNABORTED: return STATUS_CONNECTION_ABORTED;
        case EISCONN: return STATUS_CONNECTION_ACTIVE;

        case 0: return STATUS_SUCCESS;
        default:
            /* unexpected errno: log it so the missing mapping can be added */
            errno = err;
            perror("wineserver: sock_get_ntstatus() can't map error");
            return STATUS_UNSUCCESSFUL;
    }
}
  1814. static struct accept_req *alloc_accept_req( struct sock *sock, struct sock *acceptsock, struct async *async,
  1815. const struct afd_accept_into_params *params )
  1816. {
  1817. struct accept_req *req = mem_alloc( sizeof(*req) );
  1818. if (req)
  1819. {
  1820. req->async = (struct async *)grab_object( async );
  1821. req->iosb = async_get_iosb( async );
  1822. req->sock = (struct sock *)grab_object( sock );
  1823. req->acceptsock = acceptsock;
  1824. if (acceptsock) grab_object( acceptsock );
  1825. req->accepted = 0;
  1826. req->recv_len = 0;
  1827. req->local_len = 0;
  1828. if (params)
  1829. {
  1830. req->recv_len = params->recv_len;
  1831. req->local_len = params->local_len;
  1832. }
  1833. }
  1834. return req;
  1835. }
  1836. static void sock_ioctl( struct fd *fd, ioctl_code_t code, struct async *async )
  1837. {
  1838. struct sock *sock = get_fd_user( fd );
  1839. int unix_fd;
  1840. assert( sock->obj.ops == &sock_ops );
  1841. if (code != IOCTL_AFD_WINE_CREATE && (unix_fd = get_unix_fd( fd )) < 0) return;
  1842. switch(code)
  1843. {
  1844. case IOCTL_AFD_WINE_CREATE:
  1845. {
  1846. const struct afd_create_params *params = get_req_data();
  1847. if (get_req_data_size() != sizeof(*params))
  1848. {
  1849. set_error( STATUS_INVALID_PARAMETER );
  1850. return;
  1851. }
  1852. init_socket( sock, params->family, params->type, params->protocol, params->flags );
  1853. return;
  1854. }
  1855. case IOCTL_AFD_WINE_ACCEPT:
  1856. {
  1857. struct sock *acceptsock;
  1858. obj_handle_t handle;
  1859. if (get_reply_max_size() != sizeof(handle))
  1860. {
  1861. set_error( STATUS_BUFFER_TOO_SMALL );
  1862. return;
  1863. }
  1864. if (!(acceptsock = accept_socket( sock )))
  1865. {
  1866. struct accept_req *req;
  1867. if (sock->nonblocking) return;
  1868. if (get_error() != STATUS_DEVICE_NOT_READY) return;
  1869. if (!(req = alloc_accept_req( sock, NULL, async, NULL ))) return;
  1870. list_add_tail( &sock->accept_list, &req->entry );
  1871. async_set_completion_callback( async, free_accept_req, req );
  1872. queue_async( &sock->accept_q, async );
  1873. sock_reselect( sock );
  1874. set_error( STATUS_PENDING );
  1875. return;
  1876. }
  1877. handle = alloc_handle( current->process, &acceptsock->obj,
  1878. GENERIC_READ | GENERIC_WRITE | SYNCHRONIZE, OBJ_INHERIT );
  1879. acceptsock->wparam = handle;
  1880. sock_reselect( acceptsock );
  1881. release_object( acceptsock );
  1882. set_reply_data( &handle, sizeof(handle) );
  1883. return;
  1884. }
  1885. case IOCTL_AFD_WINE_ACCEPT_INTO:
  1886. {
  1887. static const int access = FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | FILE_READ_DATA;
  1888. const struct afd_accept_into_params *params = get_req_data();
  1889. struct sock *acceptsock;
  1890. unsigned int remote_len;
  1891. struct accept_req *req;
  1892. if (get_req_data_size() != sizeof(*params) ||
  1893. get_reply_max_size() < params->recv_len ||
  1894. get_reply_max_size() - params->recv_len < params->local_len)
  1895. {
  1896. set_error( STATUS_BUFFER_TOO_SMALL );
  1897. return;
  1898. }
  1899. remote_len = get_reply_max_size() - params->recv_len - params->local_len;
  1900. if (remote_len < sizeof(int))
  1901. {
  1902. set_error( STATUS_INVALID_PARAMETER );
  1903. return;
  1904. }
  1905. if (!(acceptsock = (struct sock *)get_handle_obj( current->process, params->accept_handle, access, &sock_ops )))
  1906. return;
  1907. if (acceptsock->accept_recv_req)
  1908. {
  1909. release_object( acceptsock );
  1910. set_error( STATUS_INVALID_PARAMETER );
  1911. return;
  1912. }
  1913. if (!(req = alloc_accept_req( sock, acceptsock, async, params )))
  1914. {
  1915. release_object( acceptsock );
  1916. return;
  1917. }
  1918. list_add_tail( &sock->accept_list, &req->entry );
  1919. acceptsock->accept_recv_req = req;
  1920. release_object( acceptsock );
  1921. acceptsock->wparam = params->accept_handle;
  1922. async_set_completion_callback( async, free_accept_req, req );
  1923. queue_async( &sock->accept_q, async );
  1924. sock_reselect( sock );
  1925. set_error( STATUS_PENDING );
  1926. return;
  1927. }
  1928. case IOCTL_AFD_LISTEN:
  1929. {
  1930. const struct afd_listen_params *params = get_req_data();
  1931. if (get_req_data_size() < sizeof(*params))
  1932. {
  1933. set_error( STATUS_INVALID_PARAMETER );
  1934. return;
  1935. }
  1936. if (!sock->bound)
  1937. {
  1938. set_error( STATUS_INVALID_PARAMETER );
  1939. return;
  1940. }
  1941. if (listen( unix_fd, params->backlog ) < 0)
  1942. {
  1943. set_error( sock_get_ntstatus( errno ) );
  1944. return;
  1945. }
  1946. sock->state = SOCK_LISTENING;
  1947. /* a listening socket can no longer be accepted into */
  1948. allow_fd_caching( sock->fd );
  1949. /* we may already be selecting for AFD_POLL_ACCEPT */
  1950. sock_reselect( sock );
  1951. return;
  1952. }
  1953. case IOCTL_AFD_WINE_CONNECT:
  1954. {
  1955. const struct afd_connect_params *params = get_req_data();
  1956. const struct WS_sockaddr *addr;
  1957. union unix_sockaddr unix_addr;
  1958. struct connect_req *req;
  1959. socklen_t unix_len;
  1960. int send_len, ret;
  1961. if (get_req_data_size() < sizeof(*params) ||
  1962. get_req_data_size() - sizeof(*params) < params->addr_len)
  1963. {
  1964. set_error( STATUS_BUFFER_TOO_SMALL );
  1965. return;
  1966. }
  1967. send_len = get_req_data_size() - sizeof(*params) - params->addr_len;
  1968. addr = (const struct WS_sockaddr *)(params + 1);
  1969. if (!params->synchronous && !sock->bound)
  1970. {
  1971. set_error( STATUS_INVALID_PARAMETER );
  1972. return;
  1973. }
  1974. if (sock->accept_recv_req)
  1975. {
  1976. set_error( STATUS_INVALID_PARAMETER );
  1977. return;
  1978. }
  1979. if (sock->connect_req)
  1980. {
  1981. set_error( STATUS_INVALID_PARAMETER );
  1982. return;
  1983. }
  1984. switch (sock->state)
  1985. {
  1986. case SOCK_LISTENING:
  1987. set_error( STATUS_INVALID_PARAMETER );
  1988. return;
  1989. case SOCK_CONNECTING:
  1990. /* FIXME: STATUS_ADDRESS_ALREADY_ASSOCIATED probably isn't right,
  1991. * but there's no status code that maps to WSAEALREADY... */
  1992. set_error( params->synchronous ? STATUS_ADDRESS_ALREADY_ASSOCIATED : STATUS_INVALID_PARAMETER );
  1993. return;
  1994. case SOCK_CONNECTED:
  1995. set_error( STATUS_CONNECTION_ACTIVE );
  1996. return;
  1997. case SOCK_UNCONNECTED:
  1998. case SOCK_CONNECTIONLESS:
  1999. break;
  2000. }
  2001. unix_len = sockaddr_to_unix( addr, params->addr_len, &unix_addr );
  2002. if (!unix_len)
  2003. {
  2004. set_error( STATUS_INVALID_ADDRESS );
  2005. return;
  2006. }
  2007. if (unix_addr.addr.sa_family == AF_INET && !memcmp( &unix_addr.in.sin_addr, magic_loopback_addr, 4 ))
  2008. unix_addr.in.sin_addr.s_addr = htonl( INADDR_LOOPBACK );
  2009. ret = connect( unix_fd, &unix_addr.addr, unix_len );
  2010. if (ret < 0 && errno != EINPROGRESS)
  2011. {
  2012. set_error( sock_get_ntstatus( errno ) );
  2013. return;
  2014. }
  2015. /* a connected or connecting socket can no longer be accepted into */
  2016. allow_fd_caching( sock->fd );
  2017. unix_len = sizeof(unix_addr);
  2018. if (!sock->bound && !getsockname( unix_fd, &unix_addr.addr, &unix_len ))
  2019. sock->addr_len = sockaddr_from_unix( &unix_addr, &sock->addr.addr, sizeof(sock->addr) );
  2020. sock->bound = 1;
  2021. if (!ret)
  2022. {
  2023. sock->state = SOCK_CONNECTED;
  2024. if (!send_len) return;
  2025. }
  2026. sock->state = SOCK_CONNECTING;
  2027. if (params->synchronous && sock->nonblocking)
  2028. {
  2029. sock_reselect( sock );
  2030. set_error( STATUS_DEVICE_NOT_READY );
  2031. return;
  2032. }
  2033. if (!(req = mem_alloc( sizeof(*req) )))
  2034. return;
  2035. req->async = (struct async *)grab_object( async );
  2036. req->iosb = async_get_iosb( async );
  2037. req->sock = (struct sock *)grab_object( sock );
  2038. req->addr_len = params->addr_len;
  2039. req->send_len = send_len;
  2040. req->send_cursor = 0;
  2041. async_set_completion_callback( async, free_connect_req, req );
  2042. sock->connect_req = req;
  2043. queue_async( &sock->connect_q, async );
  2044. sock_reselect( sock );
  2045. set_error( STATUS_PENDING );
  2046. return;
  2047. }
  2048. case IOCTL_AFD_WINE_SHUTDOWN:
  2049. {
  2050. unsigned int how;
  2051. if (get_req_data_size() < sizeof(int))
  2052. {
  2053. set_error( STATUS_BUFFER_TOO_SMALL );
  2054. return;
  2055. }
  2056. how = *(int *)get_req_data();
  2057. if (how > SD_BOTH)
  2058. {
  2059. set_error( STATUS_INVALID_PARAMETER );
  2060. return;
  2061. }
  2062. if (sock->state != SOCK_CONNECTED && sock->state != SOCK_CONNECTIONLESS)
  2063. {
  2064. set_error( STATUS_INVALID_CONNECTION );
  2065. return;
  2066. }
  2067. if (how != SD_SEND)
  2068. {
  2069. sock->rd_shutdown = 1;
  2070. }
  2071. if (how != SD_RECEIVE)
  2072. {
  2073. sock->wr_shutdown = 1;
  2074. if (list_empty( &sock->write_q.queue ))
  2075. shutdown( unix_fd, SHUT_WR );
  2076. else
  2077. sock->wr_shutdown_pending = 1;
  2078. }
  2079. if (how == SD_BOTH)
  2080. {
  2081. if (sock->event) release_object( sock->event );
  2082. sock->event = NULL;
  2083. sock->window = 0;
  2084. sock->mask = 0;
  2085. sock->nonblocking = 1;
  2086. }
  2087. sock_reselect( sock );
  2088. return;
  2089. }
  2090. case IOCTL_AFD_WINE_ADDRESS_LIST_CHANGE:
  2091. {
  2092. int force_async;
  2093. if (get_req_data_size() < sizeof(int))
  2094. {
  2095. set_error( STATUS_BUFFER_TOO_SMALL );
  2096. return;
  2097. }
  2098. force_async = *(int *)get_req_data();
  2099. if (sock->nonblocking && !force_async)
  2100. {
  2101. set_error( STATUS_DEVICE_NOT_READY );
  2102. return;
  2103. }
  2104. if (!sock_get_ifchange( sock )) return;
  2105. queue_async( &sock->ifchange_q, async );
  2106. set_error( STATUS_PENDING );
  2107. return;
  2108. }
  2109. case IOCTL_AFD_WINE_FIONBIO:
  2110. if (get_req_data_size() < sizeof(int))
  2111. {
  2112. set_error( STATUS_BUFFER_TOO_SMALL );
  2113. return;
  2114. }
  2115. if (*(int *)get_req_data())
  2116. {
  2117. sock->nonblocking = 1;
  2118. }
  2119. else
  2120. {
  2121. if (sock->mask)
  2122. {
  2123. set_error( STATUS_INVALID_PARAMETER );
  2124. return;
  2125. }
  2126. sock->nonblocking = 0;
  2127. }
  2128. return;
  2129. case IOCTL_AFD_GET_EVENTS:
  2130. {
  2131. struct afd_get_events_params params = {0};
  2132. unsigned int i;
  2133. if (get_reply_max_size() < sizeof(params))
  2134. {
  2135. set_error( STATUS_INVALID_PARAMETER );
  2136. return;
  2137. }
  2138. params.flags = sock->pending_events & sock->mask;
  2139. for (i = 0; i < ARRAY_SIZE( params.status ); ++i)
  2140. params.status[i] = sock_get_ntstatus( sock->errors[i] );
  2141. sock->pending_events = 0;
  2142. sock_reselect( sock );
  2143. set_reply_data( &params, sizeof(params) );
  2144. return;
  2145. }
  2146. case IOCTL_AFD_EVENT_SELECT:
  2147. {
  2148. struct event *event = NULL;
  2149. obj_handle_t event_handle;
  2150. int mask;
  2151. set_async_pending( async );
  2152. if (is_machine_64bit( current->process->machine ))
  2153. {
  2154. const struct afd_event_select_params_64 *params = get_req_data();
  2155. if (get_req_data_size() < sizeof(*params))
  2156. {
  2157. set_error( STATUS_INVALID_PARAMETER );
  2158. return;
  2159. }
  2160. event_handle = params->event;
  2161. mask = params->mask;
  2162. }
  2163. else
  2164. {
  2165. const struct afd_event_select_params_32 *params = get_req_data();
  2166. if (get_req_data_size() < sizeof(*params))
  2167. {
  2168. set_error( STATUS_INVALID_PARAMETER );
  2169. return;
  2170. }
  2171. event_handle = params->event;
  2172. mask = params->mask;
  2173. }
  2174. if ((event_handle || mask) &&
  2175. !(event = get_event_obj( current->process, event_handle, EVENT_MODIFY_STATE )))
  2176. {
  2177. set_error( STATUS_INVALID_PARAMETER );
  2178. return;
  2179. }
  2180. if (sock->event) release_object( sock->event );
  2181. sock->event = event;
  2182. sock->mask = mask;
  2183. sock->window = 0;
  2184. sock->message = 0;
  2185. sock->wparam = 0;
  2186. sock->nonblocking = 1;
  2187. sock_reselect( sock );
  2188. /* Explicitly wake the socket up if the mask causes it to become
  2189. * signaled. Note that reselecting isn't enough, since we might already
  2190. * have had events recorded in sock->reported_events and we don't want
  2191. * to select for them again. */
  2192. sock_wake_up( sock );
  2193. return;
  2194. }
  2195. case IOCTL_AFD_WINE_MESSAGE_SELECT:
  2196. {
  2197. const struct afd_message_select_params *params = get_req_data();
  2198. if (get_req_data_size() < sizeof(params))
  2199. {
  2200. set_error( STATUS_BUFFER_TOO_SMALL );
  2201. return;
  2202. }
  2203. if (sock->event) release_object( sock->event );
  2204. if (params->window)
  2205. {
  2206. sock->pending_events = 0;
  2207. sock->reported_events = 0;
  2208. }
  2209. sock->event = NULL;
  2210. sock->mask = params->mask;
  2211. sock->window = params->window;
  2212. sock->message = params->message;
  2213. sock->wparam = params->handle;
  2214. sock->nonblocking = 1;
  2215. sock_reselect( sock );
  2216. return;
  2217. }
  2218. case IOCTL_AFD_BIND:
  2219. {
  2220. const struct afd_bind_params *params = get_req_data();
  2221. union unix_sockaddr unix_addr, bind_addr;
  2222. data_size_t in_size;
  2223. socklen_t unix_len;
  2224. /* the ioctl is METHOD_NEITHER, so ntdll gives us the output buffer as
  2225. * input */
  2226. if (get_req_data_size() < get_reply_max_size())
  2227. {
  2228. set_error( STATUS_BUFFER_TOO_SMALL );
  2229. return;
  2230. }
  2231. in_size = get_req_data_size() - get_reply_max_size();
  2232. if (in_size < offsetof(struct afd_bind_params, addr.sa_data)
  2233. || get_reply_max_size() < in_size - sizeof(int))
  2234. {
  2235. set_error( STATUS_INVALID_PARAMETER );
  2236. return;
  2237. }
  2238. if (sock->bound)
  2239. {
  2240. set_error( STATUS_ADDRESS_ALREADY_ASSOCIATED );
  2241. return;
  2242. }
  2243. unix_len = sockaddr_to_unix( &params->addr, in_size - sizeof(int), &unix_addr );
  2244. if (!unix_len)
  2245. {
  2246. set_error( STATUS_INVALID_ADDRESS );
  2247. return;
  2248. }
  2249. bind_addr = unix_addr;
  2250. if (unix_addr.addr.sa_family == AF_INET)
  2251. {
  2252. if (!memcmp( &unix_addr.in.sin_addr, magic_loopback_addr, 4 )
  2253. || bind_to_interface( sock, &unix_addr.in ))
  2254. bind_addr.in.sin_addr.s_addr = htonl( INADDR_ANY );
  2255. }
  2256. else if (unix_addr.addr.sa_family == AF_INET6)
  2257. {
  2258. #ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_SCOPE_ID
  2259. /* Windows allows specifying zero to use the default scope. Linux
  2260. * interprets it as an interface index and requires that it be
  2261. * nonzero. */
  2262. if (!unix_addr.in6.sin6_scope_id)
  2263. bind_addr.in6.sin6_scope_id = get_ipv6_interface_index( &unix_addr.in6.sin6_addr );
  2264. #endif
  2265. }
  2266. set_async_pending( async );
  2267. if (bind( unix_fd, &bind_addr.addr, unix_len ) < 0)
  2268. {
  2269. if (errno == EADDRINUSE)
  2270. {
  2271. int reuse;
  2272. socklen_t len = sizeof(reuse);
  2273. if (!getsockopt( unix_fd, SOL_SOCKET, SO_REUSEADDR, (char *)&reuse, &len ) && reuse)
  2274. errno = EACCES;
  2275. }
  2276. set_error( sock_get_ntstatus( errno ) );
  2277. return;
  2278. }
  2279. sock->bound = 1;
  2280. unix_len = sizeof(bind_addr);
  2281. if (!getsockname( unix_fd, &bind_addr.addr, &unix_len ))
  2282. {
  2283. /* store the interface or magic loopback address instead of the
  2284. * actual unix address */
  2285. if (bind_addr.addr.sa_family == AF_INET)
  2286. bind_addr.in.sin_addr = unix_addr.in.sin_addr;
  2287. sock->addr_len = sockaddr_from_unix( &bind_addr, &sock->addr.addr, sizeof(sock->addr) );
  2288. }
  2289. if (get_reply_max_size() >= sock->addr_len)
  2290. set_reply_data( &sock->addr, sock->addr_len );
  2291. return;
  2292. }
  2293. case IOCTL_AFD_GETSOCKNAME:
  2294. if (!sock->bound)
  2295. {
  2296. set_error( STATUS_INVALID_PARAMETER );
  2297. return;
  2298. }
  2299. if (get_reply_max_size() < sock->addr_len)
  2300. {
  2301. set_error( STATUS_BUFFER_TOO_SMALL );
  2302. return;
  2303. }
  2304. set_reply_data( &sock->addr, sock->addr_len );
  2305. return;
  2306. case IOCTL_AFD_WINE_DEFER:
  2307. {
  2308. const obj_handle_t *handle = get_req_data();
  2309. struct sock *acceptsock;
  2310. if (get_req_data_size() < sizeof(*handle))
  2311. {
  2312. set_error( STATUS_BUFFER_TOO_SMALL );
  2313. return;
  2314. }
  2315. acceptsock = (struct sock *)get_handle_obj( current->process, *handle, 0, &sock_ops );
  2316. if (!acceptsock) return;
  2317. sock->deferred = acceptsock;
  2318. return;
  2319. }
  2320. case IOCTL_AFD_WINE_GET_INFO:
  2321. {
  2322. struct afd_get_info_params params;
  2323. if (get_reply_max_size() < sizeof(params))
  2324. {
  2325. set_error( STATUS_BUFFER_TOO_SMALL );
  2326. return;
  2327. }
  2328. params.family = sock->family;
  2329. params.type = sock->type;
  2330. params.protocol = sock->proto;
  2331. set_reply_data( &params, sizeof(params) );
  2332. return;
  2333. }
  2334. case IOCTL_AFD_WINE_GET_SO_ACCEPTCONN:
  2335. {
  2336. int listening = (sock->state == SOCK_LISTENING);
  2337. if (get_reply_max_size() < sizeof(listening))
  2338. {
  2339. set_error( STATUS_BUFFER_TOO_SMALL );
  2340. return;
  2341. }
  2342. set_reply_data( &listening, sizeof(listening) );
  2343. return;
  2344. }
  2345. case IOCTL_AFD_WINE_GET_SO_ERROR:
  2346. {
  2347. int error;
  2348. socklen_t len = sizeof(error);
  2349. unsigned int i;
  2350. if (get_reply_max_size() < sizeof(error))
  2351. {
  2352. set_error( STATUS_BUFFER_TOO_SMALL );
  2353. return;
  2354. }
  2355. if (getsockopt( unix_fd, SOL_SOCKET, SO_ERROR, (char *)&error, &len ) < 0)
  2356. {
  2357. set_error( sock_get_ntstatus( errno ) );
  2358. return;
  2359. }
  2360. if (!error)
  2361. {
  2362. for (i = 0; i < ARRAY_SIZE( sock->errors ); ++i)
  2363. {
  2364. if (sock->errors[i])
  2365. {
  2366. error = sock_get_error( sock->errors[i] );
  2367. break;
  2368. }
  2369. }
  2370. }
  2371. set_reply_data( &error, sizeof(error) );
  2372. return;
  2373. }
  2374. case IOCTL_AFD_WINE_GET_SO_RCVBUF:
  2375. {
  2376. int rcvbuf = sock->rcvbuf;
  2377. if (get_reply_max_size() < sizeof(rcvbuf))
  2378. {
  2379. set_error( STATUS_BUFFER_TOO_SMALL );
  2380. return;
  2381. }
  2382. set_reply_data( &rcvbuf, sizeof(rcvbuf) );
  2383. return;
  2384. }
  2385. case IOCTL_AFD_WINE_SET_SO_RCVBUF:
  2386. {
  2387. DWORD rcvbuf;
  2388. if (get_req_data_size() < sizeof(rcvbuf))
  2389. {
  2390. set_error( STATUS_BUFFER_TOO_SMALL );
  2391. return;
  2392. }
  2393. rcvbuf = *(DWORD *)get_req_data();
  2394. if (!setsockopt( unix_fd, SOL_SOCKET, SO_RCVBUF, (char *)&rcvbuf, sizeof(rcvbuf) ))
  2395. sock->rcvbuf = rcvbuf;
  2396. else
  2397. set_error( sock_get_ntstatus( errno ) );
  2398. return;
  2399. }
  2400. case IOCTL_AFD_WINE_GET_SO_RCVTIMEO:
  2401. {
  2402. DWORD rcvtimeo = sock->rcvtimeo;
  2403. if (get_reply_max_size() < sizeof(rcvtimeo))
  2404. {
  2405. set_error( STATUS_BUFFER_TOO_SMALL );
  2406. return;
  2407. }
  2408. set_reply_data( &rcvtimeo, sizeof(rcvtimeo) );
  2409. return;
  2410. }
  2411. case IOCTL_AFD_WINE_SET_SO_RCVTIMEO:
  2412. {
  2413. DWORD rcvtimeo;
  2414. if (get_req_data_size() < sizeof(rcvtimeo))
  2415. {
  2416. set_error( STATUS_BUFFER_TOO_SMALL );
  2417. return;
  2418. }
  2419. rcvtimeo = *(DWORD *)get_req_data();
  2420. sock->rcvtimeo = rcvtimeo;
  2421. return;
  2422. }
  2423. case IOCTL_AFD_WINE_GET_SO_SNDBUF:
  2424. {
  2425. int sndbuf = sock->sndbuf;
  2426. if (get_reply_max_size() < sizeof(sndbuf))
  2427. {
  2428. set_error( STATUS_BUFFER_TOO_SMALL );
  2429. return;
  2430. }
  2431. set_reply_data( &sndbuf, sizeof(sndbuf) );
  2432. return;
  2433. }
  2434. case IOCTL_AFD_WINE_SET_SO_SNDBUF:
  2435. {
  2436. DWORD sndbuf;
  2437. if (get_req_data_size() < sizeof(sndbuf))
  2438. {
  2439. set_error( STATUS_BUFFER_TOO_SMALL );
  2440. return;
  2441. }
  2442. sndbuf = *(DWORD *)get_req_data();
  2443. #ifdef __APPLE__
  2444. if (!sndbuf)
  2445. {
  2446. /* setsockopt fails if a zero value is passed */
  2447. sock->sndbuf = sndbuf;
  2448. return;
  2449. }
  2450. #endif
  2451. if (!setsockopt( unix_fd, SOL_SOCKET, SO_SNDBUF, (char *)&sndbuf, sizeof(sndbuf) ))
  2452. sock->sndbuf = sndbuf;
  2453. else
  2454. set_error( sock_get_ntstatus( errno ) );
  2455. return;
  2456. }
  2457. case IOCTL_AFD_WINE_GET_SO_SNDTIMEO:
  2458. {
  2459. DWORD sndtimeo = sock->sndtimeo;
  2460. if (get_reply_max_size() < sizeof(sndtimeo))
  2461. {
  2462. set_error( STATUS_BUFFER_TOO_SMALL );
  2463. return;
  2464. }
  2465. set_reply_data( &sndtimeo, sizeof(sndtimeo) );
  2466. return;
  2467. }
  2468. case IOCTL_AFD_WINE_SET_SO_SNDTIMEO:
  2469. {
  2470. DWORD sndtimeo;
  2471. if (get_req_data_size() < sizeof(sndtimeo))
  2472. {
  2473. set_error( STATUS_BUFFER_TOO_SMALL );
  2474. return;
  2475. }
  2476. sndtimeo = *(DWORD *)get_req_data();
  2477. sock->sndtimeo = sndtimeo;
  2478. return;
  2479. }
  2480. case IOCTL_AFD_WINE_GET_SO_CONNECT_TIME:
  2481. {
  2482. DWORD time = ~0u;
  2483. if (get_reply_max_size() < sizeof(time))
  2484. {
  2485. set_error( STATUS_BUFFER_TOO_SMALL );
  2486. return;
  2487. }
  2488. if (sock->state == SOCK_CONNECTED)
  2489. time = (current_time - sock->connect_time) / 10000000;
  2490. set_reply_data( &time, sizeof(time) );
  2491. return;
  2492. }
  2493. case IOCTL_AFD_POLL:
  2494. {
  2495. if (get_reply_max_size() < get_req_data_size())
  2496. {
  2497. set_error( STATUS_INVALID_PARAMETER );
  2498. return;
  2499. }
  2500. if (is_machine_64bit( current->process->machine ))
  2501. {
  2502. const struct afd_poll_params_64 *params = get_req_data();
  2503. if (get_req_data_size() < sizeof(struct afd_poll_params_64) ||
  2504. get_req_data_size() < offsetof( struct afd_poll_params_64, sockets[params->count] ))
  2505. {
  2506. set_error( STATUS_INVALID_PARAMETER );
  2507. return;
  2508. }
  2509. poll_socket( sock, async, params->exclusive, params->timeout, params->count, params->sockets );
  2510. }
  2511. else
  2512. {
  2513. const struct afd_poll_params_32 *params = get_req_data();
  2514. struct afd_poll_socket_64 *sockets;
  2515. unsigned int i;
  2516. if (get_req_data_size() < sizeof(struct afd_poll_params_32) ||
  2517. get_req_data_size() < offsetof( struct afd_poll_params_32, sockets[params->count] ))
  2518. {
  2519. set_error( STATUS_INVALID_PARAMETER );
  2520. return;
  2521. }
  2522. if (!(sockets = mem_alloc( params->count * sizeof(*sockets) ))) return;
  2523. for (i = 0; i < params->count; ++i)
  2524. {
  2525. sockets[i].socket = params->sockets[i].socket;
  2526. sockets[i].flags = params->sockets[i].flags;
  2527. sockets[i].status = params->sockets[i].status;
  2528. }
  2529. poll_socket( sock, async, params->exclusive, params->timeout, params->count, sockets );
  2530. free( sockets );
  2531. }
  2532. return;
  2533. }
  2534. default:
  2535. set_error( STATUS_NOT_SUPPORTED );
  2536. return;
  2537. }
  2538. }
  2539. static int poll_single_socket( struct sock *sock, int mask )
  2540. {
  2541. struct pollfd pollfd;
  2542. pollfd.fd = get_unix_fd( sock->fd );
  2543. pollfd.events = poll_flags_from_afd( sock, mask );
  2544. if (pollfd.events < 0 || poll( &pollfd, 1, 0 ) < 0)
  2545. return 0;
  2546. if (sock->state == SOCK_CONNECTING && (pollfd.revents & (POLLERR | POLLHUP)))
  2547. pollfd.revents &= ~POLLOUT;
  2548. if ((mask & AFD_POLL_HUP) && (pollfd.revents & POLLIN) && sock->type == WS_SOCK_STREAM)
  2549. {
  2550. char dummy;
  2551. if (!recv( get_unix_fd( sock->fd ), &dummy, 1, MSG_PEEK ))
  2552. {
  2553. pollfd.revents &= ~POLLIN;
  2554. pollfd.revents |= POLLHUP;
  2555. }
  2556. }
  2557. return get_poll_flags( sock, pollfd.revents ) & mask;
  2558. }
  2559. static void handle_exclusive_poll(struct poll_req *req)
  2560. {
  2561. unsigned int i;
  2562. for (i = 0; i < req->count; ++i)
  2563. {
  2564. struct sock *sock = req->sockets[i].sock;
  2565. struct poll_req *main_poll = sock->main_poll;
  2566. if (main_poll && main_poll->exclusive && req->exclusive)
  2567. {
  2568. complete_async_poll( main_poll, STATUS_SUCCESS );
  2569. main_poll = NULL;
  2570. }
  2571. if (!main_poll)
  2572. sock->main_poll = req;
  2573. }
  2574. }
  2575. static void poll_socket( struct sock *poll_sock, struct async *async, int exclusive, timeout_t timeout,
  2576. unsigned int count, const struct afd_poll_socket_64 *sockets )
  2577. {
  2578. BOOL signaled = FALSE;
  2579. struct poll_req *req;
  2580. unsigned int i, j;
  2581. if (!count)
  2582. {
  2583. set_error( STATUS_INVALID_PARAMETER );
  2584. return;
  2585. }
  2586. if (!(req = mem_alloc( offsetof( struct poll_req, sockets[count] ) )))
  2587. return;
  2588. req->timeout = NULL;
  2589. if (timeout && timeout != TIMEOUT_INFINITE &&
  2590. !(req->timeout = add_timeout_user( timeout, async_poll_timeout, req )))
  2591. {
  2592. free( req );
  2593. return;
  2594. }
  2595. req->orig_timeout = timeout;
  2596. for (i = 0; i < count; ++i)
  2597. {
  2598. req->sockets[i].sock = (struct sock *)get_handle_obj( current->process, sockets[i].socket, 0, &sock_ops );
  2599. if (!req->sockets[i].sock)
  2600. {
  2601. for (j = 0; j < i; ++j) release_object( req->sockets[i].sock );
  2602. if (req->timeout) remove_timeout_user( req->timeout );
  2603. free( req );
  2604. return;
  2605. }
  2606. req->sockets[i].handle = sockets[i].socket;
  2607. req->sockets[i].mask = sockets[i].flags;
  2608. req->sockets[i].flags = 0;
  2609. }
  2610. req->exclusive = exclusive;
  2611. req->count = count;
  2612. req->async = (struct async *)grab_object( async );
  2613. req->iosb = async_get_iosb( async );
  2614. handle_exclusive_poll(req);
  2615. list_add_tail( &poll_list, &req->entry );
  2616. async_set_completion_callback( async, free_poll_req, req );
  2617. queue_async( &poll_sock->poll_q, async );
  2618. for (i = 0; i < count; ++i)
  2619. {
  2620. struct sock *sock = req->sockets[i].sock;
  2621. int mask = req->sockets[i].mask;
  2622. int flags = poll_single_socket( sock, mask );
  2623. if (flags)
  2624. {
  2625. signaled = TRUE;
  2626. req->sockets[i].flags = flags;
  2627. req->sockets[i].status = sock_get_ntstatus( sock_error( sock->fd ) );
  2628. }
  2629. /* FIXME: do other error conditions deserve a similar treatment? */
  2630. if (sock->state != SOCK_CONNECTING && sock->errors[AFD_POLL_BIT_CONNECT_ERR] && (mask & AFD_POLL_CONNECT_ERR))
  2631. {
  2632. signaled = TRUE;
  2633. req->sockets[i].flags |= AFD_POLL_CONNECT_ERR;
  2634. req->sockets[i].status = sock_get_ntstatus( sock->errors[AFD_POLL_BIT_CONNECT_ERR] );
  2635. }
  2636. }
  2637. if (!timeout || signaled)
  2638. complete_async_poll( req, STATUS_SUCCESS );
  2639. for (i = 0; i < req->count; ++i)
  2640. sock_reselect( req->sockets[i].sock );
  2641. set_error( STATUS_PENDING );
  2642. }
  2643. #ifdef HAVE_LINUX_RTNETLINK_H
  2644. /* only keep one ifchange object around, all sockets waiting for wakeups will look to it */
  2645. static struct object *ifchange_object;
  2646. static void ifchange_dump( struct object *obj, int verbose );
  2647. static struct fd *ifchange_get_fd( struct object *obj );
  2648. static void ifchange_destroy( struct object *obj );
  2649. static int ifchange_get_poll_events( struct fd *fd );
  2650. static void ifchange_poll_event( struct fd *fd, int event );
/* singleton object used to deliver interface-change wakeups; one instance is
 * shared by all sockets waiting for notifications (see get_ifchange()) */
struct ifchange
{
    struct object obj;     /* object header */
    struct fd    *fd;      /* interface change file descriptor */
    struct list   sockets; /* list of sockets to send interface change notifications */
};
/* object operations for the interface-change notification object; it is
 * anonymous (no name, no handles exposed) and waited on only via its fd */
static const struct object_ops ifchange_ops =
{
    sizeof(struct ifchange),  /* size */
    &no_type,                 /* type */
    ifchange_dump,            /* dump */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* signaled */
    no_satisfied,             /* satisfied */
    no_signal,                /* signal */
    ifchange_get_fd,          /* get_fd */
    default_map_access,       /* map_access */
    default_get_sd,           /* get_sd */
    default_set_sd,           /* set_sd */
    no_get_full_name,         /* get_full_name */
    no_lookup_name,           /* lookup_name */
    no_link_name,             /* link_name */
    NULL,                     /* unlink_name */
    no_open_file,             /* open_file */
    no_kernel_obj_list,       /* get_kernel_obj_list */
    no_close_handle,          /* close_handle */
    ifchange_destroy          /* destroy */
};
/* fd operations for the netlink fd backing the ifchange object; only poll
 * events are handled, all I/O entry points are rejected */
static const struct fd_ops ifchange_fd_ops =
{
    ifchange_get_poll_events, /* get_poll_events */
    ifchange_poll_event,      /* poll_event */
    NULL,                     /* get_fd_type */
    no_fd_read,               /* read */
    no_fd_write,              /* write */
    no_fd_flush,              /* flush */
    no_fd_get_file_info,      /* get_file_info */
    no_fd_get_volume_info,    /* get_volume_info */
    no_fd_ioctl,              /* ioctl */
    NULL,                     /* cancel_async */
    NULL,                     /* queue_async */
    NULL                      /* reselect_async */
};
  2695. static void ifchange_dump( struct object *obj, int verbose )
  2696. {
  2697. assert( obj->ops == &ifchange_ops );
  2698. fprintf( stderr, "Interface change\n" );
  2699. }
  2700. static struct fd *ifchange_get_fd( struct object *obj )
  2701. {
  2702. struct ifchange *ifchange = (struct ifchange *)obj;
  2703. return (struct fd *)grab_object( ifchange->fd );
  2704. }
  2705. static void ifchange_destroy( struct object *obj )
  2706. {
  2707. struct ifchange *ifchange = (struct ifchange *)obj;
  2708. assert( obj->ops == &ifchange_ops );
  2709. release_object( ifchange->fd );
  2710. /* reset the global ifchange object so that it will be recreated if it is needed again */
  2711. assert( obj == ifchange_object );
  2712. ifchange_object = NULL;
  2713. }
/* we only ever read from the netlink socket, so wait for input only */
static int ifchange_get_poll_events( struct fd *fd )
{
    return POLLIN;
}
  2718. /* wake up all the sockets waiting for a change notification event */
  2719. static void ifchange_wake_up( struct object *obj, unsigned int status )
  2720. {
  2721. struct ifchange *ifchange = (struct ifchange *)obj;
  2722. struct list *ptr, *next;
  2723. assert( obj->ops == &ifchange_ops );
  2724. assert( obj == ifchange_object );
  2725. LIST_FOR_EACH_SAFE( ptr, next, &ifchange->sockets )
  2726. {
  2727. struct sock *sock = LIST_ENTRY( ptr, struct sock, ifchange_entry );
  2728. assert( sock->ifchange_obj );
  2729. async_wake_up( &sock->ifchange_q, status ); /* issue ifchange notification for the socket */
  2730. sock_release_ifchange( sock ); /* remove socket from list and decrement ifchange refcount */
  2731. }
  2732. }
/* called when the netlink socket has data; parse the rtnetlink messages and
 * wake up waiting sockets if an interface address was added or removed */
static void ifchange_poll_event( struct fd *fd, int event )
{
    struct object *ifchange = get_fd_user( fd );
    unsigned int status = STATUS_PENDING;  /* PENDING means "nothing to report yet" */
    char buffer[PIPE_BUF];
    int r;

    r = recv( get_unix_fd(fd), buffer, sizeof(buffer), MSG_DONTWAIT );
    if (r < 0)
    {
        /* the EWOULDBLOCK != EAGAIN check avoids a duplicate-case warning on
         * platforms where the two constants are the same value */
        if (errno == EWOULDBLOCK || (EWOULDBLOCK != EAGAIN && errno == EAGAIN))
            return;  /* retry when poll() says the socket is ready */
        status = sock_get_ntstatus( errno );
    }
    else if (r > 0)
    {
        struct nlmsghdr *nlh;

        /* walk the batch of netlink messages contained in this datagram */
        for (nlh = (struct nlmsghdr *)buffer; NLMSG_OK(nlh, r); nlh = NLMSG_NEXT(nlh, r))
        {
            if (nlh->nlmsg_type == NLMSG_DONE)
                break;
            /* an address change is the event our waiters care about */
            if (nlh->nlmsg_type == RTM_NEWADDR || nlh->nlmsg_type == RTM_DELADDR)
                status = STATUS_SUCCESS;
        }
    }
    else status = STATUS_CANCELLED;  /* zero-byte read: socket was closed */

    if (status != STATUS_PENDING) ifchange_wake_up( ifchange, status );
}
  2760. #endif
/* we only need one of these interface notification objects, all of the sockets dependent upon
 * it will wake up when a notification event occurs */
static struct object *get_ifchange( void )
{
#ifdef HAVE_LINUX_RTNETLINK_H
    struct ifchange *ifchange;
    struct sockaddr_nl addr;
    int unix_fd;

    if (ifchange_object)
    {
        /* increment the refcount for each socket that uses the ifchange object */
        return grab_object( ifchange_object );
    }

    /* create the socket we need for processing interface change notifications */
    unix_fd = socket( PF_NETLINK, SOCK_RAW, NETLINK_ROUTE );
    if (unix_fd == -1)
    {
        set_error( sock_get_ntstatus( errno ));
        return NULL;
    }
    fcntl( unix_fd, F_SETFL, O_NONBLOCK ); /* make socket nonblocking */
    memset( &addr, 0, sizeof(addr) );
    addr.nl_family = AF_NETLINK;
    /* subscribe to IPv4 address add/remove events only */
    addr.nl_groups = RTMGRP_IPV4_IFADDR;

    /* bind the socket to the special netlink kernel interface */
    if (bind( unix_fd, (struct sockaddr *)&addr, sizeof(addr) ) == -1)
    {
        close( unix_fd );
        set_error( sock_get_ntstatus( errno ));
        return NULL;
    }

    /* from here on, the unix fd's ownership passes to the wrapping objects:
     * release_object() below also closes it via the fd object */
    if (!(ifchange = alloc_object( &ifchange_ops )))
    {
        close( unix_fd );
        set_error( STATUS_NO_MEMORY );
        return NULL;
    }
    list_init( &ifchange->sockets );

    if (!(ifchange->fd = create_anonymous_fd( &ifchange_fd_ops, unix_fd, &ifchange->obj, 0 )))
    {
        release_object( ifchange );
        set_error( STATUS_NO_MEMORY );
        return NULL;
    }
    set_fd_events( ifchange->fd, POLLIN ); /* enable read wakeup on the file descriptor */

    /* the ifchange object is now successfully configured */
    ifchange_object = &ifchange->obj;
    return &ifchange->obj;
#else
    set_error( STATUS_NOT_SUPPORTED );
    return NULL;
#endif
}
  2814. /* add the socket to the interface change notification list */
  2815. static void ifchange_add_sock( struct object *obj, struct sock *sock )
  2816. {
  2817. #ifdef HAVE_LINUX_RTNETLINK_H
  2818. struct ifchange *ifchange = (struct ifchange *)obj;
  2819. list_add_tail( &ifchange->sockets, &sock->ifchange_entry );
  2820. #endif
  2821. }
  2822. /* create a new ifchange queue for a specific socket or, if one already exists, reuse the existing one */
  2823. static struct object *sock_get_ifchange( struct sock *sock )
  2824. {
  2825. struct object *ifchange;
  2826. if (sock->ifchange_obj) /* reuse existing ifchange_obj for this socket */
  2827. return sock->ifchange_obj;
  2828. if (!(ifchange = get_ifchange()))
  2829. return NULL;
  2830. /* add the socket to the ifchange notification list */
  2831. ifchange_add_sock( ifchange, sock );
  2832. sock->ifchange_obj = ifchange;
  2833. return ifchange;
  2834. }
  2835. /* destroy an existing ifchange queue for a specific socket */
  2836. static void sock_release_ifchange( struct sock *sock )
  2837. {
  2838. if (sock->ifchange_obj)
  2839. {
  2840. list_remove( &sock->ifchange_entry );
  2841. release_object( sock->ifchange_obj );
  2842. sock->ifchange_obj = NULL;
  2843. }
  2844. }
  2845. static void socket_device_dump( struct object *obj, int verbose );
  2846. static struct object *socket_device_lookup_name( struct object *obj, struct unicode_str *name,
  2847. unsigned int attr, struct object *root );
  2848. static struct object *socket_device_open_file( struct object *obj, unsigned int access,
  2849. unsigned int sharing, unsigned int options );
/* object operations for the \Device\Afd socket device: every open_file call
 * produces a fresh socket object */
static const struct object_ops socket_device_ops =
{
    sizeof(struct object),      /* size */
    &device_type,               /* type */
    socket_device_dump,         /* dump */
    no_add_queue,               /* add_queue */
    NULL,                       /* remove_queue */
    NULL,                       /* signaled */
    no_satisfied,               /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    default_map_access,         /* map_access */
    default_get_sd,             /* get_sd */
    default_set_sd,             /* set_sd */
    default_get_full_name,      /* get_full_name */
    socket_device_lookup_name,  /* lookup_name */
    directory_link_name,        /* link_name */
    default_unlink_name,        /* unlink_name */
    socket_device_open_file,    /* open_file */
    no_kernel_obj_list,         /* get_kernel_obj_list */
    no_close_handle,            /* close_handle */
    no_destroy                  /* destroy */
};
  2873. static void socket_device_dump( struct object *obj, int verbose )
  2874. {
  2875. fputs( "Socket device\n", stderr );
  2876. }
/* the socket device has no named children; consuming the name (len = 0)
 * tells the caller to open the device object itself */
static struct object *socket_device_lookup_name( struct object *obj, struct unicode_str *name,
                                                 unsigned int attr, struct object *root )
{
    if (name) name->len = 0;
    return NULL;
}
  2883. static struct object *socket_device_open_file( struct object *obj, unsigned int access,
  2884. unsigned int sharing, unsigned int options )
  2885. {
  2886. struct sock *sock;
  2887. if (!(sock = create_socket())) return NULL;
  2888. if (!(sock->fd = alloc_pseudo_fd( &sock_fd_ops, &sock->obj, options )))
  2889. {
  2890. release_object( sock );
  2891. return NULL;
  2892. }
  2893. return &sock->obj;
  2894. }
/* create the socket device object (\Device\Afd) under the given root */
struct object *create_socket_device( struct object *root, const struct unicode_str *name,
                                     unsigned int attr, const struct security_descriptor *sd )
{
    return create_named_object( root, &socket_device_ops, name, attr, sd );
}
/* handle the result of a client-side recv attempt: rewrite the status for
 * blocking/shutdown cases, then create the async that carries the request */
DECL_HANDLER(recv_socket)
{
    struct sock *sock = (struct sock *)get_handle_obj( current->process, req->async.handle, 0, &sock_ops );
    unsigned int status = req->status;
    timeout_t timeout = 0;
    struct async *async;
    struct fd *fd;

    if (!sock) return;
    fd = sock->fd;

    /* recv() returned EWOULDBLOCK, i.e. no data available yet */
    if (status == STATUS_DEVICE_NOT_READY && !sock->nonblocking)
    {
        /* Set a timeout on the async if necessary.
         *
         * We want to do this *only* if the client gave us STATUS_DEVICE_NOT_READY.
         * If the client gave us STATUS_PENDING, it expects the async to always
         * block (it was triggered by WSARecv*() with a valid OVERLAPPED
         * structure) and for the timeout not to be respected. */
        if (is_fd_overlapped( fd ))
            timeout = (timeout_t)sock->rcvtimeo * -10000;

        status = STATUS_PENDING;
    }

    /* the read side was shut down; a blocking receive can never complete */
    if ((status == STATUS_PENDING || status == STATUS_DEVICE_NOT_READY) && sock->rd_shutdown)
        status = STATUS_PIPE_DISCONNECTED;

    /* clear the event we are about to consume so it can be re-reported */
    sock->pending_events &= ~(req->oob ? AFD_POLL_OOB : AFD_POLL_READ);
    sock->reported_events &= ~(req->oob ? AFD_POLL_OOB : AFD_POLL_READ);

    if ((async = create_request_async( fd, get_fd_comp_flags( fd ), &req->async )))
    {
        if (status == STATUS_SUCCESS)
        {
            struct iosb *iosb = async_get_iosb( async );
            iosb->result = req->total;
            release_object( iosb );
        }
        set_error( status );

        if (timeout)
            async_set_timeout( async, timeout, STATUS_IO_TIMEOUT );

        if (status == STATUS_PENDING)
            queue_async( &sock->read_q, async );

        /* always reselect; we changed reported_events above */
        sock_reselect( sock );

        reply->wait = async_handoff( async, NULL, 0 );
        reply->options = get_fd_options( fd );
        release_object( async );
    }
    release_object( sock );
}
/* handle the result of a client-side send attempt: perform the implicit bind
 * for datagram sockets, rewrite the status for short-write/blocking/shutdown
 * cases, then create the async that carries the request */
DECL_HANDLER(send_socket)
{
    struct sock *sock = (struct sock *)get_handle_obj( current->process, req->async.handle, 0, &sock_ops );
    unsigned int status = req->status;
    timeout_t timeout = 0;
    struct async *async;
    struct fd *fd;

    if (!sock) return;
    fd = sock->fd;

    if (sock->type == WS_SOCK_DGRAM)
    {
        /* sendto() and sendmsg() implicitly binds a socket */
        union unix_sockaddr unix_addr;
        socklen_t unix_len = sizeof(unix_addr);

        if (!sock->bound && !getsockname( get_unix_fd( fd ), &unix_addr.addr, &unix_len ))
            sock->addr_len = sockaddr_from_unix( &unix_addr, &sock->addr.addr, sizeof(sock->addr) );
        sock->bound = 1;
    }

    if (status != STATUS_SUCCESS)
    {
        /* send() calls only clear and reselect events if unsuccessful. */
        sock->pending_events &= ~AFD_POLL_WRITE;
        sock->reported_events &= ~AFD_POLL_WRITE;
    }

    /* If we had a short write and the socket is nonblocking (and the client is
     * not trying to force the operation to be asynchronous), return success.
     * Windows actually refuses to send any data in this case, and returns
     * EWOULDBLOCK, but we have no way of doing that. */
    if (status == STATUS_DEVICE_NOT_READY && req->total && sock->nonblocking)
        status = STATUS_SUCCESS;

    /* send() returned EWOULDBLOCK or a short write, i.e. cannot send all data yet */
    if (status == STATUS_DEVICE_NOT_READY && !sock->nonblocking)
    {
        /* Set a timeout on the async if necessary.
         *
         * We want to do this *only* if the client gave us STATUS_DEVICE_NOT_READY.
         * If the client gave us STATUS_PENDING, it expects the async to always
         * block (it was triggered by WSASend*() with a valid OVERLAPPED
         * structure) and for the timeout not to be respected. */
        if (is_fd_overlapped( fd ))
            timeout = (timeout_t)sock->sndtimeo * -10000;

        status = STATUS_PENDING;
    }

    /* the write side was shut down; a blocking send can never complete */
    if ((status == STATUS_PENDING || status == STATUS_DEVICE_NOT_READY) && sock->wr_shutdown)
        status = STATUS_PIPE_DISCONNECTED;

    if ((async = create_request_async( fd, get_fd_comp_flags( fd ), &req->async )))
    {
        if (status == STATUS_SUCCESS)
        {
            struct iosb *iosb = async_get_iosb( async );
            iosb->result = req->total;
            release_object( iosb );
        }
        set_error( status );

        if (timeout)
            async_set_timeout( async, timeout, STATUS_IO_TIMEOUT );

        if (status == STATUS_PENDING)
            queue_async( &sock->write_q, async );

        /* always reselect; we changed reported_events above */
        sock_reselect( sock );

        reply->wait = async_handoff( async, NULL, 0 );
        reply->options = get_fd_options( fd );
        release_object( async );
    }
    release_object( sock );
}