/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Xen inter-domain backend - GLDv3 driver edition.
 *
 * A traditional GLDv3 driver used to communicate with a guest
 * domain. This driver is typically plumbed underneath the IP stack
 * or a software ethernet bridge.
 */

#include "xnb.h"

#include <sys/sunddi.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/strsubr.h>
#include <sys/dlpi.h>
#include <sys/pattr.h>
#include <sys/mac_provider.h>
#include <sys/mac_ether.h>
#include <xen/sys/xendev.h>
#include <sys/note.h>

/* Required driver entry points for GLDv3 */
static int xnbu_m_start(void *);
static void xnbu_m_stop(void *);
static int xnbu_m_set_mac_addr(void *, const uint8_t *);
static int xnbu_m_set_multicast(void *, boolean_t, const uint8_t *);
static int xnbu_m_set_promiscuous(void *, boolean_t);
static int xnbu_m_stat(void *, uint_t, uint64_t *);
static boolean_t xnbu_m_getcapab(void *, mac_capab_t, void *);
static mblk_t *xnbu_m_send(void *, mblk_t *);
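
/*
 * Per-instance state for this GLDv3 flavour, reachable via the xnb_t's
 * xnb_flavour_data pointer: the registered MAC handle and a flag noting
 * that the transmit path is waiting for ring space.
 */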
typedef struct xnbu {
	mac_handle_t	u_mh;
	boolean_t	u_need_sched;
} xnbu_t;

static mac_callbacks_t xnbu_callbacks = {
	MC_GETCAPAB,
	xnbu_m_stat,
	xnbu_m_start,
	xnbu_m_stop,
	xnbu_m_set_promiscuous,
	xnbu_m_set_multicast,
	xnbu_m_set_mac_addr,
	xnbu_m_send,
	NULL,
	NULL,
	xnbu_m_getcapab
};

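/*
 * xnbu_to_host() -- pass a packet received from the peer domain up to
 * the MAC layer and, if the transmit path was blocked waiting for ring
 * space, re-enable it now that space may be available.
 */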
static void
xnbu_to_host(xnb_t *xnbp, mblk_t *mp)
{
	xnbu_t *xnbup = xnbp->xnb_flavour_data;
	boolean_t sched = B_FALSE;

	ASSERT(mp != NULL);

	mac_rx(xnbup->u_mh, NULL, mp);

	mutex_enter(&xnbp->xnb_rx_lock);

	/*
	 * If a transmit attempt failed because we ran out of ring
	 * space and there is now some space, re-enable the transmit
	 * path.
	 */
	if (xnbup->u_need_sched &&
	    RING_HAS_UNCONSUMED_REQUESTS(&xnbp->xnb_rx_ring)) {
		sched = B_TRUE;
		xnbup->u_need_sched = B_FALSE;
	}

	mutex_exit(&xnbp->xnb_rx_lock);

	if (sched)
		mac_tx_update(xnbup->u_mh);
}

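/*
 * xnbu_cksum_from_peer() -- convert the checksum flags received with a
 * packet from the peer into the host's hardware checksum metadata,
 * filling in the checksum itself if the peer left it blank.
 */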
static mblk_t *
xnbu_cksum_from_peer(xnb_t *xnbp, mblk_t *mp, uint16_t flags)
{
	/*
	 * Take a conservative approach - if the checksum is blank
	 * then we fill it in.
	 *
	 * If the consumer of the packet is IP then we might actually
	 * only need to fill it in if the data is not validated, but
	 * how do we know who might end up with the packet?
	 */

	if ((flags & NETTXF_csum_blank) != 0) {
		/*
		 * The checksum is blank. We must fill it in here.
		 */
		mp = xnb_process_cksum_flags(xnbp, mp, 0);

		/*
		 * Because we calculated the checksum ourselves we
		 * know that it must be good, so we assert this.
		 */
		flags |= NETTXF_data_validated;
	}

	if ((flags & NETTXF_data_validated) != 0) {
		/*
		 * The checksum is asserted valid.
		 */
		mac_hcksum_set(mp, 0, 0, 0, 0, HCK_FULLCKSUM_OK);
	}

	return (mp);
}

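/*
 * xnbu_cksum_to_peer() -- derive the checksum flags to accompany a
 * packet sent to the peer; if the stack asked for checksum offload we
 * tell the peer that the checksum has not been calculated.
 */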
static uint16_t
xnbu_cksum_to_peer(xnb_t *xnbp, mblk_t *mp)
{
	_NOTE(ARGUNUSED(xnbp));
	uint16_t r = 0;
	uint32_t pflags;

	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);

	/*
	 * If the protocol stack has requested checksum
	 * offload, inform the peer that we have not
	 * calculated the checksum.
	 */
	if ((pflags & HCK_FULLCKSUM) != 0)
		r |= NETRXF_csum_blank;

	return (r);
}

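/*
 * xnbu_start_connect() -- flavour callback invoked as the connection to
 * the peer is established: mark the link up and unblock transmits.
 */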
static boolean_t
xnbu_start_connect(xnb_t *xnbp)
{
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	mac_link_update(xnbup->u_mh, LINK_STATE_UP);
	/*
	 * We are able to send packets now - bring them on.
	 */
	mac_tx_update(xnbup->u_mh);

	return (B_TRUE);
}

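/*
 * xnbu_peer_connected() -- flavour callback; nothing further is needed
 * here when the peer completes its connection.
 */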
static boolean_t
xnbu_peer_connected(xnb_t *xnbp)
{
	_NOTE(ARGUNUSED(xnbp));

	return (B_TRUE);
}

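/*
 * xnbu_peer_disconnected() -- flavour callback; the peer has gone away,
 * so report the link as down.
 */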
static void
xnbu_peer_disconnected(xnb_t *xnbp)
{
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	mac_link_update(xnbup->u_mh, LINK_STATE_DOWN);
}

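/*
 * xnbu_hotplug_connected() -- flavour callback; no hotplug-specific
 * work is required for this flavour.
 */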
/*ARGSUSED*/
static boolean_t
xnbu_hotplug_connected(xnb_t *xnbp)
{
	return (B_TRUE);
}

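/*
 * xnbu_m_send() -- copy packets handed down by the MAC layer to the
 * peer domain, returning any that could not be consumed and noting
 * that a transmit update is needed once ring space frees up.
 */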
static mblk_t *
xnbu_m_send(void *arg, mblk_t *mp)
{
	xnb_t *xnbp = arg;
	xnbu_t *xnbup = xnbp->xnb_flavour_data;
	boolean_t sched = B_FALSE;

	mp = xnb_copy_to_peer(arg, mp);

	mutex_enter(&xnbp->xnb_rx_lock);

	/*
	 * If we consumed all of the mblk_t's offered, perhaps we need
	 * to indicate that we can accept more. Otherwise we are full
	 * and need to wait for space.
	 */
	if (mp == NULL) {
		sched = xnbup->u_need_sched;
		xnbup->u_need_sched = B_FALSE;
	} else {
		xnbup->u_need_sched = B_TRUE;
	}

	mutex_exit(&xnbp->xnb_rx_lock);

	/*
	 * If a previous transmit attempt failed because the ring
	 * was full, try again now.
	 */
	if (sched)
		mac_tx_update(xnbup->u_mh);

	return (mp);
}

/*
 * xnbu_m_set_mac_addr() -- set the physical network address on the board
 */
/* ARGSUSED */
static int
xnbu_m_set_mac_addr(void *arg, const uint8_t *macaddr)
{
	xnb_t *xnbp = arg;
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	bcopy(macaddr, xnbp->xnb_mac_addr, ETHERADDRL);

	mac_unicst_update(xnbup->u_mh, xnbp->xnb_mac_addr);

	return (0);
}

/*
 * xnbu_m_set_multicast() -- set (enable) or disable a multicast address
 */
/*ARGSUSED*/
static int
xnbu_m_set_multicast(void *arg, boolean_t add, const uint8_t *mca)
{
	/*
	 * We always accept all packets from the peer, so nothing to
	 * do for enable or disable.
	 */
	return (0);
}

/*
 * xnbu_m_set_promiscuous() -- set or reset promiscuous mode on the board
 */
/* ARGSUSED */
static int
xnbu_m_set_promiscuous(void *arg, boolean_t on)
{
	/*
	 * We always accept all packets from the peer, so nothing to
	 * do for enable or disable.
	 */
	return (0);
}

/*
 * xnbu_m_start() -- start the board receiving and enable interrupts.
 */
/*ARGSUSED*/
static int
xnbu_m_start(void *arg)
{
	return (0);
}

/*
 * xnbu_m_stop() - disable hardware
 */
/*ARGSUSED*/
static void
xnbu_m_stop(void *arg)
{
}

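/*
 * xnbu_m_stat() -- report packet and byte counters to the MAC layer.
 * Note that MAC input maps to the xnb output counters and vice versa;
 * the xnb core appears to count traffic from the peer's point of view.
 */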
static int
xnbu_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	xnb_t *xnbp = arg;

	mutex_enter(&xnbp->xnb_tx_lock);
	mutex_enter(&xnbp->xnb_rx_lock);

#define	map_stat(q, r)					\
	case (MAC_STAT_##q):				\
		*val = xnbp->xnb_stat_##r;		\
		break

	switch (stat) {

	map_stat(IPACKETS, opackets);
	map_stat(OPACKETS, ipackets);
	map_stat(RBYTES, obytes);
	map_stat(OBYTES, rbytes);

	default:
		mutex_exit(&xnbp->xnb_rx_lock);
		mutex_exit(&xnbp->xnb_tx_lock);

		return (ENOTSUP);
	}

#undef map_stat

	mutex_exit(&xnbp->xnb_rx_lock);
	mutex_exit(&xnbp->xnb_tx_lock);

	return (0);
}

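/*
 * xnbu_m_getcapab() -- report device capabilities; partial hardware
 * checksumming is the only capability we advertise.
 */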
static boolean_t
xnbu_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	_NOTE(ARGUNUSED(arg));

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *capab = cap_data;

		*capab = HCKSUM_INET_PARTIAL;
		break;
	}
	default:
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * All packets are passed to the peer, so adding and removing
 * multicast addresses is meaningless.
 */
static boolean_t
xnbu_mcast_add(xnb_t *xnbp, ether_addr_t *addr)
{
	_NOTE(ARGUNUSED(xnbp, addr));

	return (B_TRUE);
}

static boolean_t
xnbu_mcast_del(xnb_t *xnbp, ether_addr_t *addr)
{
	_NOTE(ARGUNUSED(xnbp, addr));

	return (B_TRUE);
}

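/*
 * xnbu_attach() -- attach an instance: register our flavour callbacks
 * with the xnb core and register the instance with the MAC layer.
 */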
static int
xnbu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	static xnb_flavour_t flavour = {
		xnbu_to_host, xnbu_peer_connected, xnbu_peer_disconnected,
		xnbu_hotplug_connected, xnbu_start_connect,
		xnbu_cksum_from_peer, xnbu_cksum_to_peer,
		xnbu_mcast_add, xnbu_mcast_del,
	};
	xnbu_t *xnbup;
	xnb_t *xnbp;
	mac_register_t *mr;
	int err;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	xnbup = kmem_zalloc(sizeof (*xnbup), KM_SLEEP);

	if ((mr = mac_alloc(MAC_VERSION)) == NULL) {
		kmem_free(xnbup, sizeof (*xnbup));
		return (DDI_FAILURE);
	}

	if (xnb_attach(dip, &flavour, xnbup) != DDI_SUCCESS) {
		mac_free(mr);
		kmem_free(xnbup, sizeof (*xnbup));
		return (DDI_FAILURE);
	}

	xnbp = ddi_get_driver_private(dip);
	ASSERT(xnbp != NULL);

	mr->m_dip = dip;
	mr->m_driver = xnbp;

	/*
	 * Initialize pointers to device specific functions which will be
	 * used by the generic layer.
	 */
	mr->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	mr->m_src_addr = xnbp->xnb_mac_addr;
	mr->m_callbacks = &xnbu_callbacks;
	mr->m_min_sdu = 0;
	mr->m_max_sdu = XNBMAXPKT;
	/*
	 * xnbu is a virtual device, and it is not associated with any
	 * physical device. Its margin size is determined by the maximum
	 * packet size it can handle, which is PAGESIZE.
	 */
	mr->m_margin = PAGESIZE - XNBMAXPKT - sizeof (struct ether_header);

	(void) memset(xnbp->xnb_mac_addr, 0xff, ETHERADDRL);
	xnbp->xnb_mac_addr[0] &= 0xfe;
	xnbup->u_need_sched = B_FALSE;

	/*
	 * Register ourselves with the GLDv3 interface.
	 */
	err = mac_register(mr, &xnbup->u_mh);
	mac_free(mr);
	if (err != 0) {
		xnb_detach(dip);
		kmem_free(xnbup, sizeof (*xnbup));
		return (DDI_FAILURE);
	}

	mac_link_update(xnbup->u_mh, LINK_STATE_DOWN);

	return (DDI_SUCCESS);
}

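/*
 * xnbu_detach() -- detach an instance, refusing while the peer is still
 * connected or transmit buffers remain outstanding; otherwise
 * unregister from the MAC layer and tear down the xnb state.
 */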
/*ARGSUSED*/
int
xnbu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	xnb_t *xnbp = ddi_get_driver_private(dip);
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	ASSERT(xnbp != NULL);
	ASSERT(xnbup != NULL);

	mutex_enter(&xnbp->xnb_tx_lock);
	mutex_enter(&xnbp->xnb_rx_lock);

	if (!xnbp->xnb_detachable || xnbp->xnb_connected ||
	    (xnbp->xnb_tx_buf_count > 0)) {
		mutex_exit(&xnbp->xnb_rx_lock);
		mutex_exit(&xnbp->xnb_tx_lock);
		return (DDI_FAILURE);
	}

	mutex_exit(&xnbp->xnb_rx_lock);
	mutex_exit(&xnbp->xnb_tx_lock);

	/*
	 * Attempt to unregister the mac.
	 */
	if ((xnbup->u_mh != NULL) && (mac_unregister(xnbup->u_mh) != 0))
		return (DDI_FAILURE);

	kmem_free(xnbup, sizeof (*xnbup));

	xnb_detach(dip);

	return (DDI_SUCCESS);
}

DDI_DEFINE_STREAM_OPS(ops, nulldev, nulldev, xnbu_attach, xnbu_detach,
    nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);

static struct modldrv modldrv = {
	&mod_driverops, "xnbu driver", &ops
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

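/*
 * Loadable module entry points.
 */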
int
_init(void)
{
	int i;

	mac_init_ops(&ops, "xnbu");

	i = mod_install(&modlinkage);
	if (i != DDI_SUCCESS)
		mac_fini_ops(&ops);

	return (i);
}

int
_fini(void)
{
	int i;

	i = mod_remove(&modlinkage);
	if (i == DDI_SUCCESS)
		mac_fini_ops(&ops);

	return (i);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}