summaryrefslogtreecommitdiff
path: root/usr/src/uts/common/io/ppp/sppp/sppp.c
blob: 1d9e9fed702897d30ff3be894853dfbdcbb54d85 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
/*
 * sppp.c - Solaris STREAMS PPP multiplexing pseudo-driver
 *
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 * Copyright 2019, Joyent, Inc.
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation is hereby granted, provided that the above copyright
 * notice appears in all copies.
 *
 * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF
 * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
 * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT.  SUN SHALL NOT BE LIABLE FOR
 * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
 * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES
 *
 * Copyright (c) 1994 The Australian National University.
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation is hereby granted, provided that the above copyright
 * notice appears in all copies.  This software is provided without any
 * warranty, express or implied. The Australian National University
 * makes no representations about the suitability of this software for
 * any purpose.
 *
 * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
 * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
 * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
 * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
 * OR MODIFICATIONS.
 *
 * This driver is derived from the original SVR4 STREAMS PPP driver
 * originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>.
 *
 * Adi Masputra <adi.masputra@sun.com> rewrote and restructured the code
 * for improved performance and scalability.
 */

#define	RCSID	"$Id: sppp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $"

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/stream.h>
#include <sys/stropts.h>
#include <sys/sysmacros.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/dlpi.h>
#include <sys/ddi.h>
#include <sys/kstat.h>
#include <sys/strsun.h>
#include <sys/ethernet.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <net/ppp_defs.h>
#include <net/pppio.h>
#include "sppp.h"
#include "s_common.h"

/*
 * This is used to tag official Solaris sources.  Please do not define
 * "INTERNAL_BUILD" when building this software outside of Sun Microsystems.
 */
#ifdef INTERNAL_BUILD
/* MODINFO is limited to 32 characters. */
const char sppp_module_description[] = "PPP 4.0 mux";
#else /* INTERNAL_BUILD */
const char sppp_module_description[] = "ANU PPP mux";

/* LINTED */
static const char buildtime[] = "Built " __DATE__ " at " __TIME__
#ifdef DEBUG
" DEBUG"
#endif
"\n";
#endif /* INTERNAL_BUILD */

static void	sppp_inner_ioctl(queue_t *, mblk_t *);
static void	sppp_outer_ioctl(queue_t *, mblk_t *);
static queue_t	*sppp_send(queue_t *, mblk_t **, spppstr_t *);
static queue_t	*sppp_recv(queue_t *, mblk_t **, spppstr_t *);
static void	sppp_recv_nondata(queue_t *, mblk_t *, spppstr_t *);
static queue_t	*sppp_outpkt(queue_t *, mblk_t **, int, spppstr_t *);
static spppstr_t *sppp_inpkt(queue_t *, mblk_t *, spppstr_t *);
static int	sppp_kstat_update(kstat_t *, int);
static void	sppp_release_pkts(sppa_t *, uint16_t);

/*
 * sps_list contains the list of active per-stream instance state structures
 * ordered on the minor device number (see sppp.h for details). All streams
 * opened to this driver are threaded together in this list.
 */
static spppstr_t *sps_list = NULL;
/*
 * ppa_list contains the list of active per-attachment instance state
 * structures ordered on the ppa id number (see sppp.h for details). All of
 * the ppa structures created once per PPPIO_NEWPPA ioctl are threaded together
 * in this list. There is exactly one ppa structure for a given PPP interface,
 * and multiple sps streams (upper streams) may share a ppa by performing
 * an attachment explicitly (PPPIO_ATTACH) or implicitly (DL_ATTACH_REQ).
 */
static sppa_t *ppa_list = NULL;

static const char *kstats_names[] = { SPPP_KSTATS_NAMES };
static const char *kstats64_names[] = { SPPP_KSTATS64_NAMES };

/*
 * map proto (which is an IANA defined ppp network protocol) to
 * a bit position indicated by NP_* in ppa_npflag
 */
static uint32_t
sppp_ppp2np(uint16_t proto)
{
	/*
	 * Translate an IANA-assigned PPP network protocol number into
	 * the matching NP_* bit position used in ppa_npflag.  Protocols
	 * we do not track map to 0 (no bit).
	 */
	if (proto == PPP_IP)
		return (NP_IP);
	if (proto == PPP_IPV6)
		return (NP_IPV6);
	return (0);
}

/*
 * sppp_open()
 *
 * MT-Perimeters:
 *    exclusive inner, exclusive outer.
 *
 * Description:
 *    Common open procedure for module.
 */
/* ARGSUSED */
int
sppp_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)
{
	spppstr_t	*cur;
	spppstr_t	*newsps;
	spppstr_t	**linkp;
	minor_t		minor_id;

	ASSERT(q != NULL && devp != NULL);
	ASSERT(sflag != MODOPEN);

	/* A stream that is already open needs no further work. */
	if (q->q_ptr != NULL)
		return (0);
	/* This driver only supports clone opens. */
	if (sflag != CLONEOPEN)
		return (OPENFAIL);

	/*
	 * The sps list is kept sorted by minor number.  Walk it looking
	 * for the first "hole" -- the lowest minor id not currently in
	 * use -- which is also the insertion point for the new stream.
	 */
	minor_id = 0;
	linkp = &sps_list;
	while ((cur = *linkp) != NULL && cur->sps_mn_id == minor_id) {
		minor_id++;
		linkp = &cur->sps_nextmn;
	}

	newsps = (spppstr_t *)kmem_zalloc(sizeof (spppstr_t), KM_SLEEP);
	ASSERT(newsps != NULL);		/* KM_SLEEP must never return NULL */
	newsps->sps_nextmn = *linkp;	/* insert stream in global list */
	*linkp = newsps;
	newsps->sps_mn_id = minor_id;	/* save minor id for this stream */
	newsps->sps_rq = q;		/* save read queue pointer */
	newsps->sps_sap = -1;		/* no sap bound to stream */
	newsps->sps_dlstate = DL_UNATTACHED; /* dlpi state is unattached */
	newsps->sps_npmode = NPMODE_DROP; /* drop all packets initially */
	newsps->sps_zoneid = crgetzoneid(credp);
	q->q_ptr = WR(q)->q_ptr = (caddr_t)newsps;
	/*
	 * We explicitly disable the automatic queue scheduling for the
	 * write-side to obtain complete control over queuing during transmit.
	 * Packets will be queued at the upper write queue and the service
	 * routine will not be called until it gets scheduled by having the
	 * lower write service routine call the qenable(WR(uq)) for all streams
	 * attached to the same ppa instance.
	 */
	noenable(WR(q));
	*devp = makedevice(getmajor(*devp), minor_id);
	qprocson(q);
	return (0);
}

/*
 * Free storage used by a PPA.  This is not called until the last PPA
 * user closes their connection or reattaches to a different PPA.
 */
static void
sppp_free_ppa(sppa_t *ppa)
{
	sppa_t **linkp;

	/* Only the final reference holder may free the PPA. */
	ASSERT(ppa->ppa_refcnt == 1);

	/* Remove the statistics before the structure goes away. */
	if (ppa->ppa_kstats != NULL) {
		kstat_delete(ppa->ppa_kstats);
		ppa->ppa_kstats = NULL;
	}

	mutex_destroy(&ppa->ppa_sta_lock);
	mutex_destroy(&ppa->ppa_npmutex);
	rw_destroy(&ppa->ppa_sib_lock);

	/* Unlink this PPA from the global list, then release its memory. */
	for (linkp = &ppa_list; *linkp != NULL;
	    linkp = &(*linkp)->ppa_nextppa) {
		if (*linkp == ppa) {
			*linkp = ppa->ppa_nextppa;
			break;
		}
	}
	kmem_free(ppa, sizeof (*ppa));
}

/*
 * Create a new PPA.  Caller must be exclusive on outer perimeter.
 */
sppa_t *
sppp_create_ppa(uint32_t ppa_id, zoneid_t zoneid)
{
	sppa_t *ppa;
	sppa_t *curppa;
	sppa_t **availppa;
	char unit[32];		/* Unit name */
	const char **cpp;
	kstat_t *ksp;
	kstat_named_t *knt;

	/*
	 * NOTE: unit *must* be named for the driver
	 * name plus the ppa number so that netstat
	 * can find the statistics.
	 *
	 * Use a bounded snprintf (rather than sprintf) so a large ppa_id
	 * can never overrun unit[], and %u since ppa_id is unsigned.
	 */
	(void) snprintf(unit, sizeof (unit), "%s%u", PPP_DRV_NAME, ppa_id);
	/*
	 * Make sure we can allocate a buffer to
	 * contain the ppa to be sent upstream, as
	 * well as the actual ppa structure and its
	 * associated kstat structure.  Both allocations may fail
	 * (KM_NOSLEEP, and kstat_create can return NULL); clean up
	 * whichever succeeded and report failure to the caller.
	 */
	ppa = (sppa_t *)kmem_zalloc(sizeof (sppa_t),
	    KM_NOSLEEP);
	ksp = kstat_create(PPP_DRV_NAME, ppa_id, unit, "net", KSTAT_TYPE_NAMED,
	    sizeof (sppp_kstats_t) / sizeof (kstat_named_t), 0);

	if (ppa == NULL || ksp == NULL) {
		if (ppa != NULL) {
			kmem_free(ppa, sizeof (sppa_t));
		}
		if (ksp != NULL) {
			kstat_delete(ksp);
		}
		return (NULL);
	}
	ppa->ppa_kstats = ksp;		/* chain kstat structure */
	ppa->ppa_ppa_id = ppa_id;	/* record ppa id */
	ppa->ppa_zoneid = zoneid;	/* zone that owns this PPA */
	ppa->ppa_mtu = PPP_MAXMTU;	/* 65535-(PPP_HDRLEN+PPP_FCSLEN) */
	ppa->ppa_mru = PPP_MAXMRU;	/* 65000 */

	mutex_init(&ppa->ppa_sta_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ppa->ppa_npmutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&ppa->ppa_sib_lock, NULL, RW_DRIVER, NULL);

	/*
	 * Prepare and install kstat counters.  Note that for netstat
	 * -i to work, there needs to be "ipackets", "opackets",
	 * "ierrors", and "oerrors" kstat named variables.
	 */
	knt = (kstat_named_t *)ksp->ks_data;
	for (cpp = kstats_names; cpp < kstats_names + Dim(kstats_names);
	    cpp++) {
		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT32);
		knt++;
	}
	for (cpp = kstats64_names; cpp < kstats64_names + Dim(kstats64_names);
	    cpp++) {
		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT64);
		knt++;
	}
	ksp->ks_update = sppp_kstat_update;
	ksp->ks_private = (void *)ppa;
	kstat_install(ksp);

	/*
	 * Link to the next ppa and insert into the global list, which is
	 * kept sorted in ascending ppa id order.
	 */
	availppa = &ppa_list;
	while ((curppa = *availppa) != NULL) {
		if (ppa_id < curppa->ppa_ppa_id)
			break;
		availppa = &curppa->ppa_nextppa;
	}
	ppa->ppa_nextppa = *availppa;
	*availppa = ppa;
	return (ppa);
}

/*
 * sppp_close()
 *
 * MT-Perimeters:
 *    exclusive inner, exclusive outer.
 *
 * Description:
 *    Common close procedure for module.
 */
/* ARGSUSED */
int
sppp_close(queue_t *q, int flags __unused, cred_t *credp __unused)
{
	spppstr_t	*sps;
	spppstr_t	**nextmn;
	spppstr_t	*sib;
	sppa_t		*ppa;
	mblk_t		*mp;

	ASSERT(q != NULL && q->q_ptr != NULL);
	sps = (spppstr_t *)q->q_ptr;
	qprocsoff(q);

	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		/* Never attached to a ppa; nothing to detach. */
		ASSERT(!IS_SPS_CONTROL(sps));
		goto close_unattached;
	}
	if (IS_SPS_CONTROL(sps)) {
		/*
		 * cnt audits ppa_refcnt: one for this control stream plus
		 * one per sibling stream walked below.
		 */
		uint32_t	cnt = 0;

		ASSERT(ppa != NULL);
		ASSERT(ppa->ppa_ctl == sps);
		ppa->ppa_ctl = NULL;
		/*
		 * STREAMS framework always issues I_UNLINK prior to close,
		 * since we only allow I_LINK under the control stream.
		 * A given ppa structure has at most one lower stream pointed
		 * by the ppa_lower_wq field, because we only allow a single
		 * linkage (I_LINK) to be done on the control stream.
		 */
		ASSERT(ppa->ppa_lower_wq == NULL);
		/*
		 * Walk through all of sibling streams attached to this ppa,
		 * and remove all references to this ppa. We have exclusive
		 * access for the entire driver here, so there's no need
		 * to hold ppa_sib_lock.
		 */
		cnt++;		/* count the control stream itself */
		sib = ppa->ppa_streams;
		while (sib != NULL) {
			ASSERT(ppa == sib->sps_ppa);
			sib->sps_npmode = NPMODE_DROP;
			sib->sps_flags &= ~(SPS_PIOATTACH | SPS_CACHED);
			/*
			 * There should be a preallocated hangup
			 * message here.  Fetch it and send it up to
			 * the stream head.  This will cause IP to
			 * mark the interface as "down."
			 */
			if ((mp = sib->sps_hangup) != NULL) {
				sib->sps_hangup = NULL;
				/*
				 * M_HANGUP works with IP, but snoop
				 * is lame and requires M_ERROR.  Send
				 * up a clean error code instead.
				 *
				 * XXX if snoop is fixed, fix this, too.
				 */
				MTYPE(mp) = M_ERROR;
				*mp->b_wptr++ = ENXIO;
				putnext(sib->sps_rq, mp);
			}
			qenable(WR(sib->sps_rq));
			cnt++;
			sib = sib->sps_nextsib;
		}
		ASSERT(ppa->ppa_refcnt == cnt);
	} else {
		/* A non-control (data) stream detaching from its ppa. */
		ASSERT(ppa->ppa_streams != NULL);
		ASSERT(ppa->ppa_ctl != sps);
		mp = NULL;
		/* Invalidate any cached fastpath stream for our sap. */
		if (sps->sps_sap == PPP_IP) {
			ppa->ppa_ip_cache = NULL;
			mp = create_lsmsg(PPP_LINKSTAT_IPV4_UNBOUND);
		} else if (sps->sps_sap == PPP_IPV6) {
			ppa->ppa_ip6_cache = NULL;
			mp = create_lsmsg(PPP_LINKSTAT_IPV6_UNBOUND);
		}
		/* Tell the daemon the bad news. */
		if (mp != NULL && ppa->ppa_ctl != NULL &&
		    (sps->sps_npmode == NPMODE_PASS ||
		    sps->sps_npmode == NPMODE_QUEUE)) {
			putnext(ppa->ppa_ctl->sps_rq, mp);
		} else {
			freemsg(mp);
		}
		/*
		 * Walk through all of sibling streams attached to the
		 * same ppa, and remove this stream from the sibling
		 * streams list. We have exclusive access for the
		 * entire driver here, so there's no need to hold
		 * ppa_sib_lock.
		 */
		sib = ppa->ppa_streams;
		if (sib == sps) {
			ppa->ppa_streams = sps->sps_nextsib;
		} else {
			while (sib->sps_nextsib != NULL) {
				if (sib->sps_nextsib == sps) {
					sib->sps_nextsib = sps->sps_nextsib;
					break;
				}
				sib = sib->sps_nextsib;
			}
		}
		sps->sps_nextsib = NULL;
		freemsg(sps->sps_hangup);
		sps->sps_hangup = NULL;
		/*
		 * Check if this is a promiscuous stream. If the SPS_PROMISC
		 * bit is still set, it means that the stream is closed without
		 * ever having issued DL_DETACH_REQ or DL_PROMISCOFF_REQ.
		 * In this case, we simply decrement the promiscuous counter,
		 * and it's safe to do it without holding ppa_sib_lock since
		 * we're exclusive (inner and outer) at this point.
		 */
		if (IS_SPS_PROMISC(sps)) {
			ASSERT(ppa->ppa_promicnt > 0);
			ppa->ppa_promicnt--;
		}
	}
	/* If we're the only one left, then delete now. */
	if (ppa->ppa_refcnt <= 1)
		sppp_free_ppa(ppa);
	else
		ppa->ppa_refcnt--;
close_unattached:
	/* Unlink this stream from the global minor-ordered sps list. */
	q->q_ptr = WR(q)->q_ptr = NULL;
	for (nextmn = &sps_list; *nextmn != NULL;
	    nextmn = &(*nextmn)->sps_nextmn) {
		if (*nextmn == sps) {
			*nextmn = sps->sps_nextmn;
			break;
		}
	}
	kmem_free(sps, sizeof (spppstr_t));
	return (0);
}

/*
 * sppp_ioctl()
 *
 * Description:
 *    Handle the subset of M_IOCTL commands dispatched here from
 *    sppp_uwput (PPPIO_NPMODE, PPPIO_GIDLE, PPPIO_GTYPE,
 *    PPPIO_GETSTAT64, PPPIO_GETCSTAT).  On success the ioctl is acked
 *    with miocack; otherwise it is nak'd with the accumulated error
 *    (EINVAL by default).  GETSTAT64/GETCSTAT may instead be forwarded
 *    to the lower stream, in which case this function returns without
 *    acking -- the reply comes back asynchronously and is matched via
 *    sps_ioc_id.
 */
static void
sppp_ioctl(struct queue *q, mblk_t *mp)
{
	spppstr_t	*sps;
	spppstr_t	*nextsib;
	sppa_t		*ppa;
	struct iocblk	*iop;
	mblk_t		*nmp;
	enum NPmode	npmode;
	struct ppp_idle	*pip;
	struct ppp_stats64 *psp;
	struct ppp_comp_stats *pcsp;
	hrtime_t	hrtime;
	int		sap;
	int		count = 0;	/* bytes returned in the ack */
	int		error = EINVAL;	/* default nak code */

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;

	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case PPPIO_NPMODE:
		/* Only the control stream may change a sibling's npmode. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (iop->ioc_count != 2 * sizeof (uint32_t) ||
		    (mp->b_cont == NULL)) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		/* Payload is two uint32_t's: the sap and the new mode. */
		sap = ((uint32_t *)mp->b_cont->b_rptr)[0];
		npmode = (enum NPmode)((uint32_t *)mp->b_cont->b_rptr)[1];
		/*
		 * Walk the sibling streams which belong to the same
		 * ppa, and try to find a stream with matching sap
		 * number.
		 */
		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (nextsib->sps_sap == sap) {
				break;	/* found it */
			}
		}
		if (nextsib == NULL) {
			rw_exit(&ppa->ppa_sib_lock);
			break;		/* return EINVAL */
		} else {
			nextsib->sps_npmode = npmode;
			/*
			 * If the stream is no longer in QUEUE mode, kick
			 * its write service routine to drain queued data.
			 */
			if ((nextsib->sps_npmode != NPMODE_QUEUE) &&
			    (WR(nextsib->sps_rq)->q_first != NULL)) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		rw_exit(&ppa->ppa_sib_lock);
		error = 0;	/* return success */
		break;
	case PPPIO_GIDLE:
		if (ppa == NULL) {
			ASSERT(!IS_SPS_CONTROL(sps));
			error = ENOLINK;
			break;
		} else if (!IS_PPA_TIMESTAMP(ppa)) {
			/* Idle times require PPPIO_USETIMESTAMP first. */
			break;		/* return EINVAL */
		}
		if ((nmp = allocb(sizeof (struct ppp_idle),
		    BPRI_MED)) == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		/* Replace any payload sent down with our reply block. */
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pip = (struct ppp_idle *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_idle);
		/*
		 * Get current timestamp and subtract the tx and rx
		 * timestamps to get the actual idle time to be
		 * returned (converted from nanoseconds to seconds).
		 */
		hrtime = gethrtime();
		pip->xmit_idle = (hrtime - ppa->ppa_lasttx) / 1000000000ul;
		pip->recv_idle = (hrtime - ppa->ppa_lastrx) / 1000000000ul;
		count = msgsize(nmp);
		error = 0;
		break;		/* return success (error is 0) */
	case PPPIO_GTYPE:
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		/*
		 * Let the requestor know that we are the PPP
		 * multiplexer (PPPTYP_MUX).
		 */
		*(uint32_t *)nmp->b_wptr = PPPTYP_MUX;
		nmp->b_wptr += sizeof (uint32_t);
		count = msgsize(nmp);
		error = 0;		/* return success */
		break;
	case PPPIO_GETSTAT64:
		if (ppa == NULL) {
			break;		/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			/* Not last module: forward the request downstream. */
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * We match sps_ioc_id on the M_IOC{ACK,NAK},
			 * so if the response hasn't come back yet,
			 * new ioctls must be queued instead.
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID - this will be
				 * used to check the ACK or NAK responses
				 * coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (*psp), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		psp = (struct ppp_stats64 *)nmp->b_wptr;
		/*
		 * Copy the contents of ppp_stats64 structure for this
		 * ppa and return them to the caller.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		bcopy(&ppa->ppa_stats, psp, sizeof (*psp));
		mutex_exit(&ppa->ppa_sta_lock);
		nmp->b_wptr += sizeof (*psp);
		count = sizeof (*psp);
		error = 0;		/* return success */
		break;
	case PPPIO_GETCSTAT:
		if (ppa == NULL) {
			break;		/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID - this will be
				 * used to check the ACK or NAK responses
				 * coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		/* No compression in this mux; report all-zero stats. */
		pcsp = (struct ppp_comp_stats *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_comp_stats);
		bzero((caddr_t)pcsp, sizeof (struct ppp_comp_stats));
		count = msgsize(nmp);
		error = 0;		/* return success */
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}

/*
 * sppp_uwput()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Upper write-side put procedure. Messages from above arrive here.
 */
int
sppp_uwput(queue_t *q, mblk_t *mp)
{
	queue_t		*nextq;
	spppstr_t	*sps;
	sppa_t		*ppa;
	struct iocblk	*iop;
	int		error;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;

	switch (MTYPE(mp)) {
	case M_PCPROTO:
	case M_PROTO:
		if (IS_SPS_CONTROL(sps)) {
			/* Control stream: forward the message downstream. */
			ASSERT(ppa != NULL);
			/*
			 * Intentionally change this to a high priority
			 * message so it doesn't get queued up. M_PROTO is
			 * specifically used for signalling between pppd and its
			 * kernel-level component(s), such as ppptun, so we
			 * make sure that it doesn't get queued up behind
			 * data messages.
			 */
			MTYPE(mp) = M_PCPROTO;
			if ((ppa->ppa_lower_wq != NULL) &&
			    canputnext(ppa->ppa_lower_wq)) {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_mctlsfwd++;
				mutex_exit(&ppa->ppa_sta_lock);
				putnext(ppa->ppa_lower_wq, mp);
			} else {
				/* No lower stream or it's congested; drop. */
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_mctlsfwderr++;
				mutex_exit(&ppa->ppa_sta_lock);
				freemsg(mp);
			}
		} else {
			/* Non-control streams speak DLPI; handle that. */
			(void) sppp_mproto(q, mp, sps);
			return (0);
		}
		break;
	case M_DATA:
		/* sppp_send returns the queue to forward to, or NULL. */
		if ((nextq = sppp_send(q, &mp, sps)) != NULL)
			putnext(nextq, mp);
		break;
	case M_IOCTL:
		error = EINVAL;
		iop = (struct iocblk *)mp->b_rptr;
		switch (iop->ioc_cmd) {
		case DLIOCRAW:
		case DL_IOC_HDR_INFO:
		case PPPIO_ATTACH:
		case PPPIO_DEBUG:
		case PPPIO_DETACH:
		case PPPIO_LASTMOD:
		case PPPIO_MRU:
		case PPPIO_MTU:
		case PPPIO_USETIMESTAMP:
		case PPPIO_BLOCKNP:
		case PPPIO_UNBLOCKNP:
			/* Needs exclusive inner perimeter access. */
			qwriter(q, mp, sppp_inner_ioctl, PERIM_INNER);
			return (0);
		case I_LINK:
		case I_UNLINK:
		case PPPIO_NEWPPA:
			/* Needs exclusive outer perimeter access. */
			qwriter(q, mp, sppp_outer_ioctl, PERIM_OUTER);
			return (0);
		case PPPIO_NPMODE:
		case PPPIO_GIDLE:
		case PPPIO_GTYPE:
		case PPPIO_GETSTAT64:
		case PPPIO_GETCSTAT:
			/*
			 * These require additional auto variables to
			 * handle, so (for optimization reasons)
			 * they're moved off to a separate function.
			 */
			sppp_ioctl(q, mp);
			return (0);
		case PPPIO_GETSTAT:
			break;			/* 32 bit interface gone */
		default:
			/* Unknown ioctls are forwarded to the lower stream. */
			if (iop->ioc_cr == NULL ||
			    secpolicy_ppp_config(iop->ioc_cr) != 0) {
				error = EPERM;
				break;
			} else if ((ppa == NULL) ||
			    (ppa->ppa_lower_wq == NULL)) {
				break;		/* return EINVAL */
			}
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return (0);
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID -
				 * this will be used to check the
				 * ACK or NAK responses coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return (0);	/* don't ack or nak the request */
		}
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
		break;
	case M_FLUSH:
		/* Standard STREAMS flush handling for a driver. */
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHDATA);
		}
		if (*mp->b_rptr & FLUSHR) {
			*mp->b_rptr &= ~FLUSHW;
			qreply(q, mp);
		} else {
			freemsg(mp);
		}
		break;
	default:
		freemsg(mp);
		break;
	}
	return (0);
}

/*
 * sppp_uwsrv()
 *
 * MT-Perimeters:
 *    exclusive inner, shared outer.
 *
 * Description:
 *    Upper write-side service procedure. Note that this procedure does
 *    not get called when a message is placed on our write-side queue, since
 *    automatic queue scheduling has been turned off by noenable() when
 *    the queue was opened. We do this on purpose, as we explicitly control
 *    the write-side queue. Therefore, this procedure gets called when
 *    the lower write service procedure qenable() the upper write stream queue.
 */
int
sppp_uwsrv(queue_t *q)
{
	spppstr_t	*sps;
	sppa_t		*ppa;
	mblk_t		*mp;
	queue_t		*nextq;
	struct iocblk	*iop;

	ASSERT(q != NULL && q->q_ptr != NULL);
	sps = (spppstr_t *)q->q_ptr;

	/* Drain everything that was parked on the write-side queue. */
	while ((mp = getq(q)) != NULL) {
		if (MTYPE(mp) == M_IOCTL) {
			/* An ioctl deferred because one was already in flight. */
			ppa = sps->sps_ppa;
			if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
				miocnak(q, mp, 0, EINVAL);
				continue;
			}

			iop = (struct iocblk *)mp->b_rptr;
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				/* Still busy; put it back and wait for qenable. */
				mutex_exit(&ppa->ppa_sta_lock);
				if (putbq(q, mp) == 0)
					miocnak(q, mp, 0, EAGAIN);
				break;
			} else {
				/* Forward it down; remember the id for ACK/NAK. */
				ppa->ppa_ioctlsfwd++;
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
				putnext(ppa->ppa_lower_wq, mp);
			}
		} else if ((nextq =
		    sppp_outpkt(q, &mp, msgdsize(mp), sps)) == NULL) {
			/* Not sendable now; requeue (or it was freed). */
			if (mp != NULL) {
				if (putbq(q, mp) == 0)
					freemsg(mp);
				break;
			}
		} else {
			putnext(nextq, mp);
		}
	}
	return (0);
}

/*
 * Detach a stream from its ppa.  If this is the last reference to the
 * ppa, the ppa itself is destroyed; otherwise the stream is unlinked
 * from the ppa's sibling list and the reference count is dropped.
 */
void
sppp_remove_ppa(spppstr_t *sps)
{
	sppa_t *ppa = sps->sps_ppa;
	spppstr_t **spp;

	rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
	if (ppa->ppa_refcnt <= 1) {
		/* Last reference; tear the ppa down entirely. */
		rw_exit(&ppa->ppa_sib_lock);
		sppp_free_ppa(ppa);
	} else {
		/* Unlink sps from the singly-linked sibling list. */
		for (spp = &ppa->ppa_streams; *spp != NULL;
		    spp = &(*spp)->sps_nextsib) {
			if (*spp == sps) {
				*spp = sps->sps_nextsib;
				break;
			}
		}
		ppa->ppa_refcnt--;
		/*
		 * And if this stream was marked as promiscuous
		 * (SPS_PROMISC), then we need to update the
		 * promiscuous streams count. This should only happen
		 * when DL_DETACH_REQ is issued prior to marking the
		 * stream as non-promiscuous, through
		 * DL_PROMISCOFF_REQ request.
		 */
		if (IS_SPS_PROMISC(sps)) {
			ASSERT(ppa->ppa_promicnt > 0);
			ppa->ppa_promicnt--;
		}
		rw_exit(&ppa->ppa_sib_lock);
	}
	/* Sever the stream's back-pointers and pending hangup message. */
	sps->sps_nextsib = NULL;
	sps->sps_ppa = NULL;
	freemsg(sps->sps_hangup);
	sps->sps_hangup = NULL;
}

/*
 * Look up a ppa structure by its unit number on the global ppa list.
 * Returns NULL if no ppa with the given id exists.
 */
sppa_t *
sppp_find_ppa(uint32_t ppa_id)
{
	sppa_t *cur = ppa_list;

	while (cur != NULL && cur->ppa_ppa_id != ppa_id)
		cur = cur->ppa_nextppa;
	return (cur);
}

/*
 * sppp_inner_ioctl()
 *
 * MT-Perimeters:
 *    exclusive inner, shared outer
 *
 * Description:
 *    Called by sppp_uwput as a result of receiving ioctls which require
 *    an exclusive access at the inner perimeter.
 */
static void
sppp_inner_ioctl(queue_t *q, mblk_t *mp)
{
	spppstr_t	*sps;
	sppa_t		*ppa;
	struct iocblk	*iop;
	mblk_t		*nmp;
	int		error = EINVAL;	/* default reply if no case claims it */
	int		count = 0;	/* byte count returned in the ACK */
	int		dbgcmd;
	int		mru, mtu;
	uint32_t	ppa_id;
	hrtime_t	hrtime;
	uint16_t	proto;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;
	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case DLIOCRAW:
		/* Put a non-control stream into raw-data mode. */
		if (IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		}
		sps->sps_flags |= SPS_RAWDATA;
		error = 0;		/* return success */
		break;
	case DL_IOC_HDR_INFO:
		/*
		 * IP fastpath request: validate the DL_UNITDATA_REQ
		 * template, then append a prebuilt 4-byte PPP header
		 * and mark the stream as fastpath.
		 */
		if (IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if ((mp->b_cont == NULL) ||
		    *((t_uscalar_t *)mp->b_cont->b_rptr) != DL_UNITDATA_REQ ||
		    (MBLKL(mp->b_cont) < (sizeof (dl_unitdata_req_t) +
		    SPPP_ADDRL))) {
			error = EPROTO;
			break;
		} else if (ppa == NULL) {
			error = ENOLINK;
			break;
		}
		if ((nmp = allocb(PPP_HDRLEN, BPRI_MED)) == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOMEM;
			break;
		}
		/* Canonical PPP header: FF 03 followed by the 16-bit sap. */
		*(uchar_t *)nmp->b_wptr++ = PPP_ALLSTATIONS;
		*(uchar_t *)nmp->b_wptr++ = PPP_UI;
		*(uchar_t *)nmp->b_wptr++ = sps->sps_sap >> 8;
		*(uchar_t *)nmp->b_wptr++ = sps->sps_sap & 0xff;
		ASSERT(MBLKL(nmp) == PPP_HDRLEN);

		linkb(mp, nmp);
		sps->sps_flags |= SPS_FASTPATH;
		error = 0;		/* return success */
		count = msgsize(nmp);
		break;
	case PPPIO_ATTACH:
		/* Attach this stream to the ppa named in the payload. */
		if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
		    (sps->sps_dlstate != DL_UNATTACHED) ||
		    (iop->ioc_count != sizeof (uint32_t))) {
			break;		/* return EINVAL */
		} else if (mp->b_cont == NULL) {
			error = EPROTO;
			break;
		}
		ASSERT(mp->b_cont->b_rptr != NULL);
		/* If there's something here, it's detached. */
		if (ppa != NULL) {
			sppp_remove_ppa(sps);
		}
		ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
		ppa = sppp_find_ppa(ppa_id);
		/*
		 * If we can't find it, then it's either because the requestor
		 * has supplied a wrong ppa_id to be attached to, or because
		 * the control stream for the specified ppa_id has been closed
		 * before we get here.
		 */
		if (ppa == NULL) {
			error = ENOENT;
			break;
		}
		if (iop->ioc_cr == NULL ||
		    ppa->ppa_zoneid != crgetzoneid(iop->ioc_cr)) {
			error = EPERM;
			break;
		}
		/*
		 * Preallocate the hangup message so that we're always
		 * able to send this upstream in the event of a
		 * catastrophic failure.
		 */
		if ((sps->sps_hangup = allocb(1, BPRI_MED)) == NULL) {
			error = ENOSR;
			break;
		}
		/*
		 * There are two ways to attach a stream to a ppa: one is
		 * through DLPI (DL_ATTACH_REQ) and the other is through
		 * PPPIO_ATTACH. This is why we need to distinguish whether or
		 * not a stream was allocated via PPPIO_ATTACH, so that we can
		 * properly detach it when we receive PPPIO_DETACH ioctl
		 * request.
		 */
		sps->sps_flags |= SPS_PIOATTACH;
		sps->sps_ppa = ppa;
		/*
		 * Add this stream to the head of the list of sibling streams
		 * which belong to the same ppa as specified.
		 */
		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
		ppa->ppa_refcnt++;
		sps->sps_nextsib = ppa->ppa_streams;
		ppa->ppa_streams = sps;
		rw_exit(&ppa->ppa_sib_lock);
		error = 0;		/* return success */
		break;
	case PPPIO_BLOCKNP:
	case PPPIO_UNBLOCKNP:
		/* Block/unblock receive traffic for a network protocol. */
		if (iop->ioc_cr == NULL ||
		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
			error = EPERM;
			break;
		}
		error = miocpullup(mp, sizeof (uint16_t));
		if (error != 0)
			break;
		ASSERT(mp->b_cont->b_rptr != NULL);
		proto = *(uint16_t *)mp->b_cont->b_rptr;
		/*
		 * NOTE(review): unlike PPPIO_DEBUG below, this path does not
		 * check ppa != NULL before taking ppa_npmutex; presumably
		 * this ioctl only arrives on attached streams -- confirm.
		 */
		if (iop->ioc_cmd == PPPIO_BLOCKNP) {
			uint32_t npflagpos = sppp_ppp2np(proto);
			/*
			 * Mark proto as blocked in ppa_npflag until the
			 * corresponding queues for proto have been plumbed.
			 */
			if (npflagpos != 0) {
				mutex_enter(&ppa->ppa_npmutex);
				ppa->ppa_npflag |= (1 << npflagpos);
				mutex_exit(&ppa->ppa_npmutex);
			} else {
				error = EINVAL;
			}
		} else {
			/*
			 * reset ppa_npflag and release proto
			 * packets that were being held in control queue.
			 */
			sppp_release_pkts(ppa, proto);
		}
		break;
	case PPPIO_DEBUG:
		if (iop->ioc_cr == NULL ||
		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
			error = EPERM;
			break;
		} else if (iop->ioc_count != sizeof (uint32_t)) {
			break;		/* return EINVAL */
		} else if (mp->b_cont == NULL) {
			error = EPROTO;
			break;
		}
		ASSERT(mp->b_cont->b_rptr != NULL);
		dbgcmd = *(uint32_t *)mp->b_cont->b_rptr;
		/*
		 * We accept PPPDBG_LOG + PPPDBG_DRIVER value as an indication
		 * that SPS_KDEBUG needs to be enabled for this upper stream.
		 */
		if (dbgcmd == PPPDBG_LOG + PPPDBG_DRIVER) {
			sps->sps_flags |= SPS_KDEBUG;
			error = 0;	/* return success */
			break;
		}
		/*
		 * Otherwise, for any other values, we send them down only if
		 * there is an attachment and if the attachment has something
		 * linked underneath it.
		 */
		if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
			error = ENOLINK;
			break;
		}
		mutex_enter(&ppa->ppa_sta_lock);
		/*
		 * See comments in PPPIO_GETSTAT64 case
		 * in sppp_ioctl().
		 */
		if (IS_SPS_IOCQ(sps)) {
			/* An ioctl is already in flight; queue this one. */
			mutex_exit(&ppa->ppa_sta_lock);
			if (!putq(q, mp)) {
				error = EAGAIN;
				break;
			}
			return;
		} else {
			ppa->ppa_ioctlsfwd++;
			/*
			 * Record the ioctl CMD & ID -
			 * this will be used to check the
			 * ACK or NAK responses coming from below.
			 */
			sps->sps_ioc_id = iop->ioc_id;
			sps->sps_flags |= SPS_IOCQ;
			mutex_exit(&ppa->ppa_sta_lock);
		}
		putnext(ppa->ppa_lower_wq, mp);
		return;			/* don't ack or nak the request */
	case PPPIO_DETACH:
		if (!IS_SPS_PIOATTACH(sps)) {
			break;		/* return EINVAL */
		}
		/*
		 * The SPS_PIOATTACH flag set on the stream tells us that
		 * the ppa field is still valid. In the event that the control
		 * stream be closed prior to this stream's detachment, the
		 * SPS_PIOATTACH flag would have been cleared from this stream
		 * during close; in that case we won't get here.
		 */
		ASSERT(ppa != NULL);
		ASSERT(ppa->ppa_ctl != sps);
		ASSERT(sps->sps_dlstate == DL_UNATTACHED);

		/*
		 * We don't actually detach anything until the stream is
		 * closed or reattached.
		 */

		sps->sps_flags &= ~SPS_PIOATTACH;
		error = 0;		/* return success */
		break;
	case PPPIO_LASTMOD:
		/* Mark this ppa as the last PPP module on the stream. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		}
		ASSERT(ppa != NULL);
		ppa->ppa_flags |= PPA_LASTMOD;
		error = 0;		/* return success */
		break;
	case PPPIO_MRU:
		/* Set the receive unit size; clamp into [PPP_MRU, PPP_MAXMRU]. */
		if (!IS_SPS_CONTROL(sps) ||
		    (iop->ioc_count != sizeof (uint32_t))) {
			break;		/* return EINVAL */
		} else if (mp->b_cont == NULL) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		mru = *(uint32_t *)mp->b_cont->b_rptr;
		if ((mru <= 0) || (mru > PPP_MAXMRU)) {
			error = EPROTO;
			break;
		}
		if (mru < PPP_MRU) {
			mru = PPP_MRU;
		}
		ppa->ppa_mru = (uint16_t)mru;
		/*
		 * If there's something beneath this driver for the ppa, then
		 * inform it (or them) of the MRU size. Only do this is we
		 * are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MRU,
			    mru);
		}
		error = 0;		/* return success */
		break;
	case PPPIO_MTU:
		/* Set the transmit unit size; must not exceed PPP_MAXMTU. */
		if (!IS_SPS_CONTROL(sps) ||
		    (iop->ioc_count != sizeof (uint32_t))) {
			break;		/* return EINVAL */
		} else if (mp->b_cont == NULL) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		mtu = *(uint32_t *)mp->b_cont->b_rptr;
		if ((mtu <= 0) || (mtu > PPP_MAXMTU)) {
			error = EPROTO;
			break;
		}
		ppa->ppa_mtu = (uint16_t)mtu;
		/*
		 * If there's something beneath this driver for the ppa, then
		 * inform it (or them) of the MTU size. Only do this if we
		 * are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MTU,
			    mtu);
		}
		error = 0;		/* return success */
		break;
	case PPPIO_USETIMESTAMP:
		/* Enable hrtime link-activity stamping for this ppa. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		}
		if (!IS_PPA_TIMESTAMP(ppa)) {
			hrtime = gethrtime();
			ppa->ppa_lasttx = ppa->ppa_lastrx = hrtime;
			ppa->ppa_flags |= PPA_TIMESTAMP;
		}
		error = 0;
		break;
	}

	if (error == 0) {
		/* Success; tell the user */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream */
		miocnak(q, mp, 0, error);
	}
}

/*
 * sppp_outer_ioctl()
 *
 * MT-Perimeters:
 *    exclusive inner, exclusive outer
 *
 * Description:
 *    Called by sppp_uwput as a result of receiving ioctls which require
 *    an exclusive access at the outer perimeter.
 */
/*
 * Handle I_LINK, I_UNLINK and PPPIO_NEWPPA while holding the outer
 * perimeter exclusively.  Always replies to the ioctl with either
 * miocack (error == 0) or miocnak.
 */
static void
sppp_outer_ioctl(queue_t *q, mblk_t *mp)
{
	spppstr_t	*sps;		/* assigned once below (was redundantly
					 * initialized at declaration, too) */
	spppstr_t	*nextsib;
	queue_t		*lwq;
	sppa_t		*ppa;
	struct iocblk	*iop;
	int		error = EINVAL;	/* default reply if no case claims it */
	int		count = 0;	/* byte count returned in the ACK */
	uint32_t	ppa_id;
	mblk_t		*nmp;
	zoneid_t	zoneid;

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;
	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case I_LINK:
		/* Link a lower (driver) stream beneath this control stream. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (ppa->ppa_lower_wq != NULL) {
			error = EEXIST;
			break;
		}
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);

		lwq = ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot;
		ASSERT(lwq != NULL);

		ppa->ppa_lower_wq = lwq;
		lwq->q_ptr = RD(lwq)->q_ptr = (caddr_t)ppa;
		/*
		 * Unblock upper network streams which now feed this lower
		 * stream. We don't need to hold ppa_sib_lock here, since we
		 * are writer at the outer perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			nextsib->sps_npmode = NPMODE_PASS;
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}

		/*
		 * Also unblock (run once) our lower read-side queue.  This is
		 * where packets received while doing the I_LINK may be
		 * languishing; see sppp_lrsrv.
		 */
		qenable(RD(lwq));

		/*
		 * Send useful information down to the modules which are now
		 * linked below this driver (for this particular ppa). Only
		 * do this if we are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa)) {
			(void) putctl8(lwq, M_CTL, PPPCTL_UNIT,
			    ppa->ppa_ppa_id);
			(void) putctl4(lwq, M_CTL, PPPCTL_MRU, ppa->ppa_mru);
			(void) putctl4(lwq, M_CTL, PPPCTL_MTU, ppa->ppa_mtu);
		}

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_LINK lwq=0x%p sps=0x%p flags=0x%b ppa=0x%p "
			    "flags=0x%b\n", sps->sps_mn_id,
			    (void *)ppa->ppa_lower_wq, (void *)sps,
			    sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		error = 0;		/* return success */
		break;
	case I_UNLINK:
		/* Detach the lower stream from this ppa. */
		ASSERT(IS_SPS_CONTROL(sps));
		ASSERT(ppa != NULL);
		lwq = ppa->ppa_lower_wq;
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);
		ASSERT(lwq == ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_UNLINK lwq=0x%p sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id,
			    (void *)lwq, (void *)sps, sps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * While accessing the outer perimeter exclusively, we
		 * disassociate our ppa's lower_wq from the lower stream linked
		 * beneath us, and we also disassociate our control stream from
		 * the q_ptr of the lower stream.
		 */
		lwq->q_ptr = RD(lwq)->q_ptr = NULL;
		ppa->ppa_lower_wq = NULL;
		/*
		 * Unblock streams which now feed back up the control stream,
		 * and acknowledge the request. We don't need to hold
		 * ppa_sib_lock here, since we are writer at the outer
		 * perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		error = 0;		/* return success */
		break;
	case PPPIO_NEWPPA:
		/*
		 * Do sanity check to ensure that we don't accept PPPIO_NEWPPA
		 * on a stream which DLPI is used (since certain DLPI messages
		 * will cause state transition reflected in sps_dlstate,
		 * changing it from its default DL_UNATTACHED value). In other
		 * words, we won't allow a network/snoop stream to become
		 * a control stream.
		 */
		if (iop->ioc_cr == NULL ||
		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
			error = EPERM;
			break;
		} else if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
		    (ppa != NULL) || (sps->sps_dlstate != DL_UNATTACHED)) {
			break;		/* return EINVAL */
		}
		/* Get requested unit number (if any) */
		if (iop->ioc_count == sizeof (uint32_t) && mp->b_cont != NULL)
			ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
		else
			ppa_id = 0;
		/* Get mblk to use for response message */
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;		/* chain our response mblk */
		/*
		 * Walk the global ppa list and determine the lowest
		 * available ppa_id number to be used.
		 */
		if (ppa_id == (uint32_t)-1)
			ppa_id = 0;
		zoneid = crgetzoneid(iop->ioc_cr);
		for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
			if (ppa_id == (uint32_t)-2) {
				/* -2 requests reuse of an orphaned ppa. */
				if (ppa->ppa_ctl == NULL &&
				    ppa->ppa_zoneid == zoneid)
					break;
			} else {
				if (ppa_id < ppa->ppa_ppa_id)
					break;
				if (ppa_id == ppa->ppa_ppa_id)
					++ppa_id;
			}
		}
		if (ppa_id == (uint32_t)-2) {
			if (ppa == NULL) {
				error = ENXIO;
				break;
			}
			/* Clear timestamp and lastmod flags */
			ppa->ppa_flags = 0;
		} else {
			ppa = sppp_create_ppa(ppa_id, zoneid);
			if (ppa == NULL) {
				error = ENOMEM;
				break;
			}
		}

		sps->sps_ppa = ppa;		/* chain the ppa structure */
		sps->sps_npmode = NPMODE_PASS;	/* network packets may travel */
		sps->sps_flags |= SPS_CONTROL;	/* this is the control stream */

		ppa->ppa_refcnt++;		/* new PPA reference */
		ppa->ppa_ctl = sps;		/* back ptr to upper stream */
		/*
		 * Return the newly created ppa_id to the requestor and
		 * acnowledge the request.
		 */
		*(uint32_t *)nmp->b_wptr = ppa->ppa_ppa_id;
		nmp->b_wptr += sizeof (uint32_t);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: PPPIO_NEWPPA ppa_id=%d sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, ppa_id,
			    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		count = msgsize(nmp);
		error = 0;
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}

/*
 * sppp_send()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Called by sppp_uwput to handle M_DATA message type.  Returns
 *    queue_t for putnext, or NULL to mean that the packet was
 *    handled internally.
 */
/*
 * Handle an M_DATA message coming down an upper write queue.
 *
 * Returns the queue_t the caller should putnext() the message to, or
 * NULL when the message was consumed internally (dropped, errored, or
 * queued on q for later service by sppp_uwsrv).
 */
static queue_t *
sppp_send(queue_t *q, mblk_t **mpp, spppstr_t *sps)
{
	mblk_t	*mp;
	sppa_t	*ppa;
	int	is_promisc;
	int	msize;
	int	error = 0;
	queue_t	*nextq;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);
	ASSERT(q->q_ptr == sps);
	/*
	 * We only let M_DATA through if the sender is either the control
	 * stream (for PPP control packets) or one of the network streams
	 * (for IP packets) in IP fastpath mode. If this stream is not attached
	 * to any ppas, then discard data coming down through this stream.
	 */
	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		ASSERT(!IS_SPS_CONTROL(sps));
		error = ENOLINK;
	} else if (!IS_SPS_CONTROL(sps) && !IS_SPS_FASTPATH(sps)) {
		error = EPROTO;
	}
	if (error != 0) {
		merror(q, mp, error);
		return (NULL);
	}
	msize = msgdsize(mp);
	if (msize > (ppa->ppa_mtu + PPP_HDRLEN)) {
		/* Log, and send it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_otoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	} else if (msize < PPP_HDRLEN) {
		/*
		 * Log, and send it anyway. We log it because we get things
		 * in M_DATA form here, which tells us that the sender is
		 * either IP in fastpath transmission mode, or pppd. In both
		 * cases, they are currently expected to send the 4-bytes
		 * PPP header in front of any possible payloads.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_orunts++;
		mutex_exit(&ppa->ppa_sta_lock);
	}

	if (IS_SPS_KDEBUG(sps)) {
		SPDEBUG(PPP_DRV_NAME
		    "/%d: M_DATA send (%d bytes) sps=0x%p flags=0x%b "
		    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
		    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
		    (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up
	 * for each promiscuous stream that we recognize. Make sure that
	 * for fastpath, we skip the PPP header in the M_DATA mblk. We skip
	 * the control stream as we obviously never allow the control stream
	 * to become promiscous and bind to PPP_ALLSAP.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	/* Use the local ppa (== sps->sps_ppa, cached above) consistently. */
	is_promisc = ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp, sps->sps_sap, B_TRUE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.  PPP control (negotiation) messages
	 * are never considered link activity; only data is activity.
	 */
	if (!IS_SPS_CONTROL(sps) && IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lasttx = gethrtime();
	}
	/*
	 * If there's already a message in the write-side service queue,
	 * then queue this message there as well, otherwise, try to send
	 * it down to the module immediately below us.
	 */
	if (q->q_first != NULL ||
	    (nextq = sppp_outpkt(q, mpp, msize, sps)) == NULL) {
		mp = *mpp;
		if (mp != NULL && putq(q, mp) == 0) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_oqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
		}
		return (NULL);
	}
	return (nextq);
}

/*
 * sppp_outpkt()
 *
 * MT-Perimeters:
 *    shared inner, shared outer (if called from sppp_wput, sppp_dlunitdatareq).
 *    exclusive inner, shared outer (if called from sppp_wsrv).
 *
 * Description:
 *    Called from 1) sppp_uwput when processing a M_DATA fastpath message,
 *    or 2) sppp_uwsrv when processing the upper write-side service queue.
 *    For both cases, it prepares to send the data to the module below
 *    this driver if there is a lower stream linked underneath. If none, then
 *    the data will be sent upstream via the control channel to pppd.
 *
 * Returns:
 *	Non-NULL queue_t if message should be sent now, otherwise
 *	if *mpp == NULL, then message was freed, otherwise put *mpp
 *	(back) on the queue.  (Does not do putq/putbq, since it's
 *	called both from srv and put procedures.)
 */
static queue_t *
sppp_outpkt(queue_t *q, mblk_t **mpp, int msize, spppstr_t *sps)
{
	mblk_t		*mp;
	sppa_t		*ppa;
	enum NPmode	npmode;
	mblk_t		*mpnew;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);

	ppa = sps->sps_ppa;
	npmode = sps->sps_npmode;

	if (npmode == NPMODE_QUEUE) {
		/* Caller requeues *mpp; see function contract above. */
		ASSERT(!IS_SPS_CONTROL(sps));
		return (NULL);	/* queue it for later */
	} else if (ppa == NULL || ppa->ppa_ctl == NULL ||
	    npmode == NPMODE_DROP || npmode == NPMODE_ERROR) {
		/*
		 * This can not be the control stream, as it must always have
		 * a valid ppa, and its npmode must always be NPMODE_PASS.
		 */
		ASSERT(!IS_SPS_CONTROL(sps));
		if (npmode == NPMODE_DROP) {
			freemsg(mp);
		} else {
			/*
			 * If we no longer have the control stream, or if the
			 * mode is set to NPMODE_ERROR, then we need to tell IP
			 * that the interface need to be marked as down. In
			 * other words, we tell IP to be quiescent.
			 */
			merror(q, mp, EPROTO);
		}
		*mpp = NULL;
		return (NULL);	/* don't queue it */
	}
	/*
	 * Do we have a driver stream linked underneath ? If not, we need to
	 * notify pppd that the link needs to be brought up and configure
	 * this upper stream to drop subsequent outgoing packets. This is
	 * for demand-dialing, in which case pppd has done the IP plumbing
	 * but hasn't linked the driver stream underneath us. Therefore, when
	 * a packet is sent down the IP interface, a notification message
	 * will be sent up the control stream to pppd in order for it to
	 * establish the physical link. The driver stream is then expected
	 * to be linked underneath after physical link establishment is done.
	 */
	if (ppa->ppa_lower_wq == NULL) {
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(ppa->ppa_ctl->sps_rq != NULL);

		*mpp = NULL;
		mpnew = create_lsmsg(PPP_LINKSTAT_NEEDUP);
		if (mpnew == NULL) {
			/* Couldn't build the NEEDUP message; drop the data. */
			freemsg(mp);
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);	/* don't queue it */
		}
		/* Include the data in the message for logging. */
		mpnew->b_cont = mp;
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_lsneedup++;
		mutex_exit(&ppa->ppa_sta_lock);
		/*
		 * We need to set the mode to NPMODE_DROP, but should only
		 * do so when this stream is not the control stream.
		 */
		if (!IS_SPS_CONTROL(sps)) {
			sps->sps_npmode = NPMODE_DROP;
		}
		putnext(ppa->ppa_ctl->sps_rq, mpnew);
		return (NULL);	/* don't queue it */
	}
	/*
	 * If so, then try to send it down. The lower queue is only ever
	 * detached while holding an exclusive lock on the whole driver,
	 * so we can be confident that the lower queue is still there.
	 */
	if (bcanputnext(ppa->ppa_lower_wq, mp->b_band)) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_stats.p.ppp_opackets++;
		if (IS_SPS_CONTROL(sps)) {
			ppa->ppa_opkt_ctl++;
		}
		ppa->ppa_stats.p.ppp_obytes += msize;
		mutex_exit(&ppa->ppa_sta_lock);
		return (ppa->ppa_lower_wq);	/* don't queue it */
	}
	return (NULL);	/* queue it for later */
}

/*
 * sppp_lwsrv()
 *
 * MT-Perimeters:
 *    exclusive inner, shared outer.
 *
 * Description:
 *    Lower write-side service procedure. No messages are ever placed on
 *    the write queue here, this just back-enables all upper write side
 *    service procedures.
 */
/*
 * Lower write-side service procedure: nothing is ever queued here; it
 * exists solely to back-enable every upper write-side queue (control
 * stream first, then each sibling network stream) that has work pending.
 */
int
sppp_lwsrv(queue_t *q)
{
	sppa_t		*ppa;
	spppstr_t	*sib;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ppa = (sppa_t *)q->q_ptr;
	ASSERT(ppa != NULL);

	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	sib = ppa->ppa_ctl;
	if (sib != NULL && WR(sib->sps_rq)->q_first != NULL)
		qenable(WR(sib->sps_rq));
	sib = ppa->ppa_streams;
	while (sib != NULL) {
		if (WR(sib->sps_rq)->q_first != NULL)
			qenable(WR(sib->sps_rq));
		sib = sib->sps_nextsib;
	}
	rw_exit(&ppa->ppa_sib_lock);
	return (0);
}

/*
 * sppp_lrput()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Lower read-side put procedure. Messages from below get here.
 *    Data messages are handled separately to limit stack usage
 *    going into IP.
 *
 *    Note that during I_UNLINK processing, it's possible for a downstream
 *    message to enable upstream data (due to pass_wput() removing the
 *    SQ_BLOCKED flag), and thus we must protect against a NULL sppa pointer.
 *    In this case, the only thing above us is passthru, and we might as well
 *    discard.
 */
/*
 * Lower read-side put procedure.  Non-data messages are handed to
 * sppp_recv_nondata; data messages go through sppp_recv, which selects
 * the upper read queue to deliver to.  A NULL q_ptr means we are in the
 * middle of I_UNLINK processing and the message is simply discarded.
 */
int
sppp_lrput(queue_t *q, mblk_t *mp)
{
	sppa_t		*ppa;
	spppstr_t	*ctlsps;
	queue_t		*destq;

	ppa = (sppa_t *)q->q_ptr;
	if (ppa == NULL) {
		freemsg(mp);
		return (0);
	}

	ctlsps = ppa->ppa_ctl;

	if (MTYPE(mp) != M_DATA) {
		sppp_recv_nondata(q, mp, ctlsps);
	} else if (ctlsps == NULL) {
		/* No control stream; nowhere sensible to deliver data. */
		freemsg(mp);
	} else {
		destq = sppp_recv(q, &mp, ctlsps);
		if (destq != NULL)
			putnext(destq, mp);
	}
	return (0);
}

/*
 * sppp_lrsrv()
 *
 * MT-Perimeters:
 *    exclusive inner, shared outer.
 *
 * Description:
 *    Lower read-side service procedure.  This is run once after the I_LINK
 *    occurs in order to clean up any packets that came in while we were
 *    transferring in the lower stream.  Otherwise, it's not used.
 */
/*
 * Lower read-side service procedure: drain any packets that were queued
 * while the lower stream was being linked, replaying each through
 * sppp_lrput.
 */
int
sppp_lrsrv(queue_t *q)
{
	mblk_t *mp;

	for (mp = getq(q); mp != NULL; mp = getq(q))
		(void) sppp_lrput(q, mp);
	return (0);
}

/*
 * sppp_recv_nondata()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    All received non-data messages come through here.
 */
/*
 * Handle every non-M_DATA message arriving from the lower stream:
 * M_CTL error indications, ioctl ACK/NAK matching, hangups, flushes,
 * and a pass-up-to-control default.
 *
 * Note the ctlsps parameter is immediately overwritten from
 * ppa->ppa_ctl; the passed-in value is never used.
 */
static void
sppp_recv_nondata(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
{
	sppa_t		*ppa;
	spppstr_t	*destsps;
	struct iocblk	*iop;

	ppa = (sppa_t *)q->q_ptr;
	ctlsps = ppa->ppa_ctl;

	switch (MTYPE(mp)) {
	case M_CTL:
		/* Error indications from below; update counters only. */
		mutex_enter(&ppa->ppa_sta_lock);
		if (*mp->b_rptr == PPPCTL_IERROR) {
			ppa->ppa_stats.p.ppp_ierrors++;
			ppa->ppa_ierr_low++;
			ppa->ppa_mctlsknown++;
		} else if (*mp->b_rptr == PPPCTL_OERROR) {
			ppa->ppa_stats.p.ppp_oerrors++;
			ppa->ppa_oerr_low++;
			ppa->ppa_mctlsknown++;
		} else {
			ppa->ppa_mctlsunknown++;
		}
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		break;
	case M_IOCTL:
		/* ioctls never originate from below. */
		miocnak(q, mp, 0, EINVAL);
		break;
	case M_IOCACK:
	case M_IOCNAK:
		iop = (struct iocblk *)mp->b_rptr;
		ASSERT(iop != NULL);
		/*
		 * Attempt to match up the response with the stream that the
		 * request came from. If ioc_id doesn't match the one that we
		 * recorded, then discard this message.
		 */
		rw_enter(&ppa->ppa_sib_lock, RW_READER);
		if ((destsps = ctlsps) == NULL ||
		    destsps->sps_ioc_id != iop->ioc_id) {
			destsps = ppa->ppa_streams;
			while (destsps != NULL) {
				if (destsps->sps_ioc_id == iop->ioc_id) {
					break;	/* found the upper stream */
				}
				destsps = destsps->sps_nextsib;
			}
		}
		rw_exit(&ppa->ppa_sib_lock);
		if (destsps == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_ioctlsfwderr++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			break;
		}
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_ioctlsfwdok++;

		/*
		 * Clear SPS_IOCQ and enable the lower write side queue,
		 * this would allow the upper stream service routine
		 * to start processing the queue for pending messages.
		 * sppp_lwsrv -> sppp_uwsrv.
		 */
		destsps->sps_flags &= ~SPS_IOCQ;
		mutex_exit(&ppa->ppa_sta_lock);
		qenable(WR(destsps->sps_rq));

		putnext(destsps->sps_rq, mp);
		break;
	case M_HANGUP:
		/*
		 * Free the original mblk_t. We don't really want to send
		 * a M_HANGUP message upstream, so we need to translate this
		 * message into something else.
		 */
		freemsg(mp);
		if (ctlsps == NULL)
			break;
		mp = create_lsmsg(PPP_LINKSTAT_HANGUP);
		if (mp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			break;
		}
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_lsdown++;
		mutex_exit(&ppa->ppa_sta_lock);
		putnext(ctlsps->sps_rq, mp);
		break;
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHR) {
			flushq(q, FLUSHDATA);
		}
		if (*mp->b_rptr & FLUSHW) {
			*mp->b_rptr &= ~FLUSHR;
			qreply(q, mp);
		} else {
			freemsg(mp);
		}
		break;
	default:
		/*
		 * BUG FIX: the previous condition was
		 *     (ctlsps != NULL && (queclass(mp) == QPCTL)) ||
		 *         canputnext(ctlsps->sps_rq)
		 * due to && binding tighter than ||, which dereferences a
		 * NULL ctlsps when no control stream is attached.  The
		 * intended logic is: deliver only when a control stream
		 * exists AND (the message is high priority OR the control
		 * stream is not flow-controlled).
		 */
		if (ctlsps != NULL &&
		    (queclass(mp) == QPCTL || canputnext(ctlsps->sps_rq))) {
			putnext(ctlsps->sps_rq, mp);
		} else {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
		}
		break;
	}
}

/*
 * sppp_recv()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Receive function called by sppp_lrput.  Finds the appropriate
 *    upper receive stream for the inbound packet and does accounting.
 *
 * Returns:
 *    The upper read queue onto which the caller should putnext *mpp,
 *    or NULL when the message has already been consumed here (freed,
 *    held on the control queue, or dropped).  When non-NULL is
 *    returned, *mpp may have been replaced (pulled up, or wrapped in
 *    a DLPI unitdata header), so the caller must re-read *mpp.
 */
static queue_t *
sppp_recv(queue_t *q, mblk_t **mpp, spppstr_t *ctlsps)
{
	mblk_t		*mp;
	int		len;		/* total data size of the message */
	sppa_t		*ppa;
	spppstr_t	*destsps;	/* matching upper network stream */
	mblk_t		*zmp;		/* pulled-up replacement message */
	uint32_t	npflagpos;	/* bit position within ppa_npflag */

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(ctlsps != NULL);
	ASSERT(IS_SPS_CONTROL(ctlsps));
	ppa = ctlsps->sps_ppa;
	ASSERT(ppa != NULL && ppa->ppa_ctl != NULL);

	/* Account for the received bytes before any validation. */
	len = msgdsize(mp);
	mutex_enter(&ppa->ppa_sta_lock);
	ppa->ppa_stats.p.ppp_ibytes += len;
	mutex_exit(&ppa->ppa_sta_lock);
	/*
	 * If the entire data size of the mblk is less than the length of the
	 * PPP header, then free it. We can't do much with such message anyway,
	 * since we can't really determine what the PPP protocol type is.
	 */
	if (len < PPP_HDRLEN) {
		/* Log, and free it */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_irunts++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	} else if (len > (ppa->ppa_mru + PPP_HDRLEN)) {
		/* Log, and accept it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_itoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	}
	/*
	 * We need at least be able to read the PPP protocol from the header,
	 * so if the first message block is too small, then we concatenate the
	 * rest of the following blocks into one message.
	 */
	if (MBLKL(mp) < PPP_HDRLEN) {
		zmp = msgpullup(mp, PPP_HDRLEN);
		freemsg(mp);	/* msgpullup copies; the original is dead */
		mp = zmp;
		if (mp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);
		}
		*mpp = mp;
	}
	/*
	 * Hold this packet on the control-stream read queue while the
	 * matching network-layer upper stream for this PPP protocol (sap)
	 * has not yet been plumbed and configured.  sppp_release_pkts()
	 * drains the held packets once the protocol comes up.
	 */
	npflagpos = sppp_ppp2np(PPP_PROTOCOL(mp->b_rptr));
	mutex_enter(&ppa->ppa_npmutex);
	if (npflagpos != 0 && (ppa->ppa_npflag & (1 << npflagpos))) {
		/*
		 * proto is currently blocked; Hold up to 4 packets
		 * in the kernel.
		 */
		if (ppa->ppa_holdpkts[npflagpos] > 3 ||
		    putq(ctlsps->sps_rq, mp) == 0)
			freemsg(mp);
		else
			ppa->ppa_holdpkts[npflagpos]++;
		mutex_exit(&ppa->ppa_npmutex);
		return (NULL);
	}
	mutex_exit(&ppa->ppa_npmutex);
	/*
	 * Try to find a matching network-layer upper stream for the specified
	 * PPP protocol (sap), and if none is found, send this frame up the
	 * control stream.
	 */
	destsps = sppp_inpkt(q, mp, ctlsps);
	if (destsps == NULL) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_ipkt_ctl++;
		mutex_exit(&ppa->ppa_sta_lock);
		if (canputnext(ctlsps->sps_rq)) {
			if (IS_SPS_KDEBUG(ctlsps)) {
				SPDEBUG(PPP_DRV_NAME
				    "/%d: M_DATA recv (%d bytes) sps=0x%p "
				    "flags=0x%b ppa=0x%p flags=0x%b\n",
				    ctlsps->sps_mn_id, len, (void *)ctlsps,
				    ctlsps->sps_flags, SPS_FLAGS_STR,
				    (void *)ppa, ppa->ppa_flags,
				    PPA_FLAGS_STR);
			}
			return (ctlsps->sps_rq);
		} else {
			/* Control stream is flow-controlled; drop. */
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			return (NULL);
		}
	}
	if (canputnext(destsps->sps_rq)) {
		if (IS_SPS_KDEBUG(destsps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: M_DATA recv (%d bytes) sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", destsps->sps_mn_id, len,
			    (void *)destsps, destsps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * If fastpath is enabled on the network-layer stream, then
		 * make sure we skip over the PPP header, otherwise, we wrap
		 * the message in a DLPI message.
		 */
		if (IS_SPS_FASTPATH(destsps)) {
			mp->b_rptr += PPP_HDRLEN;
			return (destsps->sps_rq);
		} else {
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp != NULL) {
				*mpp = mp;
				return (destsps->sps_rq);
			} else {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				return (NULL);
			}
		}
	} else {
		/* Destination stream is flow-controlled; drop. */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_iqdropped++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	}
}

/*
 * sppp_inpkt()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Locate the upper network stream that should receive this inbound
 *    packet; called from sppp_recv.
 *
 * Returns:
 *    ptr to destination upper network stream, or NULL for control stream.
 */
/* ARGSUSED */
static spppstr_t *
sppp_inpkt(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
{
	spppstr_t	*dest = NULL;
	sppa_t		*ppa;
	uint16_t	proto;
	int		nprom;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(IS_SPS_CONTROL(ctlsps));
	ppa = ctlsps->sps_ppa;
	ASSERT(ppa != NULL);
	/*
	 * From RFC 1661 (Section 2): the Protocol field is one or two
	 * octets, transmitted most significant octet first, and identifies
	 * the datagram in the Information field.  Its structure follows the
	 * ISO 3309 address-field extension mechanism: all Protocols MUST be
	 * odd (LSB of the least significant octet is "1") and the LSB of
	 * the most significant octet MUST be "0"; frames violating this are
	 * treated as having an unrecognized Protocol.
	 *
	 * Values in "0***"-"3***" identify network-layer protocols, values
	 * in "8***"-"b***" identify their associated Network Control
	 * Protocols (NCPs), values in "4***"-"7***" are low-volume
	 * protocols with no NCP, and values in "c***"-"f***" are
	 * link-layer Control Protocols (such as LCP).
	 */
	proto = PPP_PROTOCOL(mp->b_rptr);
	mutex_enter(&ppa->ppa_sta_lock);
	ppa->ppa_stats.p.ppp_ipackets++;
	mutex_exit(&ppa->ppa_sta_lock);
	/*
	 * Only network-layer protocols (<= 0x7fff) can map onto an upper
	 * network stream; for those, try the per-ppa stream cache for
	 * PPP_IP (0x0021) or PPP_IPV6 (0x0057).  Anything else -- or a
	 * cache miss -- leaves dest NULL, so the frame goes up the
	 * control stream where it can be rejected.
	 */
	if (proto <= 0x7fff) {
		if (proto == PPP_IP) {
			dest = ppa->ppa_ip_cache;
		} else if (proto == PPP_IPV6) {
			dest = ppa->ppa_ip6_cache;
		}
	}
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.  PPP control (negotiation) messages
	 * are never considered link activity; only data is activity.
	 */
	if (dest != NULL && IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lastrx = gethrtime();
	}
	/*
	 * If any promiscuous stream(s) exist, send the data up for each
	 * promiscuous stream that we recognize.  The control stream is
	 * skipped, since it is never allowed to become promiscuous and
	 * bind to PPP_ALLSAP.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	nprom = ppa->ppa_promicnt;
	if (nprom) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp, proto, B_TRUE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	return (dest);
}

/*
 * sppp_kstat_update()
 *
 * Description:
 *    Snapshot the per-ppa interface statistics into the kstat data
 *    area.  Writes from userland are refused with EACCES.
 */
static int
sppp_kstat_update(kstat_t *ksp, int rw)
{
	sppa_t			*ppa;
	sppp_kstats_t		*kp;
	struct pppstat64	*stats;

	/* These counters are read-only from userland. */
	if (rw == KSTAT_WRITE)
		return (EACCES);

	ppa = (sppa_t *)ksp->ks_private;
	ASSERT(ppa != NULL);

	kp = (sppp_kstats_t *)ksp->ks_data;
	stats = &ppa->ppa_stats.p;

	/* Copy everything under the stats lock for a consistent snapshot. */
	mutex_enter(&ppa->ppa_sta_lock);
	kp->allocbfail.value.ui32	= ppa->ppa_allocbfail;
	kp->mctlsfwd.value.ui32		= ppa->ppa_mctlsfwd;
	kp->mctlsfwderr.value.ui32	= ppa->ppa_mctlsfwderr;
	kp->rbytes.value.ui32		= stats->ppp_ibytes;
	kp->rbytes64.value.ui64		= stats->ppp_ibytes;
	kp->ierrors.value.ui32		= stats->ppp_ierrors;
	kp->ierrors_lower.value.ui32	= ppa->ppa_ierr_low;
	kp->ioctlsfwd.value.ui32	= ppa->ppa_ioctlsfwd;
	kp->ioctlsfwdok.value.ui32	= ppa->ppa_ioctlsfwdok;
	kp->ioctlsfwderr.value.ui32	= ppa->ppa_ioctlsfwderr;
	kp->ipackets.value.ui32		= stats->ppp_ipackets;
	kp->ipackets64.value.ui64	= stats->ppp_ipackets;
	kp->ipackets_ctl.value.ui32	= ppa->ppa_ipkt_ctl;
	kp->iqdropped.value.ui32	= ppa->ppa_iqdropped;
	kp->irunts.value.ui32		= ppa->ppa_irunts;
	kp->itoolongs.value.ui32	= ppa->ppa_itoolongs;
	kp->lsneedup.value.ui32		= ppa->ppa_lsneedup;
	kp->lsdown.value.ui32		= ppa->ppa_lsdown;
	kp->mctlsknown.value.ui32	= ppa->ppa_mctlsknown;
	kp->mctlsunknown.value.ui32	= ppa->ppa_mctlsunknown;
	kp->obytes.value.ui32		= stats->ppp_obytes;
	kp->obytes64.value.ui64		= stats->ppp_obytes;
	kp->oerrors.value.ui32		= stats->ppp_oerrors;
	kp->oerrors_lower.value.ui32	= ppa->ppa_oerr_low;
	kp->opackets.value.ui32		= stats->ppp_opackets;
	kp->opackets64.value.ui64	= stats->ppp_opackets;
	kp->opackets_ctl.value.ui32	= ppa->ppa_opkt_ctl;
	kp->oqdropped.value.ui32	= ppa->ppa_oqdropped;
	kp->otoolongs.value.ui32	= ppa->ppa_otoolongs;
	kp->orunts.value.ui32		= ppa->ppa_orunts;
	mutex_exit(&ppa->ppa_sta_lock);

	return (0);
}

/*
 * sppp_release_pkts()
 *
 * Description:
 *    Turn off proto in ppa_npflag to indicate that the corresponding
 *    network protocol has been plumbed.  Release proto packets that
 *    were being held on the control stream's read queue (by
 *    sppp_recv) in anticipation of this event, sending each one up
 *    its destination network stream just as sppp_recv would have.
 */
static void
sppp_release_pkts(sppa_t *ppa, uint16_t proto)
{
	uint32_t npflagpos = sppp_ppp2np(proto);
	int count;		/* held packets of this proto to drain */
	mblk_t *mp;
	uint16_t mp_proto;
	queue_t *q;
	spppstr_t *destsps;

	ASSERT(ppa != NULL);

	/*
	 * Nothing to do unless this is a protocol we track (npflagpos != 0)
	 * and it is currently marked blocked.
	 * NOTE(review): ppa_npflag is read here without ppa_npmutex and is
	 * not re-checked once the lock is taken below -- presumably callers
	 * serialize this path; verify against the callers.
	 */
	if (npflagpos == 0 || (ppa->ppa_npflag & (1 << npflagpos)) == 0)
		return;

	/* Unblock the protocol and claim the held-packet count. */
	mutex_enter(&ppa->ppa_npmutex);
	ppa->ppa_npflag &= ~(1 << npflagpos);
	count = ppa->ppa_holdpkts[npflagpos];
	ppa->ppa_holdpkts[npflagpos] = 0;
	mutex_exit(&ppa->ppa_npmutex);

	q = ppa->ppa_ctl->sps_rq;

	/*
	 * Drain exactly 'count' packets of this protocol from the control
	 * queue.  Held packets of other (still-blocked) protocols may be
	 * interleaved; those are requeued at the tail and skipped.
	 */
	while (count > 0) {
		mp = getq(q);
		ASSERT(mp != NULL);

		mp_proto = PPP_PROTOCOL(mp->b_rptr);
		if (mp_proto !=  proto) {
			/* Not ours -- put it back and keep scanning. */
			(void) putq(q, mp);
			continue;
		}
		count--;
		/* Same cache lookup sppp_inpkt does for these protocols. */
		destsps = NULL;
		if (mp_proto == PPP_IP) {
			destsps = ppa->ppa_ip_cache;
		} else if (mp_proto == PPP_IPV6) {
			destsps = ppa->ppa_ip6_cache;
		}
		ASSERT(destsps != NULL);

		/*
		 * As in sppp_recv: fastpath streams get the raw payload
		 * (PPP header stripped); otherwise wrap it in a DLPI
		 * unitdata indication.
		 */
		if (IS_SPS_FASTPATH(destsps)) {
			mp->b_rptr += PPP_HDRLEN;
		} else {
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp == NULL) {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				continue;
			}
		}

		if (canputnext(destsps->sps_rq)) {
			putnext(destsps->sps_rq, mp);
		} else {
			/* Destination is flow-controlled; drop. */
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			continue;
		}
	}
}